Compare commits

155 Commits

| Author | SHA1 | Date |
|---|---|---|
| | d4aaa03301 | |
| | d68dba20c1 | |
| | 8116538556 | |
| | 3822048e6b | |
| | e05270e598 | |
| | 2cca5c7b9b | |
| | 1f8a323665 | |
| | e1ae65a48d | |
| | e415df936d | |
| | 848737e327 | |
| | 7796ee7342 | |
| | 60ebb33b50 | |
| | b2ac70f5f6 | |
| | 52b263ac66 | |
| | 15dea31154 | |
| | ab616f9498 | |
| | f0b406c37a | |
| | 3d4b07e86e | |
| | 02690b8365 | |
| | f0c983093a | |
| | cd6ca368d5 | |
| | 1aefde8ed4 | |
| | b30993eef0 | |
| | cca88b3eea | |
| | bf6f49e0e9 | |
| | 777c131fbe | |
| | 45f86226cf | |
| | a340f0c2f1 | |
| | 5c87849f67 | |
| | 74ab386d87 | |
| | 10c73119cb | |
| | 7190529693 | |
| | e249089ec8 | |
| | 5660c73d9a | |
| | 52307b64cc | |
| | 64e607db74 | |
| | a926e627d3 | |
| | 5cad620894 | |
| | d054d8ac2f | |
| | e53ed3411c | |
| | aedbc2ceb0 | |
| | cafb074620 | |
| | 0bf6f5d1a4 | |
| | 7c9d691a04 | |
| | 4d6ad26d87 | |
| | 18a15df503 | |
| | b4bcae43b0 | |
| | 0c8073eedd | |
| | 5a81dbe90b | |
| | 9c3c0930af | |
| | 731b5fb370 | |
| | 7e337bfe13 | |
| | 511117fe4e | |
| | 997e14318d | |
| | 4db4a5b1ce | |
| | 3c5abded83 | |
| | 5fbc681ee5 | |
| | 612fa07845 | |
| | 38abfaf4f3 | |
| | cad36b3a19 | |
| | fb3be39b50 | |
| | 7cf69832a8 | |
| | 99fdab86bb | |
| | afd9e9fe3c | |
| | cec3407126 | |
| | 23fa1ba3b7 | |
| | 4711bd86af | |
| | a81a67a7bc | |
| | b31732332c | |
| | d321adf22c | |
| | b4ec6d533e | |
| | a0a347eda4 | |
| | 158569dd24 | |
| | 4337246d09 | |
| | cb6d583225 | |
| | cdc6e30425 | |
| | feceeacfb7 | |
| | 2a9ae049af | |
| | 40c9e62ebd | |
| | 78505d46fc | |
| | 04da438d12 | |
| | 60c9717760 | |
| | 19e233cc46 | |
| | 8bc771e619 | |
| | 8b6758e79e | |
| | 4bee58a863 | |
| | ee5cf8f642 | |
| | 317b9e0557 | |
| | d1bfcb4ce9 | |
| | 5823457de9 | |
| | 9c828e963e | |
| | 2fa7c40f6b | |
| | d899d9982f | |
| | 7f5b517f37 | |
| | fe178726e8 | |
| | 976fe3ab11 | |
| | 4952a60c10 | |
| | eda6ba65cb | |
| | 44d58fee37 | |
| | 7819702b67 | |
| | 3959bb9330 | |
| | 7b1cdbe1f1 | |
| | de812d2f18 | |
| | b06c210dda | |
| | 57caa35900 | |
| | 365f099b16 | |
| | 1d63a078f0 | |
| | e963bd411f | |
| | 683930fbcc | |
| | b8f69fbffa | |
| | 3811569f29 | |
| | 44362c6a00 | |
| | 604a9451da | |
| | 086bc6d226 | |
| | f8ae582fa3 | |
| | c11f533637 | |
| | 5bc3a0a6dd | |
| | 3ce1e2ca42 | |
| | 88b27ea0bc | |
| | 414cdc6336 | |
| | f2315f2804 | |
| | 8c6f9771c9 | |
| | 23efe2cb04 | |
| | 7e46bff143 | |
| | b2c1f26ce9 | |
| | 17e403c3dc | |
| | e96000b379 | |
| | 233fcdf3a1 | |
| | 5d3b05372b | |
| | f3fd5f46c4 | |
| | a15d556f95 | |
| | a2ba39026e | |
| | f60bad399b | |
| | 1600f0d591 | |
| | ff0cce2fc8 | |
| | 87da08ab39 | |
| | 494b8cd95a | |
| | 620c8fa6a8 | |
| | 1c059171db | |
| | cfa7dba465 | |
| | 9f6cecacd4 | |
| | 8aec90c152 | |
| | 900ab7abcc | |
| | 7950aa5ab3 | |
| | c21159037a | |
| | e2c7d9000a | |
| | 63ceb47d82 | |
| | 75c0c1f5a0 | |
| | 37b93913cc | |
| | 7b1a1f197e | |
| | 657d8867bf | |
| | 2a0ef8ec19 | |
| | f3b1b67607 | |
| | 938d17c0cf | |
| | ad335784fe | |
@@ -0,0 +1,2 @@
[build]
rustflags = ["--cfg", "tokio_unstable"]
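The `tokio_unstable` cfg above is what tokio's task-instrumentation APIs are gated behind, and the release Dockerfile later in this diff installs `tokio-console`. As a hedged illustration only (the actual wiring is not shown in this diff, and the `console-subscriber` dependency is an assumption), a daemon built with these rustflags could expose its runtime like this:

```rust
// Hypothetical sketch, not code from this diff: assumes a `console-subscriber`
// dependency and a build with RUSTFLAGS="--cfg tokio_unstable" (which the new
// .cargo/config.toml provides).
#[tokio::main]
async fn main() {
    // Registers the tracing layer that the `tokio-console` CLI connects to.
    console_subscriber::init();

    // ... start dfdaemon's servers and download loops as usual ...
}
```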
@@ -0,0 +1,16 @@
# Set to true to add reviewers to pull requests
addReviewers: true

# Set to true to add assignees to pull requests
addAssignees: author

# A list of reviewers to be added to pull requests (GitHub user name)
reviewers:
  - gaius-qi
  - yxxhero
  - chlins
  - CormickKneey
  - xujihui1985

# A number of reviewers added to the pull request
numberOfReviewers: 3
@@ -0,0 +1,11 @@
name: "Auto Assign"

on:
  pull_request_target:
    types: [opened, reopened, ready_for_review]

jobs:
  add-assignee:
    runs-on: ubuntu-latest
    steps:
      - uses: kentaro-m/auto-assign-action@9f6dbe84a80c6e7639d1b9698048b201052a2a94
@@ -26,6 +26,8 @@ jobs:

      - name: Install Protoc
        uses: arduino/setup-protoc@v2
        with:
          repo-token: ${{ secrets.GH_TOKEN }}

      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@stable

@@ -55,6 +57,8 @@ jobs:

      - name: Install Protoc
        uses: arduino/setup-protoc@v2
        with:
          repo-token: ${{ secrets.GH_TOKEN }}

      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@stable
@ -86,7 +86,7 @@ jobs:
|
|||
cache-to: type=local,dest=/tmp/.buildx-cache-new
|
||||
|
||||
- name: Run Trivy vulnerability scanner in tarball mode
|
||||
uses: aquasecurity/trivy-action@6c175e9c4083a92bbca2f9724c8a5e33bc2d97a5
|
||||
uses: aquasecurity/trivy-action@dc5a429b52fcf669ce959baa2c2dd26090d2a6c4
|
||||
with:
|
||||
image-ref: dragonflyoss/client:${{ steps.get_version.outputs.VERSION }}
|
||||
severity: 'CRITICAL,HIGH'
|
||||
|
@ -94,7 +94,7 @@ jobs:
|
|||
output: 'trivy-results.sarif'
|
||||
|
||||
- name: Upload Trivy scan results to GitHub Security tab
|
||||
uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47
|
||||
uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed
|
||||
with:
|
||||
sarif_file: 'trivy-results.sarif'
|
||||
|
||||
|
@ -181,7 +181,7 @@ jobs:
|
|||
cache-to: type=local,dest=/tmp/.buildx-cache-new
|
||||
|
||||
- name: Run Trivy vulnerability scanner in tarball mode
|
||||
uses: aquasecurity/trivy-action@6c175e9c4083a92bbca2f9724c8a5e33bc2d97a5
|
||||
uses: aquasecurity/trivy-action@dc5a429b52fcf669ce959baa2c2dd26090d2a6c4
|
||||
with:
|
||||
image-ref: dragonflyoss/client:${{ steps.get_version.outputs.VERSION }}-debug
|
||||
severity: 'CRITICAL,HIGH'
|
||||
|
@ -189,7 +189,7 @@ jobs:
|
|||
output: 'trivy-results.sarif'
|
||||
|
||||
- name: Upload Trivy scan results to GitHub Security tab
|
||||
uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47
|
||||
uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed
|
||||
with:
|
||||
sarif_file: 'trivy-results.sarif'
|
||||
|
||||
|
@ -276,7 +276,7 @@ jobs:
|
|||
cache-to: type=local,dest=/tmp/.buildx-cache-new
|
||||
|
||||
- name: Run Trivy vulnerability scanner in tarball mode
|
||||
uses: aquasecurity/trivy-action@6c175e9c4083a92bbca2f9724c8a5e33bc2d97a5
|
||||
uses: aquasecurity/trivy-action@dc5a429b52fcf669ce959baa2c2dd26090d2a6c4
|
||||
with:
|
||||
image-ref: dragonflyoss/dfinit:${{ steps.get_version.outputs.VERSION }}
|
||||
severity: 'CRITICAL,HIGH'
|
||||
|
@ -284,7 +284,7 @@ jobs:
|
|||
output: 'trivy-results.sarif'
|
||||
|
||||
- name: Upload Trivy scan results to GitHub Security tab
|
||||
uses: github/codeql-action/upload-sarif@45775bd8235c68ba998cffa5171334d58593da47
|
||||
uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed
|
||||
with:
|
||||
sarif_file: 'trivy-results.sarif'
|
||||
|
||||
|
|
|
@@ -15,18 +15,21 @@ jobs:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Rust cache
        uses: Swatinem/rust-cache@v2
        with:
          cache-on-failure: true

      - name: Install Protoc
        uses: arduino/setup-protoc@v2
        with:
          repo-token: ${{ secrets.GH_TOKEN }}

      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@stable
        with:
          components: rustfmt, clippy
          toolchain: 1.85.0

      - name: Set up Clang
        uses: egor-tensin/setup-clang@v1
@@ -0,0 +1,20 @@
name: PR Label

on:
  pull_request:
    types: [opened, labeled, unlabeled, synchronize]

permissions:
  contents: read

jobs:
  classify:
    name: Classify PR
    runs-on: ubuntu-latest
    steps:
      - name: PR impact specified
        uses: mheap/github-action-required-labels@8afbe8ae6ab7647d0c9f0cfa7c2f939650d22509 # v5.5
        with:
          mode: exactly
          count: 1
          labels: 'bug, enhancement, documentation, dependencies'
@@ -52,7 +52,7 @@ jobs:
          target: ${{ matrix.target }}

      - name: Install cargo-deb
        uses: taiki-e/cache-cargo-install-action@44857e0ff6d186da8fe49f9ac9eedae5bbc37a93
        uses: taiki-e/cache-cargo-install-action@b33c63d3b3c85540f4eba8a4f71a5cc0ce030855
        with:
          # Don't upgrade cargo-deb, refer to https://github.com/kornelski/cargo-deb/issues/169.
          tool: cargo-deb@2.10.0

@@ -96,7 +96,6 @@ jobs:
          mkdir -p "$dirname"
          mv "target/${{ matrix.target }}/release/dfget" "$dirname"
          mv "target/${{ matrix.target }}/release/dfdaemon" "$dirname"
          mv "target/${{ matrix.target }}/release/dfstore" "$dirname"
          mv "target/${{ matrix.target }}/release/dfcache" "$dirname"
          mv "target/${{ matrix.target }}/release/dfinit" "$dirname"
          mv CONTRIBUTING.md LICENSE README.md "$dirname"

@@ -120,7 +119,7 @@ jobs:
      contents: write
    steps:
      - name: Download Release Artifacts
        uses: actions/download-artifact@v4
        uses: actions/download-artifact@v5
        with:
          path: releases
          pattern: release-*
@@ -0,0 +1,31 @@
name: Close stale issues and PRs

on:
  workflow_dispatch:
  schedule:
    - cron: "0 0 * * *"

permissions:
  issues: write
  pull-requests: write

jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
        id: stale
        with:
          delete-branch: true
          days-before-close: 7
          days-before-stale: 90
          days-before-pr-close: 7
          days-before-pr-stale: 120
          stale-issue-label: "stale"
          exempt-issue-labels: bug,wip,on-hold
          exempt-pr-labels: bug,wip,on-hold
          exempt-all-milestones: true
          stale-issue-message: 'This issue is stale because it has been open 90 days with no activity.'
          close-issue-message: 'This issue was closed because it has been stalled for 7 days with no activity.'
          stale-pr-message: 'This PR is stale because it has been open 120 days with no activity.'
          close-pr-message: 'This PR was closed because it has been stalled for 7 days with no activity.'
File diff suppressed because it is too large.

47  Cargo.toml
@@ -12,7 +12,7 @@ members = [
]

[workspace.package]
version = "0.2.23"
version = "1.0.10"
authors = ["The Dragonfly Developers"]
homepage = "https://d7y.io/"
repository = "https://github.com/dragonflyoss/client.git"

@@ -22,15 +22,15 @@ readme = "README.md"
edition = "2021"

[workspace.dependencies]
dragonfly-client = { path = "dragonfly-client", version = "0.2.23" }
dragonfly-client-core = { path = "dragonfly-client-core", version = "0.2.23" }
dragonfly-client-config = { path = "dragonfly-client-config", version = "0.2.23" }
dragonfly-client-storage = { path = "dragonfly-client-storage", version = "0.2.23" }
dragonfly-client-backend = { path = "dragonfly-client-backend", version = "0.2.23" }
dragonfly-client-util = { path = "dragonfly-client-util", version = "0.2.23" }
dragonfly-client-init = { path = "dragonfly-client-init", version = "0.2.23" }
dragonfly-api = "=2.1.36"
thiserror = "1.0"
dragonfly-client = { path = "dragonfly-client", version = "1.0.10" }
dragonfly-client-core = { path = "dragonfly-client-core", version = "1.0.10" }
dragonfly-client-config = { path = "dragonfly-client-config", version = "1.0.10" }
dragonfly-client-storage = { path = "dragonfly-client-storage", version = "1.0.10" }
dragonfly-client-backend = { path = "dragonfly-client-backend", version = "1.0.10" }
dragonfly-client-util = { path = "dragonfly-client-util", version = "1.0.10" }
dragonfly-client-init = { path = "dragonfly-client-init", version = "1.0.10" }
dragonfly-api = "2.1.57"
thiserror = "2.0"
futures = "0.3.31"
reqwest = { version = "0.12.4", features = [
    "stream",

@@ -41,11 +41,12 @@ reqwest = { version = "0.12.4", features = [
    "brotli",
    "zstd",
    "deflate",
    "blocking",
] }
reqwest-middleware = "0.4"
rcgen = { version = "0.12.1", features = ["x509-parser"] }
hyper = { version = "1.6", features = ["full"] }
hyper-util = { version = "0.1.10", features = [
hyper-util = { version = "0.1.16", features = [
    "client",
    "client-legacy",
    "tokio",

@@ -58,10 +59,10 @@ http-range-header = "0.4.2"
tracing = "0.1"
url = "2.5.4"
rustls = { version = "0.22.4", features = ["tls12"] }
rustls-pki-types = "1.11.0"
rustls-pki-types = "1.12.0"
rustls-pemfile = "2.2.0"
sha2 = "0.10"
crc32fast = "1.4.2"
crc32fast = "1.5.0"
uuid = { version = "1.16", features = ["v4"] }
hex = "0.4"
rocksdb = "0.22.0"

@@ -70,16 +71,16 @@ serde_yaml = "0.9"
http = "1"
tonic = { version = "0.12.2", features = ["tls"] }
tonic-reflection = "0.12.3"
tokio = { version = "1.44.2", features = ["full"] }
tokio-util = { version = "0.7.14", features = ["full"] }
tokio = { version = "1.47.1", features = ["full", "tracing"] }
tokio-util = { version = "0.7.16", features = ["full"] }
tokio-stream = "0.1.17"
validator = { version = "0.16", features = ["derive"] }
warp = "0.3.5"
headers = "0.4.0"
headers = "0.4.1"
regex = "1.11.1"
humantime = "2.1.0"
prost-wkt-types = "0.6"
chrono = { version = "0.4.40", features = ["serde", "clock"] }
chrono = { version = "0.4.41", features = ["serde", "clock"] }
openssl = { version = "0.10", features = ["vendored"] }
opendal = { version = "0.48.0", features = [
    "services-s3",

@@ -90,20 +91,22 @@ opendal = { version = "0.48.0", features = [
    "services-cos",
    "services-webhdfs",
] }
clap = { version = "4.5.35", features = ["derive"] }
clap = { version = "4.5.45", features = ["derive"] }
anyhow = "1.0.98"
toml_edit = "0.22.24"
toml = "0.8.20"
toml_edit = "0.22.26"
toml = "0.8.23"
bytesize = { version = "1.3.3", features = ["serde"] }
bytesize-serde = "0.2.1"
percent-encoding = "2.3.1"
tempfile = "3.19.1"
tempfile = "3.20.0"
tokio-rustls = "0.25.0-alpha.4"
serde_json = "1.0.140"
serde_json = "1.0.142"
lru = "0.12.5"
fs2 = "0.4.3"
lazy_static = "1.5"
bytes = "1.10"
local-ip-address = "0.6.5"
sysinfo = { version = "0.32.1", default-features = false, features = ["component", "disk", "network", "system", "user"] }

[profile.release]
opt-level = 3
@@ -20,9 +20,9 @@ You can find the full documentation on the [d7y.io](https://d7y.io).
Join the conversation and help the community.

- **Slack Channel**: [#dragonfly](https://cloud-native.slack.com/messages/dragonfly/) on [CNCF Slack](https://slack.cncf.io/)
- **Discussion Group**: <dragonfly-discuss@googlegroups.com>
- **Developer Group**: <dragonfly-developers@googlegroups.com>
- **Github Discussions**: [Dragonfly Discussion Forum](https://github.com/dragonflyoss/dragonfly/discussions)
- **Developer Group**: <dragonfly-developers@googlegroups.com>
- **Maintainer Group**: <dragonfly-maintainers@googlegroups.com>
- **Twitter**: [@dragonfly_oss](https://twitter.com/dragonfly_oss)
- **DingTalk**: [22880028764](https://qr.dingtalk.com/action/joingroup?code=v1,k1,pkV9IbsSyDusFQdByPSK3HfCG61ZCLeb8b/lpQ3uUqI=&_dt_no_comment=1&origin=11)

@@ -30,7 +30,3 @@ Join the conversation and help the community.

You should check out our
[CONTRIBUTING](./CONTRIBUTING.md) and develop the project together.

## License

[](https://app.fossa.com/projects/git%2Bgithub.com%2Fdragonflyoss%2Fclient?ref=badge_large)
@@ -1,4 +1,4 @@
FROM public.ecr.aws/docker/library/rust:1.82.0 AS builder
FROM public.ecr.aws/docker/library/rust:1.85.0 AS builder

WORKDIR /app/client

@@ -7,6 +7,7 @@ RUN apt-get update && apt-get install -y \
    && rm -rf /var/lib/apt/lists/*

COPY Cargo.toml Cargo.lock ./
COPY .cargo ./cargo

COPY dragonfly-client/Cargo.toml ./dragonfly-client/Cargo.toml
COPY dragonfly-client/src ./dragonfly-client/src

@@ -34,7 +35,13 @@ COPY dragonfly-client-util/src ./dragonfly-client-util/src
COPY dragonfly-client-init/Cargo.toml ./dragonfly-client-init/Cargo.toml
COPY dragonfly-client-init/src ./dragonfly-client-init/src

RUN cargo build --release --verbose --bin dfget --bin dfdaemon --bin dfstore --bin dfcache
ARG TARGETPLATFORM
RUN case "${TARGETPLATFORM}" in \
        "linux/arm64") export JEMALLOC_SYS_WITH_LG_PAGE=16;; \
    esac && \
    cargo build --release --verbose --bin dfget --bin dfdaemon --bin dfcache

RUN cargo install tokio-console --locked --root /usr/local

FROM public.ecr.aws/docker/library/alpine:3.20 AS health

@@ -52,18 +59,21 @@ RUN if [ "$(uname -m)" = "ppc64le" ]; then \
FROM public.ecr.aws/docker/library/golang:1.23.0-alpine3.20 AS pprof

RUN go install github.com/google/pprof@latest
RUN go install github.com/fullstorydev/grpcurl/cmd/grpcurl@latest

FROM public.ecr.aws/debian/debian:bookworm-slim

RUN apt-get update && apt-get install -y --no-install-recommends curl bash-completion procps infiniband-diags ibverbs-utils \
    apache2-utils ca-certificates binutils dnsutils iputils-ping llvm dstat sysstat net-tools \
RUN apt-get update && apt-get install -y --no-install-recommends iperf3 fio curl \
    iotop sysstat bash-completion procps apache2-utils ca-certificates binutils \
    dnsutils iputils-ping llvm graphviz lsof strace dstat net-tools \
    && rm -rf /var/lib/apt/lists/*

COPY --from=builder /app/client/target/release/dfget /usr/local/bin/dfget
COPY --from=builder /app/client/target/release/dfdaemon /usr/local/bin/dfdaemon
COPY --from=builder /app/client/target/release/dfstore /usr/local/bin/dfstore
COPY --from=builder /app/client/target/release/dfcache /usr/local/bin/dfcache
COPY --from=builder /usr/local/bin/tokio-console /usr/local/bin/
COPY --from=pprof /go/bin/pprof /bin/pprof
COPY --from=pprof /go/bin/grpcurl /bin/grpcurl
COPY --from=health /bin/grpc_health_probe /bin/grpc_health_probe

ENTRYPOINT ["/usr/local/bin/dfdaemon"]
@ -1,4 +1,4 @@
|
|||
FROM public.ecr.aws/docker/library/rust:1.82.0 AS builder
|
||||
FROM public.ecr.aws/docker/library/rust:1.85.0 AS builder
|
||||
|
||||
WORKDIR /app/client
|
||||
|
||||
|
@ -7,6 +7,7 @@ RUN apt-get update && apt-get install -y \
|
|||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
COPY .cargo ./cargo
|
||||
|
||||
COPY dragonfly-client/Cargo.toml ./dragonfly-client/Cargo.toml
|
||||
COPY dragonfly-client/src ./dragonfly-client/src
|
||||
|
@ -34,10 +35,15 @@ COPY dragonfly-client-util/src ./dragonfly-client-util/src
|
|||
COPY dragonfly-client-init/Cargo.toml ./dragonfly-client-init/Cargo.toml
|
||||
COPY dragonfly-client-init/src ./dragonfly-client-init/src
|
||||
|
||||
RUN cargo build --verbose --bin dfget --bin dfdaemon --bin dfstore --bin dfcache
|
||||
ARG TARGETPLATFORM
|
||||
RUN case "${TARGETPLATFORM}" in \
|
||||
"linux/arm64") export JEMALLOC_SYS_WITH_LG_PAGE=16;; \
|
||||
esac && \
|
||||
cargo build --verbose --bin dfget --bin dfdaemon --bin dfcache
|
||||
|
||||
RUN cargo install flamegraph --root /usr/local
|
||||
RUN cargo install bottom --locked --root /usr/local
|
||||
RUN cargo install tokio-console --locked --root /usr/local
|
||||
|
||||
FROM public.ecr.aws/docker/library/alpine:3.20 AS health
|
||||
|
||||
|
@ -55,21 +61,23 @@ RUN if [ "$(uname -m)" = "ppc64le" ]; then \
|
|||
FROM public.ecr.aws/docker/library/golang:1.23.0-alpine3.20 AS pprof
|
||||
|
||||
RUN go install github.com/google/pprof@latest
|
||||
RUN go install github.com/fullstorydev/grpcurl/cmd/grpcurl@latest
|
||||
|
||||
FROM public.ecr.aws/debian/debian:bookworm-slim
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends iperf3 fio wget curl infiniband-diags ibverbs-utils \
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends iperf3 fio curl infiniband-diags ibverbs-utils \
|
||||
iotop sysstat bash-completion procps apache2-utils ca-certificates binutils bpfcc-tools \
|
||||
dnsutils iputils-ping vim linux-perf llvm graphviz lsof socat strace dstat net-tools \
|
||||
dnsutils iputils-ping vim linux-perf llvm lsof socat strace dstat net-tools \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY --from=builder /app/client/target/debug/dfget /usr/local/bin/dfget
|
||||
COPY --from=builder /app/client/target/debug/dfdaemon /usr/local/bin/dfdaemon
|
||||
COPY --from=builder /app/client/target/debug/dfstore /usr/local/bin/dfstore
|
||||
COPY --from=builder /app/client/target/debug/dfcache /usr/local/bin/dfcache
|
||||
COPY --from=builder /usr/local/bin/flamegraph /usr/local/bin/
|
||||
COPY --from=builder /usr/local/bin/btm /usr/local/bin/
|
||||
COPY --from=builder /usr/local/bin/tokio-console /usr/local/bin/
|
||||
COPY --from=pprof /go/bin/pprof /bin/pprof
|
||||
COPY --from=pprof /go/bin/grpcurl /bin/grpcurl
|
||||
COPY --from=health /bin/grpc_health_probe /bin/grpc_health_probe
|
||||
|
||||
ENTRYPOINT ["/usr/local/bin/dfdaemon"]
|
||||
|
|
|
@ -1,12 +1,13 @@
|
|||
FROM public.ecr.aws/docker/library/rust:1.82.0 AS builder
|
||||
FROM public.ecr.aws/docker/library/rust:1.85.0 AS builder
|
||||
|
||||
RUN apt-get update && apt-get install -y \
|
||||
openssl libclang-dev pkg-config protobuf-compiler \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
openssl libclang-dev pkg-config protobuf-compiler \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app/client
|
||||
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
COPY .cargo ./cargo
|
||||
|
||||
COPY dragonfly-client/Cargo.toml ./dragonfly-client/Cargo.toml
|
||||
COPY dragonfly-client/src ./dragonfly-client/src
|
||||
|
@ -34,7 +35,11 @@ COPY dragonfly-client-util/src ./dragonfly-client-util/src
|
|||
COPY dragonfly-client-init/Cargo.toml ./dragonfly-client-init/Cargo.toml
|
||||
COPY dragonfly-client-init/src ./dragonfly-client-init/src
|
||||
|
||||
RUN cargo build --release --verbose --bin dfinit
|
||||
ARG TARGETPLATFORM
|
||||
RUN case "${TARGETPLATFORM}" in \
|
||||
"linux/arm64") export JEMALLOC_SYS_WITH_LG_PAGE=16;; \
|
||||
esac && \
|
||||
cargo build --release --verbose --bin dfinit
|
||||
|
||||
FROM public.ecr.aws/debian/debian:bookworm-slim
|
||||
|
||||
|
|
|
@@ -5,7 +5,7 @@ After=network-online.target
After=network.target

[Service]
ExecStart=/usr/bin/dfdaemon --config /etc/dragonfly/dfdaemon.yaml --verbose
ExecStart=/usr/bin/dfdaemon --config /etc/dragonfly/dfdaemon.yaml --console

Type=simple
Environment=HOME=/root
@@ -69,7 +69,7 @@ cargo build --release --bin dfdaemon

```bash
# prepare client.yaml by yourself.
./target/release/dfdaemon --config client.yaml -l info --verbose
./target/release/dfdaemon --config client.yaml -l info --console
```

## FlameGraph
@ -27,11 +27,11 @@ percent-encoding.workspace = true
|
|||
futures.workspace = true
|
||||
reqwest-retry = "0.7"
|
||||
reqwest-tracing = "0.5"
|
||||
libloading = "0.8.6"
|
||||
libloading = "0.8.8"
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile.workspace = true
|
||||
wiremock = "0.6.3"
|
||||
wiremock = "0.6.4"
|
||||
rustls-pki-types.workspace = true
|
||||
rustls-pemfile.workspace = true
|
||||
hyper.workspace = true
|
||||
|
|
|
@@ -14,7 +14,7 @@ cargo build --all && mv target/debug/libhdfs.so {plugin_dir}/backend/libhdfs.so
## Run Client with Plugin

```shell
$ cargo run --bin dfdaemon -- --config {config_dir}/config.yaml -l info --verbose
$ cargo run --bin dfdaemon -- --config {config_dir}/config.yaml -l info --console
INFO load [http] builtin backend
INFO load [https] builtin backend
INFO load [hdfs] plugin backend
@ -31,6 +31,7 @@ pub const HDFS_SCHEME: &str = "hdfs";
|
|||
const DEFAULT_NAMENODE_PORT: u16 = 9870;
|
||||
|
||||
/// Hdfs is a struct that implements the Backend trait.
|
||||
#[derive(Default)]
|
||||
pub struct Hdfs {
|
||||
/// scheme is the scheme of the HDFS.
|
||||
scheme: String,
|
||||
|
@ -39,7 +40,6 @@ pub struct Hdfs {
|
|||
/// Hdfs implements the Backend trait.
|
||||
impl Hdfs {
|
||||
/// new returns a new HDFS backend.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
scheme: HDFS_SCHEME.to_string(),
|
||||
|
@ -47,7 +47,6 @@ impl Hdfs {
|
|||
}
|
||||
|
||||
/// operator initializes the operator with the parsed URL and HDFS config.
|
||||
#[instrument(skip_all)]
|
||||
pub fn operator(
|
||||
&self,
|
||||
url: Url,
|
||||
|
@ -84,7 +83,6 @@ impl Hdfs {
|
|||
#[tonic::async_trait]
|
||||
impl super::Backend for Hdfs {
|
||||
/// scheme returns the scheme of the HDFS backend.
|
||||
#[instrument(skip_all)]
|
||||
fn scheme(&self) -> String {
|
||||
self.scheme.clone()
|
||||
}
|
||||
|
|
|
@@ -43,7 +43,6 @@ pub struct HTTP {
/// HTTP implements the http interface.
impl HTTP {
    /// new returns a new HTTP.
    #[instrument(skip_all)]
    pub fn new(scheme: &str) -> Result<HTTP> {
        // Default TLS client config with no validation.
        let client_config_builder = rustls::ClientConfig::builder()

@@ -51,11 +50,22 @@ impl HTTP {
            .with_custom_certificate_verifier(NoVerifier::new())
            .with_no_client_auth();

        // Disable automatic compression to prevent double-decompression issues.
        //
        // Problem scenario:
        // 1. Origin server supports gzip and returns "content-encoding: gzip" header.
        // 2. Backend decompresses the response and stores uncompressed content to disk.
        // 3. When the user's client downloads via the dfdaemon proxy, the original "content-encoding: gzip"
        //    header is forwarded to it.
        // 4. The user's client attempts to decompress the already-decompressed content, causing errors.
        //
        // Solution: Disable all compression formats (gzip, brotli, zstd, deflate) to ensure
        // we receive and store uncompressed content, eliminating the double-decompression issue.
        let client = reqwest::Client::builder()
            .gzip(true)
            .brotli(true)
            .zstd(true)
            .deflate(true)
            .no_gzip()
            .no_brotli()
            .no_zstd()
            .no_deflate()
            .use_preconfigured_tls(client_config_builder)
            .pool_max_idle_per_host(super::POOL_MAX_IDLE_PER_HOST)
            .tcp_keepalive(super::KEEP_ALIVE_INTERVAL)

@@ -75,7 +85,6 @@ impl HTTP {
    }

    /// client returns a new reqwest client.
    #[instrument(skip_all)]
    fn client(
        &self,
        client_cert: Option<Vec<CertificateDer<'static>>>,

@@ -90,11 +99,22 @@ impl HTTP {
            .with_root_certificates(root_cert_store)
            .with_no_client_auth();

        // Disable automatic compression to prevent double-decompression issues.
        //
        // Problem scenario:
        // 1. Origin server supports gzip and returns "content-encoding: gzip" header.
        // 2. Backend decompresses the response and stores uncompressed content to disk.
        // 3. When the user's client downloads via the dfdaemon proxy, the original "content-encoding: gzip"
        //    header is forwarded to it.
        // 4. The user's client attempts to decompress the already-decompressed content, causing errors.
        //
        // Solution: Disable all compression formats (gzip, brotli, zstd, deflate) to ensure
        // we receive and store uncompressed content, eliminating the double-decompression issue.
        let client = reqwest::Client::builder()
            .gzip(true)
            .brotli(true)
            .zstd(true)
            .deflate(true)
            .no_gzip()
            .no_brotli()
            .no_zstd()
            .no_deflate()
            .use_preconfigured_tls(client_config_builder)
            .build()?;

@@ -117,7 +137,6 @@ impl HTTP {
#[tonic::async_trait]
impl super::Backend for HTTP {
    /// scheme returns the scheme of the HTTP backend.
    #[instrument(skip_all)]
    fn scheme(&self) -> String {
        self.scheme.clone()
    }

@@ -141,6 +160,13 @@ impl super::Backend for HTTP {
            .client(request.client_cert)?
            .get(&request.url)
            .headers(header)
            // Add Range header to ensure Content-Length is returned in response headers.
            // Some servers (especially when using Transfer-Encoding: chunked,
            // refer to https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Transfer-Encoding.) may not
            // include Content-Length in HEAD requests. Using "bytes=0-" requests the
            // entire file starting from byte 0, forcing the server to include file size
            // information in the response headers.
            .header(reqwest::header::RANGE, "bytes=0-")
            .timeout(request.timeout)
            .send()
            .await
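To illustrate the behaviour the `Range: bytes=0-` comment above describes, here is a hedged sketch of probing an object's size from a ranged response; the function name and structure are hypothetical and are not part of this diff, only the header trick is taken from it:

```rust
// Hypothetical sketch: a server answering a "bytes=0-" request with 206 usually
// includes "Content-Range: bytes 0-N/total", so the total size can be read even
// when a plain HEAD would have used chunked transfer encoding.
async fn probe_size(client: &reqwest::Client, url: &str) -> Option<u64> {
    let resp = client
        .get(url)
        .header(reqwest::header::RANGE, "bytes=0-")
        .send()
        .await
        .ok()?;

    // Parse the total after the '/' in the Content-Range header.
    resp.headers()
        .get(reqwest::header::CONTENT_RANGE)
        .and_then(|v| v.to_str().ok())
        .and_then(|v| v.rsplit('/').next())
        .and_then(|total| total.parse().ok())
}
```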
@@ -23,11 +23,10 @@ use libloading::Library;
use reqwest::header::HeaderMap;
use rustls_pki_types::CertificateDer;
use std::path::Path;
use std::str::FromStr;
use std::{collections::HashMap, pin::Pin, time::Duration};
use std::{fmt::Debug, fs};
use tokio::io::{AsyncRead, AsyncReadExt};
use tracing::{error, info, instrument, warn};
use tracing::{error, info, warn};
use url::Url;

pub mod hdfs;

@@ -47,7 +46,7 @@ const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(300);
const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(20);

/// MAX_RETRY_TIMES is the max retry times for the request.
const MAX_RETRY_TIMES: u32 = 3;
const MAX_RETRY_TIMES: u32 = 1;

/// NAME is the name of the package.
pub const NAME: &str = "backend";
@@ -167,7 +166,7 @@ where
}

/// The File Entry of a directory, including some relevant file metadata.
#[derive(Debug, PartialEq, Eq)]
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
pub struct DirEntry {
    /// url is the url of the entry.
    pub url: String,
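A brief note on the extra derives: `Hash + Eq + Clone` is what lets callers collect `DirEntry` values into hash-based containers. The following is a minimal sketch of that usage, assuming the `DirEntry` struct above with all of its fields hashable; it is not code from this diff:

```rust
// Hypothetical sketch: de-duplicate directory entries gathered from a listing.
use std::collections::HashSet;

fn dedup_entries(entries: Vec<DirEntry>) -> Vec<DirEntry> {
    // HashSet requires Eq + Hash, which the new derive provides.
    let unique: HashSet<DirEntry> = entries.into_iter().collect();
    unique.into_iter().collect()
}
```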
@@ -227,7 +226,6 @@ pub struct BackendFactory {
/// https://github.com/dragonflyoss/client/tree/main/dragonfly-client-backend/examples/plugin/.
impl BackendFactory {
    /// new returns a new BackendFactory.
    #[instrument(skip_all)]
    pub fn new(plugin_dir: Option<&Path>) -> Result<Self> {
        let mut backend_factory = Self::default();
        backend_factory.load_builtin_backends()?;

@@ -242,14 +240,12 @@ impl BackendFactory {
        Ok(backend_factory)
    }

    /// supported_download_directory returns whether the scheme supports directory download.
    #[instrument(skip_all)]
    pub fn supported_download_directory(scheme: &str) -> bool {
        object_storage::Scheme::from_str(scheme).is_ok() || scheme == hdfs::HDFS_SCHEME
    /// unsupported_download_directory returns whether the scheme does not support directory download.
    pub fn unsupported_download_directory(scheme: &str) -> bool {
        scheme == http::HTTP_SCHEME || scheme == http::HTTPS_SCHEME
    }

    /// build returns the backend by the scheme of the url.
    #[instrument(skip_all)]
    pub fn build(&self, url: &str) -> Result<&(dyn Backend + Send + Sync)> {
        let url = Url::parse(url).or_err(ErrorType::ParseError)?;
        let scheme = url.scheme();
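For orientation only, this is a hedged sketch of how the renamed helper reads at a call site; the wrapper function is hypothetical and the real call sites are not shown in this diff:

```rust
// Hypothetical sketch: http/https cannot enumerate a remote directory, so a
// directory download request for those schemes is rejected up front, while
// object-storage and HDFS schemes fall through.
fn must_be_single_file(scheme: &str) -> bool {
    BackendFactory::unsupported_download_directory(scheme)
}
```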
@ -260,7 +256,6 @@ impl BackendFactory {
|
|||
}
|
||||
|
||||
/// load_builtin_backends loads the builtin backends.
|
||||
#[instrument(skip_all)]
|
||||
fn load_builtin_backends(&mut self) -> Result<()> {
|
||||
self.backends.insert(
|
||||
"http".to_string(),
|
||||
|
@@ -330,13 +325,12 @@ impl BackendFactory {
    }

    /// load_plugin_backends loads the plugin backends.
    #[instrument(skip_all)]
    fn load_plugin_backends(&mut self, plugin_dir: &Path) -> Result<()> {
        let backend_plugin_dir = plugin_dir.join(NAME);
        if !backend_plugin_dir.exists() {
            warn!(
                "skip loading plugin backends, because the plugin directory {} does not exist",
                plugin_dir.display()
                backend_plugin_dir.display()
            );
            return Ok(());
        }
@ -436,9 +430,15 @@ mod tests {
|
|||
|
||||
let result = BackendFactory::new(Some(&plugin_dir));
|
||||
assert!(result.is_err());
|
||||
assert_eq!(
|
||||
format!("{}", result.err().unwrap()),
|
||||
format!("PluginError cause: {}: file too short", lib_path.display()),
|
||||
let err_msg = format!("{}", result.err().unwrap());
|
||||
|
||||
assert!(
|
||||
err_msg.starts_with("PluginError cause:"),
|
||||
"error message should start with 'PluginError cause:'"
|
||||
);
|
||||
assert!(
|
||||
err_msg.contains(&lib_path.display().to_string()),
|
||||
"error message should contain library path"
|
||||
);
|
||||
}
|
||||
|
||||
|
|
|
@ -177,7 +177,6 @@ pub struct ObjectStorage {
|
|||
/// ObjectStorage implements the ObjectStorage trait.
|
||||
impl ObjectStorage {
|
||||
/// Returns ObjectStorage that implements the Backend trait.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(scheme: Scheme) -> ClientResult<ObjectStorage> {
|
||||
// Initialize the reqwest client.
|
||||
let client = reqwest::Client::builder()
|
||||
|
@ -196,7 +195,6 @@ impl ObjectStorage {
|
|||
}
|
||||
|
||||
/// operator initializes the operator with the parsed URL and object storage.
|
||||
#[instrument(skip_all)]
|
||||
pub fn operator(
|
||||
&self,
|
||||
parsed_url: &super::object_storage::ParsedURL,
|
||||
|
@ -223,7 +221,6 @@ impl ObjectStorage {
|
|||
}
|
||||
|
||||
/// s3_operator initializes the S3 operator with the parsed URL and object storage.
|
||||
#[instrument(skip_all)]
|
||||
pub fn s3_operator(
|
||||
&self,
|
||||
parsed_url: &super::object_storage::ParsedURL,
|
||||
|
@ -276,7 +273,6 @@ impl ObjectStorage {
|
|||
}
|
||||
|
||||
/// gcs_operator initializes the GCS operator with the parsed URL and object storage.
|
||||
#[instrument(skip_all)]
|
||||
pub fn gcs_operator(
|
||||
&self,
|
||||
parsed_url: &super::object_storage::ParsedURL,
|
||||
|
@ -311,7 +307,6 @@ impl ObjectStorage {
|
|||
}
|
||||
|
||||
/// abs_operator initializes the ABS operator with the parsed URL and object storage.
|
||||
#[instrument(skip_all)]
|
||||
pub fn abs_operator(
|
||||
&self,
|
||||
parsed_url: &super::object_storage::ParsedURL,
|
||||
|
@ -354,7 +349,6 @@ impl ObjectStorage {
|
|||
}
|
||||
|
||||
/// oss_operator initializes the OSS operator with the parsed URL and object storage.
|
||||
#[instrument(skip_all)]
|
||||
pub fn oss_operator(
|
||||
&self,
|
||||
parsed_url: &super::object_storage::ParsedURL,
|
||||
|
@ -398,7 +392,6 @@ impl ObjectStorage {
|
|||
}
|
||||
|
||||
/// obs_operator initializes the OBS operator with the parsed URL and object storage.
|
||||
#[instrument(skip_all)]
|
||||
pub fn obs_operator(
|
||||
&self,
|
||||
parsed_url: &super::object_storage::ParsedURL,
|
||||
|
@ -487,7 +480,6 @@ impl ObjectStorage {
|
|||
#[tonic::async_trait]
|
||||
impl crate::Backend for ObjectStorage {
|
||||
/// scheme returns the scheme of the object storage.
|
||||
#[instrument(skip_all)]
|
||||
fn scheme(&self) -> String {
|
||||
self.scheme.to_string()
|
||||
}
|
||||
|
|
|
@ -13,6 +13,7 @@ build = "build.rs"
|
|||
[dependencies]
|
||||
dragonfly-client-core.workspace = true
|
||||
dragonfly-client-util.workspace = true
|
||||
local-ip-address.workspace = true
|
||||
clap.workspace = true
|
||||
regex.workspace = true
|
||||
serde.workspace = true
|
||||
|
@ -28,8 +29,9 @@ bytesize-serde.workspace = true
|
|||
tonic.workspace = true
|
||||
rustls-pki-types.workspace = true
|
||||
rcgen.workspace = true
|
||||
reqwest.workspace = true
|
||||
home = "0.5.11"
|
||||
local-ip-address = "0.6.3"
|
||||
hostname = "^0.4"
|
||||
humantime-serde = "1.1.1"
|
||||
serde_regex = "1.1.0"
|
||||
http-serde = "2.1.1"
|
||||
|
|
|
@@ -143,13 +143,19 @@ fn default_download_rate_limit() -> ByteSize {
/// default_download_piece_timeout is the default timeout for downloading a piece from source.
#[inline]
fn default_download_piece_timeout() -> Duration {
    Duration::from_secs(15)
    Duration::from_secs(120)
}

/// default_collected_download_piece_timeout is the default timeout for collecting one piece from the parent in the stream.
#[inline]
fn default_collected_download_piece_timeout() -> Duration {
    Duration::from_secs(10)
}

/// default_download_concurrent_piece_count is the default number of concurrent pieces to download.
#[inline]
fn default_download_concurrent_piece_count() -> u32 {
    16
    8
}

/// default_download_max_schedule_count is the default max count of schedule.

@@ -158,6 +164,12 @@ fn default_download_max_schedule_count() -> u32 {
    5
}

/// default_tracing_path is the default tracing path for dfdaemon.
#[inline]
fn default_tracing_path() -> Option<PathBuf> {
    Some(PathBuf::from("/v1/traces"))
}

/// default_scheduler_announce_interval is the default interval to announce peer to the scheduler.
#[inline]
fn default_scheduler_announce_interval() -> Duration {

@@ -167,7 +179,7 @@ fn default_scheduler_announce_interval() -> Duration {
/// default_scheduler_schedule_timeout is the default timeout for scheduling.
#[inline]
fn default_scheduler_schedule_timeout() -> Duration {
    Duration::from_secs(180)
    Duration::from_secs(3 * 60 * 60)
}

/// default_dynconfig_refresh_interval is the default interval to refresh dynamic configuration from manager.
@ -188,6 +200,13 @@ fn default_storage_keep() -> bool {
|
|||
false
|
||||
}
|
||||
|
||||
/// default_storage_write_piece_timeout is the default timeout for writing a piece to storage(e.g., disk
|
||||
/// or cache).
|
||||
#[inline]
|
||||
fn default_storage_write_piece_timeout() -> Duration {
|
||||
Duration::from_secs(90)
|
||||
}
|
||||
|
||||
/// default_storage_write_buffer_size is the default buffer size for writing piece to disk, default is 4MB.
|
||||
#[inline]
|
||||
fn default_storage_write_buffer_size() -> usize {
|
||||
|
@ -207,18 +226,6 @@ fn default_storage_cache_capacity() -> ByteSize {
|
|||
ByteSize::mib(64)
|
||||
}
|
||||
|
||||
/// default_seed_peer_cluster_id is the default cluster id of seed peer.
|
||||
#[inline]
|
||||
fn default_seed_peer_cluster_id() -> u64 {
|
||||
1
|
||||
}
|
||||
|
||||
/// default_seed_peer_keepalive_interval is the default interval to keepalive with manager.
|
||||
#[inline]
|
||||
fn default_seed_peer_keepalive_interval() -> Duration {
|
||||
Duration::from_secs(15)
|
||||
}
|
||||
|
||||
/// default_gc_interval is the default interval to do gc.
|
||||
#[inline]
|
||||
fn default_gc_interval() -> Duration {
|
||||
|
@ -231,6 +238,12 @@ fn default_gc_policy_task_ttl() -> Duration {
|
|||
Duration::from_secs(21_600)
|
||||
}
|
||||
|
||||
/// default_gc_policy_dist_threshold is the default threshold of the disk usage to do gc.
|
||||
#[inline]
|
||||
fn default_gc_policy_dist_threshold() -> ByteSize {
|
||||
ByteSize::default()
|
||||
}
|
||||
|
||||
/// default_gc_policy_dist_high_threshold_percent is the default high threshold percent of the disk usage.
|
||||
#[inline]
|
||||
fn default_gc_policy_dist_high_threshold_percent() -> u8 {
|
||||
|
@ -388,6 +401,12 @@ pub struct Host {
|
|||
|
||||
/// ip is the advertise ip of the host.
|
||||
pub ip: Option<IpAddr>,
|
||||
|
||||
/// scheduler_cluster_id is the ID of the cluster to which the scheduler belongs.
|
||||
/// NOTE: This field is used to identify the cluster to which the scheduler belongs.
|
||||
/// If this flag is set, the idc, location, hostname and ip will be ignored when listing schedulers.
|
||||
#[serde(rename = "schedulerClusterID")]
|
||||
pub scheduler_cluster_id: Option<u64>,
|
||||
}
|
||||
|
||||
/// Host implements Default.
|
||||
|
@ -398,6 +417,7 @@ impl Default for Host {
|
|||
location: None,
|
||||
hostname: default_host_hostname(),
|
||||
ip: None,
|
||||
scheduler_cluster_id: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -467,6 +487,14 @@ pub struct Download {
|
|||
#[serde(default = "default_download_piece_timeout", with = "humantime_serde")]
|
||||
pub piece_timeout: Duration,
|
||||
|
||||
/// collected_piece_timeout is the timeout for collecting one piece from the parent in the
|
||||
/// stream.
|
||||
#[serde(
|
||||
default = "default_collected_download_piece_timeout",
|
||||
with = "humantime_serde"
|
||||
)]
|
||||
pub collected_piece_timeout: Duration,
|
||||
|
||||
/// concurrent_piece_count is the number of concurrent pieces to download.
|
||||
#[serde(default = "default_download_concurrent_piece_count")]
|
||||
#[validate(range(min = 1))]
|
||||
|
@ -481,6 +509,7 @@ impl Default for Download {
|
|||
parent_selector: ParentSelector::default(),
|
||||
rate_limit: default_download_rate_limit(),
|
||||
piece_timeout: default_download_piece_timeout(),
|
||||
collected_piece_timeout: default_collected_download_piece_timeout(),
|
||||
concurrent_piece_count: default_download_concurrent_piece_count(),
|
||||
}
|
||||
}
|
||||
|
@@ -743,8 +772,35 @@ pub struct Scheduler {
    )]
    pub announce_interval: Duration,

    /// schedule_timeout is the timeout for scheduling. If the scheduling timeout, dfdaemon will back-to-source
    /// download if enable_back_to_source is true, otherwise dfdaemon will return download failed.
    /// schedule_timeout is the timeout for the scheduler to respond to a scheduling request from dfdaemon, default is 3 hours.
    ///
    /// If the scheduler's response time for a scheduling decision exceeds this timeout,
    /// dfdaemon will encounter a `TokioStreamElapsed(Elapsed(()))` error.
    ///
    /// Behavior upon timeout:
    /// - If `enable_back_to_source` is `true`, dfdaemon will attempt to download directly
    ///   from the source.
    /// - Otherwise (if `enable_back_to_source` is `false`), dfdaemon will report a download failure.
    ///
    /// **Important Considerations Regarding Timeout Triggers**:
    /// This timeout isn't solely for the scheduler's direct response. It can also be triggered
    /// if the overall duration of the client's interaction with the scheduler for a task
    /// (e.g., the client downloading initial pieces and reporting their status back to the scheduler)
    /// exceeds `schedule_timeout`. During such client-side processing and reporting,
    /// the scheduler might be awaiting these updates before sending its comprehensive
    /// scheduling response, and this entire period is subject to the `schedule_timeout`.
    ///
    /// **Configuration Guidance**:
    /// To prevent premature timeouts, `schedule_timeout` should be configured to a value
    /// greater than the maximum expected time for the *entire scheduling interaction*.
    /// This includes:
    /// 1. The scheduler's own processing and response time.
    /// 2. The time taken by the client to download the initial pieces and to finish downloading all pieces,
    ///    as this communication is part of the scheduling phase.
    ///
    /// Setting this value too low can lead to `TokioStreamElapsed` errors even if the
    /// network and scheduler are functioning correctly but the combined interaction time
    /// is longer than the configured timeout.
    #[serde(
        default = "default_scheduler_schedule_timeout",
        with = "humantime_serde"
@ -856,18 +912,6 @@ pub struct SeedPeer {
|
|||
/// kind is the type of seed peer.
|
||||
#[serde(default, rename = "type")]
|
||||
pub kind: HostType,
|
||||
|
||||
/// cluster_id is the cluster id of the seed peer cluster.
|
||||
#[serde(default = "default_seed_peer_cluster_id", rename = "clusterID")]
|
||||
#[validate(range(min = 1))]
|
||||
pub cluster_id: u64,
|
||||
|
||||
/// keepalive_interval is the interval to keep alive with manager.
|
||||
#[serde(
|
||||
default = "default_seed_peer_keepalive_interval",
|
||||
with = "humantime_serde"
|
||||
)]
|
||||
pub keepalive_interval: Duration,
|
||||
}
|
||||
|
||||
/// SeedPeer implements Default.
|
||||
|
@ -876,8 +920,6 @@ impl Default for SeedPeer {
|
|||
SeedPeer {
|
||||
enable: false,
|
||||
kind: HostType::Normal,
|
||||
cluster_id: default_seed_peer_cluster_id(),
|
||||
keepalive_interval: default_seed_peer_keepalive_interval(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -940,6 +982,14 @@ pub struct Storage {
|
|||
#[serde(default = "default_storage_keep")]
|
||||
pub keep: bool,
|
||||
|
||||
/// write_piece_timeout is the timeout for writing a piece to storage(e.g., disk
|
||||
/// or cache).
|
||||
#[serde(
|
||||
default = "default_storage_write_piece_timeout",
|
||||
with = "humantime_serde"
|
||||
)]
|
||||
pub write_piece_timeout: Duration,
|
||||
|
||||
/// write_buffer_size is the buffer size for writing piece to disk, default is 128KB.
|
||||
#[serde(default = "default_storage_write_buffer_size")]
|
||||
pub write_buffer_size: usize,
|
||||
|
@ -951,31 +1001,33 @@ pub struct Storage {
|
|||
/// cache_capacity is the cache capacity for downloading, default is 100.
|
||||
///
|
||||
/// Cache storage:
|
||||
/// 1. Users can create preheating jobs and preheat tasks to memory and disk by setting `load_to_cache` to `true`.
|
||||
/// For more details, refer to https://github.com/dragonflyoss/api/blob/main/proto/common.proto#L443.
|
||||
/// 1. Users can preheat task by caching to memory (via CacheTask) or to disk (via Task).
|
||||
/// For more details, refer to https://github.com/dragonflyoss/api/blob/main/proto/dfdaemon.proto#L174.
|
||||
/// 2. If the download hits the memory cache, it will be faster than reading from the disk, because there is no
|
||||
/// page cache for the first read.
|
||||
///
|
||||
/// ```text
|
||||
/// 1.Preheat
|
||||
/// |
|
||||
/// |
|
||||
/// +--------------------------------------------------+
|
||||
/// | | Peer |
|
||||
/// | | +-----------+ |
|
||||
/// | | -- Partial -->| Cache | |
|
||||
/// | | | +-----------+ |
|
||||
/// | v | | | |
|
||||
/// | Download | Miss | |
|
||||
/// | Task -->| | --- Hit ------>|<-- 2.Download
|
||||
/// | | | ^ |
|
||||
/// | | v | |
|
||||
/// | | +-----------+ | |
|
||||
/// | -- Full -->| Disk |---------- |
|
||||
/// | +-----------+ |
|
||||
/// | |
|
||||
/// +--------------------------------------------------+
|
||||
/// ```
|
||||
///```text
|
||||
/// +--------+
|
||||
/// │ Source │
|
||||
/// +--------+
|
||||
/// ^ ^ Preheat
|
||||
/// │ │ |
|
||||
/// +-----------------+ │ │ +----------------------------+
|
||||
/// │ Other Peers │ │ │ │ Peer | │
|
||||
/// │ │ │ │ │ v │
|
||||
/// │ +----------+ │ │ │ │ +----------+ │
|
||||
/// │ │ Cache |<--|----------|<-Miss--| Cache |--Hit-->|<----Download CacheTask
|
||||
/// │ +----------+ │ │ │ +----------+ │
|
||||
/// │ │ │ │ │
|
||||
/// │ +----------+ │ │ │ +----------+ │
|
||||
/// │ │ Disk |<--|----------|<-Miss--| Disk |--Hit-->|<----Download Task
|
||||
/// │ +----------+ │ │ +----------+ │
|
||||
/// │ │ │ ^ │
|
||||
/// │ │ │ | │
|
||||
/// +-----------------+ +----------------------------+
|
||||
/// |
|
||||
/// Preheat
|
||||
///```
|
||||
#[serde(with = "bytesize_serde", default = "default_storage_cache_capacity")]
|
||||
pub cache_capacity: ByteSize,
|
||||
}
|
||||
|
@ -987,6 +1039,7 @@ impl Default for Storage {
|
|||
server: StorageServer::default(),
|
||||
dir: crate::default_storage_dir(),
|
||||
keep: default_storage_keep(),
|
||||
write_piece_timeout: default_storage_write_piece_timeout(),
|
||||
write_buffer_size: default_storage_write_buffer_size(),
|
||||
read_buffer_size: default_storage_read_buffer_size(),
|
||||
cache_capacity: default_storage_cache_capacity(),
|
||||
|
@@ -1006,6 +1059,19 @@ pub struct Policy {
    )]
    pub task_ttl: Duration,

    /// dist_threshold optionally defines a specific disk capacity to be used as the base for
    /// calculating GC trigger points with `dist_high_threshold_percent` and `dist_low_threshold_percent`.
    ///
    /// - If a value is provided (e.g., "500GB"), the percentage-based thresholds (`dist_high_threshold_percent`,
    ///   `dist_low_threshold_percent`) are applied relative to this specified capacity.
    /// - If not provided or set to 0 (the default behavior), these percentage-based thresholds are applied
    ///   relative to the total actual disk space.
    ///
    /// This allows dfdaemon to effectively manage a logical portion of the disk for its cache,
    /// rather than always considering the entire disk volume.
    #[serde(with = "bytesize_serde", default = "default_gc_policy_dist_threshold")]
    pub dist_threshold: ByteSize,

    /// dist_high_threshold_percent is the high threshold percent of the disk usage.
    /// If the disk usage is greater than the threshold, dfdaemon will do gc.
    #[serde(default = "default_gc_policy_dist_high_threshold_percent")]

@@ -1023,6 +1089,7 @@ pub struct Policy {
impl Default for Policy {
    fn default() -> Self {
        Policy {
            dist_threshold: default_gc_policy_dist_threshold(),
            task_ttl: default_gc_policy_task_ttl(),
            dist_high_threshold_percent: default_gc_policy_dist_high_threshold_percent(),
            dist_low_threshold_percent: default_gc_policy_dist_low_threshold_percent(),
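The arithmetic implied by the `dist_threshold` doc comment above is simple, but a worked example makes the intent concrete. This sketch is illustrative only (the values are hypothetical, and the real GC code is not shown in this diff):

```rust
// Illustration of the threshold math described above, not code from the diff.
fn gc_trigger_bytes(dist_threshold: u64, high_threshold_percent: u64) -> u64 {
    dist_threshold * high_threshold_percent / 100
}

fn main() {
    // With dist_threshold = 500 GB and a high threshold of 80%, GC starts once
    // Dragonfly's own usage exceeds 400 GB, regardless of the real disk size.
    assert_eq!(gc_trigger_bytes(500_000_000_000, 80), 400_000_000_000);
}
```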
@@ -1382,11 +1449,37 @@ pub struct Stats {
}

/// Tracing is the tracing configuration for dfdaemon.
#[derive(Debug, Clone, Default, Validate, Deserialize)]
#[derive(Debug, Clone, Validate, Deserialize)]
#[serde(default, rename_all = "camelCase")]
pub struct Tracing {
    /// addr is the address to report tracing log.
    pub addr: Option<String>,
    /// Protocol specifies the communication protocol for the tracing server.
    /// Supported values: "http", "https", "grpc" (default: None).
    /// This determines how tracing logs are transmitted to the server.
    pub protocol: Option<String>,

    /// endpoint is the endpoint to report tracing log, example: "localhost:4317".
    pub endpoint: Option<String>,

    /// path is the path to report tracing log, example: "/v1/traces" if the protocol is "http" or
    /// "https".
    #[serde(default = "default_tracing_path")]
    pub path: Option<PathBuf>,

    /// headers is the headers to report tracing log.
    #[serde(with = "http_serde::header_map")]
    pub headers: reqwest::header::HeaderMap,
}

/// Tracing implements Default.
impl Default for Tracing {
    fn default() -> Self {
        Self {
            protocol: None,
            endpoint: None,
            path: default_tracing_path(),
            headers: reqwest::header::HeaderMap::new(),
        }
    }
}

/// Config is the configuration for dfdaemon.
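As a small check of the new defaults, the sketch below exercises only what the `Tracing` struct and its `Default` impl above define; it assumes the surrounding module's imports (`PathBuf`, `reqwest`) and is not a test taken from this diff:

```rust
// Hypothetical sketch based on the struct and Default impl shown above.
#[test]
fn tracing_defaults_sketch() {
    let tracing = Tracing::default();
    assert_eq!(tracing.path, Some(PathBuf::from("/v1/traces")));
    assert!(tracing.protocol.is_none() && tracing.endpoint.is_none());
    assert!(tracing.headers.is_empty());
}
```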
@ -1896,11 +1989,6 @@ key: /etc/ssl/private/client.pem
|
|||
let default_seed_peer = SeedPeer::default();
|
||||
assert!(!default_seed_peer.enable);
|
||||
assert_eq!(default_seed_peer.kind, HostType::Normal);
|
||||
assert_eq!(default_seed_peer.cluster_id, 1);
|
||||
assert_eq!(
|
||||
default_seed_peer.keepalive_interval,
|
||||
default_seed_peer_keepalive_interval()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -1908,20 +1996,9 @@ key: /etc/ssl/private/client.pem
|
|||
let valid_seed_peer = SeedPeer {
|
||||
enable: true,
|
||||
kind: HostType::Weak,
|
||||
cluster_id: 5,
|
||||
keepalive_interval: Duration::from_secs(90),
|
||||
};
|
||||
|
||||
assert!(valid_seed_peer.validate().is_ok());
|
||||
|
||||
let invalid_seed_peer = SeedPeer {
|
||||
enable: true,
|
||||
kind: HostType::Weak,
|
||||
cluster_id: 0,
|
||||
keepalive_interval: Duration::from_secs(90),
|
||||
};
|
||||
|
||||
assert!(invalid_seed_peer.validate().is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -1938,8 +2015,6 @@ key: /etc/ssl/private/client.pem
|
|||
|
||||
assert!(seed_peer.enable);
|
||||
assert_eq!(seed_peer.kind, HostType::Super);
|
||||
assert_eq!(seed_peer.cluster_id, 2);
|
||||
assert_eq!(seed_peer.keepalive_interval, Duration::from_secs(60));
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -1969,6 +2044,7 @@ key: /etc/ssl/private/client.pem
|
|||
},
|
||||
"dir": "/tmp/storage",
|
||||
"keep": true,
|
||||
"writePieceTimeout": "20s",
|
||||
"writeBufferSize": 8388608,
|
||||
"readBufferSize": 8388608,
|
||||
"cacheCapacity": "256MB"
|
||||
|
@ -1979,6 +2055,7 @@ key: /etc/ssl/private/client.pem
|
|||
assert_eq!(storage.server.protocol, "http".to_string());
|
||||
assert_eq!(storage.dir, PathBuf::from("/tmp/storage"));
|
||||
assert!(storage.keep);
|
||||
assert_eq!(storage.write_piece_timeout, Duration::from_secs(20));
|
||||
assert_eq!(storage.write_buffer_size, 8 * 1024 * 1024);
|
||||
assert_eq!(storage.read_buffer_size, 8 * 1024 * 1024);
|
||||
assert_eq!(storage.cache_capacity, ByteSize::mb(256));
|
||||
|
@ -1988,18 +2065,18 @@ key: /etc/ssl/private/client.pem
|
|||
fn validate_policy() {
|
||||
let valid_policy = Policy {
|
||||
task_ttl: Duration::from_secs(12 * 3600),
|
||||
dist_threshold: ByteSize::mb(100),
|
||||
dist_high_threshold_percent: 90,
|
||||
dist_low_threshold_percent: 70,
|
||||
};
|
||||
|
||||
assert!(valid_policy.validate().is_ok());
|
||||
|
||||
let invalid_policy = Policy {
|
||||
task_ttl: Duration::from_secs(12 * 3600),
|
||||
dist_threshold: ByteSize::mb(100),
|
||||
dist_high_threshold_percent: 100,
|
||||
dist_low_threshold_percent: 70,
|
||||
};
|
||||
|
||||
assert!(invalid_policy.validate().is_err());
|
||||
}
|
||||
|
||||
|
@ -2099,12 +2176,19 @@ key: /etc/ssl/private/client.pem
|
|||
fn deserialize_tracing_correctly() {
|
||||
let json_data = r#"
|
||||
{
|
||||
"addr": "http://tracing.example.com"
|
||||
"protocol": "http",
|
||||
"endpoint": "tracing.example.com",
|
||||
"path": "/v1/traces",
|
||||
"headers": {
|
||||
"X-Custom-Header": "value"
|
||||
}
|
||||
}"#;
|
||||
|
||||
let tracing: Tracing = serde_json::from_str(json_data).unwrap();
|
||||
|
||||
assert_eq!(tracing.addr, Some("http://tracing.example.com".to_string()));
|
||||
assert_eq!(tracing.protocol, Some("http".to_string()));
|
||||
assert_eq!(tracing.endpoint, Some("tracing.example.com".to_string()));
|
||||
assert_eq!(tracing.path, Some(PathBuf::from("/v1/traces")));
|
||||
assert!(tracing.headers.contains_key("X-Custom-Header"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
|
|
@ -1,25 +0,0 @@
|
|||
/*
|
||||
* Copyright 2024 The Dragonfly Authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// NAME is the name of dfstore.
|
||||
pub const NAME: &str = "dfstore";
|
||||
|
||||
/// default_dfstore_log_dir is the default log directory for dfstore.
|
||||
pub fn default_dfstore_log_dir() -> PathBuf {
|
||||
crate::default_log_dir().join(NAME)
|
||||
}
|
|
@@ -21,7 +21,6 @@ pub mod dfcache;
pub mod dfdaemon;
pub mod dfget;
pub mod dfinit;
pub mod dfstore;

/// SERVICE_NAME is the name of the service.
pub const SERVICE_NAME: &str = "dragonfly";

@@ -105,7 +104,7 @@ pub fn default_lock_dir() -> PathBuf {
/// default_plugin_dir is the default plugin directory for client.
pub fn default_plugin_dir() -> PathBuf {
    #[cfg(target_os = "linux")]
    return PathBuf::from("/var/lib/dragonfly/plugins/");
    return PathBuf::from("/usr/local/lib/dragonfly/plugins/");

    #[cfg(target_os = "macos")]
    return home::home_dir().unwrap().join(".dragonfly").join("plugins");
@ -62,6 +62,10 @@ pub enum DFError {
|
|||
#[error{"piece {0} state is failed"}]
|
||||
PieceStateIsFailed(String),
|
||||
|
||||
/// DownloadPieceFinished is the error when the downloaded piece fails to finish before the timeout.
|
||||
#[error{"download piece {0} finished timeout"}]
|
||||
DownloadPieceFinished(String),
|
||||
|
||||
/// WaitForPieceFinishedTimeout is the error when waiting for the piece to finish times out.
|
||||
#[error{"wait for piece {0} finished timeout"}]
|
||||
WaitForPieceFinishedTimeout(String),
|
||||
|
|
|
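These two variants are raised when a piece download, or the wait for its completion, exceeds its deadline. A minimal sketch of how such an error might be produced with a `tokio::select!` race, mirroring the storage changes later in this change set (the wrapper name and its shape are illustrative, not the crate's API):

```rust
use std::future::Future;
use std::time::Duration;
use tokio::time::sleep;

/// Illustrative wrapper: run a piece future against a deadline and map a
/// timeout to DFError::DownloadPieceFinished.
async fn with_finished_timeout<T, F>(
    piece_id: &str,
    fut: F,
    timeout: Duration,
) -> Result<T, DFError>
where
    F: Future<Output = Result<T, DFError>>,
{
    tokio::select! {
        result = fut => result,
        _ = sleep(timeout) => Err(DFError::DownloadPieceFinished(piece_id.to_string())),
    }
}
```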
@ -23,7 +23,6 @@ tokio.workspace = true
|
|||
anyhow.workspace = true
|
||||
tracing.workspace = true
|
||||
toml_edit.workspace = true
|
||||
toml.workspace = true
|
||||
url.workspace = true
|
||||
tempfile.workspace = true
|
||||
serde_json.workspace = true
|
||||
|
|
|
@ -64,12 +64,8 @@ struct Args {
|
|||
)]
|
||||
log_max_files: usize,
|
||||
|
||||
#[arg(
|
||||
long = "verbose",
|
||||
default_value_t = false,
|
||||
help = "Specify whether to print log"
|
||||
)]
|
||||
verbose: bool,
|
||||
#[arg(long, default_value_t = false, help = "Specify whether to print log")]
|
||||
console: bool,
|
||||
|
||||
#[arg(
|
||||
short = 'V',
|
||||
|
@ -94,7 +90,12 @@ async fn main() -> Result<(), anyhow::Error> {
|
|||
args.log_level,
|
||||
args.log_max_files,
|
||||
None,
|
||||
args.verbose,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
false,
|
||||
args.console,
|
||||
);
|
||||
|
||||
// Load config.
|
||||
|
|
|
@ -50,8 +50,6 @@ impl ContainerRuntime {
|
|||
/// run runs the container runtime to initialize runtime environment for the dfdaemon.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn run(&self) -> Result<()> {
|
||||
// If containerd is enabled, override the default containerd
|
||||
// configuration.
|
||||
match &self.engine {
|
||||
None => Ok(()),
|
||||
Some(Engine::Containerd(containerd)) => containerd.run().await,
|
||||
|
|
|
@ -22,18 +22,16 @@ tracing.workspace = true
|
|||
prost-wkt-types.workspace = true
|
||||
tokio.workspace = true
|
||||
tokio-util.workspace = true
|
||||
sha2.workspace = true
|
||||
crc32fast.workspace = true
|
||||
fs2.workspace = true
|
||||
lru.workspace = true
|
||||
bytes.workspace = true
|
||||
bytesize.workspace = true
|
||||
num_cpus = "1.0"
|
||||
num_cpus = "1.17"
|
||||
bincode = "1.3.3"
|
||||
rayon = "1.10.0"
|
||||
walkdir = "2.5.0"
|
||||
|
||||
[dev-dependencies]
|
||||
tempdir = "0.3"
|
||||
tempfile.workspace = true
|
||||
criterion = "0.5"
|
||||
|
||||
[[bench]]
|
||||
|
|
|
@ -76,31 +76,33 @@ impl Task {
|
|||
/// Cache is the cache for storing piece content by LRU algorithm.
|
||||
///
|
||||
/// Cache storage:
|
||||
/// 1. Users can create preheating jobs and preheat tasks to memory and disk by setting `load_to_cache` to `true`.
|
||||
/// For more details, refer to https://github.com/dragonflyoss/api/blob/main/proto/common.proto#L443.
|
||||
/// 1. Users can preheat a task by caching it to memory (via CacheTask) or to disk (via Task).
|
||||
/// For more details, refer to https://github.com/dragonflyoss/api/blob/main/proto/dfdaemon.proto#L174.
|
||||
/// 2. If the download hits the memory cache, it will be faster than reading from the disk, because there is no
|
||||
/// page cache for the first read.
|
||||
///
|
||||
/// ```text
|
||||
/// 1.Preheat
|
||||
/// |
|
||||
/// |
|
||||
/// +--------------------------------------------------+
|
||||
/// | | Peer |
|
||||
/// | | +-----------+ |
|
||||
/// | | -- Partial -->| Cache | |
|
||||
/// | | | +-----------+ |
|
||||
/// | v | | | |
|
||||
/// | Download | Miss | |
|
||||
/// | Task -->| | --- Hit ------>|<-- 2.Download
|
||||
/// | | | ^ |
|
||||
/// | | v | |
|
||||
/// | | +-----------+ | |
|
||||
/// | -- Full -->| Disk |---------- |
|
||||
/// | +-----------+ |
|
||||
/// | |
|
||||
/// +--------------------------------------------------+
|
||||
/// ```
|
||||
///```text
|
||||
/// +--------+
|
||||
/// │ Source │
|
||||
/// +--------+
|
||||
/// ^ ^ Preheat
|
||||
/// │ │ |
|
||||
/// +-----------------+ │ │ +----------------------------+
|
||||
/// │ Other Peers │ │ │ │ Peer | │
|
||||
/// │ │ │ │ │ v │
|
||||
/// │ +----------+ │ │ │ │ +----------+ │
|
||||
/// │ │ Cache |<--|----------|<-Miss--| Cache |--Hit-->|<----Download CacheTask
|
||||
/// │ +----------+ │ │ │ +----------+ │
|
||||
/// │ │ │ │ │
|
||||
/// │ +----------+ │ │ │ +----------+ │
|
||||
/// │ │ Disk |<--|----------|<-Miss--| Disk |--Hit-->|<----Download Task
|
||||
/// │ +----------+ │ │ +----------+ │
|
||||
/// │ │ │ ^ │
|
||||
/// │ │ │ | │
|
||||
/// +-----------------+ +----------------------------+
|
||||
/// |
|
||||
/// Preheat
|
||||
///```
|
||||
/// Task is the metadata of the task.
|
||||
#[derive(Clone)]
|
||||
pub struct Cache {
|
||||
|
|
|
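The diagram above describes a two-level lookup: the in-memory piece cache is consulted first and falls back to disk on a miss. The eviction side is a plain LRU; a self-contained sketch of that behavior using the `lru` crate already listed in the dependencies (capacity here is a count of entries for brevity, while the real cache is sized in bytes):

```rust
use lru::LruCache;
use std::num::NonZeroUsize;

fn main() {
    // Two-entry LRU standing in for the piece cache.
    let mut cache: LruCache<String, Vec<u8>> = LruCache::new(NonZeroUsize::new(2).unwrap());
    cache.put("piece-0".to_string(), b"hello".to_vec());
    cache.put("piece-1".to_string(), b"world".to_vec());

    // Reading piece-0 marks it as recently used, so piece-1 is evicted next.
    assert!(cache.get("piece-0").is_some());
    cache.put("piece-2".to_string(), b"again".to_vec());
    assert!(cache.get("piece-1").is_none());
    assert!(cache.get("piece-0").is_some());
}
```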
@ -14,9 +14,11 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
use bytesize::ByteSize;
|
||||
use dragonfly_api::common::v2::Range;
|
||||
use dragonfly_client_config::dfdaemon::Config;
|
||||
use dragonfly_client_core::{Error, Result};
|
||||
use dragonfly_client_util::fs::fallocate;
|
||||
use std::cmp::{max, min};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
@ -26,6 +28,7 @@ use tokio::io::{
|
|||
};
|
||||
use tokio_util::io::InspectReader;
|
||||
use tracing::{error, info, instrument, warn};
|
||||
use walkdir::WalkDir;
|
||||
|
||||
/// DEFAULT_CONTENT_DIR is the default directory for store content.
|
||||
pub const DEFAULT_CONTENT_DIR: &str = "content";
|
||||
|
@ -66,7 +69,6 @@ pub struct WritePersistentCacheTaskResponse {
|
|||
/// Content implements the content storage.
|
||||
impl Content {
|
||||
/// new returns a new content.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn new(config: Arc<Config>, dir: &Path) -> Result<Content> {
|
||||
let dir = dir.join(DEFAULT_CONTENT_DIR);
|
||||
|
||||
|
@ -84,21 +86,45 @@ impl Content {
|
|||
}
|
||||
|
||||
/// available_space returns the available space of the disk.
|
||||
#[instrument(skip_all)]
|
||||
pub fn available_space(&self) -> Result<u64> {
|
||||
let dist_threshold = self.config.gc.policy.dist_threshold;
|
||||
if dist_threshold != ByteSize::default() {
|
||||
let usage_space = WalkDir::new(&self.dir)
|
||||
.into_iter()
|
||||
.filter_map(|entry| entry.ok())
|
||||
.filter_map(|entry| entry.metadata().ok())
|
||||
.filter(|metadata| metadata.is_file())
|
||||
.fold(0, |acc, m| acc + m.len());
|
||||
|
||||
if usage_space >= dist_threshold.as_u64() {
|
||||
warn!(
|
||||
"usage space {} is greater than dist threshold {}, no need to calculate available space",
|
||||
usage_space, dist_threshold
|
||||
);
|
||||
|
||||
return Ok(0);
|
||||
}
|
||||
|
||||
return Ok(dist_threshold.as_u64() - usage_space);
|
||||
}
|
||||
|
||||
let stat = fs2::statvfs(&self.dir)?;
|
||||
Ok(stat.available_space())
|
||||
}
|
||||
|
||||
/// total_space returns the total space of the disk.
|
||||
#[instrument(skip_all)]
|
||||
pub fn total_space(&self) -> Result<u64> {
|
||||
// If the dist_threshold is set, return it directly.
|
||||
let dist_threshold = self.config.gc.policy.dist_threshold;
|
||||
if dist_threshold != ByteSize::default() {
|
||||
return Ok(dist_threshold.as_u64());
|
||||
}
|
||||
|
||||
let stat = fs2::statvfs(&self.dir)?;
|
||||
Ok(stat.total_space())
|
||||
}
|
||||
|
||||
/// has_enough_space checks if the storage has enough space to store the content.
|
||||
#[instrument(skip_all)]
|
||||
pub fn has_enough_space(&self, content_length: u64) -> Result<bool> {
|
||||
let available_space = self.available_space()?;
|
||||
if available_space < content_length {
|
||||
|
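In short: when `dist_threshold` is configured, it acts as a hard quota computed against the walked size of the content directory; otherwise the filesystem statistics from `fs2::statvfs` are used. A small standalone sketch of the quota branch, with the walked usage passed in directly:

```rust
use bytesize::ByteSize;

/// Sketch of the quota branch above: remaining bytes under the configured
/// threshold, clamped at zero when usage already exceeds it.
fn available_under_threshold(dist_threshold: ByteSize, usage_space: u64) -> u64 {
    dist_threshold.as_u64().saturating_sub(usage_space)
}

fn main() {
    let threshold = ByteSize::mib(10);
    assert_eq!(
        available_under_threshold(threshold, ByteSize::mib(4).as_u64()),
        ByteSize::mib(6).as_u64()
    );
    assert_eq!(available_under_threshold(threshold, ByteSize::mib(12).as_u64()), 0);
}
```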
@ -114,7 +140,6 @@ impl Content {
|
|||
}
|
||||
|
||||
/// is_same_dev_inode checks if the source and target are the same device and inode.
|
||||
#[instrument(skip_all)]
|
||||
async fn is_same_dev_inode<P: AsRef<Path>, Q: AsRef<Path>>(
|
||||
&self,
|
||||
source: P,
|
||||
|
@ -140,7 +165,6 @@ impl Content {
|
|||
}
|
||||
|
||||
/// is_same_dev_inode_as_task checks if the task and target are the same device and inode.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn is_same_dev_inode_as_task(&self, task_id: &str, to: &Path) -> Result<bool> {
|
||||
let task_path = self.get_task_path(task_id);
|
||||
self.is_same_dev_inode(&task_path, to).await
|
||||
|
@ -151,7 +175,8 @@ impl Content {
|
|||
/// Behavior of `create_task`:
|
||||
/// 1. If the task already exists, return the task path.
|
||||
/// 2. If the task does not exist, create the task directory and file.
|
||||
pub async fn create_task(&self, task_id: &str) -> Result<PathBuf> {
|
||||
#[instrument(skip_all)]
|
||||
pub async fn create_task(&self, task_id: &str, length: u64) -> Result<PathBuf> {
|
||||
let task_path = self.get_task_path(task_id);
|
||||
if task_path.exists() {
|
||||
return Ok(task_path);
|
||||
|
@ -162,12 +187,16 @@ impl Content {
|
|||
error!("create {:?} failed: {}", task_dir, err);
|
||||
})?;
|
||||
|
||||
fs::File::create(task_dir.join(task_id))
|
||||
let f = fs::File::create(task_dir.join(task_id))
|
||||
.await
|
||||
.inspect_err(|err| {
|
||||
error!("create {:?} failed: {}", task_dir, err);
|
||||
})?;
|
||||
|
||||
fallocate(&f, length).await.inspect_err(|err| {
|
||||
error!("fallocate {:?} failed: {}", task_dir, err);
|
||||
})?;
|
||||
|
||||
Ok(task_dir.join(task_id))
|
||||
}
|
||||
|
||||
|
@ -239,7 +268,6 @@ impl Content {
|
|||
}
|
||||
|
||||
/// delete_task deletes the task content.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn delete_task(&self, task_id: &str) -> Result<()> {
|
||||
info!("delete task content: {}", task_id);
|
||||
let task_path = self.get_task_path(task_id);
|
||||
|
@ -331,6 +359,7 @@ impl Content {
|
|||
&self,
|
||||
task_id: &str,
|
||||
offset: u64,
|
||||
expected_length: u64,
|
||||
reader: &mut R,
|
||||
) -> Result<WritePieceResponse> {
|
||||
// Open the file and seek to the offset.
|
||||
|
@ -365,6 +394,13 @@ impl Content {
|
|||
error!("flush {:?} failed: {}", task_path, err);
|
||||
})?;
|
||||
|
||||
if length != expected_length {
|
||||
return Err(Error::Unknown(format!(
|
||||
"expected length {} but got {}",
|
||||
expected_length, length
|
||||
)));
|
||||
}
|
||||
|
||||
// Calculate the hash of the piece.
|
||||
Ok(WritePieceResponse {
|
||||
length,
|
||||
|
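The length check above guards against truncated writes; the hash comes from teeing the stream while it is copied to disk. A self-contained sketch of that bookkeeping, reading into a `Vec` instead of the task file (a simplification of what `write_piece` does, not the function itself):

```rust
use tokio::io::AsyncReadExt;
use tokio_util::io::InspectReader;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let data: &[u8] = b"hello, world!";
    let expected_length = 13u64;

    let mut hasher = crc32fast::Hasher::new();
    let mut length = 0u64;
    let mut tee = InspectReader::new(data, |bytes| {
        hasher.update(bytes);
        length += bytes.len() as u64;
    });

    // Drain the reader as the write path would while streaming to disk.
    let mut sink = Vec::new();
    tee.read_to_end(&mut sink).await?;
    drop(tee);

    assert_eq!(length, expected_length);
    println!("crc32: {}", hasher.finalize());
    Ok(())
}
```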
@ -373,7 +409,6 @@ impl Content {
|
|||
}
|
||||
|
||||
/// get_task_path returns the task path by task id.
|
||||
#[instrument(skip_all)]
|
||||
fn get_task_path(&self, task_id: &str) -> PathBuf {
|
||||
// The task needs to be split by the first 3 characters of the task id (sha256) to
|
||||
// avoid too many files in one directory.
|
||||
|
@ -383,7 +418,6 @@ impl Content {
|
|||
|
||||
/// is_same_dev_inode_as_persistent_cache_task checks if the persistent cache task and target
|
||||
/// are the same device and inode.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn is_same_dev_inode_as_persistent_cache_task(
|
||||
&self,
|
||||
task_id: &str,
|
||||
|
@ -398,7 +432,12 @@ impl Content {
|
|||
/// Behavior of `create_persistent_cache_task`:
|
||||
/// 1. If the persistent cache task already exists, return the persistent cache task path.
|
||||
/// 2. If the persistent cache task does not exist, create the persistent cache task directory and file.
|
||||
pub async fn create_persistent_cache_task(&self, task_id: &str) -> Result<PathBuf> {
|
||||
#[instrument(skip_all)]
|
||||
pub async fn create_persistent_cache_task(
|
||||
&self,
|
||||
task_id: &str,
|
||||
length: u64,
|
||||
) -> Result<PathBuf> {
|
||||
let task_path = self.get_persistent_cache_task_path(task_id);
|
||||
if task_path.exists() {
|
||||
return Ok(task_path);
|
||||
|
@ -412,12 +451,16 @@ impl Content {
|
|||
error!("create {:?} failed: {}", task_dir, err);
|
||||
})?;
|
||||
|
||||
fs::File::create(task_dir.join(task_id))
|
||||
let f = fs::File::create(task_dir.join(task_id))
|
||||
.await
|
||||
.inspect_err(|err| {
|
||||
error!("create {:?} failed: {}", task_dir, err);
|
||||
})?;
|
||||
|
||||
fallocate(&f, length).await.inspect_err(|err| {
|
||||
error!("fallocate {:?} failed: {}", task_dir, err);
|
||||
})?;
|
||||
|
||||
Ok(task_dir.join(task_id))
|
||||
}
|
||||
|
||||
|
@ -538,6 +581,7 @@ impl Content {
|
|||
&self,
|
||||
task_id: &str,
|
||||
offset: u64,
|
||||
expected_length: u64,
|
||||
reader: &mut R,
|
||||
) -> Result<WritePieceResponse> {
|
||||
// Open the file and seek to the offset.
|
||||
|
@ -572,6 +616,13 @@ impl Content {
|
|||
error!("flush {:?} failed: {}", task_path, err);
|
||||
})?;
|
||||
|
||||
if length != expected_length {
|
||||
return Err(Error::Unknown(format!(
|
||||
"expected length {} but got {}",
|
||||
expected_length, length
|
||||
)));
|
||||
}
|
||||
|
||||
// Calculate the hash of the piece.
|
||||
Ok(WritePieceResponse {
|
||||
length,
|
||||
|
@ -580,7 +631,6 @@ impl Content {
|
|||
}
|
||||
|
||||
/// delete_task deletes the persistent cache task content.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn delete_persistent_cache_task(&self, task_id: &str) -> Result<()> {
|
||||
info!("delete persistent cache task content: {}", task_id);
|
||||
let persistent_cache_task_path = self.get_persistent_cache_task_path(task_id);
|
||||
|
@ -593,7 +643,6 @@ impl Content {
|
|||
}
|
||||
|
||||
/// get_persistent_cache_task_path returns the persistent cache task path by task id.
|
||||
#[instrument(skip_all)]
|
||||
fn get_persistent_cache_task_path(&self, task_id: &str) -> PathBuf {
|
||||
// The persistent cache task needs to be split by the first 3 characters of the task id (sha256) to
|
||||
// avoid too many files in one directory.
|
||||
|
@ -621,31 +670,31 @@ pub fn calculate_piece_range(offset: u64, length: u64, range: Option<Range>) ->
|
|||
mod tests {
|
||||
use super::*;
|
||||
use std::io::Cursor;
|
||||
use tempdir::TempDir;
|
||||
use tempfile::tempdir;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_create_task() {
|
||||
let temp_dir = TempDir::new("content").unwrap();
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let config = Arc::new(Config::default());
|
||||
let content = Content::new(config, temp_dir.path()).await.unwrap();
|
||||
|
||||
let task_id = "60409bd0ec44160f44c53c39b3fe1c5fdfb23faded0228c68bee83bc15a200e3";
|
||||
let task_path = content.create_task(task_id).await.unwrap();
|
||||
let task_path = content.create_task(task_id, 0).await.unwrap();
|
||||
assert!(task_path.exists());
|
||||
assert_eq!(task_path, temp_dir.path().join("content/tasks/604/60409bd0ec44160f44c53c39b3fe1c5fdfb23faded0228c68bee83bc15a200e3"));
|
||||
|
||||
let task_path_exists = content.create_task(task_id).await.unwrap();
|
||||
let task_path_exists = content.create_task(task_id, 0).await.unwrap();
|
||||
assert_eq!(task_path, task_path_exists);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_hard_link_task() {
|
||||
let temp_dir = TempDir::new("content").unwrap();
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let config = Arc::new(Config::default());
|
||||
let content = Content::new(config, temp_dir.path()).await.unwrap();
|
||||
|
||||
let task_id = "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4";
|
||||
content.create_task(task_id).await.unwrap();
|
||||
content.create_task(task_id, 0).await.unwrap();
|
||||
|
||||
let to = temp_dir
|
||||
.path()
|
||||
|
@ -658,12 +707,12 @@ mod tests {
|
|||
|
||||
#[tokio::test]
|
||||
async fn test_copy_task() {
|
||||
let temp_dir = TempDir::new("content").unwrap();
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let config = Arc::new(Config::default());
|
||||
let content = Content::new(config, temp_dir.path()).await.unwrap();
|
||||
|
||||
let task_id = "bfd3c02fb31a7373e25b405fd5fd3082987ccfbaf210889153af9e65bbf13002";
|
||||
content.create_task(task_id).await.unwrap();
|
||||
content.create_task(task_id, 64).await.unwrap();
|
||||
|
||||
let to = temp_dir
|
||||
.path()
|
||||
|
@ -674,12 +723,12 @@ mod tests {
|
|||
|
||||
#[tokio::test]
|
||||
async fn test_delete_task() {
|
||||
let temp_dir = TempDir::new("content").unwrap();
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let config = Arc::new(Config::default());
|
||||
let content = Content::new(config, temp_dir.path()).await.unwrap();
|
||||
|
||||
let task_id = "4e19f03b0fceb38f23ff4f657681472a53ef335db3660ae5494912570b7a2bb7";
|
||||
let task_path = content.create_task(task_id).await.unwrap();
|
||||
let task_path = content.create_task(task_id, 0).await.unwrap();
|
||||
assert!(task_path.exists());
|
||||
|
||||
content.delete_task(task_id).await.unwrap();
|
||||
|
@ -688,16 +737,19 @@ mod tests {
|
|||
|
||||
#[tokio::test]
|
||||
async fn test_read_piece() {
|
||||
let temp_dir = TempDir::new("content").unwrap();
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let config = Arc::new(Config::default());
|
||||
let content = Content::new(config, temp_dir.path()).await.unwrap();
|
||||
|
||||
let task_id = "c794a3bbae81e06d1c8d362509bdd42a7c105b0fb28d80ffe27f94b8f04fc845";
|
||||
content.create_task(task_id).await.unwrap();
|
||||
content.create_task(task_id, 13).await.unwrap();
|
||||
|
||||
let data = b"hello, world!";
|
||||
let mut reader = Cursor::new(data);
|
||||
content.write_piece(task_id, 0, &mut reader).await.unwrap();
|
||||
content
|
||||
.write_piece(task_id, 0, 13, &mut reader)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut reader = content.read_piece(task_id, 0, 13, None).await.unwrap();
|
||||
let mut buffer = Vec::new();
|
||||
|
@ -723,43 +775,55 @@ mod tests {
|
|||
|
||||
#[tokio::test]
|
||||
async fn test_write_piece() {
|
||||
let temp_dir = TempDir::new("content").unwrap();
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let config = Arc::new(Config::default());
|
||||
let content = Content::new(config, temp_dir.path()).await.unwrap();
|
||||
|
||||
let task_id = "60b48845606946cea72084f14ed5cce61ec96e69f80a30f891a6963dccfd5b4f";
|
||||
content.create_task(task_id).await.unwrap();
|
||||
content.create_task(task_id, 4).await.unwrap();
|
||||
|
||||
let data = b"test";
|
||||
let mut reader = Cursor::new(data);
|
||||
let response = content.write_piece(task_id, 0, &mut reader).await.unwrap();
|
||||
let response = content
|
||||
.write_piece(task_id, 0, 4, &mut reader)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(response.length, 4);
|
||||
assert!(!response.hash.is_empty());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_create_persistent_task() {
|
||||
let temp_dir = TempDir::new("content").unwrap();
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let config = Arc::new(Config::default());
|
||||
let content = Content::new(config, temp_dir.path()).await.unwrap();
|
||||
|
||||
let task_id = "c4f108ab1d2b8cfdffe89ea9676af35123fa02e3c25167d62538f630d5d44745";
|
||||
let task_path = content.create_persistent_cache_task(task_id).await.unwrap();
|
||||
let task_path = content
|
||||
.create_persistent_cache_task(task_id, 0)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(task_path.exists());
|
||||
assert_eq!(task_path, temp_dir.path().join("content/persistent-cache-tasks/c4f/c4f108ab1d2b8cfdffe89ea9676af35123fa02e3c25167d62538f630d5d44745"));
|
||||
|
||||
let task_path_exists = content.create_persistent_cache_task(task_id).await.unwrap();
|
||||
let task_path_exists = content
|
||||
.create_persistent_cache_task(task_id, 0)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(task_path, task_path_exists);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_hard_link_persistent_cache_task() {
|
||||
let temp_dir = TempDir::new("content").unwrap();
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let config = Arc::new(Config::default());
|
||||
let content = Content::new(config, temp_dir.path()).await.unwrap();
|
||||
|
||||
let task_id = "5e81970eb2b048910cc84cab026b951f2ceac0a09c72c0717193bb6e466e11cd";
|
||||
content.create_persistent_cache_task(task_id).await.unwrap();
|
||||
content
|
||||
.create_persistent_cache_task(task_id, 0)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let to = temp_dir
|
||||
.path()
|
||||
|
@ -778,12 +842,15 @@ mod tests {
|
|||
|
||||
#[tokio::test]
|
||||
async fn test_copy_persistent_cache_task() {
|
||||
let temp_dir = TempDir::new("content").unwrap();
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let config = Arc::new(Config::default());
|
||||
let content = Content::new(config, temp_dir.path()).await.unwrap();
|
||||
|
||||
let task_id = "194b9c2018429689fb4e596a506c7e9db564c187b9709b55b33b96881dfb6dd5";
|
||||
content.create_persistent_cache_task(task_id).await.unwrap();
|
||||
content
|
||||
.create_persistent_cache_task(task_id, 64)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let to = temp_dir
|
||||
.path()
|
||||
|
@ -797,12 +864,15 @@ mod tests {
|
|||
|
||||
#[tokio::test]
|
||||
async fn test_delete_persistent_cache_task() {
|
||||
let temp_dir = TempDir::new("content").unwrap();
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let config = Arc::new(Config::default());
|
||||
let content = Content::new(config, temp_dir.path()).await.unwrap();
|
||||
|
||||
let task_id = "17430ba545c3ce82790e9c9f77e64dca44bb6d6a0c9e18be175037c16c73713d";
|
||||
let task_path = content.create_persistent_cache_task(task_id).await.unwrap();
|
||||
let task_path = content
|
||||
.create_persistent_cache_task(task_id, 0)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(task_path.exists());
|
||||
|
||||
content.delete_persistent_cache_task(task_id).await.unwrap();
|
||||
|
@ -811,17 +881,20 @@ mod tests {
|
|||
|
||||
#[tokio::test]
|
||||
async fn test_read_persistent_cache_piece() {
|
||||
let temp_dir = TempDir::new("content").unwrap();
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let config = Arc::new(Config::default());
|
||||
let content = Content::new(config, temp_dir.path()).await.unwrap();
|
||||
|
||||
let task_id = "9cb27a4af09aee4eb9f904170217659683f4a0ea7cd55e1a9fbcb99ddced659a";
|
||||
content.create_persistent_cache_task(task_id).await.unwrap();
|
||||
content
|
||||
.create_persistent_cache_task(task_id, 13)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let data = b"hello, world!";
|
||||
let mut reader = Cursor::new(data);
|
||||
content
|
||||
.write_persistent_cache_piece(task_id, 0, &mut reader)
|
||||
.write_persistent_cache_piece(task_id, 0, 13, &mut reader)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
|
@ -852,17 +925,20 @@ mod tests {
|
|||
|
||||
#[tokio::test]
|
||||
async fn test_write_persistent_cache_piece() {
|
||||
let temp_dir = TempDir::new("content").unwrap();
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let config = Arc::new(Config::default());
|
||||
let content = Content::new(config, temp_dir.path()).await.unwrap();
|
||||
|
||||
let task_id = "ca1afaf856e8a667fbd48093ca3ca1b8eeb4bf735912fbe551676bc5817a720a";
|
||||
content.create_persistent_cache_task(task_id).await.unwrap();
|
||||
content
|
||||
.create_persistent_cache_task(task_id, 4)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let data = b"test";
|
||||
let mut reader = Cursor::new(data);
|
||||
let response = content
|
||||
.write_persistent_cache_piece(task_id, 0, &mut reader)
|
||||
.write_persistent_cache_piece(task_id, 0, 4, &mut reader)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(response.length, 4);
|
||||
|
@ -872,14 +948,36 @@ mod tests {
|
|||
#[tokio::test]
|
||||
async fn test_has_enough_space() {
|
||||
let config = Arc::new(Config::default());
|
||||
let dir = PathBuf::from("/tmp/dragonfly_test");
|
||||
let content = Content::new(config, &dir).await.unwrap();
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let content = Content::new(config, temp_dir.path()).await.unwrap();
|
||||
|
||||
let has_space = content.has_enough_space(1).unwrap();
|
||||
assert!(has_space);
|
||||
|
||||
let has_space = content.has_enough_space(u64::MAX).unwrap();
|
||||
assert!(!has_space);
|
||||
|
||||
let mut config = Config::default();
|
||||
config.gc.policy.dist_threshold = ByteSize::mib(10);
|
||||
let config = Arc::new(config);
|
||||
let content = Content::new(config, temp_dir.path()).await.unwrap();
|
||||
|
||||
let file_path = Path::new(temp_dir.path())
|
||||
.join(DEFAULT_CONTENT_DIR)
|
||||
.join(DEFAULT_TASK_DIR)
|
||||
.join("1mib");
|
||||
let mut file = File::create(&file_path).await.unwrap();
|
||||
let buffer = vec![0u8; ByteSize::mib(1).as_u64() as usize];
|
||||
file.write_all(&buffer).await.unwrap();
|
||||
file.flush().await.unwrap();
|
||||
|
||||
let has_space = content
|
||||
.has_enough_space(ByteSize::mib(9).as_u64() + 1)
|
||||
.unwrap();
|
||||
assert!(!has_space);
|
||||
|
||||
let has_space = content.has_enough_space(ByteSize::mib(9).as_u64()).unwrap();
|
||||
assert!(has_space);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
|
|
@ -25,9 +25,9 @@ use std::path::PathBuf;
|
|||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::io::AsyncRead;
|
||||
use tokio::time::sleep;
|
||||
use tokio_util::either::Either;
|
||||
use tokio_util::io::InspectReader;
|
||||
use tracing::{debug, error, instrument, warn};
|
||||
use tracing::{debug, error, info, instrument, warn};
|
||||
|
||||
pub mod cache;
|
||||
pub mod content;
|
||||
|
@ -55,7 +55,6 @@ pub struct Storage {
|
|||
/// Storage implements the storage.
|
||||
impl Storage {
|
||||
/// new returns a new storage.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn new(config: Arc<Config>, dir: &Path, log_dir: PathBuf) -> Result<Self> {
|
||||
let metadata = metadata::Metadata::new(config.clone(), dir, &log_dir)?;
|
||||
let content = content::Content::new(config.clone(), dir).await?;
|
||||
|
@ -70,19 +69,16 @@ impl Storage {
|
|||
}
|
||||
|
||||
/// total_space returns the total space of the disk.
|
||||
#[instrument(skip_all)]
|
||||
pub fn total_space(&self) -> Result<u64> {
|
||||
self.content.total_space()
|
||||
}
|
||||
|
||||
/// available_space returns the available space of the disk.
|
||||
#[instrument(skip_all)]
|
||||
pub fn available_space(&self) -> Result<u64> {
|
||||
self.content.available_space()
|
||||
}
|
||||
|
||||
/// has_enough_space checks if the storage has enough space to store the content.
|
||||
#[instrument(skip_all)]
|
||||
pub fn has_enough_space(&self, content_length: u64) -> Result<bool> {
|
||||
self.content.has_enough_space(content_length)
|
||||
}
|
||||
|
@ -101,39 +97,34 @@ impl Storage {
|
|||
|
||||
/// is_same_dev_inode_as_task checks if the task content is on the same device inode as the
|
||||
/// destination.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn is_same_dev_inode_as_task(&self, id: &str, to: &Path) -> Result<bool> {
|
||||
self.content.is_same_dev_inode_as_task(id, to).await
|
||||
}
|
||||
|
||||
/// prepare_download_task_started prepares the metadata of the task when the task downloads
|
||||
/// started.
|
||||
pub async fn prepare_download_task_started(&self, id: &str) -> Result<metadata::Task> {
|
||||
self.metadata.download_task_started(id, None, None, None)
|
||||
}
|
||||
|
||||
/// download_task_started updates the metadata of the task and create task content
|
||||
/// when the task downloads started.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn download_task_started(
|
||||
&self,
|
||||
id: &str,
|
||||
piece_length: Option<u64>,
|
||||
content_length: Option<u64>,
|
||||
piece_length: u64,
|
||||
content_length: u64,
|
||||
response_header: Option<HeaderMap>,
|
||||
load_to_cache: bool,
|
||||
) -> Result<metadata::Task> {
|
||||
let metadata = self.metadata.download_task_started(
|
||||
self.content.create_task(id, content_length).await?;
|
||||
|
||||
self.metadata.download_task_started(
|
||||
id,
|
||||
piece_length,
|
||||
content_length,
|
||||
Some(piece_length),
|
||||
Some(content_length),
|
||||
response_header,
|
||||
)?;
|
||||
|
||||
self.content.create_task(id).await?;
|
||||
if load_to_cache {
|
||||
if let Some(content_length) = content_length {
|
||||
let mut cache = self.cache.clone();
|
||||
cache.put_task(id, content_length).await;
|
||||
debug!("put task to cache: {}", id);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(metadata)
|
||||
)
|
||||
}
|
||||
|
||||
/// download_task_finished updates the metadata of the task when the task downloads finished.
|
||||
|
@ -201,7 +192,7 @@ impl Storage {
|
|||
|
||||
let mut cache = self.cache.clone();
|
||||
cache.delete_task(id).await.unwrap_or_else(|err| {
|
||||
error!("delete task from cache failed: {}", err);
|
||||
info!("delete task from cache failed: {}", err);
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -221,7 +212,6 @@ impl Storage {
|
|||
|
||||
/// is_same_dev_inode_as_persistent_cache_task checks if the persistent cache task content is on the same device inode as the
|
||||
/// destination.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn is_same_dev_inode_as_persistent_cache_task(
|
||||
&self,
|
||||
id: &str,
|
||||
|
@ -248,7 +238,9 @@ impl Storage {
|
|||
content_length,
|
||||
)?;
|
||||
|
||||
self.content.create_persistent_cache_task(id).await?;
|
||||
self.content
|
||||
.create_persistent_cache_task(id, content_length)
|
||||
.await?;
|
||||
Ok(metadata)
|
||||
}
|
||||
|
||||
|
@ -290,7 +282,9 @@ impl Storage {
|
|||
created_at,
|
||||
)?;
|
||||
|
||||
self.content.create_persistent_cache_task(id).await?;
|
||||
self.content
|
||||
.create_persistent_cache_task(id, content_length)
|
||||
.await?;
|
||||
Ok(metadata)
|
||||
}
|
||||
|
||||
|
@ -382,7 +376,7 @@ impl Storage {
|
|||
) -> Result<metadata::Piece> {
|
||||
let response = self
|
||||
.content
|
||||
.write_persistent_cache_piece(task_id, offset, reader)
|
||||
.write_persistent_cache_piece(task_id, offset, length, reader)
|
||||
.await?;
|
||||
let digest = Digest::new(Algorithm::Crc32, response.hash);
|
||||
|
||||
|
@ -412,6 +406,7 @@ impl Storage {
|
|||
}
|
||||
|
||||
/// download_piece_from_source_finished is used for downloading piece from source.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all)]
|
||||
pub async fn download_piece_from_source_finished<R: AsyncRead + Unpin + ?Sized>(
|
||||
&self,
|
||||
|
@ -420,25 +415,32 @@ impl Storage {
|
|||
offset: u64,
|
||||
length: u64,
|
||||
reader: &mut R,
|
||||
load_to_cache: bool,
|
||||
timeout: Duration,
|
||||
) -> Result<metadata::Piece> {
|
||||
let response = if load_to_cache {
|
||||
let mut buffer = Vec::with_capacity(length as usize);
|
||||
let mut tee = InspectReader::new(reader, |bytes| {
|
||||
buffer.extend_from_slice(bytes);
|
||||
});
|
||||
tokio::select! {
|
||||
piece = self.handle_downloaded_from_source_finished(piece_id, task_id, offset, length, reader) => {
|
||||
piece
|
||||
}
|
||||
_ = sleep(timeout) => {
|
||||
Err(Error::DownloadPieceFinished(piece_id.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let response = self.content.write_piece(task_id, offset, &mut tee).await?;
|
||||
|
||||
self.cache
|
||||
.write_piece(task_id, piece_id, bytes::Bytes::from(buffer))
|
||||
.await?;
|
||||
debug!("put piece to cache: {}", piece_id);
|
||||
|
||||
response
|
||||
} else {
|
||||
self.content.write_piece(task_id, offset, reader).await?
|
||||
};
|
||||
// handle_downloaded_from_source_finished handles the downloaded piece from source.
|
||||
#[instrument(skip_all)]
|
||||
async fn handle_downloaded_from_source_finished<R: AsyncRead + Unpin + ?Sized>(
|
||||
&self,
|
||||
piece_id: &str,
|
||||
task_id: &str,
|
||||
offset: u64,
|
||||
length: u64,
|
||||
reader: &mut R,
|
||||
) -> Result<metadata::Piece> {
|
||||
let response = self
|
||||
.content
|
||||
.write_piece(task_id, offset, length, reader)
|
||||
.await?;
|
||||
|
||||
let digest = Digest::new(Algorithm::Crc32, response.hash);
|
||||
self.metadata.download_piece_finished(
|
||||
|
@ -462,25 +464,35 @@ impl Storage {
|
|||
expected_digest: &str,
|
||||
parent_id: &str,
|
||||
reader: &mut R,
|
||||
load_to_cache: bool,
|
||||
timeout: Duration,
|
||||
) -> Result<metadata::Piece> {
|
||||
let response = if load_to_cache {
|
||||
let mut buffer = Vec::with_capacity(length as usize);
|
||||
let mut tee = InspectReader::new(reader, |bytes| {
|
||||
buffer.extend_from_slice(bytes);
|
||||
});
|
||||
tokio::select! {
|
||||
piece = self.handle_downloaded_piece_from_parent_finished(piece_id, task_id, offset, length, expected_digest, parent_id, reader) => {
|
||||
piece
|
||||
}
|
||||
_ = sleep(timeout) => {
|
||||
Err(Error::DownloadPieceFinished(piece_id.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let response = self.content.write_piece(task_id, offset, &mut tee).await?;
|
||||
|
||||
self.cache
|
||||
.write_piece(task_id, piece_id, bytes::Bytes::from(buffer))
|
||||
.await?;
|
||||
debug!("put piece to cache: {}", piece_id);
|
||||
|
||||
response
|
||||
} else {
|
||||
self.content.write_piece(task_id, offset, reader).await?
|
||||
};
|
||||
// handle_downloaded_piece_from_parent_finished handles the downloaded piece from parent.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all)]
|
||||
async fn handle_downloaded_piece_from_parent_finished<R: AsyncRead + Unpin + ?Sized>(
|
||||
&self,
|
||||
piece_id: &str,
|
||||
task_id: &str,
|
||||
offset: u64,
|
||||
length: u64,
|
||||
expected_digest: &str,
|
||||
parent_id: &str,
|
||||
reader: &mut R,
|
||||
) -> Result<metadata::Piece> {
|
||||
let response = self
|
||||
.content
|
||||
.write_piece(task_id, offset, length, reader)
|
||||
.await?;
|
||||
|
||||
let length = response.length;
|
||||
let digest = Digest::new(Algorithm::Crc32, response.hash);
|
||||
|
@ -575,7 +587,6 @@ impl Storage {
|
|||
}
|
||||
|
||||
/// get_piece returns the piece metadata.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_piece(&self, piece_id: &str) -> Result<Option<metadata::Piece>> {
|
||||
self.metadata.get_piece(piece_id)
|
||||
}
|
||||
|
@ -587,13 +598,13 @@ impl Storage {
|
|||
}
|
||||
|
||||
/// get_pieces returns the piece metadatas.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_pieces(&self, task_id: &str) -> Result<Vec<metadata::Piece>> {
|
||||
self.metadata.get_pieces(task_id)
|
||||
}
|
||||
|
||||
/// piece_id returns the piece id.
|
||||
#[inline]
|
||||
#[instrument(skip_all)]
|
||||
pub fn piece_id(&self, task_id: &str, number: u32) -> String {
|
||||
self.metadata.piece_id(task_id, number)
|
||||
}
|
||||
|
@ -618,6 +629,7 @@ impl Storage {
|
|||
}
|
||||
|
||||
/// download_persistent_cache_piece_from_parent_finished is used for downloading persistent cache piece from parent.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all)]
|
||||
pub async fn download_persistent_cache_piece_from_parent_finished<
|
||||
R: AsyncRead + Unpin + ?Sized,
|
||||
|
@ -626,13 +638,14 @@ impl Storage {
|
|||
piece_id: &str,
|
||||
task_id: &str,
|
||||
offset: u64,
|
||||
length: u64,
|
||||
expected_digest: &str,
|
||||
parent_id: &str,
|
||||
reader: &mut R,
|
||||
) -> Result<metadata::Piece> {
|
||||
let response = self
|
||||
.content
|
||||
.write_persistent_cache_piece(task_id, offset, reader)
|
||||
.write_persistent_cache_piece(task_id, offset, length, reader)
|
||||
.await?;
|
||||
|
||||
let length = response.length;
|
||||
|
@ -731,7 +744,6 @@ impl Storage {
|
|||
|
||||
/// persistent_cache_piece_id returns the persistent cache piece id.
|
||||
#[inline]
|
||||
#[instrument(skip_all)]
|
||||
pub fn persistent_cache_piece_id(&self, task_id: &str, number: u32) -> String {
|
||||
self.metadata.piece_id(task_id, number)
|
||||
}
|
||||
|
@ -739,12 +751,12 @@ impl Storage {
|
|||
/// wait_for_piece_finished waits for the piece to be finished.
|
||||
#[instrument(skip_all)]
|
||||
async fn wait_for_piece_finished(&self, piece_id: &str) -> Result<metadata::Piece> {
|
||||
// Initialize the timeout of piece.
|
||||
let piece_timeout = tokio::time::sleep(self.config.download.piece_timeout);
|
||||
tokio::pin!(piece_timeout);
|
||||
// Total timeout for downloading a piece, combining the download time and the time to write to storage.
|
||||
let wait_timeout = tokio::time::sleep(
|
||||
self.config.download.piece_timeout + self.config.storage.write_piece_timeout,
|
||||
);
|
||||
tokio::pin!(wait_timeout);
|
||||
|
||||
// Initialize the interval of piece.
|
||||
let mut wait_for_piece_count = 0;
|
||||
let mut interval = tokio::time::interval(DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL);
|
||||
loop {
|
||||
tokio::select! {
|
||||
|
@ -758,13 +770,8 @@ impl Storage {
|
|||
debug!("wait piece finished success");
|
||||
return Ok(piece);
|
||||
}
|
||||
|
||||
if wait_for_piece_count > 0 {
|
||||
debug!("wait piece finished");
|
||||
}
|
||||
wait_for_piece_count += 1;
|
||||
}
|
||||
_ = &mut piece_timeout => {
|
||||
_ = &mut wait_timeout => {
|
||||
self.metadata.wait_for_piece_finished_failed(piece_id).unwrap_or_else(|err| error!("delete piece metadata failed: {}", err));
|
||||
return Err(Error::WaitForPieceFinishedTimeout(piece_id.to_string()));
|
||||
}
|
||||
|
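Both waiters now use a single combined deadline, the piece download timeout plus `storage.write_piece_timeout`, so a piece that finished downloading but is still being flushed to disk is not failed prematurely. The arithmetic, using the 20-second write timeout asserted in the config tests earlier and an assumed 30-second piece download timeout:

```rust
use std::time::Duration;

fn main() {
    let piece_timeout = Duration::from_secs(30); // assumed download.piece_timeout
    let write_piece_timeout = Duration::from_secs(20); // storage.write_piece_timeout default above
    let wait_timeout = piece_timeout + write_piece_timeout;
    assert_eq!(wait_timeout, Duration::from_secs(50));
}
```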
@ -778,12 +785,12 @@ impl Storage {
|
|||
&self,
|
||||
piece_id: &str,
|
||||
) -> Result<metadata::Piece> {
|
||||
// Initialize the timeout of piece.
|
||||
let piece_timeout = tokio::time::sleep(self.config.download.piece_timeout);
|
||||
tokio::pin!(piece_timeout);
|
||||
// Total timeout for downloading a piece, combining the download time and the time to write to storage.
|
||||
let wait_timeout = tokio::time::sleep(
|
||||
self.config.download.piece_timeout + self.config.storage.write_piece_timeout,
|
||||
);
|
||||
tokio::pin!(wait_timeout);
|
||||
|
||||
// Initialize the interval of piece.
|
||||
let mut wait_for_piece_count = 0;
|
||||
let mut interval = tokio::time::interval(DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL);
|
||||
loop {
|
||||
tokio::select! {
|
||||
|
@ -797,13 +804,8 @@ impl Storage {
|
|||
debug!("wait piece finished success");
|
||||
return Ok(piece);
|
||||
}
|
||||
|
||||
if wait_for_piece_count > 0 {
|
||||
debug!("wait piece finished");
|
||||
}
|
||||
wait_for_piece_count += 1;
|
||||
}
|
||||
_ = &mut piece_timeout => {
|
||||
_ = &mut wait_timeout => {
|
||||
self.metadata.wait_for_piece_finished_failed(piece_id).unwrap_or_else(|err| error!("delete piece metadata failed: {}", err));
|
||||
return Err(Error::WaitForPieceFinishedTimeout(piece_id.to_string()));
|
||||
}
|
||||
|
|
|
@ -18,7 +18,6 @@ use chrono::{NaiveDateTime, Utc};
|
|||
use dragonfly_client_config::dfdaemon::Config;
|
||||
use dragonfly_client_core::{Error, Result};
|
||||
use dragonfly_client_util::{digest, http::headermap_to_hashmap};
|
||||
use rayon::prelude::*;
|
||||
use reqwest::header::HeaderMap;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
|
@ -527,7 +526,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
.collect::<Result<Vec<Box<[u8]>>>>()?;
|
||||
|
||||
tasks
|
||||
.par_iter()
|
||||
.iter()
|
||||
.map(|task| Task::deserialize_from(task))
|
||||
.collect()
|
||||
}
|
||||
|
@ -841,7 +840,6 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
}
|
||||
|
||||
/// get_piece gets the piece metadata.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_piece(&self, piece_id: &str) -> Result<Option<Piece>> {
|
||||
self.db.get(piece_id.as_bytes())
|
||||
}
|
||||
|
@ -853,6 +851,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
}
|
||||
|
||||
/// get_pieces gets the piece metadatas.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_pieces(&self, task_id: &str) -> Result<Vec<Piece>> {
|
||||
let pieces = self
|
||||
.db
|
||||
|
@ -864,7 +863,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
.collect::<Result<Vec<Box<[u8]>>>>()?;
|
||||
|
||||
pieces
|
||||
.par_iter()
|
||||
.iter()
|
||||
.map(|piece| Piece::deserialize_from(piece))
|
||||
.collect()
|
||||
}
|
||||
|
@ -889,7 +888,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
.collect::<Result<Vec<Box<[u8]>>>>()?;
|
||||
|
||||
let piece_ids_refs = piece_ids
|
||||
.par_iter()
|
||||
.iter()
|
||||
.map(|id| {
|
||||
let id_ref = id.as_ref();
|
||||
info!(
|
||||
|
@ -907,7 +906,6 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
|
||||
/// piece_id returns the piece id.
|
||||
#[inline]
|
||||
#[instrument(skip_all)]
|
||||
pub fn piece_id(&self, task_id: &str, number: u32) -> String {
|
||||
format!("{}-{}", task_id, number)
|
||||
}
|
||||
|
@ -940,7 +938,7 @@ impl Metadata<RocksdbStorageEngine> {
|
|||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tempdir::TempDir;
|
||||
use tempfile::tempdir;
|
||||
|
||||
#[test]
|
||||
fn test_calculate_digest() {
|
||||
|
@ -958,7 +956,7 @@ mod tests {
|
|||
|
||||
#[test]
|
||||
fn should_create_metadata() {
|
||||
let dir = TempDir::new("metadata").unwrap();
|
||||
let dir = tempdir().unwrap();
|
||||
let log_dir = dir.path().join("log");
|
||||
let metadata = Metadata::new(Arc::new(Config::default()), dir.path(), &log_dir).unwrap();
|
||||
assert!(metadata.get_tasks().unwrap().is_empty());
|
||||
|
@ -970,7 +968,7 @@ mod tests {
|
|||
|
||||
#[test]
|
||||
fn test_task_lifecycle() {
|
||||
let dir = TempDir::new("metadata").unwrap();
|
||||
let dir = tempdir().unwrap();
|
||||
let log_dir = dir.path().join("log");
|
||||
let metadata = Metadata::new(Arc::new(Config::default()), dir.path(), &log_dir).unwrap();
|
||||
let task_id = "d3c4e940ad06c47fc36ac67801e6f8e36cb400e2391708620bc7e865b102062c";
|
||||
|
@ -1030,7 +1028,7 @@ mod tests {
|
|||
|
||||
#[test]
|
||||
fn test_piece_lifecycle() {
|
||||
let dir = TempDir::new("metadata").unwrap();
|
||||
let dir = tempdir().unwrap();
|
||||
let log_dir = dir.path().join("log");
|
||||
let metadata = Metadata::new(Arc::new(Config::default()), dir.path(), &log_dir).unwrap();
|
||||
let task_id = "d3c4e940ad06c47fc36ac67801e6f8e36cb400e2391708620bc7e865b102062c";
|
||||
|
|
|
@ -24,7 +24,7 @@ use std::{
|
|||
ops::Deref,
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
use tracing::{info, instrument, warn};
|
||||
use tracing::{info, warn};
|
||||
|
||||
/// RocksdbStorageEngine is a storage engine based on rocksdb.
|
||||
pub struct RocksdbStorageEngine {
|
||||
|
@ -67,7 +67,6 @@ impl RocksdbStorageEngine {
|
|||
const DEFAULT_LOG_MAX_FILES: usize = 10;
|
||||
|
||||
/// open opens a rocksdb storage engine with the given directory and column families.
|
||||
#[instrument(skip_all)]
|
||||
pub fn open(dir: &Path, log_dir: &PathBuf, cf_names: &[&str], keep: bool) -> Result<Self> {
|
||||
info!("initializing metadata directory: {:?} {:?}", dir, cf_names);
|
||||
// Initialize rocksdb options.
|
||||
|
@ -135,7 +134,6 @@ impl RocksdbStorageEngine {
|
|||
/// RocksdbStorageEngine implements the storage engine operations.
|
||||
impl Operations for RocksdbStorageEngine {
|
||||
/// get gets the object by key.
|
||||
#[instrument(skip_all)]
|
||||
fn get<O: DatabaseObject>(&self, key: &[u8]) -> Result<Option<O>> {
|
||||
let cf = cf_handle::<O>(self)?;
|
||||
let value = self.get_cf(cf, key).or_err(ErrorType::StorageError)?;
|
||||
|
@ -146,7 +144,6 @@ impl Operations for RocksdbStorageEngine {
|
|||
}
|
||||
|
||||
/// is_exist checks if the object exists by key.
|
||||
#[instrument(skip_all)]
|
||||
fn is_exist<O: DatabaseObject>(&self, key: &[u8]) -> Result<bool> {
|
||||
let cf = cf_handle::<O>(self)?;
|
||||
Ok(self
|
||||
|
@ -156,7 +153,6 @@ impl Operations for RocksdbStorageEngine {
|
|||
}
|
||||
|
||||
/// put puts the object by key.
|
||||
#[instrument(skip_all)]
|
||||
fn put<O: DatabaseObject>(&self, key: &[u8], value: &O) -> Result<()> {
|
||||
let cf = cf_handle::<O>(self)?;
|
||||
self.put_cf(cf, key, value.serialized()?)
|
||||
|
@ -165,7 +161,6 @@ impl Operations for RocksdbStorageEngine {
|
|||
}
|
||||
|
||||
/// delete deletes the object by key.
|
||||
#[instrument(skip_all)]
|
||||
fn delete<O: DatabaseObject>(&self, key: &[u8]) -> Result<()> {
|
||||
let cf = cf_handle::<O>(self)?;
|
||||
let mut options = WriteOptions::default();
|
||||
|
@ -177,7 +172,6 @@ impl Operations for RocksdbStorageEngine {
|
|||
}
|
||||
|
||||
/// iter iterates all objects.
|
||||
#[instrument(skip_all)]
|
||||
fn iter<O: DatabaseObject>(&self) -> Result<impl Iterator<Item = Result<(Box<[u8]>, O)>>> {
|
||||
let cf = cf_handle::<O>(self)?;
|
||||
let iter = self.iterator_cf(cf, rocksdb::IteratorMode::Start);
|
||||
|
@ -188,7 +182,6 @@ impl Operations for RocksdbStorageEngine {
|
|||
}
|
||||
|
||||
/// iter_raw iterates all objects without serialization.
|
||||
#[instrument(skip_all)]
|
||||
fn iter_raw<O: DatabaseObject>(
|
||||
&self,
|
||||
) -> Result<impl Iterator<Item = Result<(Box<[u8]>, Box<[u8]>)>>> {
|
||||
|
@ -202,7 +195,6 @@ impl Operations for RocksdbStorageEngine {
|
|||
}
|
||||
|
||||
/// prefix_iter iterates all objects with prefix.
|
||||
#[instrument(skip_all)]
|
||||
fn prefix_iter<O: DatabaseObject>(
|
||||
&self,
|
||||
prefix: &[u8],
|
||||
|
@ -216,7 +208,6 @@ impl Operations for RocksdbStorageEngine {
|
|||
}
|
||||
|
||||
/// prefix_iter_raw iterates all objects with prefix without serialization.
|
||||
#[instrument(skip_all)]
|
||||
fn prefix_iter_raw<O: DatabaseObject>(
|
||||
&self,
|
||||
prefix: &[u8],
|
||||
|
@ -229,7 +220,6 @@ impl Operations for RocksdbStorageEngine {
|
|||
}
|
||||
|
||||
/// batch_delete deletes objects by keys.
|
||||
#[instrument(skip_all)]
|
||||
fn batch_delete<O: DatabaseObject>(&self, keys: Vec<&[u8]>) -> Result<()> {
|
||||
let cf = cf_handle::<O>(self)?;
|
||||
let mut batch = rocksdb::WriteBatch::default();
|
||||
|
@ -262,7 +252,7 @@ where
|
|||
mod tests {
|
||||
use super::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tempdir::TempDir;
|
||||
use tempfile::tempdir;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
|
||||
struct Object {
|
||||
|
@ -275,7 +265,7 @@ mod tests {
|
|||
}
|
||||
|
||||
fn create_test_engine() -> RocksdbStorageEngine {
|
||||
let temp_dir = TempDir::new("rocksdb_test").unwrap();
|
||||
let temp_dir = tempdir().unwrap();
|
||||
let log_dir = temp_dir.path().to_path_buf();
|
||||
RocksdbStorageEngine::open(temp_dir.path(), &log_dir, &[Object::NAMESPACE], false).unwrap()
|
||||
}
|
||||
|
|
|
@ -13,7 +13,6 @@ edition.workspace = true
|
|||
dragonfly-client-core.workspace = true
|
||||
dragonfly-api.workspace = true
|
||||
reqwest.workspace = true
|
||||
hyper.workspace = true
|
||||
http-range-header.workspace = true
|
||||
http.workspace = true
|
||||
tracing.workspace = true
|
||||
|
@ -24,12 +23,15 @@ rustls-pki-types.workspace = true
|
|||
rustls-pemfile.workspace = true
|
||||
sha2.workspace = true
|
||||
uuid.workspace = true
|
||||
sysinfo.workspace = true
|
||||
hex.workspace = true
|
||||
openssl.workspace = true
|
||||
crc32fast.workspace = true
|
||||
openssl.workspace = true
|
||||
lazy_static.workspace = true
|
||||
bytesize.workspace = true
|
||||
lru.workspace = true
|
||||
tokio.workspace = true
|
||||
rustix = { version = "1.0.8", features = ["fs"] }
|
||||
base64 = "0.22.1"
|
||||
pnet = "0.35.0"
|
||||
|
||||
|
|
|
@ -14,10 +14,10 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
use dragonfly_client_core::Result as ClientResult;
|
||||
use dragonfly_client_core::{Error as ClientError, Result as ClientResult};
|
||||
use sha2::Digest as Sha2Digest;
|
||||
use std::fmt;
|
||||
use std::io::Read;
|
||||
use std::io::{self, Read};
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use tracing::instrument;
|
||||
|
@ -112,9 +112,36 @@ impl FromStr for Digest {
|
|||
}
|
||||
|
||||
let algorithm = match parts[0] {
|
||||
"crc32" => Algorithm::Crc32,
|
||||
"sha256" => Algorithm::Sha256,
|
||||
"sha512" => Algorithm::Sha512,
|
||||
"crc32" => {
|
||||
if parts[1].len() != 10 {
|
||||
return Err(format!(
|
||||
"invalid crc32 digest length: {}, expected 10",
|
||||
parts[1].len()
|
||||
));
|
||||
}
|
||||
|
||||
Algorithm::Crc32
|
||||
}
|
||||
"sha256" => {
|
||||
if parts[1].len() != 64 {
|
||||
return Err(format!(
|
||||
"invalid sha256 digest length: {}, expected 64",
|
||||
parts[1].len()
|
||||
));
|
||||
}
|
||||
|
||||
Algorithm::Sha256
|
||||
}
|
||||
"sha512" => {
|
||||
if parts[1].len() != 128 {
|
||||
return Err(format!(
|
||||
"invalid sha512 digest length: {}, expected 128",
|
||||
parts[1].len()
|
||||
));
|
||||
}
|
||||
|
||||
Algorithm::Sha512
|
||||
}
|
||||
_ => return Err(format!("invalid digest algorithm: {}", parts[0])),
|
||||
};
|
||||
|
||||
|
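The stricter parser rejects encoded values whose length does not match the algorithm: 10 characters for the decimal crc32 form used here, 64 hex characters for sha256, and 128 for sha512. A usage sketch against this module's `FromStr` impl, assuming it runs where `Digest` is in scope; the literal values are taken from the tests further down:

```rust
use std::str::FromStr;

fn main() {
    // 10-character decimal crc32 from the tests below parses fine.
    assert!(Digest::from_str("crc32:1475635037").is_ok());
    // Too short for the declared algorithm, so parsing is rejected.
    assert!(Digest::from_str("crc32:123").is_err());

    // 64 hex characters for sha256 (value from test_verify_file_digest below).
    assert!(Digest::from_str(
        "sha256:6ae8a75555209fd6c44157c0aed8016e763ff435a19cf186f76863140143ff72"
    )
    .is_ok());
    assert!(Digest::from_str("sha256:deadbeef").is_err());
}
```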
@ -126,35 +153,54 @@ impl FromStr for Digest {
|
|||
#[instrument(skip_all)]
|
||||
pub fn calculate_file_digest(algorithm: Algorithm, path: &Path) -> ClientResult<Digest> {
|
||||
let f = std::fs::File::open(path)?;
|
||||
let mut reader = std::io::BufReader::new(f);
|
||||
let mut reader = io::BufReader::new(f);
|
||||
match algorithm {
|
||||
Algorithm::Crc32 => {
|
||||
let mut buffer = [0; 4096];
|
||||
let mut hasher = crc32fast::Hasher::new();
|
||||
loop {
|
||||
let n = reader.read(&mut buffer)?;
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
hasher.update(&buffer[..n]);
|
||||
match reader.read(&mut buffer) {
|
||||
Ok(0) => break,
|
||||
Ok(n) => hasher.update(&buffer[..n]),
|
||||
Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue,
|
||||
Err(err) => return Err(err.into()),
|
||||
};
|
||||
}
|
||||
|
||||
Ok(Digest::new(algorithm, hasher.finalize().to_string()))
|
||||
}
|
||||
Algorithm::Sha256 => {
|
||||
let mut hasher = sha2::Sha256::new();
|
||||
std::io::copy(&mut reader, &mut hasher)?;
|
||||
io::copy(&mut reader, &mut hasher)?;
|
||||
Ok(Digest::new(algorithm, hex::encode(hasher.finalize())))
|
||||
}
|
||||
Algorithm::Sha512 => {
|
||||
let mut hasher = sha2::Sha512::new();
|
||||
std::io::copy(&mut reader, &mut hasher)?;
|
||||
io::copy(&mut reader, &mut hasher)?;
|
||||
Ok(Digest::new(algorithm, hex::encode(hasher.finalize())))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// verify_file_digest verifies the digest of a file against an expected digest.
|
||||
pub fn verify_file_digest(expected_digest: Digest, file_path: &Path) -> ClientResult<()> {
|
||||
let digest = match calculate_file_digest(expected_digest.algorithm(), file_path) {
|
||||
Ok(digest) => digest,
|
||||
Err(err) => {
|
||||
return Err(err);
|
||||
}
|
||||
};
|
||||
|
||||
if digest.to_string() != expected_digest.to_string() {
|
||||
return Err(ClientError::DigestMismatch(
|
||||
expected_digest.to_string(),
|
||||
digest.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
@ -202,7 +248,31 @@ mod tests {
|
|||
|
||||
let expected_crc32 = "1475635037";
|
||||
let digest =
|
||||
calculate_file_digest(Algorithm::Crc32, path).expect("failed to calculate Sha512 hash");
|
||||
calculate_file_digest(Algorithm::Crc32, path).expect("failed to calculate Crc32 hash");
|
||||
assert_eq!(digest.encoded(), expected_crc32);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_file_digest() {
|
||||
let content = b"test content";
|
||||
let temp_file = tempfile::NamedTempFile::new().expect("failed to create temp file");
|
||||
let path = temp_file.path();
|
||||
let mut file = File::create(path).expect("failed to create file");
|
||||
file.write_all(content).expect("failed to write to file");
|
||||
|
||||
let expected_sha256_digest = Digest::new(
|
||||
Algorithm::Sha256,
|
||||
"6ae8a75555209fd6c44157c0aed8016e763ff435a19cf186f76863140143ff72".to_string(),
|
||||
);
|
||||
assert!(verify_file_digest(expected_sha256_digest, path).is_ok());
|
||||
|
||||
let expected_sha512_digest = Digest::new(
|
||||
Algorithm::Sha512,
|
||||
"0cbf4caef38047bba9a24e621a961484e5d2a92176a859e7eb27df343dd34eb98d538a6c5f4da1ce302ec250b821cc001e46cc97a704988297185a4df7e99602".to_string(),
|
||||
);
|
||||
assert!(verify_file_digest(expected_sha512_digest, path).is_ok());
|
||||
|
||||
let expected_crc32_digest = Digest::new(Algorithm::Crc32, "1475635037".to_string());
|
||||
assert!(verify_file_digest(expected_crc32_digest, path).is_ok());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,54 @@
|
|||
/*
|
||||
* Copyright 2025 The Dragonfly Authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
use dragonfly_client_core::Result;
|
||||
use tokio::fs;
|
||||
|
||||
/// fallocate allocates the space for the file and fills it with zero, only on Linux.
|
||||
#[allow(unused_variables)]
|
||||
pub async fn fallocate(f: &fs::File, length: u64) -> Result<()> {
|
||||
// No allocation needed for zero length. Avoids potential fallocate errors.
|
||||
if length == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
use dragonfly_client_core::Error;
|
||||
use rustix::fs::{fallocate, FallocateFlags};
|
||||
use std::os::unix::io::AsFd;
|
||||
use tokio::io;
|
||||
|
||||
// Set length (potential truncation).
|
||||
f.set_len(length).await?;
|
||||
let fd = f.as_fd();
|
||||
let offset = 0;
|
||||
let flags = FallocateFlags::KEEP_SIZE;
|
||||
|
||||
loop {
|
||||
match fallocate(fd, flags, offset, length) {
|
||||
Ok(_) => return Ok(()),
|
||||
Err(rustix::io::Errno::INTR) => continue,
|
||||
Err(err) => {
|
||||
return Err(Error::IO(io::Error::from_raw_os_error(err.raw_os_error())))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
Ok(())
|
||||
}
|
|
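A short usage sketch for the helper: create the task file and reserve its full length before pieces are written at arbitrary offsets. On macOS and other non-Linux targets the call is a deliberate no-op, and a zero length returns immediately. The caller below is illustrative, not part of the crate:

```rust
use dragonfly_client_core::Result;
use tokio::fs;

/// Illustrative caller: preallocate a download file before piece writes begin.
async fn prepare_file(path: &std::path::Path, content_length: u64) -> Result<()> {
    let f = fs::File::create(path).await?;
    fallocate(&f, content_length).await
}
```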
@ -20,7 +20,6 @@ use dragonfly_client_core::{
|
|||
Error, Result,
|
||||
};
|
||||
use http::header::{self, HeaderMap};
|
||||
use tracing::instrument;
|
||||
|
||||
/// Credentials is the credentials for the basic auth.
|
||||
pub struct Credentials {
|
||||
|
@ -34,7 +33,6 @@ pub struct Credentials {
|
|||
/// Credentials is the basic auth.
|
||||
impl Credentials {
|
||||
/// new returns a new Credentials.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(username: &str, password: &str) -> Credentials {
|
||||
Self {
|
||||
username: username.to_string(),
|
||||
|
|
|
@ -21,12 +21,10 @@ use dragonfly_client_core::{
|
|||
};
|
||||
use reqwest::header::{HeaderMap, HeaderName, HeaderValue};
|
||||
use std::collections::HashMap;
|
||||
use tracing::instrument;
|
||||
|
||||
pub mod basic_auth;
|
||||
|
||||
/// headermap_to_hashmap converts a headermap to a hashmap.
|
||||
#[instrument(skip_all)]
|
||||
pub fn headermap_to_hashmap(header: &HeaderMap<HeaderValue>) -> HashMap<String, String> {
|
||||
let mut hashmap: HashMap<String, String> = HashMap::with_capacity(header.len());
|
||||
for (k, v) in header {
|
||||
|
@ -39,7 +37,6 @@ pub fn headermap_to_hashmap(header: &HeaderMap<HeaderValue>) -> HashMap<String,
|
|||
}
|
||||
|
||||
/// hashmap_to_headermap converts a hashmap to a headermap.
|
||||
#[instrument(skip_all)]
|
||||
pub fn hashmap_to_headermap(header: &HashMap<String, String>) -> Result<HeaderMap<HeaderValue>> {
|
||||
let mut headermap = HeaderMap::with_capacity(header.len());
|
||||
for (k, v) in header {
|
||||
|
@ -52,7 +49,6 @@ pub fn hashmap_to_headermap(header: &HashMap<String, String>) -> Result<HeaderMa
|
|||
}
|
||||
|
||||
/// header_vec_to_hashmap converts a vector of header string to a hashmap.
|
||||
#[instrument(skip_all)]
|
||||
pub fn header_vec_to_hashmap(raw_header: Vec<String>) -> Result<HashMap<String, String>> {
|
||||
let mut header = HashMap::with_capacity(raw_header.len());
|
||||
for h in raw_header {
|
||||
|
@ -65,13 +61,11 @@ pub fn header_vec_to_hashmap(raw_header: Vec<String>) -> Result<HashMap<String,
|
|||
}
|
||||
|
||||
/// header_vec_to_headermap converts a vector of header string to a reqwest headermap.
|
||||
#[instrument(skip_all)]
|
||||
pub fn header_vec_to_headermap(raw_header: Vec<String>) -> Result<HeaderMap> {
|
||||
hashmap_to_headermap(&header_vec_to_hashmap(raw_header)?)
|
||||
}
|
||||
|
||||
/// get_range gets the range from http header.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_range(header: &HeaderMap, content_length: u64) -> Result<Option<Range>> {
|
||||
match header.get(reqwest::header::RANGE) {
|
||||
Some(range) => {
|
||||
|
@ -85,7 +79,6 @@ pub fn get_range(header: &HeaderMap, content_length: u64) -> Result<Option<Range
|
|||
/// parse_range_header parses a Range header string as per RFC 7233,
|
||||
/// supported Range Header: "Range": "bytes=100-200", "Range": "bytes=-50",
|
||||
/// "Range": "bytes=150-", "Range": "bytes=0-0,-1".
|
||||
#[instrument(skip_all)]
|
||||
pub fn parse_range_header(range_header_value: &str, content_length: u64) -> Result<Range> {
|
||||
let parsed_ranges =
|
||||
http_range_header::parse_range_header(range_header_value).or_err(ErrorType::ParseError)?;
|
||||
|
|
|
@ -20,9 +20,8 @@ use dragonfly_client_core::{
|
|||
Result,
|
||||
};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::io::Read;
|
||||
use std::io::{self, Read};
|
||||
use std::path::PathBuf;
|
||||
use tracing::instrument;
|
||||
use url::Url;
|
||||
use uuid::Uuid;
|
||||
|
||||
|
@ -32,6 +31,34 @@ const SEED_PEER_SUFFIX: &str = "seed";
|
|||
/// PERSISTENT_CACHE_TASK_SUFFIX is the suffix of the persistent cache task.
|
||||
const PERSISTENT_CACHE_TASK_SUFFIX: &str = "persistent-cache-task";
|
||||
|
||||
/// TaskIDParameter is the parameter of the task id.
|
||||
pub enum TaskIDParameter {
|
||||
/// Content uses the content to generate the task id.
|
||||
Content(String),
|
||||
/// URLBased uses the url, piece_length, tag, application and filtered_query_params to generate
|
||||
/// the task id.
|
||||
URLBased {
|
||||
url: String,
|
||||
piece_length: Option<u64>,
|
||||
tag: Option<String>,
|
||||
application: Option<String>,
|
||||
filtered_query_params: Vec<String>,
|
||||
},
|
||||
}
|
||||
|
||||
/// PersistentCacheTaskIDParameter is the parameter of the persistent cache task id.
|
||||
pub enum PersistentCacheTaskIDParameter {
|
||||
/// Content uses the content to generate the persistent cache task id.
|
||||
Content(String),
|
||||
/// FileContentBased uses the file path, piece_length, tag and application to generate the persistent cache task id.
|
||||
FileContentBased {
|
||||
path: PathBuf,
|
||||
piece_length: Option<u64>,
|
||||
tag: Option<String>,
|
||||
application: Option<String>,
|
||||
},
|
||||
}
|
||||
|
||||
/// IDGenerator is used to generate the id for the resources.
|
||||
#[derive(Debug)]
|
||||
pub struct IDGenerator {
|
||||
|
@ -48,7 +75,6 @@ pub struct IDGenerator {
|
|||
/// IDGenerator implements the IDGenerator.
|
||||
impl IDGenerator {
|
||||
/// new creates a new IDGenerator.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(ip: String, hostname: String, is_seed_peer: bool) -> Self {
|
||||
IDGenerator {
|
||||
ip,
|
||||
|
@ -59,7 +85,6 @@ impl IDGenerator {
|
|||
|
||||
/// host_id generates the host id.
|
||||
#[inline]
|
||||
#[instrument(skip_all)]
|
||||
pub fn host_id(&self) -> String {
|
||||
if self.is_seed_peer {
|
||||
return format!("{}-{}-{}", self.ip, self.hostname, "seed");
|
||||
|
@ -70,106 +95,124 @@ impl IDGenerator {
|
|||
|
||||
/// task_id generates the task id.
|
||||
#[inline]
|
||||
#[instrument(skip_all)]
|
||||
pub fn task_id(
|
||||
&self,
|
||||
url: &str,
|
||||
piece_length: Option<u64>,
|
||||
tag: Option<&str>,
|
||||
application: Option<&str>,
|
||||
filtered_query_params: Vec<String>,
|
||||
) -> Result<String> {
|
||||
// Filter the query parameters.
|
||||
let url = Url::parse(url).or_err(ErrorType::ParseError)?;
|
||||
let query = url
|
||||
.query_pairs()
|
||||
.filter(|(k, _)| !filtered_query_params.contains(&k.to_string()));
|
||||
pub fn task_id(&self, parameter: TaskIDParameter) -> Result<String> {
|
||||
match parameter {
|
||||
TaskIDParameter::Content(content) => {
|
||||
Ok(hex::encode(Sha256::digest(content.as_bytes())))
|
||||
}
|
||||
TaskIDParameter::URLBased {
|
||||
url,
|
||||
piece_length,
|
||||
tag,
|
||||
application,
|
||||
filtered_query_params,
|
||||
} => {
|
||||
// Filter the query parameters.
|
||||
let url = Url::parse(url.as_str()).or_err(ErrorType::ParseError)?;
|
||||
let query = url
|
||||
.query_pairs()
|
||||
.filter(|(k, _)| !filtered_query_params.contains(&k.to_string()));
|
||||
|
||||
let mut artifact_url = url.clone();
|
||||
if query.clone().count() == 0 {
|
||||
artifact_url.set_query(None);
|
||||
} else {
|
||||
artifact_url.query_pairs_mut().clear().extend_pairs(query);
|
||||
let mut artifact_url = url.clone();
|
||||
if query.clone().count() == 0 {
|
||||
artifact_url.set_query(None);
|
||||
} else {
|
||||
artifact_url.query_pairs_mut().clear().extend_pairs(query);
|
||||
}
|
||||
|
||||
let artifact_url_str = artifact_url.to_string();
|
||||
let final_url = if artifact_url_str.ends_with('/') && artifact_url.path() == "/" {
|
||||
artifact_url_str.trim_end_matches('/').to_string()
|
||||
} else {
|
||||
artifact_url_str
|
||||
};
|
||||
|
||||
// Initialize the hasher.
|
||||
let mut hasher = Sha256::new();
|
||||
|
||||
// Add the url to generate the task id.
|
||||
hasher.update(final_url);
|
||||
|
||||
// Add the tag to generate the task id.
|
||||
if let Some(tag) = tag {
|
||||
hasher.update(tag);
|
||||
}
|
||||
|
||||
// Add the application to generate the task id.
|
||||
if let Some(application) = application {
|
||||
hasher.update(application);
|
||||
}
|
||||
|
||||
// Add the piece length to generate the task id.
|
||||
if let Some(piece_length) = piece_length {
|
||||
hasher.update(piece_length.to_string());
|
||||
}
|
||||
|
||||
hasher.update(TaskType::Standard.as_str_name().as_bytes());
|
||||
|
||||
// Generate the task id.
|
||||
Ok(hex::encode(hasher.finalize()))
|
||||
}
|
||||
}
|
||||
|
||||
let artifact_url_str = artifact_url.to_string();
|
||||
let final_url = if artifact_url_str.ends_with('/') && artifact_url.path() == "/" {
|
||||
artifact_url_str.trim_end_matches('/').to_string()
|
||||
} else {
|
||||
artifact_url_str
|
||||
};
|
||||
|
||||
// Initialize the hasher.
|
||||
let mut hasher = Sha256::new();
|
||||
|
||||
// Add the url to generate the task id.
|
||||
hasher.update(final_url);
|
||||
|
||||
// Add the tag to generate the task id.
|
||||
if let Some(tag) = tag {
|
||||
hasher.update(tag);
|
||||
}
|
||||
|
||||
// Add the application to generate the task id.
|
||||
if let Some(application) = application {
|
||||
hasher.update(application);
|
||||
}
|
||||
|
||||
// Add the piece length to generate the task id.
|
||||
if let Some(piece_length) = piece_length {
|
||||
hasher.update(piece_length.to_string());
|
||||
}
|
||||
|
||||
// Generate the task id.
|
||||
Ok(hex::encode(hasher.finalize()))
|
||||
}
|
||||
|
||||
/// persistent_cache_task_id generates the persistent cache task id.
|
||||
#[inline]
|
||||
#[instrument(skip_all)]
|
||||
pub fn persistent_cache_task_id(
|
||||
&self,
|
||||
path: &PathBuf,
|
||||
piece_length: Option<u64>,
|
||||
tag: Option<&str>,
|
||||
application: Option<&str>,
|
||||
parameter: PersistentCacheTaskIDParameter,
|
||||
) -> Result<String> {
|
||||
// Calculate the hash of the file.
|
||||
let f = std::fs::File::open(path)?;
|
||||
let mut buffer = [0; 4096];
|
||||
let mut reader = std::io::BufReader::with_capacity(buffer.len(), f);
|
||||
let mut hasher = crc32fast::Hasher::new();
|
||||
loop {
|
||||
let n = reader.read(&mut buffer)?;
|
||||
if n == 0 {
|
||||
break;
|
||||
|
||||
match parameter {
|
||||
PersistentCacheTaskIDParameter::Content(content) => {
|
||||
hasher.update(content.as_bytes());
|
||||
Ok(hasher.finalize().to_string())
|
||||
}
|
||||
PersistentCacheTaskIDParameter::FileContentBased {
|
||||
path,
|
||||
piece_length,
|
||||
tag,
|
||||
application,
|
||||
} => {
|
||||
// Calculate the hash of the file.
|
||||
let f = std::fs::File::open(path)?;
|
||||
let mut buffer = [0; 4096];
|
||||
let mut reader = io::BufReader::with_capacity(buffer.len(), f);
|
||||
loop {
|
||||
match reader.read(&mut buffer) {
|
||||
Ok(0) => break,
|
||||
Ok(n) => hasher.update(&buffer[..n]),
|
||||
Err(ref err) if err.kind() == io::ErrorKind::Interrupted => continue,
|
||||
Err(err) => return Err(err.into()),
|
||||
};
|
||||
}
|
||||
|
||||
hasher.update(&buffer[..n]);
|
||||
// Add the tag to generate the persistent cache task id.
|
||||
if let Some(tag) = tag {
|
||||
hasher.update(tag.as_bytes());
|
||||
}
|
||||
|
||||
// Add the application to generate the persistent cache task id.
|
||||
if let Some(application) = application {
|
||||
hasher.update(application.as_bytes());
|
||||
}
|
||||
|
||||
// Add the piece length to generate the persistent cache task id.
|
||||
if let Some(piece_length) = piece_length {
|
||||
hasher.update(piece_length.to_string().as_bytes());
|
||||
}
|
||||
|
||||
hasher.update(TaskType::PersistentCache.as_str_name().as_bytes());
|
||||
|
||||
// Generate the task id by crc32.
|
||||
Ok(hasher.finalize().to_string())
|
||||
}
|
||||
}
|
||||
|
||||
// Add the tag to generate the persistent cache task id.
|
||||
if let Some(tag) = tag {
|
||||
hasher.update(tag.as_bytes());
|
||||
}
|
||||
|
||||
// Add the application to generate the persistent cache task id.
|
||||
if let Some(application) = application {
|
||||
hasher.update(application.as_bytes());
|
||||
}
|
||||
|
||||
// Add the piece length to generate the persistent cache task id.
|
||||
if let Some(piece_length) = piece_length {
|
||||
hasher.update(piece_length.to_string().as_bytes());
|
||||
}
|
||||
|
||||
// Generate the task id by crc32.
|
||||
Ok(hasher.finalize().to_string())
|
||||
}
|
||||
|
||||
/// peer_id generates the peer id.
|
||||
#[inline]
|
||||
#[instrument(skip_all)]
|
||||
pub fn peer_id(&self) -> String {
|
||||
if self.is_seed_peer {
|
||||
return format!(
|
||||
|
@ -185,7 +228,6 @@ impl IDGenerator {
|
|||
}
|
||||
|
||||
/// task_type generates the task type by the task id.
|
||||
#[instrument(skip_all)]
|
||||
pub fn task_type(&self, id: &str) -> TaskType {
|
||||
if id.ends_with(PERSISTENT_CACHE_TASK_SUFFIX) {
|
||||
return TaskType::PersistentCache;
|
||||
|
@ -225,116 +267,140 @@ mod tests {
|
|||
let test_cases = vec![
|
||||
(
|
||||
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
|
||||
"https://example.com",
|
||||
Some(1024_u64),
|
||||
Some("foo"),
|
||||
Some("bar"),
|
||||
vec![],
|
||||
"99a47b38e9d3321aebebd715bea0483c1400cef2f767f84d97458f9dcedff221",
|
||||
TaskIDParameter::URLBased {
|
||||
url: "https://example.com".to_string(),
|
||||
piece_length: Some(1024_u64),
|
||||
tag: Some("foo".to_string()),
|
||||
application: Some("bar".to_string()),
|
||||
filtered_query_params: vec![],
|
||||
},
|
||||
"27554d06dfc788c2c2c60e01960152ffbd4b145fc103fcb80b432b4dc238a6fe",
|
||||
),
|
||||
(
|
||||
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
|
||||
"https://example.com",
|
||||
None,
|
||||
Some("foo"),
|
||||
Some("bar"),
|
||||
vec![],
|
||||
"160fa7f001d9d2e893130894fbb60a5fb006e1d61bff82955f2946582bc9de1d",
|
||||
TaskIDParameter::URLBased {
|
||||
url: "https://example.com".to_string(),
|
||||
piece_length: None,
|
||||
tag: Some("foo".to_string()),
|
||||
application: Some("bar".to_string()),
|
||||
filtered_query_params: vec![],
|
||||
},
|
||||
"06408fbf247ddaca478f8cb9565fe5591c28efd0994b8fea80a6a87d3203c5ca",
|
||||
),
|
||||
(
|
||||
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
|
||||
"https://example.com",
|
||||
None,
|
||||
Some("foo"),
|
||||
None,
|
||||
vec![],
|
||||
"2773851c628744fb7933003195db436ce397c1722920696c4274ff804d86920b",
|
||||
TaskIDParameter::URLBased {
|
||||
url: "https://example.com".to_string(),
|
||||
piece_length: None,
|
||||
tag: Some("foo".to_string()),
|
||||
application: None,
|
||||
filtered_query_params: vec![],
|
||||
},
|
||||
"3c3f230ef9f191dd2821510346a7bc138e4894bee9aee184ba250a3040701d2a",
|
||||
),
|
||||
(
|
||||
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
|
||||
"https://example.com",
|
||||
None,
|
||||
None,
|
||||
Some("bar"),
|
||||
vec![],
|
||||
"63dee2822037636b0109876b58e95692233840753a882afa69b9b5ee82a6c57d",
|
||||
TaskIDParameter::URLBased {
|
||||
url: "https://example.com".to_string(),
|
||||
piece_length: None,
|
||||
tag: None,
|
||||
application: Some("bar".to_string()),
|
||||
filtered_query_params: vec![],
|
||||
},
|
||||
"c9f9261b7305c24371244f9f149f5d4589ed601348fdf22d7f6f4b10658fdba2",
|
||||
),
|
||||
(
|
||||
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
|
||||
"https://example.com",
|
||||
Some(1024_u64),
|
||||
None,
|
||||
None,
|
||||
vec![],
|
||||
"40c21de3ad2f1470ca1a19a2ad2577803a1829851f6cf862ffa2d4577ae51d38",
|
||||
TaskIDParameter::URLBased {
|
||||
url: "https://example.com".to_string(),
|
||||
piece_length: Some(1024_u64),
|
||||
tag: None,
|
||||
application: None,
|
||||
filtered_query_params: vec![],
|
||||
},
|
||||
"9f7c9aafbc6f30f8f41a96ca77eeae80c5b60964b3034b0ee43ccf7b2f9e52b8",
|
||||
),
|
||||
(
|
||||
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
|
||||
"https://example.com?foo=foo&bar=bar",
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
vec!["foo".to_string(), "bar".to_string()],
|
||||
"100680ad546ce6a577f42f52df33b4cfdca756859e664b8d7de329b150d09ce9",
|
||||
TaskIDParameter::URLBased {
|
||||
url: "https://example.com?foo=foo&bar=bar".to_string(),
|
||||
piece_length: None,
|
||||
tag: None,
|
||||
application: None,
|
||||
filtered_query_params: vec!["foo".to_string(), "bar".to_string()],
|
||||
},
|
||||
"457b4328cde278e422c9e243f7bfd1e97f511fec43a80f535cf6b0ef6b086776",
|
||||
),
|
||||
(
|
||||
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
|
||||
TaskIDParameter::Content("This is a test file".to_string()),
|
||||
"e2d0fe1585a63ec6009c8016ff8dda8b17719a637405a4e23c0ff81339148249",
|
||||
),
|
||||
];
|
||||
|
||||
for (generator, url, piece_length, tag, application, filtered_query_params, expected_id) in
|
||||
test_cases
|
||||
{
|
||||
let task_id = generator
|
||||
.task_id(url, piece_length, tag, application, filtered_query_params)
|
||||
.unwrap();
|
||||
for (generator, parameter, expected_id) in test_cases {
|
||||
let task_id = generator.task_id(parameter).unwrap();
|
||||
assert_eq!(task_id, expected_id);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_generate_persistent_cache_task_id() {
|
||||
let dir = tempdir().unwrap();
|
||||
let file_path = dir.path().join("testfile");
|
||||
let mut f = File::create(&file_path).unwrap();
|
||||
f.write_all("This is a test file".as_bytes()).unwrap();
|
||||
|
||||
let test_cases = vec![
|
||||
(
|
||||
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
|
||||
"This is a test file",
|
||||
Some(1024_u64),
|
||||
Some("tag1"),
|
||||
Some("app1"),
|
||||
"223755482",
|
||||
PersistentCacheTaskIDParameter::FileContentBased {
|
||||
path: file_path.clone(),
|
||||
piece_length: Some(1024_u64),
|
||||
tag: Some("tag1".to_string()),
|
||||
application: Some("app1".to_string()),
|
||||
},
|
||||
"3490958009",
|
||||
),
|
||||
(
|
||||
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
|
||||
"This is a test file",
|
||||
None,
|
||||
None,
|
||||
Some("app1"),
|
||||
"1152081721",
|
||||
PersistentCacheTaskIDParameter::FileContentBased {
|
||||
path: file_path.clone(),
|
||||
piece_length: None,
|
||||
tag: None,
|
||||
application: Some("app1".to_string()),
|
||||
},
|
||||
"735741469",
|
||||
),
|
||||
(
|
||||
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
|
||||
"This is a test file",
|
||||
None,
|
||||
Some("tag1"),
|
||||
None,
|
||||
"990623045",
|
||||
PersistentCacheTaskIDParameter::FileContentBased {
|
||||
path: file_path.clone(),
|
||||
piece_length: None,
|
||||
tag: Some("tag1".to_string()),
|
||||
application: None,
|
||||
},
|
||||
"3954905097",
|
||||
),
|
||||
(
|
||||
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
|
||||
"This is a test file",
|
||||
Some(1024_u64),
|
||||
None,
|
||||
None,
|
||||
"1293485139",
|
||||
PersistentCacheTaskIDParameter::FileContentBased {
|
||||
path: file_path.clone(),
|
||||
piece_length: Some(1024_u64),
|
||||
tag: None,
|
||||
application: None,
|
||||
},
|
||||
"4162557545",
|
||||
),
|
||||
(
|
||||
IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false),
|
||||
PersistentCacheTaskIDParameter::Content("This is a test file".to_string()),
|
||||
"107352521",
|
||||
),
|
||||
];
|
||||
|
||||
for (generator, file_content, piece_length, tag, application, expected_id) in test_cases {
|
||||
let dir = tempdir().unwrap();
|
||||
let file_path = dir.path().join("testfile");
|
||||
let mut f = File::create(&file_path).unwrap();
|
||||
f.write_all(file_content.as_bytes()).unwrap();
|
||||
|
||||
let task_id = generator
|
||||
.persistent_cache_task_id(&file_path, piece_length, tag, application)
|
||||
.unwrap();
|
||||
for (generator, parameter, expected_id) in test_cases {
|
||||
let task_id = generator.persistent_cache_task_id(parameter).unwrap();
|
||||
assert_eq!(task_id, expected_id);
|
||||
}
|
||||
}
|
||||
|
|
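The refactor above replaces positional arguments with the `TaskIDParameter` and `PersistentCacheTaskIDParameter` enums, so callers choose between explicit content and URL/file metadata when generating ids. A minimal sketch, assuming the crate layout shown in this change set:

```rust
// Minimal sketch of the refactored IDGenerator API; paths assume the
// dragonfly_client_util::id_generator module shown in this diff.
use dragonfly_client_util::id_generator::{
    IDGenerator, PersistentCacheTaskIDParameter, TaskIDParameter,
};

fn sketch() -> dragonfly_client_core::Result<()> {
    let generator = IDGenerator::new("127.0.0.1".to_string(), "localhost".to_string(), false);

    // SHA-256 over the normalized URL plus tag, application, piece length and task type.
    let url_task_id = generator.task_id(TaskIDParameter::URLBased {
        url: "https://example.com/file".to_string(),
        piece_length: Some(1024),
        tag: Some("foo".to_string()),
        application: Some("bar".to_string()),
        filtered_query_params: vec![],
    })?;

    // SHA-256 over caller-provided content when the id should not depend on the URL.
    let content_task_id = generator.task_id(TaskIDParameter::Content("hello".to_string()))?;

    // CRC32 over the file content plus tag, application, piece length and task type.
    let cache_task_id = generator.persistent_cache_task_id(
        PersistentCacheTaskIDParameter::FileContentBased {
            path: std::path::PathBuf::from("/tmp/testfile"),
            piece_length: None,
            tag: None,
            application: None,
        },
    )?;

    println!("{url_task_id} {content_task_id} {cache_task_id}");
    Ok(())
}
```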
@ -15,6 +15,7 @@
*/

pub mod digest;
pub mod fs;
pub mod http;
pub mod id_generator;
pub mod net;

@ -14,48 +14,15 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
use bytesize::{ByteSize, MB};
|
||||
use bytesize::ByteSize;
|
||||
use pnet::datalink::{self, NetworkInterface};
|
||||
use std::cmp::min;
|
||||
use std::net::IpAddr;
|
||||
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
use tracing::warn;
|
||||
|
||||
/// get_interface_by_ip returns the name of the network interface that has the specified IP
|
||||
/// address.
|
||||
pub fn get_interface_by_ip(ip: IpAddr) -> Option<NetworkInterface> {
|
||||
for interface in datalink::interfaces() {
|
||||
for ip_network in interface.ips.iter() {
|
||||
if ip_network.ip() == ip {
|
||||
return Some(interface);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// get_interface_speed_by_ip returns the speed of the network interface that has the specified IP
|
||||
/// address in Mbps.
|
||||
pub fn get_interface_speed(interface_name: &str) -> Option<u64> {
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
let speed_path = format!("/sys/class/net/{}/speed", interface_name);
|
||||
std::fs::read_to_string(&speed_path)
|
||||
.ok()
|
||||
.and_then(|speed_str| speed_str.trim().parse::<u64>().ok())
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
{
|
||||
warn!(
|
||||
"can not get interface {} speed on non-linux platform",
|
||||
interface_name
|
||||
);
|
||||
None
|
||||
}
|
||||
}
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use sysinfo::Networks;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::{info, warn};
|
||||
|
||||
/// Interface represents a network interface with its information.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
|
@ -63,23 +30,201 @@ pub struct Interface {
|
|||
/// name is the name of the network interface.
|
||||
pub name: String,
|
||||
|
||||
// bandwidth is the bandwidth of the network interface in Mbps.
|
||||
/// bandwidth is the bandwidth of the network interface in bps.
|
||||
pub bandwidth: u64,
|
||||
|
||||
// network_data_mutex is a mutex to protect access to network data.
|
||||
network_data_mutex: Arc<Mutex<()>>,
|
||||
}
|
||||
|
||||
/// get_interface_info returns the network interface information for the specified IP address.
|
||||
pub fn get_interface_info(ip: IpAddr, rate_limit: ByteSize) -> Option<Interface> {
|
||||
let rate_limit = rate_limit.as_u64() / MB * 8; // convert to Mbps
|
||||
/// NetworkData represents the network data for a specific network interface.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct NetworkData {
|
||||
/// max_rx_bandwidth is the maximum receive bandwidth of the interface in bps.
|
||||
pub max_rx_bandwidth: u64,
|
||||
|
||||
let interface = get_interface_by_ip(ip)?;
|
||||
match get_interface_speed(&interface.name) {
|
||||
Some(speed) => Some(Interface {
|
||||
name: interface.name,
|
||||
bandwidth: min(speed, rate_limit),
|
||||
}),
|
||||
None => Some(Interface {
|
||||
name: interface.name,
|
||||
bandwidth: rate_limit,
|
||||
}),
|
||||
/// rx_bandwidth is the current receive bandwidth of the interface in bps.
|
||||
pub rx_bandwidth: Option<u64>,
|
||||
|
||||
/// max_tx_bandwidth is the maximum transmit bandwidth of the interface in bps.
|
||||
pub max_tx_bandwidth: u64,
|
||||
|
||||
/// tx_bandwidth is the current transmit bandwidth of the interface in bps.
|
||||
pub tx_bandwidth: Option<u64>,
|
||||
}
|
||||
|
||||
/// Interface methods provide functionality to get network interface information.
|
||||
impl Interface {
|
||||
/// DEFAULT_NETWORKS_REFRESH_INTERVAL is the default interval for refreshing network data.
|
||||
const DEFAULT_NETWORKS_REFRESH_INTERVAL: Duration = Duration::from_secs(2);
|
||||
|
||||
/// new creates a new Interface instance based on the provided IP address and rate limit.
|
||||
pub fn new(ip: IpAddr, rate_limit: ByteSize) -> Interface {
|
||||
let rate_limit = Self::byte_size_to_bits(rate_limit); // convert to bps
|
||||
let Some(interface) = Self::get_network_interface_by_ip(ip) else {
|
||||
warn!(
|
||||
"can not find interface for IP address {}, network interface unknown with bandwidth {} bps",
|
||||
ip, rate_limit
|
||||
);
|
||||
return Interface {
|
||||
name: "unknown".to_string(),
|
||||
bandwidth: rate_limit,
|
||||
network_data_mutex: Arc::new(Mutex::new(())),
|
||||
};
|
||||
};
|
||||
|
||||
match Self::get_speed(&interface.name) {
|
||||
Some(speed) => {
|
||||
let bandwidth = min(Self::megabits_to_bits(speed), rate_limit);
|
||||
info!(
|
||||
"network interface {} with bandwidth {} bps",
|
||||
interface.name, bandwidth
|
||||
);
|
||||
|
||||
Interface {
|
||||
name: interface.name,
|
||||
bandwidth,
|
||||
network_data_mutex: Arc::new(Mutex::new(())),
|
||||
}
|
||||
}
|
||||
None => {
|
||||
warn!(
|
||||
"can not get speed, network interface {} with bandwidth {} bps",
|
||||
interface.name, rate_limit
|
||||
);
|
||||
|
||||
Interface {
|
||||
name: interface.name,
|
||||
bandwidth: rate_limit,
|
||||
network_data_mutex: Arc::new(Mutex::new(())),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// get_network_data retrieves the network data for the interface.
|
||||
pub async fn get_network_data(&self) -> NetworkData {
|
||||
// Lock the mutex to ensure exclusive access to network data.
|
||||
let _guard = self.network_data_mutex.lock().await;
|
||||
|
||||
// Initialize sysinfo network.
|
||||
let mut networks = Networks::new_with_refreshed_list();
|
||||
|
||||
// Sleep to calculate the network traffic difference over
|
||||
// the DEFAULT_NETWORKS_REFRESH_INTERVAL.
|
||||
tokio::time::sleep(Self::DEFAULT_NETWORKS_REFRESH_INTERVAL).await;
|
||||
|
||||
// Refresh network information.
|
||||
networks.refresh();
|
||||
let Some(network_data) = networks.get(self.name.as_str()) else {
|
||||
warn!("can not find network data for interface {}", self.name);
|
||||
return NetworkData {
|
||||
max_rx_bandwidth: self.bandwidth,
|
||||
max_tx_bandwidth: self.bandwidth,
|
||||
..Default::default()
|
||||
};
|
||||
};
|
||||
|
||||
// Calculate the receive and transmit bandwidth in bits per second.
|
||||
let rx_bandwidth = (Self::bytes_to_bits(network_data.received()) as f64
|
||||
/ Self::DEFAULT_NETWORKS_REFRESH_INTERVAL.as_secs_f64())
|
||||
.round() as u64;
|
||||
|
||||
// Calculate the transmit bandwidth in bits per second.
|
||||
let tx_bandwidth = (Self::bytes_to_bits(network_data.transmitted()) as f64
|
||||
/ Self::DEFAULT_NETWORKS_REFRESH_INTERVAL.as_secs_f64())
|
||||
.round() as u64;
|
||||
|
||||
NetworkData {
|
||||
max_rx_bandwidth: self.bandwidth,
|
||||
rx_bandwidth: Some(rx_bandwidth),
|
||||
max_tx_bandwidth: self.bandwidth,
|
||||
tx_bandwidth: Some(tx_bandwidth),
|
||||
}
|
||||
}
|
||||
|
||||
/// get_speed returns the speed of the network interface in Mbps.
|
||||
pub fn get_speed(name: &str) -> Option<u64> {
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
let speed_path = format!("/sys/class/net/{}/speed", name);
|
||||
std::fs::read_to_string(&speed_path)
|
||||
.ok()
|
||||
.and_then(|speed_str| speed_str.trim().parse::<u64>().ok())
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
{
|
||||
warn!("can not get interface {} speed on non-linux platform", name);
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// get_network_interface_by_ip returns the network interface that has the specified
|
||||
/// IP address.
|
||||
pub fn get_network_interface_by_ip(ip: IpAddr) -> Option<NetworkInterface> {
|
||||
datalink::interfaces()
|
||||
.into_iter()
|
||||
.find(|interface| interface.ips.iter().any(|ip_net| ip_net.ip() == ip))
|
||||
}
|
||||
|
||||
/// byte_size_to_bits converts a ByteSize to bits.
|
||||
pub fn byte_size_to_bits(size: ByteSize) -> u64 {
|
||||
size.as_u64() * 8
|
||||
}
|
||||
|
||||
/// megabits_to_bits converts megabits to bits.
|
||||
pub fn megabits_to_bits(size: u64) -> u64 {
|
||||
size * 1_000_000 // 1 Mbit = 1,000,000 bits
|
||||
}
|
||||
|
||||
/// bytes_to_bits converts bytes to bits.
|
||||
pub fn bytes_to_bits(size: u64) -> u64 {
|
||||
size * 8 // 1 byte = 8 bits
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use bytesize::ByteSize;
|
||||
|
||||
#[test]
|
||||
fn test_byte_size_to_bits() {
|
||||
let test_cases = vec![
|
||||
(ByteSize::kb(1), 8_000u64),
|
||||
(ByteSize::mb(1), 8_000_000u64),
|
||||
(ByteSize::gb(1), 8_000_000_000u64),
|
||||
(ByteSize::b(0), 0u64),
|
||||
];
|
||||
|
||||
for (input, expected) in test_cases {
|
||||
let result = Interface::byte_size_to_bits(input);
|
||||
assert_eq!(result, expected);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_megabits_to_bits() {
|
||||
let test_cases = vec![
|
||||
(1u64, 1_000_000u64),
|
||||
(1000u64, 1_000_000_000u64),
|
||||
(0u64, 0u64),
|
||||
];
|
||||
|
||||
for (input, expected) in test_cases {
|
||||
let result = Interface::megabits_to_bits(input);
|
||||
assert_eq!(result, expected);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bytes_to_bits() {
|
||||
let test_cases = vec![(1u64, 8u64), (1000u64, 8_000u64), (0u64, 0u64)];
|
||||
|
||||
for (input, expected) in test_cases {
|
||||
let result = Interface::bytes_to_bits(input);
|
||||
assert_eq!(result, expected);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
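The new `Interface` type above caps the reported bandwidth at the smaller of the link speed and the configured rate limit (both in bps) and samples rx/tx traffic over a short refresh window. A hedged sketch of driving it from an async context; resolving the local IP with `local_ip_address` is an assumption for illustration:

```rust
// Hedged sketch of sampling interface bandwidth with the Interface type above.
use bytesize::ByteSize;
use dragonfly_client_util::net::Interface;
use std::sync::Arc;

#[tokio::main]
async fn main() {
    // Assumed helper for illustration; any IpAddr bound to a local interface works.
    let ip = local_ip_address::local_ip().expect("resolve local ip");

    // Bandwidth is min(link speed, rate limit), tracked in bits per second.
    let interface = Arc::new(Interface::new(ip, ByteSize::gb(10)));

    // Samples traffic over DEFAULT_NETWORKS_REFRESH_INTERVAL (about two seconds).
    let data = interface.get_network_data().await;
    println!(
        "rx {:?}/{} bps, tx {:?}/{} bps",
        data.rx_bandwidth, data.max_rx_bandwidth, data.tx_bandwidth, data.max_tx_bandwidth
    );
}
```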
|
@ -18,10 +18,6 @@ path = "src/bin/dfdaemon/main.rs"
|
|||
name = "dfget"
|
||||
path = "src/bin/dfget/main.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "dfstore"
|
||||
path = "src/bin/dfstore/main.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "dfcache"
|
||||
path = "src/bin/dfcache/main.rs"
|
||||
|
@ -38,8 +34,6 @@ hyper.workspace = true
|
|||
hyper-util.workspace = true
|
||||
hyper-rustls.workspace = true
|
||||
tracing.workspace = true
|
||||
validator.workspace = true
|
||||
humantime.workspace = true
|
||||
serde.workspace = true
|
||||
chrono.workspace = true
|
||||
prost-wkt-types.workspace = true
|
||||
|
@ -59,34 +53,40 @@ clap.workspace = true
|
|||
anyhow.workspace = true
|
||||
bytes.workspace = true
|
||||
bytesize.workspace = true
|
||||
humantime.workspace = true
|
||||
uuid.workspace = true
|
||||
percent-encoding.workspace = true
|
||||
tokio-rustls.workspace = true
|
||||
serde_json.workspace = true
|
||||
lru.workspace = true
|
||||
fs2.workspace = true
|
||||
lazy_static.workspace = true
|
||||
futures.workspace = true
|
||||
tracing-log = "0.2"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter", "time", "chrono"] }
|
||||
local-ip-address.workspace = true
|
||||
sysinfo.workspace = true
|
||||
tracing-appender = "0.2.3"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter", "time", "chrono"] }
|
||||
tracing-panic = "0.1.2"
|
||||
tracing-opentelemetry = "0.30.0"
|
||||
opentelemetry = { version = "0.29.1", default-features = false, features = ["trace"] }
|
||||
opentelemetry-otlp = { version = "0.29.0", default-features = false, features = ["trace", "grpc-tonic", "http-proto", "reqwest-blocking-client"] }
|
||||
opentelemetry_sdk = { version = "0.29.0", default-features = false, features = ["trace", "rt-tokio"] }
|
||||
opentelemetry-semantic-conventions = { version = "0.30.0", features = ["semconv_experimental"] }
|
||||
rolling-file = "0.2.0"
|
||||
tracing-opentelemetry = "0.18.0"
|
||||
opentelemetry = { version = "0.18.0", default-features = false, features = ["trace", "rt-tokio"] }
|
||||
opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] }
|
||||
pprof = { version = "0.14", features = ["flamegraph", "protobuf-codec"] }
|
||||
pprof = { version = "0.15", features = ["flamegraph", "protobuf-codec"] }
|
||||
prometheus = { version = "0.13", features = ["process"] }
|
||||
tonic-health = "0.12.3"
|
||||
sysinfo = "0.32.1"
|
||||
tower = { version = "0.4.13", features = ["limit", "load-shed", "buffer"] }
|
||||
indicatif = "0.17.11"
|
||||
indicatif = "0.18.0"
|
||||
hashring = "0.3.6"
|
||||
fslock = "0.2.1"
|
||||
leaky-bucket = "1.1.2"
|
||||
http-body-util = "0.1.3"
|
||||
termion = "4.0.5"
|
||||
tabled = "0.18.0"
|
||||
tabled = "0.20.0"
|
||||
path-absolutize = "3.1.1"
|
||||
dashmap = "6.1.0"
|
||||
fastrand = "2.3.0"
|
||||
glob = "0.3.3"
|
||||
console-subscriber = "0.4.1"
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile.workspace = true
|
||||
|
@ -117,11 +117,6 @@ assets = [
|
|||
"usr/bin/dfcache",
|
||||
"755",
|
||||
],
|
||||
[
|
||||
"../target/x86_64-unknown-linux-gnu/release/dfstore",
|
||||
"usr/bin/dfstore",
|
||||
"755",
|
||||
],
|
||||
[
|
||||
"../ci/dfdaemon.service",
|
||||
"lib/systemd/system/dfdaemon.service",
|
||||
|
@ -164,11 +159,6 @@ assets = [
|
|||
"usr/bin/dfcache",
|
||||
"755",
|
||||
],
|
||||
[
|
||||
"../target/x86_64-unknown-linux-musl/release/dfstore",
|
||||
"usr/bin/dfstore",
|
||||
"755",
|
||||
],
|
||||
[
|
||||
"../ci/dfdaemon.service",
|
||||
"lib/systemd/system/dfdaemon.service",
|
||||
|
@ -211,11 +201,6 @@ assets = [
|
|||
"usr/bin/dfcache",
|
||||
"755",
|
||||
],
|
||||
[
|
||||
"../target/aarch64-unknown-linux-gnu/release/dfstore",
|
||||
"usr/bin/dfstore",
|
||||
"755",
|
||||
],
|
||||
[
|
||||
"../ci/dfdaemon.service",
|
||||
"lib/systemd/system/dfdaemon.service",
|
||||
|
@ -258,11 +243,6 @@ assets = [
|
|||
"usr/bin/dfcache",
|
||||
"755",
|
||||
],
|
||||
[
|
||||
"../target/aarch64-unknown-linux-musl/release/dfstore",
|
||||
"usr/bin/dfstore",
|
||||
"755",
|
||||
],
|
||||
[
|
||||
"../ci/dfdaemon.service",
|
||||
"lib/systemd/system/dfdaemon.service",
|
||||
|
@ -290,7 +270,6 @@ assets = [
|
|||
{ source = "../target/x86_64-unknown-linux-gnu/release/dfget", dest = "/usr/bin/dfget", mode = "755" },
|
||||
{ source = "../target/x86_64-unknown-linux-gnu/release/dfdaemon", dest = "/usr/bin/dfdaemon", mode = "755" },
|
||||
{ source = "../target/x86_64-unknown-linux-gnu/release/dfcache", dest = "/usr/bin/dfcache", mode = "755" },
|
||||
{ source = "../target/x86_64-unknown-linux-gnu/release/dfstore", dest = "/usr/bin/dfstore", mode = "755" },
|
||||
{ source = "../ci/dfdaemon.service", dest = "/lib/systemd/system/dfdaemon.service", config = true, mode = "644" },
|
||||
{ source = "../CONTRIBUTING.md", dest = "/usr/share/doc/client/CONTRIBUTING.md", mode = "644", doc = true },
|
||||
{ source = "../LICENSE", dest = "/usr/share/doc/client/LICENSE.md", mode = "644", doc = true },
|
||||
|
@ -302,7 +281,6 @@ assets = [
|
|||
{ source = "../target/x86_64-unknown-linux-musl/release/dfget", dest = "/usr/bin/dfget", mode = "755" },
|
||||
{ source = "../target/x86_64-unknown-linux-musl/release/dfdaemon", dest = "/usr/bin/dfdaemon", mode = "755" },
|
||||
{ source = "../target/x86_64-unknown-linux-musl/release/dfcache", dest = "/usr/bin/dfcache", mode = "755" },
|
||||
{ source = "../target/x86_64-unknown-linux-musl/release/dfstore", dest = "/usr/bin/dfstore", mode = "755" },
|
||||
{ source = "../ci/dfdaemon.service", dest = "/lib/systemd/system/dfdaemon.service", config = true, mode = "644" },
|
||||
{ source = "../CONTRIBUTING.md", dest = "/usr/share/doc/client/CONTRIBUTING.md", mode = "644", doc = true },
|
||||
{ source = "../LICENSE", dest = "/usr/share/doc/client/LICENSE.md", mode = "644", doc = true },
|
||||
|
@ -315,7 +293,6 @@ assets = [
|
|||
{ source = "../target/aarch64-unknown-linux-gnu/release/dfget", dest = "/usr/bin/dfget", mode = "755" },
|
||||
{ source = "../target/aarch64-unknown-linux-gnu/release/dfdaemon", dest = "/usr/bin/dfdaemon", mode = "755" },
|
||||
{ source = "../target/aarch64-unknown-linux-gnu/release/dfcache", dest = "/usr/bin/dfcache", mode = "755" },
|
||||
{ source = "../target/aarch64-unknown-linux-gnu/release/dfstore", dest = "/usr/bin/dfstore", mode = "755" },
|
||||
{ source = "../ci/dfdaemon.service", dest = "/lib/systemd/system/dfdaemon.service", config = true, mode = "644" },
|
||||
{ source = "../CONTRIBUTING.md", dest = "/usr/share/doc/client/CONTRIBUTING.md", mode = "644", doc = true },
|
||||
{ source = "../LICENSE", dest = "/usr/share/doc/client/LICENSE.md", mode = "644", doc = true },
|
||||
|
@ -327,7 +304,6 @@ assets = [
|
|||
{ source = "../target/aarch64-unknown-linux-musl/release/dfget", dest = "/usr/bin/dfget", mode = "755" },
|
||||
{ source = "../target/aarch64-unknown-linux-musl/release/dfdaemon", dest = "/usr/bin/dfdaemon", mode = "755" },
|
||||
{ source = "../target/aarch64-unknown-linux-musl/release/dfcache", dest = "/usr/bin/dfcache", mode = "755" },
|
||||
{ source = "../target/aarch64-unknown-linux-musl/release/dfstore", dest = "/usr/bin/dfstore", mode = "755" },
|
||||
{ source = "../ci/dfdaemon.service", dest = "/lib/systemd/system/dfdaemon.service", config = true, mode = "644" },
|
||||
{ source = "../CONTRIBUTING.md", dest = "/usr/share/doc/client/CONTRIBUTING.md", mode = "644", doc = true },
|
||||
{ source = "../LICENSE", dest = "/usr/share/doc/client/LICENSE.md", mode = "644", doc = true },
|
||||
|
|
|
@ -14,10 +14,9 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
use crate::grpc::{manager::ManagerClient, scheduler::SchedulerClient};
|
||||
use crate::grpc::scheduler::SchedulerClient;
|
||||
use crate::shutdown;
|
||||
use dragonfly_api::common::v2::{Build, Cpu, Disk, Host, Memory, Network};
|
||||
use dragonfly_api::manager::v2::{DeleteSeedPeerRequest, SourceType, UpdateSeedPeerRequest};
|
||||
use dragonfly_api::scheduler::v2::{AnnounceHostRequest, DeleteHostRequest};
|
||||
use dragonfly_client_config::{
|
||||
dfdaemon::{Config, HostType},
|
||||
|
@ -25,91 +24,13 @@ use dragonfly_client_config::{
|
|||
};
|
||||
use dragonfly_client_core::error::{ErrorType, OrErr};
|
||||
use dragonfly_client_core::Result;
|
||||
use dragonfly_client_util::net::Interface;
|
||||
use std::env;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use sysinfo::System;
|
||||
use tokio::sync::mpsc;
|
||||
use tracing::{error, info, instrument};
|
||||
|
||||
/// ManagerAnnouncer is used to announce the dfdaemon information to the manager.
|
||||
pub struct ManagerAnnouncer {
|
||||
/// config is the configuration of the dfdaemon.
|
||||
config: Arc<Config>,
|
||||
|
||||
/// manager_client is the grpc client of the manager.
|
||||
manager_client: Arc<ManagerClient>,
|
||||
|
||||
/// shutdown is used to shutdown the announcer.
|
||||
shutdown: shutdown::Shutdown,
|
||||
|
||||
/// _shutdown_complete is used to notify the announcer is shutdown.
|
||||
_shutdown_complete: mpsc::UnboundedSender<()>,
|
||||
}
|
||||
|
||||
/// ManagerAnnouncer implements the manager announcer of the dfdaemon.
|
||||
impl ManagerAnnouncer {
|
||||
/// new creates a new manager announcer.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
config: Arc<Config>,
|
||||
manager_client: Arc<ManagerClient>,
|
||||
shutdown: shutdown::Shutdown,
|
||||
shutdown_complete_tx: mpsc::UnboundedSender<()>,
|
||||
) -> Self {
|
||||
Self {
|
||||
config,
|
||||
manager_client,
|
||||
shutdown,
|
||||
_shutdown_complete: shutdown_complete_tx,
|
||||
}
|
||||
}
|
||||
|
||||
/// run announces the dfdaemon information to the manager.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn run(&self) -> Result<()> {
|
||||
// Clone the shutdown channel.
|
||||
let mut shutdown = self.shutdown.clone();
|
||||
|
||||
// If the seed peer is enabled, we should announce the seed peer to the manager.
|
||||
if self.config.seed_peer.enable {
|
||||
// Register the seed peer to the manager.
|
||||
self.manager_client
|
||||
.update_seed_peer(UpdateSeedPeerRequest {
|
||||
source_type: SourceType::SeedPeerSource.into(),
|
||||
hostname: self.config.host.hostname.clone(),
|
||||
r#type: self.config.seed_peer.kind.to_string(),
|
||||
idc: self.config.host.idc.clone(),
|
||||
location: self.config.host.location.clone(),
|
||||
ip: self.config.host.ip.unwrap().to_string(),
|
||||
port: self.config.upload.server.port as i32,
|
||||
download_port: self.config.upload.server.port as i32,
|
||||
seed_peer_cluster_id: self.config.seed_peer.cluster_id,
|
||||
})
|
||||
.await?;
|
||||
|
||||
// Announce to scheduler shutting down with signals.
|
||||
shutdown.recv().await;
|
||||
|
||||
// Delete the seed peer from the manager.
|
||||
self.manager_client
|
||||
.delete_seed_peer(DeleteSeedPeerRequest {
|
||||
source_type: SourceType::SeedPeerSource.into(),
|
||||
hostname: self.config.host.hostname.clone(),
|
||||
ip: self.config.host.ip.unwrap().to_string(),
|
||||
seed_peer_cluster_id: self.config.seed_peer.cluster_id,
|
||||
})
|
||||
.await?;
|
||||
|
||||
info!("announce to manager shutting down");
|
||||
} else {
|
||||
shutdown.recv().await;
|
||||
info!("announce to manager shutting down");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
use tracing::{debug, error, info, instrument};
|
||||
|
||||
/// SchedulerAnnouncer is used to announce the dfdaemon information to the scheduler.
|
||||
pub struct SchedulerAnnouncer {
|
||||
|
@ -122,8 +43,8 @@ pub struct SchedulerAnnouncer {
|
|||
/// scheduler_client is the grpc client of the scheduler.
|
||||
scheduler_client: Arc<SchedulerClient>,
|
||||
|
||||
// system is the system information.
|
||||
system: Arc<Mutex<System>>,
|
||||
/// interface is the network interface.
|
||||
interface: Arc<Interface>,
|
||||
|
||||
/// shutdown is used to shutdown the announcer.
|
||||
shutdown: shutdown::Shutdown,
|
||||
|
@ -135,11 +56,11 @@ pub struct SchedulerAnnouncer {
|
|||
/// SchedulerAnnouncer implements the scheduler announcer of the dfdaemon.
|
||||
impl SchedulerAnnouncer {
|
||||
/// new creates a new scheduler announcer.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn new(
|
||||
config: Arc<Config>,
|
||||
host_id: String,
|
||||
scheduler_client: Arc<SchedulerClient>,
|
||||
interface: Arc<Interface>,
|
||||
shutdown: shutdown::Shutdown,
|
||||
shutdown_complete_tx: mpsc::UnboundedSender<()>,
|
||||
) -> Result<Self> {
|
||||
|
@ -147,7 +68,7 @@ impl SchedulerAnnouncer {
|
|||
config,
|
||||
host_id,
|
||||
scheduler_client,
|
||||
system: Arc::new(Mutex::new(System::new_all())),
|
||||
interface,
|
||||
shutdown,
|
||||
_shutdown_complete: shutdown_complete_tx,
|
||||
};
|
||||
|
@ -155,13 +76,12 @@ impl SchedulerAnnouncer {
|
|||
// Initialize the scheduler announcer.
|
||||
announcer
|
||||
.scheduler_client
|
||||
.init_announce_host(announcer.make_announce_host_request(Duration::ZERO)?)
|
||||
.init_announce_host(announcer.make_announce_host_request(Duration::ZERO).await?)
|
||||
.await?;
|
||||
Ok(announcer)
|
||||
}
|
||||
|
||||
/// run announces the dfdaemon information to the scheduler.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn run(&self) {
|
||||
// Clone the shutdown channel.
|
||||
let mut shutdown = self.shutdown.clone();
|
||||
|
@ -171,7 +91,7 @@ impl SchedulerAnnouncer {
|
|||
loop {
|
||||
tokio::select! {
|
||||
_ = interval.tick() => {
|
||||
let request = match self.make_announce_host_request(interval.period()) {
|
||||
let request = match self.make_announce_host_request(interval.period()).await {
|
||||
Ok(request) => request,
|
||||
Err(err) => {
|
||||
error!("make announce host request failed: {}", err);
|
||||
|
@ -200,7 +120,7 @@ impl SchedulerAnnouncer {
|
|||
|
||||
/// make_announce_host_request makes the announce host request.
|
||||
#[instrument(skip_all)]
|
||||
fn make_announce_host_request(&self, interval: Duration) -> Result<AnnounceHostRequest> {
|
||||
async fn make_announce_host_request(&self, interval: Duration) -> Result<AnnounceHostRequest> {
|
||||
// If the seed peer is enabled, we should announce the seed peer to the scheduler.
|
||||
let host_type = if self.config.seed_peer.enable {
|
||||
self.config.seed_peer.kind
|
||||
|
@ -209,7 +129,7 @@ impl SchedulerAnnouncer {
|
|||
};
|
||||
|
||||
// Refresh the system information.
|
||||
let mut sys = self.system.lock().unwrap();
|
||||
let mut sys = System::new_all();
|
||||
sys.refresh_all();
|
||||
|
||||
// Get the process information.
|
||||
|
@ -236,25 +156,25 @@ impl SchedulerAnnouncer {
|
|||
free: sys.free_memory(),
|
||||
};
|
||||
|
||||
// Wait for the network data to be sampled.
|
||||
let network_data = self.interface.get_network_data().await;
|
||||
debug!(
|
||||
"network data: rx bandwidth {}/{} bps, tx bandwidth {}/{} bps",
|
||||
network_data.rx_bandwidth.unwrap_or(0),
|
||||
network_data.max_rx_bandwidth,
|
||||
network_data.tx_bandwidth.unwrap_or(0),
|
||||
network_data.max_tx_bandwidth
|
||||
);
|
||||
|
||||
// Get the network information.
|
||||
let network = Network {
|
||||
// TODO: Get the count of the tcp connection.
|
||||
tcp_connection_count: 0,
|
||||
|
||||
// TODO: Get the count of the upload tcp connection.
|
||||
upload_tcp_connection_count: 0,
|
||||
idc: self.config.host.idc.clone(),
|
||||
location: self.config.host.location.clone(),
|
||||
|
||||
// TODO: Get the network download rate, refer to
|
||||
// https://docs.rs/sysinfo/latest/sysinfo/struct.NetworkData.html#method.received.
|
||||
download_rate: 0,
|
||||
download_rate_limit: self.config.download.rate_limit.as_u64(),
|
||||
|
||||
// TODO: Get the network download rate, refer to
|
||||
// https://docs.rs/sysinfo/latest/sysinfo/struct.NetworkData.html#method.transmitted
|
||||
upload_rate: 0,
|
||||
upload_rate_limit: self.config.upload.rate_limit.as_u64(),
|
||||
max_rx_bandwidth: network_data.max_rx_bandwidth,
|
||||
rx_bandwidth: network_data.rx_bandwidth,
|
||||
max_tx_bandwidth: network_data.max_tx_bandwidth,
|
||||
tx_bandwidth: network_data.tx_bandwidth,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// Get the disk information.
|
||||
|
|
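The announcer above no longer keeps a shared `Mutex<System>`; each announce request builds a fresh sysinfo `System`, refreshes it, and reads host metrics alongside the awaited network data. A small sketch of that sampling step, assuming sysinfo reports memory in bytes:

```rust
// Sketch of the per-announce system sampling used above: a fresh System per request
// instead of a long-lived Mutex<System>.
use sysinfo::System;

fn sample_memory() -> (u64, u64, u64) {
    let mut sys = System::new_all();
    sys.refresh_all();

    // (total, used, free) memory in bytes, as reported by sysinfo.
    (sys.total_memory(), sys.used_memory(), sys.free_memory())
}
```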
|
@ -23,7 +23,9 @@ use dragonfly_client_core::{
|
|||
error::{ErrorType, OrErr},
|
||||
Error, Result,
|
||||
};
|
||||
use dragonfly_client_util::fs::fallocate;
|
||||
use indicatif::{ProgressBar, ProgressState, ProgressStyle};
|
||||
use local_ip_address::local_ip;
|
||||
use path_absolutize::*;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::Duration;
|
||||
|
@ -84,6 +86,13 @@ pub struct ExportCommand {
|
|||
)]
|
||||
timeout: Duration,
|
||||
|
||||
#[arg(
|
||||
long = "digest",
|
||||
required = false,
|
||||
help = "Verify the integrity of the downloaded file using the specified digest, support sha256, sha512, crc32. If the digest is not specified, the downloaded file will not be verified. Format: <algorithm>:<digest>, e.g. sha256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef, crc32:12345678"
|
||||
)]
|
||||
digest: Option<String>,
|
||||
|
||||
#[arg(
|
||||
short = 'e',
|
||||
long = "endpoint",
|
||||
|
@ -114,17 +123,19 @@ pub struct ExportCommand {
|
|||
)]
|
||||
log_max_files: usize,
|
||||
|
||||
#[arg(
|
||||
long = "verbose",
|
||||
default_value_t = false,
|
||||
help = "Specify whether to print log"
|
||||
)]
|
||||
verbose: bool,
|
||||
#[arg(long, default_value_t = false, help = "Specify whether to print log")]
|
||||
console: bool,
|
||||
}
|
||||
|
||||
/// Implement the execute for ExportCommand.
|
||||
impl ExportCommand {
|
||||
/// execute executes the export command.
|
||||
/// Executes the export command with comprehensive validation and advanced error handling.
|
||||
///
|
||||
/// This function serves as the main entry point for the dfcache export command execution.
|
||||
/// It handles the complete workflow including argument parsing, validation, logging setup,
|
||||
/// dfdaemon client connection, and export operation execution. The function provides
|
||||
/// sophisticated error reporting with colored terminal output, including specialized
|
||||
/// handling for backend errors with HTTP status codes and headers.
|
||||
pub async fn execute(&self) -> Result<()> {
|
||||
// Parse command line arguments.
|
||||
Args::parse();
|
||||
|
@ -136,7 +147,12 @@ impl ExportCommand {
|
|||
self.log_level,
|
||||
self.log_max_files,
|
||||
None,
|
||||
self.verbose,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
false,
|
||||
self.console,
|
||||
);
|
||||
|
||||
// Validate the command line arguments.
|
||||
|
@ -426,7 +442,13 @@ impl ExportCommand {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// run runs the export command.
|
||||
/// Executes the export operation to retrieve cached files from the persistent cache system.
|
||||
///
|
||||
/// This function handles the core export functionality by downloading a cached file from the
|
||||
/// dfdaemon persistent cache system. It supports two transfer modes: direct file transfer
|
||||
/// by dfdaemon (hardlink/copy) or streaming piece content through the client for manual
|
||||
/// file assembly. The operation provides real-time progress feedback and handles file
|
||||
/// creation, directory setup, and efficient piece-by-piece writing with sparse file allocation.
|
||||
async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
|
||||
// Dfcache needs to notify dfdaemon to transfer the piece content of downloading file via unix domain socket
|
||||
// when the `transfer_from_dfdaemon` is true. Otherwise, dfdaemon will download the file and hardlink or
|
||||
|
@ -455,6 +477,8 @@ impl ExportCommand {
|
|||
),
|
||||
need_piece_content,
|
||||
force_hard_link: self.force_hard_link,
|
||||
digest: self.digest.clone(),
|
||||
remote_ip: Some(local_ip().unwrap().to_string()),
|
||||
})
|
||||
.await
|
||||
.inspect_err(|err| {
|
||||
|
@ -488,8 +512,8 @@ impl ExportCommand {
|
|||
};
|
||||
|
||||
// Initialize progress bar.
|
||||
let pb = ProgressBar::new(0);
|
||||
pb.set_style(
|
||||
let progress_bar = ProgressBar::new(0);
|
||||
progress_bar.set_style(
|
||||
ProgressStyle::with_template(
|
||||
"[{elapsed_precise}] [{wide_bar}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})",
|
||||
)
|
||||
|
@ -510,7 +534,15 @@ impl ExportCommand {
|
|||
Some(download_persistent_cache_task_response::Response::DownloadPersistentCacheTaskStartedResponse(
|
||||
response,
|
||||
)) => {
|
||||
pb.set_length(response.content_length);
|
||||
if let Some(f) = &f {
|
||||
fallocate(f, response.content_length)
|
||||
.await
|
||||
.inspect_err(|err| {
|
||||
error!("fallocate {:?} failed: {}", self.output, err);
|
||||
})?;
|
||||
}
|
||||
|
||||
progress_bar.set_length(response.content_length);
|
||||
}
|
||||
Some(download_persistent_cache_task_response::Response::DownloadPieceFinishedResponse(
|
||||
response,
|
||||
|
@ -534,18 +566,23 @@ impl ExportCommand {
|
|||
};
|
||||
|
||||
downloaded += piece.length;
|
||||
let position = min(downloaded + piece.length, pb.length().unwrap_or(0));
|
||||
pb.set_position(position);
|
||||
let position = min(downloaded + piece.length, progress_bar.length().unwrap_or(0));
|
||||
progress_bar.set_position(position);
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
}
|
||||
|
||||
pb.finish_with_message("downloaded");
|
||||
progress_bar.finish_with_message("downloaded");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// validate_args validates the command line arguments.
|
||||
/// Validates command line arguments for the export operation to ensure safe file output.
|
||||
///
|
||||
/// This function performs essential validation of the output path to prevent file conflicts
|
||||
/// and ensure the target location is suitable for export operations. It checks parent
|
||||
/// directory existence, prevents accidental file overwrites, and validates path accessibility
|
||||
/// before allowing the export operation to proceed.
|
||||
fn validate_args(&self) -> Result<()> {
|
||||
let absolute_path = Path::new(&self.output).absolutize()?;
|
||||
match absolute_path.parent() {
|
||||
|
|
|
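The export command above renames `pb` to `progress_bar`, sets its length once the started response reports the content length, and advances it per finished piece. A hedged sketch of the same indicatif setup; the template string mirrors the one in this diff:

```rust
// Hedged sketch of the export progress bar; the helper name is illustrative.
use indicatif::{ProgressBar, ProgressStyle};

fn make_progress_bar(total_bytes: u64) -> ProgressBar {
    let progress_bar = ProgressBar::new(0);
    progress_bar.set_style(
        ProgressStyle::with_template(
            "[{elapsed_precise}] [{wide_bar}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})",
        )
        .expect("valid progress template"),
    );

    // Length becomes known once DownloadPersistentCacheTaskStartedResponse arrives;
    // set_position is then called as each piece finishes.
    progress_bar.set_length(total_bytes);
    progress_bar
}
```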
@ -24,6 +24,7 @@ use dragonfly_client_core::{
|
|||
Error, Result,
|
||||
};
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use local_ip_address::local_ip;
|
||||
use path_absolutize::*;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::Duration;
|
||||
|
@ -42,11 +43,10 @@ pub struct ImportCommand {
|
|||
path: PathBuf,
|
||||
|
||||
#[arg(
|
||||
long = "id",
|
||||
required = false,
|
||||
help = "Specify the id of the persistent cache task. If id is none, dfdaemon will generate the new task id based on the file content, tag and application by crc32 algorithm."
|
||||
long = "content-for-calculating-task-id",
|
||||
help = "Specify the content used to calculate the persistent cache task ID. If it is set, use its value to calculate the task ID, Otherwise, calculate the persistent cache task ID based on url, piece-length, tag, application, and filtered-query-params."
|
||||
)]
|
||||
id: Option<String>,
|
||||
content_for_calculating_task_id: Option<String>,
|
||||
|
||||
#[arg(
|
||||
long = "persistent-replica-count",
|
||||
|
@ -122,17 +122,19 @@ pub struct ImportCommand {
|
|||
)]
|
||||
log_max_files: usize,
|
||||
|
||||
#[arg(
|
||||
long = "verbose",
|
||||
default_value_t = false,
|
||||
help = "Specify whether to print log"
|
||||
)]
|
||||
verbose: bool,
|
||||
#[arg(long, default_value_t = false, help = "Specify whether to print log")]
|
||||
console: bool,
|
||||
}
|
||||
|
||||
/// Implement the execute for ImportCommand.
|
||||
impl ImportCommand {
|
||||
/// execute executes the import sub command.
|
||||
/// Executes the import sub command with comprehensive validation and error handling.
|
||||
///
|
||||
/// This function serves as the main entry point for the dfcache import command execution.
|
||||
/// It handles the complete workflow including argument parsing, validation, logging setup,
|
||||
/// dfdaemon client connection, and import operation execution. The function provides
|
||||
/// detailed error reporting with colored terminal output and follows a fail-fast approach
|
||||
/// with immediate process termination on any critical failures.
|
||||
pub async fn execute(&self) -> Result<()> {
|
||||
// Parse command line arguments.
|
||||
Args::parse();
|
||||
|
@ -144,7 +146,12 @@ impl ImportCommand {
|
|||
self.log_level,
|
||||
self.log_max_files,
|
||||
None,
|
||||
self.verbose,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
false,
|
||||
self.console,
|
||||
);
|
||||
|
||||
// Validate the command line arguments.
|
||||
|
@ -325,23 +332,29 @@ impl ImportCommand {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// run runs the import sub command.
|
||||
/// Executes the cache import operation by uploading a file to the persistent cache system.
|
||||
///
|
||||
/// This function handles the core import functionality by uploading a local file to the
|
||||
/// dfdaemon persistent cache system. It provides visual feedback through a progress spinner,
|
||||
/// converts the file path to absolute format, and configures the cache task with specified
|
||||
/// parameters including TTL, replica count, and piece length. The operation is asynchronous
|
||||
/// and provides completion feedback with the generated task ID.
|
||||
async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
|
||||
let absolute_path = Path::new(&self.path).absolutize()?;
|
||||
info!("import file: {}", absolute_path.to_string_lossy());
|
||||
|
||||
let pb = ProgressBar::new_spinner();
|
||||
pb.enable_steady_tick(DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL);
|
||||
pb.set_style(
|
||||
let progress_bar = ProgressBar::new_spinner();
|
||||
progress_bar.enable_steady_tick(DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL);
|
||||
progress_bar.set_style(
|
||||
ProgressStyle::with_template("{spinner:.blue} {msg}")
|
||||
.unwrap()
|
||||
.tick_strings(&["⣾", "⣽", "⣻", "⢿", "⡿", "⣟", "⣯", "⣷"]),
|
||||
);
|
||||
pb.set_message("Importing...");
|
||||
progress_bar.set_message("Importing...");
|
||||
|
||||
let persistent_cache_task = dfdaemon_download_client
|
||||
.upload_persistent_cache_task(UploadPersistentCacheTaskRequest {
|
||||
task_id: self.id.clone(),
|
||||
content_for_calculating_task_id: self.content_for_calculating_task_id.clone(),
|
||||
path: absolute_path.to_string_lossy().to_string(),
|
||||
persistent_replica_count: self.persistent_replica_count,
|
||||
tag: self.tag.clone(),
|
||||
|
@ -354,14 +367,20 @@ impl ImportCommand {
|
|||
prost_wkt_types::Duration::try_from(self.timeout)
|
||||
.or_err(ErrorType::ParseError)?,
|
||||
),
|
||||
remote_ip: Some(local_ip().unwrap().to_string()),
|
||||
})
|
||||
.await?;
|
||||
|
||||
pb.finish_with_message(format!("Done: {}", persistent_cache_task.id));
|
||||
progress_bar.finish_with_message(format!("Done: {}", persistent_cache_task.id));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// validate_args validates the command line arguments.
|
||||
/// Validates command line arguments for the import operation to ensure safe and correct execution.
|
||||
///
|
||||
/// This function performs comprehensive validation of import-specific parameters to prevent
|
||||
/// invalid operations and ensure the import request meets all system requirements. It validates
|
||||
/// TTL boundaries, file existence and type, and piece length constraints before allowing the
|
||||
/// import operation to proceed.
|
||||
fn validate_args(&self) -> Result<()> {
|
||||
if self.ttl < Duration::from_secs(5 * 60)
|
||||
|| self.ttl > Duration::from_secs(7 * 24 * 60 * 60)
|
||||
|
@ -372,15 +391,6 @@ impl ImportCommand {
|
|||
)));
|
||||
}
|
||||
|
||||
if let Some(id) = self.id.as_ref() {
|
||||
if id.len() != 64 {
|
||||
return Err(Error::ValidationError(format!(
|
||||
"id length must be 64 bytes, but got {}",
|
||||
id.len()
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
if self.path.is_dir() {
|
||||
return Err(Error::ValidationError(format!(
|
||||
"path {} is a directory",
|
||||
|
|
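The import command now sends `content_for_calculating_task_id` instead of a precomputed `--id`, so dfdaemon can derive the persistent cache task id either from that content or from the file itself. A hypothetical dispatch sketch using the parameter enum introduced earlier in this change set; the function name and placement are illustrative, not the actual server-side code:

```rust
// Hypothetical mapping from the new CLI flag to PersistentCacheTaskIDParameter.
use dragonfly_client_util::id_generator::PersistentCacheTaskIDParameter;
use std::path::PathBuf;

fn id_parameter(
    content_for_calculating_task_id: Option<String>,
    path: PathBuf,
    piece_length: Option<u64>,
    tag: Option<String>,
    application: Option<String>,
) -> PersistentCacheTaskIDParameter {
    match content_for_calculating_task_id {
        // Caller-provided content wins; the id is a CRC32 over this string.
        Some(content) => PersistentCacheTaskIDParameter::Content(content),
        // Otherwise the id is derived from the file content plus its metadata.
        None => PersistentCacheTaskIDParameter::FileContentBased {
            path,
            piece_length,
            tag,
            application,
        },
    }
}
```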
|
@ -106,7 +106,12 @@ async fn main() -> anyhow::Result<()> {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// get_and_check_dfdaemon_download_client gets a dfdaemon download client and checks its health.
|
||||
/// Creates and validates a dfdaemon download client with health checking.
|
||||
///
|
||||
/// This function establishes a connection to the dfdaemon service via Unix domain socket
|
||||
/// and performs a health check to ensure the service is running and ready to handle
|
||||
/// download requests. Only after successful health verification does it return the
|
||||
/// download client for actual use.
|
||||
pub async fn get_dfdaemon_download_client(endpoint: PathBuf) -> Result<DfdaemonDownloadClient> {
|
||||
// Check dfdaemon's health.
|
||||
let health_client = HealthClient::new_unix(endpoint.clone()).await?;
|
||||
|
|
|
@ -22,6 +22,7 @@ use dragonfly_client_core::{
|
|||
Error, Result,
|
||||
};
|
||||
use humantime::format_duration;
|
||||
use local_ip_address::local_ip;
|
||||
use std::time::Duration;
|
||||
use tabled::{
|
||||
settings::{object::Rows, Alignment, Modify, Style},
|
||||
|
@ -67,17 +68,19 @@ pub struct StatCommand {
|
|||
)]
|
||||
log_max_files: usize,
|
||||
|
||||
#[arg(
|
||||
long = "verbose",
|
||||
default_value_t = false,
|
||||
help = "Specify whether to print log"
|
||||
)]
|
||||
verbose: bool,
|
||||
#[arg(long, default_value_t = false, help = "Specify whether to print log")]
|
||||
console: bool,
|
||||
}
|
||||
|
||||
/// Implement the execute for StatCommand.
|
||||
impl StatCommand {
|
||||
/// execute executes the stat command.
|
||||
/// Executes the stat command with comprehensive error handling and user feedback.
|
||||
///
|
||||
/// This function serves as the main entry point for the dfcache stat command execution.
|
||||
/// It handles the complete lifecycle including argument parsing, logging initialization,
|
||||
/// dfdaemon client setup, and command execution with detailed error reporting. The
|
||||
/// function provides colored terminal output for better user experience and exits
|
||||
/// with appropriate status codes on failure.
|
||||
pub async fn execute(&self) -> Result<()> {
|
||||
// Parse command line arguments.
|
||||
Args::parse();
|
||||
|
@ -89,7 +92,12 @@ impl StatCommand {
|
|||
self.log_level,
|
||||
self.log_max_files,
|
||||
None,
|
||||
self.verbose,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
false,
|
||||
self.console,
|
||||
);
|
||||
|
||||
// Get dfdaemon download client.
|
||||
|
@@ -232,11 +240,17 @@ impl StatCommand {
        Ok(())
    }

    /// run runs the stat command.
    /// Executes the stat command to retrieve and display persistent cache task information.
    ///
    /// This function queries the dfdaemon service for detailed information about a specific
    /// persistent cache task and presents it in a formatted table for user consumption.
    /// It handles data conversion from raw protocol buffer values to human-readable formats
    /// including byte sizes, durations, and timestamps with proper timezone conversion.
    async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
        let task = dfdaemon_download_client
            .stat_persistent_cache_task(StatPersistentCacheTaskRequest {
                task_id: self.id.clone(),
                remote_ip: Some(local_ip().unwrap().to_string()),
            })
            .await?;
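The run doc comment above mentions converting raw protocol buffer values into human-readable byte sizes and durations. A small, self-contained sketch of those conversions, using the bytesize and humantime crates that appear elsewhere in this diff (the values are placeholders, not real task data):

use bytesize::ByteSize;
use humantime::format_duration;
use std::time::Duration;

fn format_examples() {
    // Placeholder values for illustration only.
    let content_length: u64 = 1_073_741_824;
    println!("{}", ByteSize(content_length)); // prints roughly "1.1 GB"

    let ttl = Duration::from_secs(90);
    println!("{}", format_duration(ttl)); // prints "1m 30s"
}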
@ -15,7 +15,7 @@
|
|||
*/
|
||||
|
||||
use clap::Parser;
|
||||
use dragonfly_client::announcer::{ManagerAnnouncer, SchedulerAnnouncer};
|
||||
use dragonfly_client::announcer::SchedulerAnnouncer;
|
||||
use dragonfly_client::dynconfig::Dynconfig;
|
||||
use dragonfly_client::gc::GC;
|
||||
use dragonfly_client::grpc::{
|
||||
|
@ -30,10 +30,9 @@ use dragonfly_client::shutdown;
|
|||
use dragonfly_client::stats::Stats;
|
||||
use dragonfly_client::tracing::init_tracing;
|
||||
use dragonfly_client_backend::BackendFactory;
|
||||
use dragonfly_client_config::dfdaemon;
|
||||
use dragonfly_client_config::VersionValueParser;
|
||||
use dragonfly_client_config::{dfdaemon, VersionValueParser};
|
||||
use dragonfly_client_storage::Storage;
|
||||
use dragonfly_client_util::id_generator::IDGenerator;
|
||||
use dragonfly_client_util::{id_generator::IDGenerator, net::Interface};
|
||||
use std::net::SocketAddr;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
@ -92,12 +91,8 @@ struct Args {
|
|||
)]
|
||||
log_max_files: usize,
|
||||
|
||||
#[arg(
|
||||
long = "verbose",
|
||||
default_value_t = true,
|
||||
help = "Specify whether to print log"
|
||||
)]
|
||||
verbose: bool,
|
||||
#[arg(long, default_value_t = true, help = "Specify whether to print log")]
|
||||
console: bool,
|
||||
|
||||
#[arg(
|
||||
short = 'V',
|
||||
|
@ -150,8 +145,13 @@ async fn main() -> Result<(), anyhow::Error> {
|
|||
args.log_dir.clone(),
|
||||
args.log_level,
|
||||
args.log_max_files,
|
||||
config.tracing.addr.to_owned(),
|
||||
args.verbose,
|
||||
config.tracing.protocol.clone(),
|
||||
config.tracing.endpoint.clone(),
|
||||
config.tracing.path.clone(),
|
||||
Some(config.tracing.headers.clone()),
|
||||
Some(config.host.clone()),
|
||||
config.seed_peer.enable,
|
||||
args.console,
|
||||
);
|
||||
|
||||
// Initialize storage.
|
||||
|
@ -229,6 +229,9 @@ async fn main() -> Result<(), anyhow::Error> {
|
|||
)?;
|
||||
let persistent_cache_task = Arc::new(persistent_cache_task);
|
||||
|
||||
let interface = Interface::new(config.host.ip.unwrap(), config.upload.rate_limit);
|
||||
let interface = Arc::new(interface);
|
||||
|
||||
// Initialize health server.
|
||||
let health = Health::new(
|
||||
SocketAddr::new(config.health.server.ip.unwrap(), config.health.server.port),
|
||||
|
@ -258,19 +261,12 @@ async fn main() -> Result<(), anyhow::Error> {
|
|||
shutdown_complete_tx.clone(),
|
||||
);
|
||||
|
||||
// Initialize manager announcer.
|
||||
let manager_announcer = ManagerAnnouncer::new(
|
||||
config.clone(),
|
||||
manager_client.clone(),
|
||||
shutdown.clone(),
|
||||
shutdown_complete_tx.clone(),
|
||||
);
|
||||
|
||||
// Initialize scheduler announcer.
|
||||
let scheduler_announcer = SchedulerAnnouncer::new(
|
||||
config.clone(),
|
||||
id_generator.host_id(),
|
||||
scheduler_client.clone(),
|
||||
interface.clone(),
|
||||
shutdown.clone(),
|
||||
shutdown_complete_tx.clone(),
|
||||
)
|
||||
|
@ -285,6 +281,7 @@ async fn main() -> Result<(), anyhow::Error> {
|
|||
SocketAddr::new(config.upload.server.ip.unwrap(), config.upload.server.port),
|
||||
task.clone(),
|
||||
persistent_cache_task.clone(),
|
||||
interface.clone(),
|
||||
shutdown.clone(),
|
||||
shutdown_complete_tx.clone(),
|
||||
);
|
||||
|
@ -333,10 +330,6 @@ async fn main() -> Result<(), anyhow::Error> {
|
|||
info!("stats server exited");
|
||||
},
|
||||
|
||||
_ = tokio::spawn(async move { manager_announcer.run().await.unwrap_or_else(|err| error!("announcer manager failed: {}", err))} ) => {
|
||||
info!("announcer manager exited");
|
||||
},
|
||||
|
||||
_ = tokio::spawn(async move { scheduler_announcer.run().await }) => {
|
||||
info!("announcer scheduler exited");
|
||||
},
|
||||
|
|
|
@ -17,29 +17,30 @@
|
|||
use bytesize::ByteSize;
|
||||
use clap::Parser;
|
||||
use dragonfly_api::common::v2::{Download, Hdfs, ObjectStorage, TaskType};
|
||||
use dragonfly_api::dfdaemon::v2::{download_task_response, DownloadTaskRequest};
|
||||
use dragonfly_api::dfdaemon::v2::{
|
||||
download_task_response, DownloadTaskRequest, ListTaskEntriesRequest,
|
||||
};
|
||||
use dragonfly_api::errordetails::v2::Backend;
|
||||
use dragonfly_client::grpc::dfdaemon_download::DfdaemonDownloadClient;
|
||||
use dragonfly_client::grpc::health::HealthClient;
|
||||
use dragonfly_client::metrics::{
|
||||
collect_backend_request_failure_metrics, collect_backend_request_finished_metrics,
|
||||
collect_backend_request_started_metrics,
|
||||
};
|
||||
use dragonfly_client::resource::piece::MIN_PIECE_LENGTH;
|
||||
use dragonfly_client::tracing::init_tracing;
|
||||
use dragonfly_client_backend::{hdfs, object_storage, BackendFactory, DirEntry, HeadRequest};
|
||||
use dragonfly_client_backend::{hdfs, object_storage, BackendFactory, DirEntry};
|
||||
use dragonfly_client_config::VersionValueParser;
|
||||
use dragonfly_client_config::{self, dfdaemon, dfget};
|
||||
use dragonfly_client_core::error::{BackendError, ErrorType, OrErr};
|
||||
use dragonfly_client_core::error::{ErrorType, OrErr};
|
||||
use dragonfly_client_core::{Error, Result};
|
||||
use dragonfly_client_util::http::{header_vec_to_hashmap, header_vec_to_headermap};
|
||||
use dragonfly_client_util::{fs::fallocate, http::header_vec_to_hashmap};
|
||||
use glob::Pattern;
|
||||
use indicatif::{MultiProgress, ProgressBar, ProgressState, ProgressStyle};
|
||||
use local_ip_address::local_ip;
|
||||
use path_absolutize::*;
|
||||
use percent_encoding::percent_decode_str;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::path::{Component, Path, PathBuf};
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use std::time::Duration;
|
||||
use std::{cmp::min, fmt::Write};
|
||||
use termion::{color, style};
|
||||
use tokio::fs::{self, OpenOptions};
|
||||
|
@ -108,6 +109,12 @@ struct Args {
|
|||
)]
|
||||
force_hard_link: bool,
|
||||
|
||||
#[arg(
|
||||
long = "content-for-calculating-task-id",
|
||||
help = "Specify the content used to calculate the task ID. If it is set, use its value to calculate the task ID, Otherwise, calculate the task ID based on URL, piece-length, tag, application, and filtered-query-params."
|
||||
)]
|
||||
content_for_calculating_task_id: Option<String>,
|
||||
|
||||
#[arg(
|
||||
short = 'O',
|
||||
long = "output",
|
||||
|
@ -132,12 +139,11 @@ struct Args {
|
|||
timeout: Duration,
|
||||
|
||||
#[arg(
|
||||
short = 'd',
|
||||
long = "digest",
|
||||
default_value = "",
|
||||
help = "Verify the integrity of the downloaded file using the specified digest, e.g. md5:86d3f3a95c324c9479bd8986968f4327"
|
||||
required = false,
|
||||
help = "Verify the integrity of the downloaded file using the specified digest, support sha256, sha512, crc32. If the digest is not specified, the downloaded file will not be verified. Format: <algorithm>:<digest>. Examples: sha256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef, crc32:12345678"
|
||||
)]
|
||||
digest: String,
|
||||
digest: Option<String>,
|
||||
|
||||
#[arg(
|
||||
short = 'p',
|
||||
|
@ -157,14 +163,14 @@ struct Args {
|
|||
#[arg(
|
||||
long = "application",
|
||||
default_value = "",
|
||||
help = "Different applications for the same url will be divided into different tasks"
|
||||
help = "Different applications for the same URL will be divided into different tasks"
|
||||
)]
|
||||
application: String,
|
||||
|
||||
#[arg(
|
||||
long = "tag",
|
||||
default_value = "",
|
||||
help = "Different tags for the same url will be divided into different tasks"
|
||||
help = "Different tags for the same URL will be divided into different tasks"
|
||||
)]
|
||||
tag: String,
|
||||
|
||||
|
@ -172,17 +178,24 @@ struct Args {
|
|||
short = 'H',
|
||||
long = "header",
|
||||
required = false,
|
||||
help = "Specify the header for downloading file, e.g. --header='Content-Type: application/json' --header='Accept: application/json'"
|
||||
help = "Specify the header for downloading file. Examples: --header='Content-Type: application/json' --header='Accept: application/json'"
|
||||
)]
|
||||
header: Option<Vec<String>>,
|
||||
|
||||
#[arg(
|
||||
long = "filtered-query-param",
|
||||
required = false,
|
||||
help = "Filter the query parameters of the downloaded URL. If the download URL is the same, it will be scheduled as the same task, e.g. --filtered-query-param='signature' --filtered-query-param='timeout'"
|
||||
help = "Filter the query parameters of the downloaded URL. If the download URL is the same, it will be scheduled as the same task. Examples: --filtered-query-param='signature' --filtered-query-param='timeout'"
|
||||
)]
|
||||
filtered_query_params: Option<Vec<String>>,
|
||||
|
||||
#[arg(
|
||||
long = "include-files",
|
||||
required = false,
|
||||
help = "Filter files to download in a directory using glob patterns relative to the root URL's path. Examples: --include-files='*.txt' --include-files='subdir/file.txt'"
|
||||
)]
|
||||
include_files: Option<Vec<String>>,
|
||||
|
||||
#[arg(
|
||||
long = "disable-back-to-source",
|
||||
default_value_t = false,
|
||||
|
@ -269,12 +282,8 @@ struct Args {
|
|||
)]
|
||||
log_max_files: usize,
|
||||
|
||||
#[arg(
|
||||
long = "verbose",
|
||||
default_value_t = false,
|
||||
help = "Specify whether to print log"
|
||||
)]
|
||||
verbose: bool,
|
||||
#[arg(long, default_value_t = false, help = "Specify whether to print log")]
|
||||
console: bool,
|
||||
|
||||
#[arg(
|
||||
short = 'V',
|
||||
|
@ -299,7 +308,12 @@ async fn main() -> anyhow::Result<()> {
|
|||
args.log_level,
|
||||
args.log_max_files,
|
||||
None,
|
||||
args.verbose,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
false,
|
||||
args.console,
|
||||
);
|
||||
|
||||
// Validate command line arguments.
|
||||
|
@@ -589,7 +603,12 @@ async fn main() -> anyhow::Result<()> {
    Ok(())
}

/// run runs the dfget command.
/// Runs the dfget command to download files or directories from a given URL.
///
/// This function serves as the main entry point for the dfget download operation.
/// It handles both single file downloads and directory downloads based on the URL format.
/// The function performs path normalization, validates the URL scheme's capabilities,
/// and delegates to the appropriate download handler.
async fn run(mut args: Args, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
    // Get the absolute path of the output file.
    args.output = Path::new(&args.output).absolutize()?.into();

@@ -599,7 +618,7 @@ async fn run(mut args: Args, dfdaemon_download_client: DfdaemonDownloadClient) -
    // then download all files in the directory. Otherwise, download the single file.
    let scheme = args.url.scheme();
    if args.url.path().ends_with('/') {
        if !BackendFactory::supported_download_directory(scheme) {
        if BackendFactory::unsupported_download_directory(scheme) {
            return Err(Error::Unsupported(format!("{} download directory", scheme)));
        };

@@ -609,7 +628,13 @@ async fn run(mut args: Args, dfdaemon_download_client: DfdaemonDownloadClient) -
    download(args, ProgressBar::new(0), dfdaemon_download_client).await
}

/// download_dir downloads all files in the directory.
/// Downloads all files in a directory from various storage backends (object storage, HDFS, etc.).
///
/// This function handles directory-based downloads by recursively fetching all entries
/// in the specified directory. It supports filtering files based on include patterns,
/// enforces download limits, and performs concurrent downloads with configurable
/// concurrency control. The function creates the necessary directory structure
/// locally and downloads files while preserving the remote directory hierarchy.
async fn download_dir(args: Args, download_client: DfdaemonDownloadClient) -> Result<()> {
    // Initialize the object storage config and the hdfs config.
    let object_storage = Some(ObjectStorage {

@@ -626,12 +651,17 @@ async fn download_dir(args: Args, download_client: DfdaemonDownloadClient) -> Re
        delegation_token: args.hdfs_delegation_token.clone(),
    });

    // Get all entries in the directory. If the directory is empty, then return directly.
    let entries = get_entries(args.clone(), object_storage, hdfs).await?;
    // Get all entries in the directory.
    let mut entries = get_entries(&args, object_storage, hdfs, download_client.clone()).await?;
    if let Some(ref include_files) = args.include_files {
        entries = filter_entries(&args.url, entries, include_files)?;
    }

    // If the entries is empty, then return directly.
    if entries.is_empty() {
        warn!("directory {} is empty", args.url);
        warn!("no entries found in directory {}", args.url);
        return Ok(());
    };
    }

    // If the actual file count is greater than the max_files, then reject the downloading.
    let count = entries.iter().filter(|entry| !entry.is_dir).count();
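The download_dir doc comment above mentions concurrent downloads with configurable concurrency control; the actual loop is not part of this hunk, so here is only a rough sketch of that pattern with a tokio semaphore. The limit value and the `download_one` helper are hypothetical placeholders, not names from this repository.

use std::sync::Arc;
use tokio::sync::Semaphore;

// Hypothetical per-file download; the real code issues a DownloadTaskRequest per entry.
async fn download_one(entry: DirEntry) {
    let _ = entry;
}

// Sketch of bounded-concurrency directory download.
async fn download_entries(entries: Vec<DirEntry>, max_concurrent_downloads: usize) {
    let semaphore = Arc::new(Semaphore::new(max_concurrent_downloads));
    let mut join_set = tokio::task::JoinSet::new();

    for entry in entries.into_iter().filter(|entry| !entry.is_dir) {
        let semaphore = semaphore.clone();
        join_set.spawn(async move {
            // Each download holds one permit, bounding how many run at once.
            let _permit = semaphore.acquire_owned().await.expect("semaphore closed");
            download_one(entry).await
        });
    }

    // Wait for all downloads to finish, ignoring individual results here.
    while join_set.join_next().await.is_some() {}
}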
@@ -702,7 +732,13 @@ async fn download_dir(args: Args, download_client: DfdaemonDownloadClient) -> Re
    Ok(())
}

/// download downloads the single file.
/// Downloads a single file from various storage backends using the dfdaemon service.
///
/// This function handles single file downloads by communicating with a dfdaemon client.
/// It supports multiple storage protocols (object storage, HDFS, HTTP/HTTPS) and provides
/// two transfer modes: direct download by dfdaemon or streaming piece content through
/// the client. The function includes progress tracking, file creation, and proper error
/// handling throughout the download process.
async fn download(
    args: Args,
    progress_bar: ProgressBar,

@@ -749,7 +785,7 @@ async fn download(
        .download_task(DownloadTaskRequest {
            download: Some(Download {
                url: args.url.to_string(),
                digest: Some(args.digest),
                digest: args.digest,
                // NOTE: Dfget does not support range download.
                range: None,
                r#type: TaskType::Standard as i32,

@@ -772,8 +808,9 @@ async fn download(
                need_piece_content,
                object_storage,
                hdfs,
                load_to_cache: false,
                force_hard_link: args.force_hard_link,
                content_for_calculating_task_id: args.content_for_calculating_task_id,
                remote_ip: Some(local_ip().unwrap().to_string()),
            }),
        })
        .await
@ -829,6 +866,14 @@ async fn download(
|
|||
})? {
|
||||
match message.response {
|
||||
Some(download_task_response::Response::DownloadTaskStartedResponse(response)) => {
|
||||
if let Some(f) = &f {
|
||||
fallocate(f, response.content_length)
|
||||
.await
|
||||
.inspect_err(|err| {
|
||||
error!("fallocate {:?} failed: {}", args.output, err);
|
||||
})?;
|
||||
}
|
||||
|
||||
progress_bar.set_length(response.content_length);
|
||||
}
|
||||
Some(download_task_response::Response::DownloadPieceFinishedResponse(response)) => {
|
||||
|
@@ -865,69 +910,116 @@
    Ok(())
}

/// get_entries gets all entries in the directory.
/// Retrieves all directory entries from a remote storage location.
///
/// This function communicates with the dfdaemon service to list all entries
/// (files and subdirectories) in the specified directory URL. It supports
/// various storage backends including object storage and HDFS by passing
/// the appropriate credentials and configuration. The function converts
/// the gRPC response into a local `DirEntry` format for further processing.
async fn get_entries(
    args: Args,
    args: &Args,
    object_storage: Option<ObjectStorage>,
    hdfs: Option<Hdfs>,
    download_client: DfdaemonDownloadClient,
) -> Result<Vec<DirEntry>> {
    // Initialize backend factory and build backend.
    let backend_factory = BackendFactory::new(None)?;
    let backend = backend_factory.build(args.url.as_str())?;

    // Collect backend request started metrics.
    collect_backend_request_started_metrics(backend.scheme().as_str(), http::Method::HEAD.as_str());

    // Record the start time.
    let start_time = Instant::now();
    let response = backend
        .head(HeadRequest {
            // NOTE: Mock a task id for head request.
    info!("list task entries: {:?}", args.url);
    // List task entries.
    let response = download_client
        .list_task_entries(ListTaskEntriesRequest {
            task_id: Uuid::new_v4().to_string(),
            url: args.url.to_string(),
            http_header: Some(header_vec_to_headermap(
                args.header.clone().unwrap_or_default(),
            )?),
            timeout: args.timeout,
            client_cert: None,
            request_header: header_vec_to_hashmap(args.header.clone().unwrap_or_default())?,
            timeout: None,
            certificate_chain: Vec::new(),
            object_storage,
            hdfs,
            remote_ip: Some(local_ip().unwrap().to_string()),
        })
        .await
        .inspect_err(|_err| {
            // Collect backend request failure metrics.
            collect_backend_request_failure_metrics(
                backend.scheme().as_str(),
                http::Method::HEAD.as_str(),
            );
        .inspect_err(|err| {
            error!("list task entries failed: {}", err);
        })?;

    // Return error when response is failed.
    if !response.success {
        // Collect backend request failure metrics.
        collect_backend_request_failure_metrics(
            backend.scheme().as_str(),
            http::Method::HEAD.as_str(),
        );

        return Err(Error::BackendError(Box::new(BackendError {
            message: response.error_message.unwrap_or_default(),
            status_code: Some(response.http_status_code.unwrap_or_default()),
            header: Some(response.http_header.unwrap_or_default()),
        })));
    }

    // Collect backend request finished metrics.
    collect_backend_request_finished_metrics(
        backend.scheme().as_str(),
        http::Method::HEAD.as_str(),
        start_time.elapsed(),
    );

    Ok(response.entries)
    Ok(response
        .entries
        .into_iter()
        .map(|entry| DirEntry {
            url: entry.url,
            content_length: entry.content_length as usize,
            is_dir: entry.is_dir,
        })
        .collect())
}

/// make_output_by_entry makes the output path by the entry information.
/// Filters directory entries based on include patterns and validates their URLs.
///
/// This function takes a collection of directory entries and filters them based on
/// glob patterns specified in `include_files`. It performs URL validation to ensure
/// all entries have valid URLs and that their paths fall within the scope of the
/// root URL. When an entry matches a pattern, both the entry and its parent
/// directory (if it exists) are included in the result.
fn filter_entries(
    url: &Url,
    entries: Vec<DirEntry>,
    include_files: &[String],
) -> Result<Vec<DirEntry>> {
    let patterns: Vec<Pattern> = include_files
        .iter()
        .filter_map(|include_file| Pattern::new(include_file).ok())
        .collect();

    // Build a HashMap of DirEntry objects keyed by relative paths for filtering and
    // validates URLs and ensures paths are within the root URL's scope.
    let mut entries_by_relative_path = HashMap::with_capacity(entries.len());
    for entry in entries {
        let entry_url = Url::parse(&entry.url).map_err(|err| {
            error!("failed to parse entry URL '{}': {}", entry.url, err);
            Error::ValidationError(format!("invalid URL: {}", entry.url))
        })?;

        let entry_path = entry_url.path();
        match entry_path.strip_prefix(url.path()) {
            Some(relative_path) => entries_by_relative_path
                .insert(relative_path.trim_start_matches('/').to_string(), entry),
            None => {
                error!(
                    "entry path '{}' does not belong to the root path",
                    entry_path
                );
                return Err(Error::ValidationError(format!(
                    "path '{}' is outside the expected scope",
                    entry_path
                )));
            }
        };
    }

    // Filter entries by matching relative paths against patterns, including
    // parent directories for matches.
    let mut filtered_entries = HashSet::new();
    for (relative_path, entry) in &entries_by_relative_path {
        if patterns.iter().any(|pat| pat.matches(relative_path)) {
            filtered_entries.insert(entry.clone());
            if let Some(parent) = std::path::Path::new(relative_path).parent() {
                if let Some(parent_entry) =
                    entries_by_relative_path.get(&parent.join("").to_string_lossy().to_string())
                {
                    filtered_entries.insert(parent_entry.clone());
                }
            }
        }
    }

    Ok(filtered_entries.into_iter().collect())
}
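A compact usage sketch of `filter_entries`, mirroring the behaviour the unit tests further down exercise (URLs, sizes, and the wrapper function are illustrative):

// Illustrative only: `entries` would normally come from get_entries().
fn filter_example() -> Result<()> {
    let root = Url::parse("http://example.com/root/").unwrap();
    let entries = vec![
        DirEntry {
            url: "http://example.com/root/dir/".to_string(),
            content_length: 0,
            is_dir: true,
        },
        DirEntry {
            url: "http://example.com/root/dir/file.txt".to_string(),
            content_length: 100,
            is_dir: false,
        },
        DirEntry {
            url: "http://example.com/root/other.bin".to_string(),
            content_length: 100,
            is_dir: false,
        },
    ];

    // Keeps "dir/file.txt" plus its parent directory entry "dir/"; drops "other.bin".
    let kept = filter_entries(&root, entries, &["dir/file.txt".to_string()])?;
    assert_eq!(kept.len(), 2);
    Ok(())
}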
/// Constructs the local output path for a directory entry based on its remote URL.
///
/// This function maps a remote directory entry to its corresponding local file system
/// path by replacing the remote root directory with the local output directory.
/// It handles URL percent-decoding to ensure proper path construction and maintains
/// the relative directory structure from the remote source.
fn make_output_by_entry(url: Url, output: &Path, entry: DirEntry) -> Result<PathBuf> {
    // Get the root directory of the download directory and the output root directory.
    let root_dir = url.path().to_string();

@@ -945,7 +1037,12 @@ fn make_output_by_entry(url: Url, output: &Path, entry: DirEntry) -> Result<Path
        .into())
}
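To illustrate the mapping the doc comment describes, a small sketch of the expected behaviour (the function body is partly elided in this hunk, so treat this as the intended outcome rather than a verified assertion):

// With root URL "http://example.com/root/" and local output "/tmp/out", an entry
// "http://example.com/root/dir/file.txt" is expected to map to "/tmp/out/dir/file.txt".
let url = Url::parse("http://example.com/root/").unwrap();
let entry = DirEntry {
    url: "http://example.com/root/dir/file.txt".to_string(),
    content_length: 100,
    is_dir: false,
};
let output = make_output_by_entry(url, Path::new("/tmp/out"), entry);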

/// get_and_check_dfdaemon_download_client gets a dfdaemon download client and checks its health.
/// Creates and validates a dfdaemon download client with health checking.
///
/// This function establishes a connection to the dfdaemon service via Unix domain socket
/// and performs a health check to ensure the service is running and ready to handle
/// download requests. Only after successful health verification does it return the
/// download client for actual use.
async fn get_dfdaemon_download_client(endpoint: PathBuf) -> Result<DfdaemonDownloadClient> {
    // Check dfdaemon's health.
    let health_client = HealthClient::new_unix(endpoint.clone()).await?;

@@ -956,7 +1053,13 @@ async fn get_dfdaemon_download_client(endpoint: PathBuf) -> Result<DfdaemonDownl
    Ok(dfdaemon_download_client)
}

/// validate_args validates the command line arguments.
/// Validates command line arguments for consistency and safety requirements.
///
/// This function performs comprehensive validation of the download arguments to ensure
/// they are logically consistent and safe to execute. It checks URL-output path matching,
/// directory existence, file conflicts, piece length constraints, and glob pattern validity.
/// The validation prevents common user errors and potential security issues before
/// starting the download process.
fn validate_args(args: &Args) -> Result<()> {
    // If the URL is a directory, the output path should be a directory.
    if args.url.path().ends_with('/') && !args.output.is_dir() {

@@ -1005,9 +1108,42 @@ fn validate_args(args: &Args) -> Result<()> {
        }
    }

    if let Some(ref include_files) = args.include_files {
        for include_file in include_files {
            if Pattern::new(include_file).is_err() {
                return Err(Error::ValidationError(format!(
                    "invalid glob pattern in include_files: '{}'",
                    include_file
                )));
            }

            if !is_normal_relative_path(include_file) {
                return Err(Error::ValidationError(format!(
                    "path is not a normal relative path in include_files: '{}'. It must not contain '..', '.', or start with '/'.",
                    include_file
                )));
            }
        }
    }

    Ok(())
}

/// Validates that a path string is a normal relative path without unsafe components.
///
/// This function ensures that a given path is both relative (doesn't start with '/')
/// and contains only normal path components. It rejects paths with parent directory
/// references ('..'), current directory references ('.'), or any other special
/// path components that could be used for directory traversal attacks or unexpected
/// file system navigation.
fn is_normal_relative_path(path: &str) -> bool {
    let path = Path::new(path);
    path.is_relative()
        && path
            .components()
            .all(|comp| matches!(comp, Component::Normal(_)))
}

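A few illustrative cases for `is_normal_relative_path`, written as a test-style sketch (these assertions are mine, not part of the diff's own test module below):

#[test]
fn normal_relative_path_examples() {
    assert!(is_normal_relative_path("dir/file.txt"));
    assert!(!is_normal_relative_path("/abs/file.txt")); // absolute path
    assert!(!is_normal_relative_path("../escape.txt")); // parent directory reference
    assert!(!is_normal_relative_path("./file.txt")); // current directory reference
}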
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
@ -1171,4 +1307,346 @@ mod tests {
|
|||
let result = make_output_by_entry(url, output, entry);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn should_filter_entries() {
|
||||
let test_cases = vec![
|
||||
(
|
||||
Url::parse("http://example.com/root/").unwrap(),
|
||||
vec![
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file2.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file4.png".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
],
|
||||
vec!["dir/file.txt".to_string()],
|
||||
vec![
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
],
|
||||
),
|
||||
(
|
||||
Url::parse("http://example.com/root/").unwrap(),
|
||||
vec![
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file2.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file4.png".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
],
|
||||
vec![
|
||||
"dir/file.txt".to_string(),
|
||||
"dir/subdir/file4.png".to_string(),
|
||||
],
|
||||
vec![
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file4.png".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
],
|
||||
),
|
||||
(
|
||||
Url::parse("http://example.com/root/").unwrap(),
|
||||
vec![
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file2.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file4.png".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
],
|
||||
vec!["dir/subdir/*.png".to_string()],
|
||||
vec![
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file4.png".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
],
|
||||
),
|
||||
(
|
||||
Url::parse("http://example.com/root/").unwrap(),
|
||||
vec![
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file2.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file4.png".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
],
|
||||
vec!["dir/*".to_string()],
|
||||
vec![
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file2.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file4.png".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
],
|
||||
),
|
||||
(
|
||||
Url::parse("http://example.com/root/").unwrap(),
|
||||
vec![
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file2.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file4.png".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
],
|
||||
vec!["dir/".to_string()],
|
||||
vec![DirEntry {
|
||||
url: "http://example.com/root/dir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
}],
|
||||
),
|
||||
(
|
||||
Url::parse("http://example.com/root/").unwrap(),
|
||||
vec![
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file2.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/".to_string(),
|
||||
content_length: 10,
|
||||
is_dir: true,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/subdir/file4.png".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
],
|
||||
vec!["test".to_string()],
|
||||
vec![],
|
||||
),
|
||||
(
|
||||
Url::parse("http://example.com/root/").unwrap(),
|
||||
vec![
|
||||
DirEntry {
|
||||
url: "http://example.com/root/dir/file.txt".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
DirEntry {
|
||||
url: " ".to_string(),
|
||||
content_length: 100,
|
||||
is_dir: false,
|
||||
},
|
||||
],
|
||||
vec!["dir/file.txt".to_string()],
|
||||
vec![],
|
||||
),
|
||||
];
|
||||
|
||||
for (url, entries, include_files, expected_entries) in test_cases {
|
||||
let result = filter_entries(&url, entries, &include_files);
|
||||
if result.is_err() {
|
||||
assert!(matches!(result, Err(Error::ValidationError(_))));
|
||||
} else {
|
||||
let filtered_entries = result.unwrap();
|
||||
assert_eq!(filtered_entries.len(), expected_entries.len());
|
||||
|
||||
for filtered_entry in &filtered_entries {
|
||||
assert!(expected_entries
|
||||
.iter()
|
||||
.any(|expected_entry| { expected_entry.url == filtered_entry.url }));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,131 +0,0 @@
|
|||
/*
|
||||
* Copyright 2023 The Dragonfly Authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
use clap::{Parser, Subcommand};
|
||||
use dragonfly_client::tracing::init_tracing;
|
||||
use dragonfly_client_config::VersionValueParser;
|
||||
use dragonfly_client_config::{dfdaemon, dfstore};
|
||||
use std::path::PathBuf;
|
||||
use tracing::Level;
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
#[command(
|
||||
name = dfstore::NAME,
|
||||
author,
|
||||
version,
|
||||
about = "dfstore is a storage command line based on P2P technology in Dragonfly.",
|
||||
long_about = "A storage command line based on P2P technology in Dragonfly that can rely on different types of object storage, \
|
||||
such as S3 or OSS, to provide stable object storage capabilities. It uses the entire P2P network as a cache when storing objects. \
|
||||
Rely on S3 or OSS as the backend to ensure storage reliability. In the process of object storage, \
|
||||
P2P cache is effectively used for fast read and write storage.",
|
||||
disable_version_flag = true
|
||||
)]
|
||||
struct Args {
|
||||
#[arg(
|
||||
short = 'e',
|
||||
long = "endpoint",
|
||||
default_value_os_t = dfdaemon::default_download_unix_socket_path(),
|
||||
help = "Endpoint of dfdaemon's GRPC server"
|
||||
)]
|
||||
endpoint: PathBuf,
|
||||
|
||||
#[arg(
|
||||
short = 'l',
|
||||
long,
|
||||
default_value = "info",
|
||||
help = "Specify the logging level [trace, debug, info, warn, error]"
|
||||
)]
|
||||
log_level: Level,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
default_value_os_t = dfstore::default_dfstore_log_dir(),
|
||||
help = "Specify the log directory"
|
||||
)]
|
||||
log_dir: PathBuf,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
default_value_t = 6,
|
||||
help = "Specify the max number of log files"
|
||||
)]
|
||||
log_max_files: usize,
|
||||
|
||||
#[arg(
|
||||
long = "verbose",
|
||||
default_value_t = true,
|
||||
help = "Specify whether to print log"
|
||||
)]
|
||||
verbose: bool,
|
||||
|
||||
#[arg(
|
||||
short = 'V',
|
||||
long = "version",
|
||||
help = "Print version information",
|
||||
default_value_t = false,
|
||||
action = clap::ArgAction::SetTrue,
|
||||
value_parser = VersionValueParser
|
||||
)]
|
||||
version: bool,
|
||||
|
||||
#[command(subcommand)]
|
||||
command: Command,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Subcommand)]
|
||||
#[command()]
|
||||
pub enum Command {
|
||||
#[command(
|
||||
name = "cp",
|
||||
author,
|
||||
version,
|
||||
about = "Download or upload files using object storage in Dragonfly",
|
||||
long_about = "Download a file from object storage in Dragonfly or upload a local file to object storage in Dragonfly"
|
||||
)]
|
||||
Copy(CopyCommand),
|
||||
|
||||
#[command(
|
||||
name = "rm",
|
||||
author,
|
||||
version,
|
||||
about = "Remove a file from Dragonfly object storage",
|
||||
long_about = "Remove the P2P cache in Dragonfly and remove the file stored in the object storage."
|
||||
)]
|
||||
Remove(RemoveCommand),
|
||||
}
|
||||
|
||||
/// Download or upload files using object storage in Dragonfly.
|
||||
#[derive(Debug, Clone, Parser)]
|
||||
pub struct CopyCommand {}
|
||||
|
||||
/// Remove a file from Dragonfly object storage.
|
||||
#[derive(Debug, Clone, Parser)]
|
||||
pub struct RemoveCommand {}
|
||||
|
||||
fn main() {
|
||||
// Parse command line arguments.
|
||||
let args = Args::parse();
|
||||
|
||||
// Initialize tracing.
|
||||
let _guards = init_tracing(
|
||||
dfstore::NAME,
|
||||
args.log_dir,
|
||||
args.log_level,
|
||||
args.log_max_files,
|
||||
None,
|
||||
args.verbose,
|
||||
);
|
||||
}
|
|
@ -25,7 +25,7 @@ use dragonfly_client_core::{Error, Result};
|
|||
use std::sync::Arc;
|
||||
use tokio::sync::{mpsc, Mutex, RwLock};
|
||||
use tonic_health::pb::health_check_response::ServingStatus;
|
||||
use tracing::{error, info, instrument};
|
||||
use tracing::{debug, error, info, instrument};
|
||||
use url::Url;
|
||||
|
||||
/// Data is the dynamic configuration of the dfdaemon.
|
||||
|
@ -65,7 +65,6 @@ pub struct Dynconfig {
|
|||
/// Dynconfig is the implementation of Dynconfig.
|
||||
impl Dynconfig {
|
||||
/// new creates a new Dynconfig.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn new(
|
||||
config: Arc<Config>,
|
||||
manager_client: Arc<ManagerClient>,
|
||||
|
@ -88,7 +87,6 @@ impl Dynconfig {
|
|||
}
|
||||
|
||||
/// run starts the dynconfig server.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn run(&self) {
|
||||
// Clone the shutdown channel.
|
||||
let mut shutdown = self.shutdown.clone();
|
||||
|
@ -98,9 +96,10 @@ impl Dynconfig {
|
|||
loop {
|
||||
tokio::select! {
|
||||
_ = interval.tick() => {
|
||||
if let Err(err) = self.refresh().await {
|
||||
error!("refresh dynconfig failed: {}", err);
|
||||
};
|
||||
match self.refresh().await {
|
||||
Err(err) => error!("refresh dynconfig failed: {}", err),
|
||||
Ok(_) => debug!("refresh dynconfig success"),
|
||||
}
|
||||
}
|
||||
_ = shutdown.recv() => {
|
||||
// Dynconfig server shutting down with signals.
|
||||
|
@ -163,6 +162,7 @@ impl Dynconfig {
|
|||
location: self.config.host.location.clone(),
|
||||
version: CARGO_PKG_VERSION.to_string(),
|
||||
commit: GIT_COMMIT_SHORT_HASH.to_string(),
|
||||
scheduler_cluster_id: self.config.host.scheduler_cluster_id.unwrap_or(0),
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
|
|
@ -53,7 +53,6 @@ pub struct GC {
|
|||
|
||||
impl GC {
|
||||
/// new creates a new GC.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
config: Arc<Config>,
|
||||
host_id: String,
|
||||
|
@ -73,7 +72,6 @@ impl GC {
|
|||
}
|
||||
|
||||
/// run runs the garbage collector.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn run(&self) {
|
||||
// Clone the shutdown channel.
|
||||
let mut shutdown = self.shutdown.clone();
|
||||
|
@ -127,6 +125,7 @@ impl GC {
|
|||
}
|
||||
}
|
||||
|
||||
info!("evict by task ttl done");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -153,6 +152,8 @@ impl GC {
|
|||
if let Err(err) = self.evict_task_space(need_evict_space as u64).await {
|
||||
info!("failed to evict task by disk usage: {}", err);
|
||||
}
|
||||
|
||||
info!("evict task by disk usage done");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
@ -241,6 +242,7 @@ impl GC {
|
|||
}
|
||||
}
|
||||
|
||||
info!("evict by persistent cache task ttl done");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -270,6 +272,8 @@ impl GC {
|
|||
{
|
||||
info!("failed to evict task by disk usage: {}", err);
|
||||
}
|
||||
|
||||
info!("evict persistent cache task by disk usage done");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
|
|
@ -18,31 +18,40 @@ use crate::metrics::{
|
|||
collect_delete_host_failure_metrics, collect_delete_host_started_metrics,
|
||||
collect_delete_task_failure_metrics, collect_delete_task_started_metrics,
|
||||
collect_download_task_failure_metrics, collect_download_task_finished_metrics,
|
||||
collect_download_task_started_metrics, collect_stat_task_failure_metrics,
|
||||
collect_download_task_started_metrics, collect_list_task_entries_failure_metrics,
|
||||
collect_list_task_entries_started_metrics, collect_stat_task_failure_metrics,
|
||||
collect_stat_task_started_metrics, collect_upload_task_failure_metrics,
|
||||
collect_upload_task_finished_metrics, collect_upload_task_started_metrics,
|
||||
};
|
||||
use crate::resource::{persistent_cache_task, task};
|
||||
use crate::shutdown;
|
||||
use dragonfly_api::common::v2::{PersistentCacheTask, Priority, Task, TaskType};
|
||||
use dragonfly_api::common::v2::{CacheTask, PersistentCacheTask, Priority, Task, TaskType};
|
||||
use dragonfly_api::dfdaemon::v2::{
|
||||
dfdaemon_download_client::DfdaemonDownloadClient as DfdaemonDownloadGRPCClient,
|
||||
dfdaemon_download_server::{
|
||||
DfdaemonDownload, DfdaemonDownloadServer as DfdaemonDownloadGRPCServer,
|
||||
},
|
||||
DeleteTaskRequest, DownloadPersistentCacheTaskRequest, DownloadPersistentCacheTaskResponse,
|
||||
DownloadTaskRequest, DownloadTaskResponse, StatPersistentCacheTaskRequest,
|
||||
DeleteCacheTaskRequest, DeleteTaskRequest, DownloadCacheTaskRequest, DownloadCacheTaskResponse,
|
||||
DownloadPersistentCacheTaskRequest, DownloadPersistentCacheTaskResponse, DownloadTaskRequest,
|
||||
DownloadTaskResponse, Entry, ListTaskEntriesRequest, ListTaskEntriesResponse,
|
||||
StatCacheTaskRequest as DfdaemonStatCacheTaskRequest, StatPersistentCacheTaskRequest,
|
||||
StatTaskRequest as DfdaemonStatTaskRequest, UploadPersistentCacheTaskRequest,
|
||||
};
|
||||
use dragonfly_api::errordetails::v2::Backend;
|
||||
use dragonfly_api::scheduler::v2::DeleteHostRequest as SchedulerDeleteHostRequest;
|
||||
use dragonfly_client_backend::HeadRequest;
|
||||
use dragonfly_client_config::dfdaemon::Config;
|
||||
use dragonfly_client_core::{
|
||||
error::{ErrorType, OrErr},
|
||||
Error as ClientError, Result as ClientResult,
|
||||
};
|
||||
use dragonfly_client_util::http::{get_range, hashmap_to_headermap, headermap_to_hashmap};
|
||||
use dragonfly_client_util::{
|
||||
digest::{verify_file_digest, Digest},
|
||||
http::{get_range, hashmap_to_headermap, headermap_to_hashmap},
|
||||
id_generator::{PersistentCacheTaskIDParameter, TaskIDParameter},
|
||||
};
|
||||
use hyper_util::rt::TokioIo;
|
||||
use opentelemetry::Context;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
@ -60,8 +69,9 @@ use tonic::{
|
|||
};
|
||||
use tower::{service_fn, ServiceBuilder};
|
||||
use tracing::{error, info, instrument, Instrument, Span};
|
||||
use tracing_opentelemetry::OpenTelemetrySpanExt;
|
||||
|
||||
use super::interceptor::TracingInterceptor;
|
||||
use super::interceptor::{ExtractTracingInterceptor, InjectTracingInterceptor};
|
||||
|
||||
/// DfdaemonDownloadServer is the grpc unix server of the download.
|
||||
pub struct DfdaemonDownloadServer {
|
||||
|
@ -71,8 +81,11 @@ pub struct DfdaemonDownloadServer {
|
|||
/// socket_path is the path of the unix domain socket.
|
||||
socket_path: PathBuf,
|
||||
|
||||
/// service is the grpc service of the dfdaemon.
|
||||
service: DfdaemonDownloadGRPCServer<DfdaemonDownloadServerHandler>,
|
||||
/// task is the task manager.
|
||||
task: Arc<task::Task>,
|
||||
|
||||
/// persistent_cache_task is the persistent cache task manager.
|
||||
persistent_cache_task: Arc<persistent_cache_task::PersistentCacheTask>,
|
||||
|
||||
/// shutdown is used to shutdown the grpc server.
|
||||
shutdown: shutdown::Shutdown,
|
||||
|
@ -84,7 +97,6 @@ pub struct DfdaemonDownloadServer {
|
|||
/// DfdaemonDownloadServer implements the grpc server of the download.
|
||||
impl DfdaemonDownloadServer {
|
||||
/// new creates a new DfdaemonServer.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
config: Arc<Config>,
|
||||
socket_path: PathBuf,
|
||||
|
@ -93,27 +105,29 @@ impl DfdaemonDownloadServer {
|
|||
shutdown: shutdown::Shutdown,
|
||||
shutdown_complete_tx: mpsc::UnboundedSender<()>,
|
||||
) -> Self {
|
||||
// Initialize the grpc service.
|
||||
let service = DfdaemonDownloadGRPCServer::new(DfdaemonDownloadServerHandler {
|
||||
socket_path: socket_path.clone(),
|
||||
task,
|
||||
persistent_cache_task,
|
||||
})
|
||||
.max_decoding_message_size(usize::MAX)
|
||||
.max_encoding_message_size(usize::MAX);
|
||||
|
||||
Self {
|
||||
config,
|
||||
socket_path,
|
||||
service,
|
||||
task,
|
||||
persistent_cache_task,
|
||||
shutdown,
|
||||
_shutdown_complete: shutdown_complete_tx,
|
||||
}
|
||||
}
|
||||
|
||||
/// run starts the download server with unix domain socket.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn run(&mut self, grpc_server_started_barrier: Arc<Barrier>) -> ClientResult<()> {
|
||||
// Initialize the grpc service.
|
||||
let service = DfdaemonDownloadGRPCServer::with_interceptor(
|
||||
DfdaemonDownloadServerHandler {
|
||||
config: self.config.clone(),
|
||||
socket_path: self.socket_path.clone(),
|
||||
task: self.task.clone(),
|
||||
persistent_cache_task: self.persistent_cache_task.clone(),
|
||||
},
|
||||
ExtractTracingInterceptor,
|
||||
);
|
||||
|
||||
// Register the reflection service.
|
||||
let reflection = tonic_reflection::server::Builder::configure()
|
||||
.register_encoded_file_descriptor_set(dragonfly_api::FILE_DESCRIPTOR_SET)
|
||||
|
@ -125,11 +139,6 @@ impl DfdaemonDownloadServer {
|
|||
// Initialize health reporter.
|
||||
let (mut health_reporter, health_service) = tonic_health::server::health_reporter();
|
||||
|
||||
// Set the serving status of the download grpc server.
|
||||
health_reporter
|
||||
.set_serving::<DfdaemonDownloadGRPCServer<DfdaemonDownloadServerHandler>>()
|
||||
.await;
|
||||
|
||||
// Start download grpc server with unix domain socket.
|
||||
fs::create_dir_all(self.socket_path.parent().unwrap()).await?;
|
||||
fs::remove_file(self.socket_path.clone())
|
||||
|
@ -140,12 +149,12 @@ impl DfdaemonDownloadServer {
|
|||
|
||||
// Bind the unix domain socket and set the permissions for the socket.
|
||||
let uds = UnixListener::bind(&self.socket_path)?;
|
||||
let perms = std::fs::Permissions::from_mode(0o660);
|
||||
let perms = std::fs::Permissions::from_mode(0o777);
|
||||
fs::set_permissions(&self.socket_path, perms).await?;
|
||||
|
||||
// TODO(Gaius): RateLimitLayer is not implemented Clone, so we can't use it here.
|
||||
// Only use the LoadShed layer and the ConcurrencyLimit layer.
|
||||
let layer = ServiceBuilder::new()
|
||||
let rate_limit_layer = ServiceBuilder::new()
|
||||
.concurrency_limit(self.config.download.server.request_rate_limit as usize)
|
||||
.load_shed()
|
||||
.into_inner();
|
||||
|
@ -156,10 +165,10 @@ impl DfdaemonDownloadServer {
|
|||
.tcp_keepalive(Some(super::TCP_KEEPALIVE))
|
||||
.http2_keepalive_interval(Some(super::HTTP2_KEEP_ALIVE_INTERVAL))
|
||||
.http2_keepalive_timeout(Some(super::HTTP2_KEEP_ALIVE_TIMEOUT))
|
||||
.layer(layer)
|
||||
.add_service(reflection.clone())
|
||||
.layer(rate_limit_layer)
|
||||
.add_service(reflection)
|
||||
.add_service(health_service)
|
||||
.add_service(self.service.clone())
|
||||
.add_service(service)
|
||||
.serve_with_incoming_shutdown(uds_stream, async move {
|
||||
// When the grpc server is started, notify the barrier. If the shutdown signal is received
|
||||
// before barrier is waited successfully, the server will shutdown immediately.
|
||||
|
@ -167,6 +176,12 @@ impl DfdaemonDownloadServer {
|
|||
// Notify the download grpc server is started.
|
||||
_ = grpc_server_started_barrier.wait() => {
|
||||
info!("download server is ready to start");
|
||||
|
||||
health_reporter
|
||||
.set_serving::<DfdaemonDownloadGRPCServer<DfdaemonDownloadServerHandler>>()
|
||||
.await;
|
||||
|
||||
info!("download server's health status set to serving");
|
||||
}
|
||||
// Wait for shutdown signal.
|
||||
_ = shutdown.recv() => {
|
||||
|
@ -196,6 +211,9 @@ impl DfdaemonDownloadServer {
|
|||
|
||||
/// DfdaemonDownloadServerHandler is the handler of the dfdaemon download grpc service.
|
||||
pub struct DfdaemonDownloadServerHandler {
|
||||
/// config is the configuration of the dfdaemon.
|
||||
config: Arc<Config>,
|
||||
|
||||
/// socket_path is the path of the unix domain socket.
|
||||
socket_path: PathBuf,
|
||||
|
||||
|
@ -213,11 +231,19 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
type DownloadTaskStream = ReceiverStream<Result<DownloadTaskResponse, Status>>;
|
||||
|
||||
/// download_task tells the dfdaemon to download the task.
|
||||
#[instrument(skip_all, fields(host_id, task_id, peer_id))]
|
||||
#[instrument(
|
||||
skip_all,
|
||||
fields(host_id, task_id, peer_id, url, remote_ip, content_length)
|
||||
)]
|
||||
async fn download_task(
|
||||
&self,
|
||||
request: Request<DownloadTaskRequest>,
|
||||
) -> Result<Response<Self::DownloadTaskStream>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Record the start time.
|
||||
let start_time = Instant::now();
|
||||
|
||||
|
@ -234,13 +260,16 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
let task_id = self
|
||||
.task
|
||||
.id_generator
|
||||
.task_id(
|
||||
download.url.as_str(),
|
||||
download.piece_length,
|
||||
download.tag.as_deref(),
|
||||
download.application.as_deref(),
|
||||
download.filtered_query_params.clone(),
|
||||
)
|
||||
.task_id(match download.content_for_calculating_task_id.clone() {
|
||||
Some(content) => TaskIDParameter::Content(content),
|
||||
None => TaskIDParameter::URLBased {
|
||||
url: download.url.clone(),
|
||||
piece_length: download.piece_length,
|
||||
tag: download.tag.clone(),
|
||||
application: download.application.clone(),
|
||||
filtered_query_params: download.filtered_query_params.clone(),
|
||||
},
|
||||
})
|
||||
.map_err(|e| {
|
||||
error!("generate task id: {}", e);
|
||||
Status::invalid_argument(e.to_string())
|
||||
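For readers unfamiliar with the id-generator API used above, a brief sketch of the two `TaskIDParameter` variants as they appear at this call site. Field types are assumed from the surrounding code and the values are placeholders.

use dragonfly_client_util::id_generator::TaskIDParameter;

// Explicit content: the task id is derived from this string alone.
let by_content = TaskIDParameter::Content("my-custom-task-key".to_string());

// URL-based: the task id is derived from the URL plus the listed attributes.
let by_url = TaskIDParameter::URLBased {
    url: "https://example.com/file.bin".to_string(),
    piece_length: None,
    tag: Some("v1".to_string()),
    application: Some("demo".to_string()),
    filtered_query_params: vec!["signature".to_string()],
};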
|
@ -256,6 +285,11 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
Span::current().record("host_id", host_id.as_str());
|
||||
Span::current().record("task_id", task_id.as_str());
|
||||
Span::current().record("peer_id", peer_id.as_str());
|
||||
Span::current().record("url", download.url.clone());
|
||||
Span::current().record(
|
||||
"remote_ip",
|
||||
download.remote_ip.clone().unwrap_or_default().as_str(),
|
||||
);
|
||||
info!("download task in download server");
|
||||
|
||||
// Download task started.
|
||||
|
@ -329,12 +363,15 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
error!("missing content length in the response");
|
||||
return Err(Status::internal("missing content length in the response"));
|
||||
};
|
||||
|
||||
info!(
|
||||
"content length {}, piece length {}",
|
||||
content_length,
|
||||
task.piece_length().unwrap_or_default()
|
||||
);
|
||||
|
||||
Span::current().record("content_length", content_length);
|
||||
|
||||
// Download's range priority is higher than the request header's range.
|
||||
// If download protocol is http, use the range of the request header.
|
||||
// If download protocol is not http, use the range of the download.
|
||||
|
@ -467,22 +504,48 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
)),
|
||||
)
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
Err(err) => {
|
||||
error!("check output path: {}", err);
|
||||
handle_error(&out_stream_tx, err).await;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if let Err(err) = task_manager_clone
|
||||
} else if let Err(err) = task_manager_clone
|
||||
.copy_task(task_clone.id.as_str(), output_path)
|
||||
.await
|
||||
{
|
||||
error!("copy task: {}", err);
|
||||
handle_error(&out_stream_tx, err).await;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Verify the file digest if it is provided.
|
||||
if let Some(raw_digest) = &download_clone.digest {
|
||||
let digest = match raw_digest.parse::<Digest>() {
|
||||
Ok(digest) => digest,
|
||||
Err(err) => {
|
||||
error!("parse digest: {}", err);
|
||||
handle_error(
|
||||
&out_stream_tx,
|
||||
Status::invalid_argument(format!(
|
||||
"invalid digest({}): {}",
|
||||
raw_digest, err
|
||||
)),
|
||||
)
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(err) =
|
||||
verify_file_digest(digest, Path::new(output_path.as_str()))
|
||||
{
|
||||
error!("verify file digest: {}", err);
|
||||
handle_error(&out_stream_tx, err).await;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
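A minimal sketch of the digest verification step shown above, pulled out of the streaming context. The digest string and output path are placeholders; `Digest` and `verify_file_digest` come from `dragonfly_client_util::digest` as imported in this diff.

use dragonfly_client_util::digest::{verify_file_digest, Digest};
use std::path::Path;

fn verify_example() {
    // Placeholder digest value in "<algorithm>:<hex>" form.
    let digest = "crc32:12345678"
        .parse::<Digest>()
        .expect("placeholder digest should parse");
    verify_file_digest(digest, Path::new("/tmp/output.bin"))
        .expect("digest verification failed");
}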
||||
|
@ -607,11 +670,16 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
}
|
||||
|
||||
/// stat_task gets the status of the task.
|
||||
#[instrument(skip_all, fields(host_id, task_id))]
|
||||
#[instrument(skip_all, fields(host_id, task_id, remote_ip, local_only))]
|
||||
async fn stat_task(
|
||||
&self,
|
||||
request: Request<DfdaemonStatTaskRequest>,
|
||||
) -> Result<Response<Task>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Clone the request.
|
||||
let request = request.into_inner();
|
||||
|
||||
|
@ -621,36 +689,137 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
// Get the task id from the request.
|
||||
let task_id = request.task_id;
|
||||
|
||||
// Get the local_only flag from the request, default to false.
|
||||
let local_only = request.local_only;
|
||||
|
||||
// Span record the host id and task id.
|
||||
Span::current().record("host_id", host_id.as_str());
|
||||
Span::current().record("task_id", task_id.as_str());
|
||||
Span::current().record(
|
||||
"remote_ip",
|
||||
request.remote_ip.clone().unwrap_or_default().as_str(),
|
||||
);
|
||||
Span::current().record("local_only", local_only.to_string().as_str());
|
||||
info!("stat task in download server");
|
||||
|
||||
// Collect the stat task metrics.
|
||||
collect_stat_task_started_metrics(TaskType::Standard as i32);
|
||||
|
||||
// Get the task from the scheduler.
|
||||
let task = self
|
||||
match self
|
||||
.task
|
||||
.stat(task_id.as_str(), host_id.as_str())
|
||||
.stat(task_id.as_str(), host_id.as_str(), local_only)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
{
|
||||
Ok(task) => Ok(Response::new(task)),
|
||||
Err(err) => {
|
||||
// Collect the stat task failure metrics.
|
||||
collect_stat_task_failure_metrics(TaskType::Standard as i32);
|
||||
|
||||
error!("stat task: {}", err);
|
||||
// Log the error with detailed context.
|
||||
error!("stat task failed: {}", err);
|
||||
|
||||
// Map the error to an appropriate gRPC status.
|
||||
Err(match err {
|
||||
ClientError::TaskNotFound(id) => {
|
||||
Status::not_found(format!("task not found: {}", id))
|
||||
}
|
||||
_ => Status::internal(err.to_string()),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
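The rewritten stat_task above no longer collapses every failure into Status::internal: a missing task is now reported as NOT_FOUND. A minimal sketch of that mapping is shown below; StatError is a hypothetical stand-in for the crate's ClientError.

// Hedged sketch of the error-to-status mapping used by stat_task above.
// StatError is hypothetical; the real type is ClientError.
use tonic::Status;

#[derive(Debug)]
enum StatError {
    TaskNotFound(String),
    Other(String),
}

fn to_status(err: StatError) -> Status {
    match err {
        // A missing task maps to NOT_FOUND so callers can react to it
        // without parsing the error message.
        StatError::TaskNotFound(id) => Status::not_found(format!("task not found: {}", id)),
        // Everything else remains an internal error.
        StatError::Other(msg) => Status::internal(msg),
    }
}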
|
||||
|
||||
/// list_task_entries lists the task entries.
|
||||
#[instrument(skip_all, fields(task_id, url, remote_ip))]
|
||||
async fn list_task_entries(
|
||||
&self,
|
||||
request: Request<ListTaskEntriesRequest>,
|
||||
) -> Result<Response<ListTaskEntriesResponse>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Clone the request.
|
||||
let request = request.into_inner();
|
||||
|
||||
// Span record the task id and url.
|
||||
Span::current().record("task_id", request.task_id.as_str());
|
||||
Span::current().record("url", request.url.as_str());
|
||||
Span::current().record(
|
||||
"remote_ip",
|
||||
request.remote_ip.clone().unwrap_or_default().as_str(),
|
||||
);
|
||||
info!("list tasks in download server");
|
||||
|
||||
// Collect the list tasks started metrics.
|
||||
collect_list_task_entries_started_metrics(TaskType::Standard as i32);
|
||||
|
||||
// Build the backend.
|
||||
let backend = self
|
||||
.task
|
||||
.backend_factory
|
||||
.build(request.url.as_str())
|
||||
.map_err(|err| {
|
||||
// Collect the list tasks failure metrics.
|
||||
collect_list_task_entries_failure_metrics(TaskType::Standard as i32);
|
||||
|
||||
error!("build backend: {}", err);
|
||||
Status::internal(err.to_string())
|
||||
})?;
|
||||
|
||||
Ok(Response::new(task))
|
||||
// Head the task entries.
|
||||
let response = backend
|
||||
.head(HeadRequest {
|
||||
task_id: request.task_id.clone(),
|
||||
url: request.url.clone(),
|
||||
http_header: Some(hashmap_to_headermap(&request.request_header).map_err(
|
||||
|err| {
|
||||
error!("parse request header: {}", err);
|
||||
Status::internal(err.to_string())
|
||||
},
|
||||
)?),
|
||||
timeout: self.config.download.piece_timeout,
|
||||
client_cert: None,
|
||||
object_storage: request.object_storage.clone(),
|
||||
hdfs: request.hdfs.clone(),
|
||||
})
|
||||
.await
|
||||
.map_err(|err| {
|
||||
// Collect the list tasks failure metrics.
|
||||
collect_list_task_entries_failure_metrics(TaskType::Standard as i32);
|
||||
|
||||
error!("list task entries: {}", err);
|
||||
Status::internal(err.to_string())
|
||||
})?;
|
||||
|
||||
Ok(Response::new(ListTaskEntriesResponse {
|
||||
content_length: response.content_length.unwrap_or_default(),
|
||||
response_header: headermap_to_hashmap(&response.http_header.unwrap_or_default()),
|
||||
status_code: response.http_status_code.map(|code| code.as_u16().into()),
|
||||
entries: response
|
||||
.entries
|
||||
.into_iter()
|
||||
.map(|dir_entry| Entry {
|
||||
url: dir_entry.url,
|
||||
content_length: dir_entry.content_length as u64,
|
||||
is_dir: dir_entry.is_dir,
|
||||
})
|
||||
.collect(),
|
||||
}))
|
||||
}
|
||||
|
||||
/// delete_task calls the dfdaemon to delete the task.
|
||||
#[instrument(skip_all, fields(host_id, task_id))]
|
||||
#[instrument(skip_all, fields(host_id, task_id, remote_ip))]
|
||||
async fn delete_task(
|
||||
&self,
|
||||
request: Request<DeleteTaskRequest>,
|
||||
) -> Result<Response<()>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Clone the request.
|
||||
let request = request.into_inner();
|
||||
|
||||
|
@ -663,6 +832,10 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
// Span record the host id and task id.
|
||||
Span::current().record("host_id", host_id.as_str());
|
||||
Span::current().record("task_id", task_id.as_str());
|
||||
Span::current().record(
|
||||
"remote_ip",
|
||||
request.remote_ip.clone().unwrap_or_default().as_str(),
|
||||
);
|
||||
info!("delete task in download server");
|
||||
|
||||
// Collect the delete task started metrics.
|
||||
|
@ -685,7 +858,12 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
|
||||
/// delete_host calls the scheduler to delete the host.
|
||||
#[instrument(skip_all, fields(host_id))]
|
||||
async fn delete_host(&self, _: Request<()>) -> Result<Response<()>, Status> {
|
||||
async fn delete_host(&self, request: Request<()>) -> Result<Response<()>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Generate the host id.
|
||||
let host_id = self.task.id_generator.host_id();
|
||||
|
||||
|
@ -716,11 +894,16 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
ReceiverStream<Result<DownloadPersistentCacheTaskResponse, Status>>;
|
||||
|
||||
/// download_persistent_cache_task downloads the persistent cache task.
|
||||
#[instrument(skip_all, fields(host_id, task_id, peer_id))]
|
||||
#[instrument(skip_all, fields(host_id, task_id, peer_id, remote_ip, content_length))]
|
||||
async fn download_persistent_cache_task(
|
||||
&self,
|
||||
request: Request<DownloadPersistentCacheTaskRequest>,
|
||||
) -> Result<Response<Self::DownloadPersistentCacheTaskStream>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Record the start time.
|
||||
let start_time = Instant::now();
|
||||
|
||||
|
@ -744,6 +927,10 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
Span::current().record("host_id", host_id.as_str());
|
||||
Span::current().record("task_id", task_id.as_str());
|
||||
Span::current().record("peer_id", peer_id.as_str());
|
||||
Span::current().record(
|
||||
"remote_ip",
|
||||
request.remote_ip.clone().unwrap_or_default().as_str(),
|
||||
);
|
||||
info!("download persistent cache task in download server");
|
||||
|
||||
// Download task started.
|
||||
|
@ -797,12 +984,15 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
task
|
||||
}
|
||||
};
|
||||
|
||||
info!(
|
||||
"content length {}, piece length {}",
|
||||
task.content_length(),
|
||||
task.piece_length()
|
||||
);
|
||||
|
||||
Span::current().record("content_length", task.content_length());
|
||||
|
||||
// Initialize stream channel.
|
||||
let request_clone = request.clone();
|
||||
let task_manager_clone = self.persistent_cache_task.clone();
|
||||
|
@ -884,22 +1074,48 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
)),
|
||||
)
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
Err(err) => {
|
||||
error!("check output path: {}", err);
|
||||
handle_error(&out_stream_tx, err).await;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if let Err(err) = task_manager_clone
|
||||
} else if let Err(err) = task_manager_clone
|
||||
.copy_task(task_clone.id.as_str(), output_path)
|
||||
.await
|
||||
{
|
||||
error!("copy task: {}", err);
|
||||
handle_error(&out_stream_tx, err).await;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Verify the file digest if it is provided.
|
||||
if let Some(raw_digest) = &request_clone.digest {
|
||||
let digest = match raw_digest.parse::<Digest>() {
|
||||
Ok(digest) => digest,
|
||||
Err(err) => {
|
||||
error!("parse digest: {}", err);
|
||||
handle_error(
|
||||
&out_stream_tx,
|
||||
Status::invalid_argument(format!(
|
||||
"invalid digest({}): {}",
|
||||
raw_digest, err
|
||||
)),
|
||||
)
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(err) =
|
||||
verify_file_digest(digest, Path::new(output_path.as_str()))
|
||||
{
|
||||
error!("verify file digest: {}", err);
|
||||
handle_error(&out_stream_tx, err).await;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -936,11 +1152,16 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
}
|
||||
|
||||
/// upload_persistent_cache_task uploads the persistent cache task.
|
||||
#[instrument(skip_all, fields(host_id, task_id, peer_id))]
|
||||
#[instrument(skip_all, fields(host_id, task_id, peer_id, remote_ip))]
|
||||
async fn upload_persistent_cache_task(
|
||||
&self,
|
||||
request: Request<UploadPersistentCacheTaskRequest>,
|
||||
) -> Result<Response<PersistentCacheTask>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Record the start time.
|
||||
let start_time = Instant::now();
|
||||
|
||||
|
@ -950,22 +1171,22 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
info!("upload persistent cache task {:?}", request);
|
||||
|
||||
// Generate the task id.
|
||||
let task_id = match request.task_id.as_deref() {
|
||||
Some(task_id) => task_id.to_string(),
|
||||
None => self
|
||||
.task
|
||||
.id_generator
|
||||
.persistent_cache_task_id(
|
||||
&path.to_path_buf(),
|
||||
request.piece_length,
|
||||
request.tag.as_deref(),
|
||||
request.application.as_deref(),
|
||||
)
|
||||
.map_err(|err| {
|
||||
error!("generate persistent cache task id: {}", err);
|
||||
Status::invalid_argument(err.to_string())
|
||||
})?,
|
||||
};
|
||||
let task_id = self
|
||||
.task
|
||||
.id_generator
|
||||
.persistent_cache_task_id(match request.content_for_calculating_task_id.clone() {
|
||||
Some(content) => PersistentCacheTaskIDParameter::Content(content),
|
||||
None => PersistentCacheTaskIDParameter::FileContentBased {
|
||||
path: path.to_path_buf(),
|
||||
piece_length: request.piece_length,
|
||||
tag: request.tag.clone(),
|
||||
application: request.application.clone(),
|
||||
},
|
||||
})
|
||||
.map_err(|err| {
|
||||
error!("generate persistent cache task id: {}", err);
|
||||
Status::invalid_argument(err.to_string())
|
||||
})?;
|
||||
info!("generate persistent cache task id: {}", task_id);
|
||||
|
||||
// Generate the host id.
|
||||
|
@ -978,6 +1199,10 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
Span::current().record("host_id", host_id.as_str());
|
||||
Span::current().record("task_id", task_id.as_str());
|
||||
Span::current().record("peer_id", peer_id.as_str());
|
||||
Span::current().record(
|
||||
"remote_ip",
|
||||
request.remote_ip.clone().unwrap_or_default().as_str(),
|
||||
);
|
||||
info!("upload persistent cache task in download server");
|
||||
|
||||
// Collect upload task started metrics.
|
||||
|
@ -1028,11 +1253,16 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
}
|
||||
|
||||
/// stat_persistent_cache_task stats the persistent cache task.
|
||||
#[instrument(skip_all, fields(host_id, task_id))]
|
||||
#[instrument(skip_all, fields(host_id, task_id, remote_ip))]
|
||||
async fn stat_persistent_cache_task(
|
||||
&self,
|
||||
request: Request<StatPersistentCacheTaskRequest>,
|
||||
) -> Result<Response<PersistentCacheTask>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Clone the request.
|
||||
let request = request.into_inner();
|
||||
|
||||
|
@ -1045,6 +1275,10 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
// Span record the host id and task id.
|
||||
Span::current().record("host_id", host_id.as_str());
|
||||
Span::current().record("task_id", task_id.as_str());
|
||||
Span::current().record(
|
||||
"remote_ip",
|
||||
request.remote_ip.clone().unwrap_or_default().as_str(),
|
||||
);
|
||||
info!("stat persistent cache task in download server");
|
||||
|
||||
// Collect the stat persistent cache task started metrics.
|
||||
|
@ -1064,19 +1298,51 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
|
|||
|
||||
Ok(Response::new(task))
|
||||
}
|
||||
|
||||
/// DownloadCacheTaskStream is the stream of the download cache task response.
|
||||
type DownloadCacheTaskStream = ReceiverStream<Result<DownloadCacheTaskResponse, Status>>;
|
||||
|
||||
/// download_cache_task tells the dfdaemon to download the cache task.
|
||||
#[instrument(
|
||||
skip_all,
|
||||
fields(host_id, task_id, peer_id, url, remote_ip, content_length)
|
||||
)]
|
||||
async fn download_cache_task(
|
||||
&self,
|
||||
_request: Request<DownloadCacheTaskRequest>,
|
||||
) -> Result<Response<Self::DownloadCacheTaskStream>, Status> {
|
||||
todo!();
|
||||
}
|
||||
|
||||
/// stat_cache_task gets the status of the cache task.
|
||||
#[instrument(skip_all, fields(host_id, task_id, remote_ip, local_only))]
|
||||
async fn stat_cache_task(
|
||||
&self,
|
||||
_request: Request<DfdaemonStatCacheTaskRequest>,
|
||||
) -> Result<Response<CacheTask>, Status> {
|
||||
todo!();
|
||||
}
|
||||
|
||||
/// delete_cache_task calls the dfdaemon to delete the cache task.
|
||||
#[instrument(skip_all, fields(host_id, task_id, remote_ip))]
|
||||
async fn delete_cache_task(
|
||||
&self,
|
||||
_request: Request<DeleteCacheTaskRequest>,
|
||||
) -> Result<Response<()>, Status> {
|
||||
todo!();
|
||||
}
|
||||
}
|
||||
|
||||
/// DfdaemonDownloadClient is a wrapper of DfdaemonDownloadGRPCClient.
|
||||
#[derive(Clone)]
|
||||
pub struct DfdaemonDownloadClient {
|
||||
/// client is the grpc client of the dfdaemon.
|
||||
pub client: DfdaemonDownloadGRPCClient<InterceptedService<Channel, TracingInterceptor>>,
|
||||
pub client: DfdaemonDownloadGRPCClient<InterceptedService<Channel, InjectTracingInterceptor>>,
|
||||
}
|
||||
|
||||
/// DfdaemonDownloadClient implements the grpc client of the dfdaemon download.
|
||||
impl DfdaemonDownloadClient {
|
||||
/// new_unix creates a new DfdaemonDownloadClient with unix domain socket.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn new_unix(socket_path: PathBuf) -> ClientResult<Self> {
|
||||
// Ignore the uri because it is not used.
|
||||
let channel = Endpoint::try_from("http://[::]:50051")
|
||||
|
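new_unix dials the daemon over a unix domain socket, and the hunk is truncated at the Endpoint line, so the sketch below only illustrates the usual tonic connector pattern implied by the imports (tower::service_fn, tokio::net::UnixStream, hyper_util::rt::TokioIo); the placeholder URI is never resolved.

// Hedged sketch of a tonic unix-domain-socket connector; not the exact code
// elided from the hunk above.
use hyper_util::rt::TokioIo;
use std::path::PathBuf;
use tokio::net::UnixStream;
use tonic::transport::{Channel, Endpoint, Uri};
use tower::service_fn;

async fn connect_unix(socket_path: PathBuf) -> Result<Channel, tonic::transport::Error> {
    // The URI is ignored because the connector below always dials the socket.
    Endpoint::try_from("http://[::]:50051")?
        .connect_with_connector(service_fn(move |_: Uri| {
            let socket_path = socket_path.clone();
            async move {
                Ok::<_, std::io::Error>(TokioIo::new(UnixStream::connect(socket_path).await?))
            }
        }))
        .await
}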
@ -1101,9 +1367,10 @@ impl DfdaemonDownloadClient {
|
|||
})
|
||||
.or_err(ErrorType::ConnectError)?;
|
||||
|
||||
let client = DfdaemonDownloadGRPCClient::with_interceptor(channel, TracingInterceptor)
|
||||
.max_decoding_message_size(usize::MAX)
|
||||
.max_encoding_message_size(usize::MAX);
|
||||
let client =
|
||||
DfdaemonDownloadGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
|
||||
.max_decoding_message_size(usize::MAX)
|
||||
.max_encoding_message_size(usize::MAX);
|
||||
Ok(Self { client })
|
||||
}
|
||||
|
||||
|
@ -1141,6 +1408,18 @@ impl DfdaemonDownloadClient {
|
|||
Ok(response.into_inner())
|
||||
}
|
||||
|
||||
/// list_task_entries lists the task entries.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn list_task_entries(
|
||||
&self,
|
||||
request: ListTaskEntriesRequest,
|
||||
) -> ClientResult<ListTaskEntriesResponse> {
|
||||
let request = Self::make_request(request);
|
||||
info!("list task entries request: {:?}", request);
|
||||
let response = self.client.clone().list_task_entries(request).await?;
|
||||
Ok(response.into_inner())
|
||||
}
|
||||
|
||||
/// delete_task tells the dfdaemon to delete the task.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn delete_task(&self, request: DeleteTaskRequest) -> ClientResult<()> {
|
||||
|
@ -1222,7 +1501,6 @@ impl DfdaemonDownloadClient {
|
|||
}
|
||||
|
||||
/// make_request creates a new request with timeout.
|
||||
#[instrument(skip_all)]
|
||||
fn make_request<T>(request: T) -> tonic::Request<T> {
|
||||
let mut request = tonic::Request::new(request);
|
||||
request.set_timeout(super::REQUEST_TIMEOUT);
|
||||
|
|
|
@ -14,7 +14,6 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
use super::interceptor::TracingInterceptor;
|
||||
use crate::metrics::{
|
||||
collect_delete_task_failure_metrics, collect_delete_task_started_metrics,
|
||||
collect_download_task_failure_metrics, collect_download_task_finished_metrics,
|
||||
|
@ -25,20 +24,22 @@ use crate::metrics::{
|
|||
};
|
||||
use crate::resource::{persistent_cache_task, task};
|
||||
use crate::shutdown;
|
||||
use bytesize::MB;
|
||||
use dragonfly_api::common::v2::{
|
||||
Host, Network, PersistentCacheTask, Piece, Priority, Task, TaskType,
|
||||
CacheTask, Host, Network, PersistentCacheTask, Piece, Priority, Task, TaskType,
|
||||
};
|
||||
use dragonfly_api::dfdaemon::v2::{
|
||||
dfdaemon_upload_client::DfdaemonUploadClient as DfdaemonUploadGRPCClient,
|
||||
dfdaemon_upload_server::{DfdaemonUpload, DfdaemonUploadServer as DfdaemonUploadGRPCServer},
|
||||
DeletePersistentCacheTaskRequest, DeleteTaskRequest, DownloadPersistentCachePieceRequest,
|
||||
DeleteCacheTaskRequest, DeletePersistentCacheTaskRequest, DeleteTaskRequest,
|
||||
DownloadCachePieceRequest, DownloadCachePieceResponse, DownloadCacheTaskRequest,
|
||||
DownloadCacheTaskResponse, DownloadPersistentCachePieceRequest,
|
||||
DownloadPersistentCachePieceResponse, DownloadPersistentCacheTaskRequest,
|
||||
DownloadPersistentCacheTaskResponse, DownloadPieceRequest, DownloadPieceResponse,
|
||||
DownloadTaskRequest, DownloadTaskResponse, ExchangeIbVerbsQueuePairEndpointRequest,
|
||||
ExchangeIbVerbsQueuePairEndpointResponse, StatPersistentCacheTaskRequest, StatTaskRequest,
|
||||
SyncHostRequest, SyncPersistentCachePiecesRequest, SyncPersistentCachePiecesResponse,
|
||||
SyncPiecesRequest, SyncPiecesResponse, UpdatePersistentCacheTaskRequest,
|
||||
ExchangeIbVerbsQueuePairEndpointResponse, StatCacheTaskRequest, StatPersistentCacheTaskRequest,
|
||||
StatTaskRequest, SyncCachePiecesRequest, SyncCachePiecesResponse, SyncHostRequest,
|
||||
SyncPersistentCachePiecesRequest, SyncPersistentCachePiecesResponse, SyncPiecesRequest,
|
||||
SyncPiecesResponse, UpdatePersistentCacheTaskRequest,
|
||||
};
|
||||
use dragonfly_api::errordetails::v2::Backend;
|
||||
use dragonfly_client_config::dfdaemon::Config;
|
||||
|
@ -48,13 +49,14 @@ use dragonfly_client_core::{
|
|||
};
|
||||
use dragonfly_client_util::{
|
||||
http::{get_range, hashmap_to_headermap, headermap_to_hashmap},
|
||||
net::{get_interface_info, Interface},
|
||||
id_generator::TaskIDParameter,
|
||||
net::Interface,
|
||||
};
|
||||
use opentelemetry::Context;
|
||||
use std::net::SocketAddr;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use sysinfo::Networks;
|
||||
use tokio::io::AsyncReadExt;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::mpsc::Sender;
|
||||
|
@ -67,8 +69,11 @@ use tonic::{
|
|||
};
|
||||
use tower::ServiceBuilder;
|
||||
use tracing::{debug, error, info, instrument, Instrument, Span};
|
||||
use tracing_opentelemetry::OpenTelemetrySpanExt;
|
||||
use url::Url;
|
||||
|
||||
use super::interceptor::{ExtractTracingInterceptor, InjectTracingInterceptor};
|
||||
|
||||
/// DfdaemonUploadServer is the grpc server of the upload.
|
||||
pub struct DfdaemonUploadServer {
|
||||
/// config is the configuration of the dfdaemon.
|
||||
|
@ -77,8 +82,14 @@ pub struct DfdaemonUploadServer {
|
|||
/// addr is the address of the grpc server.
|
||||
addr: SocketAddr,
|
||||
|
||||
/// service is the grpc service of the dfdaemon upload.
|
||||
service: DfdaemonUploadGRPCServer<DfdaemonUploadServerHandler>,
|
||||
/// task is the task manager.
|
||||
task: Arc<task::Task>,
|
||||
|
||||
/// persistent_cache_task is the persistent cache task manager.
|
||||
persistent_cache_task: Arc<persistent_cache_task::PersistentCacheTask>,
|
||||
|
||||
/// interface is the network interface.
|
||||
interface: Arc<Interface>,
|
||||
|
||||
/// shutdown is used to shutdown the grpc server.
|
||||
shutdown: shutdown::Shutdown,
|
||||
|
@ -90,40 +101,38 @@ pub struct DfdaemonUploadServer {
|
|||
/// DfdaemonUploadServer implements the grpc server of the upload.
|
||||
impl DfdaemonUploadServer {
|
||||
/// new creates a new DfdaemonUploadServer.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
config: Arc<Config>,
|
||||
addr: SocketAddr,
|
||||
task: Arc<task::Task>,
|
||||
persistent_cache_task: Arc<persistent_cache_task::PersistentCacheTask>,
|
||||
interface: Arc<Interface>,
|
||||
shutdown: shutdown::Shutdown,
|
||||
shutdown_complete_tx: mpsc::UnboundedSender<()>,
|
||||
) -> Self {
|
||||
// Initialize the grpc service.
|
||||
let interface =
|
||||
get_interface_info(config.host.ip.unwrap(), config.upload.rate_limit).unwrap();
|
||||
|
||||
let service = DfdaemonUploadGRPCServer::new(DfdaemonUploadServerHandler {
|
||||
interface,
|
||||
socket_path: config.download.server.socket_path.clone(),
|
||||
task,
|
||||
persistent_cache_task,
|
||||
})
|
||||
.max_decoding_message_size(usize::MAX)
|
||||
.max_encoding_message_size(usize::MAX);
|
||||
|
||||
Self {
|
||||
config,
|
||||
addr,
|
||||
service,
|
||||
task,
|
||||
interface,
|
||||
persistent_cache_task,
|
||||
shutdown,
|
||||
_shutdown_complete: shutdown_complete_tx,
|
||||
}
|
||||
}
|
||||
|
||||
/// run starts the upload server.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn run(&mut self, grpc_server_started_barrier: Arc<Barrier>) -> ClientResult<()> {
|
||||
let service = DfdaemonUploadGRPCServer::with_interceptor(
|
||||
DfdaemonUploadServerHandler {
|
||||
socket_path: self.config.download.server.socket_path.clone(),
|
||||
task: self.task.clone(),
|
||||
persistent_cache_task: self.persistent_cache_task.clone(),
|
||||
interface: self.interface.clone(),
|
||||
},
|
||||
ExtractTracingInterceptor,
|
||||
);
|
||||
|
||||
// Register the reflection service.
|
||||
let reflection = tonic_reflection::server::Builder::configure()
|
||||
.register_encoded_file_descriptor_set(dragonfly_api::FILE_DESCRIPTOR_SET)
|
||||
|
@ -135,14 +144,9 @@ impl DfdaemonUploadServer {
|
|||
// Initialize health reporter.
|
||||
let (mut health_reporter, health_service) = tonic_health::server::health_reporter();
|
||||
|
||||
// Set the serving status of the upload grpc server.
|
||||
health_reporter
|
||||
.set_serving::<DfdaemonUploadGRPCServer<DfdaemonUploadServerHandler>>()
|
||||
.await;
|
||||
|
||||
// TODO(Gaius): RateLimitLayer is not implemented Clone, so we can't use it here.
|
||||
// Only use the LoadShed layer and the ConcurrencyLimit layer.
|
||||
let layer = ServiceBuilder::new()
|
||||
let rate_limit_layer = ServiceBuilder::new()
|
||||
.concurrency_limit(self.config.upload.server.request_rate_limit as usize)
|
||||
.load_shed()
|
||||
.into_inner();
|
||||
|
@ -162,17 +166,23 @@ impl DfdaemonUploadServer {
|
|||
.tcp_keepalive(Some(super::TCP_KEEPALIVE))
|
||||
.http2_keepalive_interval(Some(super::HTTP2_KEEP_ALIVE_INTERVAL))
|
||||
.http2_keepalive_timeout(Some(super::HTTP2_KEEP_ALIVE_TIMEOUT))
|
||||
.layer(layer)
|
||||
.add_service(reflection.clone())
|
||||
.layer(rate_limit_layer)
|
||||
.add_service(reflection)
|
||||
.add_service(health_service)
|
||||
.add_service(self.service.clone())
|
||||
.add_service(service)
|
||||
.serve_with_shutdown(self.addr, async move {
|
||||
// When the grpc server is started, notify the barrier. If the shutdown signal is received
|
||||
// before the barrier is waited on successfully, the server will shut down immediately.
|
||||
tokio::select! {
|
||||
// Notify the upload grpc server is started.
|
||||
_ = grpc_server_started_barrier.wait() => {
|
||||
info!("upload server is ready");
|
||||
info!("upload server is ready to start");
|
||||
|
||||
health_reporter
|
||||
.set_serving::<DfdaemonUploadGRPCServer<DfdaemonUploadServerHandler>>()
|
||||
.await;
|
||||
|
||||
info!("upload server's health status set to serving");
|
||||
}
|
||||
// Wait for shutdown signal.
|
||||
_ = shutdown.recv() => {
|
||||
|
@ -194,9 +204,6 @@ impl DfdaemonUploadServer {
|
|||
|
||||
/// DfdaemonUploadServerHandler is the handler of the dfdaemon upload grpc service.
|
||||
pub struct DfdaemonUploadServerHandler {
|
||||
/// interface is the network interface.
|
||||
interface: Interface,
|
||||
|
||||
/// socket_path is the path of the unix domain socket.
|
||||
socket_path: PathBuf,
|
||||
|
||||
|
@ -205,6 +212,9 @@ pub struct DfdaemonUploadServerHandler {
|
|||
|
||||
/// persistent_cache_task is the persistent cache task manager.
|
||||
persistent_cache_task: Arc<persistent_cache_task::PersistentCacheTask>,
|
||||
|
||||
/// interface is the network interface.
|
||||
interface: Arc<Interface>,
|
||||
}
|
||||
|
||||
/// DfdaemonUploadServerHandler implements the dfdaemon upload grpc service.
|
||||
|
@ -214,11 +224,19 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
type DownloadTaskStream = ReceiverStream<Result<DownloadTaskResponse, Status>>;
|
||||
|
||||
/// download_task downloads the task.
|
||||
#[instrument(skip_all, fields(host_id, task_id, peer_id))]
|
||||
#[instrument(
|
||||
skip_all,
|
||||
fields(host_id, task_id, peer_id, url, remote_ip, content_length)
|
||||
)]
|
||||
async fn download_task(
|
||||
&self,
|
||||
request: Request<DownloadTaskRequest>,
|
||||
) -> Result<Response<Self::DownloadTaskStream>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Record the start time.
|
||||
let start_time = Instant::now();
|
||||
|
||||
|
@ -235,13 +253,16 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
let task_id = self
|
||||
.task
|
||||
.id_generator
|
||||
.task_id(
|
||||
download.url.as_str(),
|
||||
download.piece_length,
|
||||
download.tag.as_deref(),
|
||||
download.application.as_deref(),
|
||||
download.filtered_query_params.clone(),
|
||||
)
|
||||
.task_id(match download.content_for_calculating_task_id.clone() {
|
||||
Some(content) => TaskIDParameter::Content(content),
|
||||
None => TaskIDParameter::URLBased {
|
||||
url: download.url.clone(),
|
||||
piece_length: download.piece_length,
|
||||
tag: download.tag.clone(),
|
||||
application: download.application.clone(),
|
||||
filtered_query_params: download.filtered_query_params.clone(),
|
||||
},
|
||||
})
|
||||
.map_err(|e| {
|
||||
error!("generate task id: {}", e);
|
||||
Status::invalid_argument(e.to_string())
|
||||
|
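Task id generation above now branches on content_for_calculating_task_id: an explicit content string wins, otherwise the id is derived from the URL and the download options. The sketch below is illustrative only; the enum and the hashing are simplified stand-ins for TaskIDParameter in dragonfly_client_util::id_generator, which can be expected to use a cryptographic hash rather than DefaultHasher.

// Illustrative sketch of a TaskIDParameter-style id derivation; the real
// generator lives in dragonfly_client_util and this is not its algorithm.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

enum TaskIdParam {
    Content(String),
    UrlBased {
        url: String,
        piece_length: Option<u64>,
        tag: Option<String>,
        application: Option<String>,
        filtered_query_params: Vec<String>,
    },
}

fn task_id(param: TaskIdParam) -> String {
    let mut hasher = DefaultHasher::new();
    match param {
        // Caller-supplied content determines the id directly.
        TaskIdParam::Content(content) => content.hash(&mut hasher),
        // Otherwise the id is derived from the URL plus the download options.
        TaskIdParam::UrlBased {
            url,
            piece_length,
            tag,
            application,
            filtered_query_params,
        } => {
            url.hash(&mut hasher);
            piece_length.hash(&mut hasher);
            tag.hash(&mut hasher);
            application.hash(&mut hasher);
            filtered_query_params.hash(&mut hasher);
        }
    }
    format!("{:x}", hasher.finish())
}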
@ -257,6 +278,11 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
Span::current().record("host_id", host_id.as_str());
|
||||
Span::current().record("task_id", task_id.as_str());
|
||||
Span::current().record("peer_id", peer_id.as_str());
|
||||
Span::current().record("url", download.url.clone());
|
||||
Span::current().record(
|
||||
"remote_ip",
|
||||
download.remote_ip.clone().unwrap_or_default().as_str(),
|
||||
);
|
||||
info!("download task in upload server");
|
||||
|
||||
// Download task started.
|
||||
|
@ -337,6 +363,8 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
task.piece_length().unwrap_or_default()
|
||||
);
|
||||
|
||||
Span::current().record("content_length", content_length);
|
||||
|
||||
// Download's range priority is higher than the request header's range.
|
||||
// If download protocol is http, use the range of the request header.
|
||||
// If download protocol is not http, use the range of the download.
|
||||
|
@ -611,8 +639,13 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
}
|
||||
|
||||
/// stat_task stats the task.
|
||||
#[instrument(skip_all, fields(host_id, task_id))]
|
||||
#[instrument(skip_all, fields(host_id, task_id, remote_ip, local_only))]
|
||||
async fn stat_task(&self, request: Request<StatTaskRequest>) -> Result<Response<Task>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Clone the request.
|
||||
let request = request.into_inner();
|
||||
|
||||
|
@ -622,36 +655,57 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
// Get the task id from the request.
|
||||
let task_id = request.task_id;
|
||||
|
||||
// Get the local_only flag from the request, default to false.
|
||||
let local_only = request.local_only;
|
||||
|
||||
// Span record the host id and task id.
|
||||
Span::current().record("host_id", host_id.as_str());
|
||||
Span::current().record("task_id", task_id.as_str());
|
||||
Span::current().record(
|
||||
"remote_ip",
|
||||
request.remote_ip.clone().unwrap_or_default().as_str(),
|
||||
);
|
||||
Span::current().record("local_only", local_only.to_string().as_str());
|
||||
info!("stat task in upload server");
|
||||
|
||||
// Collect the stat task metrics.
|
||||
collect_stat_task_started_metrics(TaskType::Standard as i32);
|
||||
|
||||
// Get the task from the scheduler.
|
||||
let task = self
|
||||
match self
|
||||
.task
|
||||
.stat(task_id.as_str(), host_id.as_str())
|
||||
.stat(task_id.as_str(), host_id.as_str(), local_only)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
{
|
||||
Ok(task) => Ok(Response::new(task)),
|
||||
Err(err) => {
|
||||
// Collect the stat task failure metrics.
|
||||
collect_stat_task_failure_metrics(TaskType::Standard as i32);
|
||||
|
||||
error!("stat task: {}", err);
|
||||
Status::internal(err.to_string())
|
||||
})?;
|
||||
// Log the error with detailed context.
|
||||
error!("stat task failed: {}", err);
|
||||
|
||||
Ok(Response::new(task))
|
||||
// Map the error to an appropriate gRPC status.
|
||||
Err(match err {
|
||||
ClientError::TaskNotFound(id) => {
|
||||
Status::not_found(format!("task not found: {}", id))
|
||||
}
|
||||
_ => Status::internal(err.to_string()),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// delete_task deletes the task.
|
||||
#[instrument(skip_all, fields(host_id, task_id))]
|
||||
#[instrument(skip_all, fields(host_id, task_id, remote_ip))]
|
||||
async fn delete_task(
|
||||
&self,
|
||||
request: Request<DeleteTaskRequest>,
|
||||
) -> Result<Response<()>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Clone the request.
|
||||
let request = request.into_inner();
|
||||
|
||||
|
@ -664,6 +718,10 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
// Span record the host id and task id.
|
||||
Span::current().record("host_id", host_id.as_str());
|
||||
Span::current().record("task_id", task_id.as_str());
|
||||
Span::current().record(
|
||||
"remote_ip",
|
||||
request.remote_ip.clone().unwrap_or_default().as_str(),
|
||||
);
|
||||
info!("delete task in upload server");
|
||||
|
||||
// Collect the delete task started metrics.
|
||||
|
@ -687,12 +745,18 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
/// SyncPiecesStream is the stream of the sync pieces response.
|
||||
type SyncPiecesStream = ReceiverStream<Result<SyncPiecesResponse, Status>>;
|
||||
|
||||
/// sync_pieces provides the piece metadata for parent.
|
||||
/// sync_pieces provides the piece metadata for parent. If the per-piece collection timeout is exceeded,
|
||||
/// the stream will be closed.
|
||||
#[instrument(skip_all, fields(host_id, remote_host_id, task_id))]
|
||||
async fn sync_pieces(
|
||||
&self,
|
||||
request: Request<SyncPiecesRequest>,
|
||||
) -> Result<Response<Self::SyncPiecesStream>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Clone the request.
|
||||
let request = request.into_inner();
|
||||
|
||||
|
@ -722,7 +786,6 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
tokio::spawn(
|
||||
async move {
|
||||
loop {
|
||||
let mut has_started_piece = false;
|
||||
let mut finished_piece_numbers = Vec::new();
|
||||
for interested_piece_number in interested_piece_numbers.iter() {
|
||||
let piece = match task_manager.piece.get(
|
||||
|
@ -787,11 +850,6 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
finished_piece_numbers.push(piece.number);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check whether the piece is started.
|
||||
if piece.is_started() {
|
||||
has_started_piece = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the finished piece numbers from the interested piece numbers.
|
||||
|
@ -805,13 +863,6 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
return;
|
||||
}
|
||||
|
||||
// If there is no started piece, return.
|
||||
if !has_started_piece {
|
||||
info!("there is no started piece");
|
||||
drop(out_stream_tx);
|
||||
return;
|
||||
}
|
||||
|
||||
// Wait for the piece to be finished.
|
||||
tokio::time::sleep(
|
||||
dragonfly_client_storage::DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL,
|
||||
|
@ -826,11 +877,19 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
}
|
||||
|
||||
/// download_piece provides the piece content for parent.
|
||||
#[instrument(skip_all, fields(host_id, remote_host_id, task_id, piece_id))]
|
||||
#[instrument(
|
||||
skip_all,
|
||||
fields(host_id, remote_host_id, task_id, piece_id, piece_length)
|
||||
)]
|
||||
async fn download_piece(
|
||||
&self,
|
||||
request: Request<DownloadPieceRequest>,
|
||||
) -> Result<Response<DownloadPieceResponse>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Clone the request.
|
||||
let request = request.into_inner();
|
||||
|
||||
|
@ -849,7 +908,6 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
// Generate the piece id.
|
||||
let piece_id = self.task.piece.id(task_id.as_str(), piece_number);
|
||||
|
||||
// Span record the host id, task id and piece number.
|
||||
Span::current().record("host_id", host_id.as_str());
|
||||
Span::current().record("remote_host_id", remote_host_id.as_str());
|
||||
Span::current().record("task_id", task_id.as_str());
|
||||
|
@ -870,6 +928,8 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
Status::not_found("piece metadata not found")
|
||||
})?;
|
||||
|
||||
Span::current().record("piece_length", piece.length);
|
||||
|
||||
// Collect upload piece started metrics.
|
||||
collect_upload_piece_started_metrics();
|
||||
info!("start upload piece content");
|
||||
|
@ -903,6 +963,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
error!("upload piece content failed: {}", err);
|
||||
Status::internal(err.to_string())
|
||||
})?;
|
||||
drop(reader);
|
||||
|
||||
// Collect upload piece finished metrics.
|
||||
collect_upload_piece_finished_metrics();
|
||||
|
@ -936,8 +997,10 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
&self,
|
||||
request: Request<SyncHostRequest>,
|
||||
) -> Result<Response<Self::SyncHostStream>, Status> {
|
||||
// DEFAULT_HOST_INFO_REFRESH_INTERVAL is the default interval for refreshing the host info.
|
||||
const DEFAULT_HOST_INFO_REFRESH_INTERVAL: Duration = Duration::from_millis(500);
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Clone the request.
|
||||
let request = request.into_inner();
|
||||
|
@ -960,43 +1023,42 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
// Get local interface.
|
||||
let interface = self.interface.clone();
|
||||
|
||||
// DEFAULT_HOST_INFO_REFRESH_INTERVAL is the default interval for refreshing the host info.
|
||||
const DEFAULT_HOST_INFO_REFRESH_INTERVAL: Duration = Duration::from_millis(500);
|
||||
|
||||
// Initialize stream channel.
|
||||
let (out_stream_tx, out_stream_rx) = mpsc::channel(10 * 1024);
|
||||
tokio::spawn(
|
||||
async move {
|
||||
// Initialize sysinfo network.
|
||||
let mut networks = Networks::new_with_refreshed_list();
|
||||
|
||||
// Start the host info update loop.
|
||||
loop {
|
||||
// Sleep to calculate the network traffic difference over
|
||||
// the DEFAULT_HOST_INFO_REFRESH_INTERVAL.
|
||||
// Wait for the host info refresh interval.
|
||||
tokio::time::sleep(DEFAULT_HOST_INFO_REFRESH_INTERVAL).await;
|
||||
|
||||
// Refresh network information.
|
||||
networks.refresh();
|
||||
|
||||
// Init response.
|
||||
let mut host = Host::default();
|
||||
if let Some(network_data) = networks.get(&interface.name) {
|
||||
let network = Network {
|
||||
download_rate: network_data.received()
|
||||
/ DEFAULT_HOST_INFO_REFRESH_INTERVAL.as_secs(),
|
||||
// Convert bandwidth to bytes per second.
|
||||
download_rate_limit: interface.bandwidth / 8 * MB,
|
||||
upload_rate: network_data.transmitted()
|
||||
/ DEFAULT_HOST_INFO_REFRESH_INTERVAL.as_secs(),
|
||||
// Convert bandwidth to bytes per second.
|
||||
upload_rate_limit: interface.bandwidth / 8 * MB,
|
||||
..Default::default()
|
||||
};
|
||||
host.network = Some(network.clone());
|
||||
|
||||
debug!("interface: {}, network: {:?}", interface.name, network);
|
||||
};
|
||||
// Wait for the network data to be collected.
|
||||
let network_data = interface.get_network_data().await;
|
||||
debug!(
|
||||
"network data: rx bandwidth {}/{} bps, tx bandwidth {}/{} bps",
|
||||
network_data.rx_bandwidth.unwrap_or(0),
|
||||
network_data.max_rx_bandwidth,
|
||||
network_data.tx_bandwidth.unwrap_or(0),
|
||||
network_data.max_tx_bandwidth
|
||||
);
|
||||
|
||||
// Send host info.
|
||||
match out_stream_tx.send(Ok(host.clone())).await {
|
||||
match out_stream_tx
|
||||
.send(Ok(Host {
|
||||
network: Some(Network {
|
||||
max_rx_bandwidth: network_data.max_rx_bandwidth,
|
||||
rx_bandwidth: network_data.rx_bandwidth,
|
||||
max_tx_bandwidth: network_data.max_tx_bandwidth,
|
||||
tx_bandwidth: network_data.tx_bandwidth,
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
}))
|
||||
.await
|
||||
{
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
error!(
|
||||
|
@ -1004,7 +1066,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
remote_host_id, err
|
||||
);
|
||||
|
||||
break;
|
||||
return;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
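The sync_host hunks above drop the inline rate calculation in favor of interface.get_network_data(). The removed branch converted the configured link speed, apparently given in Mbit/s judging by the `/ 8 * MB` expression, into bytes per second and derived transfer rates from byte counters sampled over DEFAULT_HOST_INFO_REFRESH_INTERVAL. A hedged sketch of that arithmetic, with the unit assumption called out and a guard added for sub-second intervals:

// Illustrative arithmetic only; mirrors the removed inline calculation under
// the assumption that the configured bandwidth is in Mbit/s.
use std::time::Duration;

// Matches bytesize::MB (10^6 bytes), not MiB.
const MB: u64 = 1_000_000;

fn bandwidth_limit_bytes_per_sec(bandwidth_mbps: u64) -> u64 {
    // 8 bits per byte: Mbit/s -> MByte/s -> bytes/s.
    bandwidth_mbps / 8 * MB
}

fn rate_bytes_per_sec(bytes_transferred: u64, interval: Duration) -> u64 {
    // Guard against a sub-second interval truncating to zero seconds.
    bytes_transferred / interval.as_secs().max(1)
}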
@ -1020,11 +1082,16 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
ReceiverStream<Result<DownloadPersistentCacheTaskResponse, Status>>;
|
||||
|
||||
/// download_persistent_cache_task downloads the persistent cache task.
|
||||
#[instrument(skip_all, fields(host_id, task_id, peer_id))]
|
||||
#[instrument(skip_all, fields(host_id, task_id, peer_id, remote_ip, content_length))]
|
||||
async fn download_persistent_cache_task(
|
||||
&self,
|
||||
request: Request<DownloadPersistentCacheTaskRequest>,
|
||||
) -> Result<Response<Self::DownloadPersistentCacheTaskStream>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Record the start time.
|
||||
let start_time = Instant::now();
|
||||
|
||||
|
@ -1048,6 +1115,10 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
Span::current().record("host_id", host_id.as_str());
|
||||
Span::current().record("task_id", task_id.as_str());
|
||||
Span::current().record("peer_id", peer_id.as_str());
|
||||
Span::current().record(
|
||||
"remote_ip",
|
||||
request.remote_ip.clone().unwrap_or_default().as_str(),
|
||||
);
|
||||
info!("download persistent cache task in upload server");
|
||||
|
||||
// Download task started.
|
||||
|
@ -1101,12 +1172,15 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
task
|
||||
}
|
||||
};
|
||||
|
||||
info!(
|
||||
"content length {}, piece length {}",
|
||||
task.content_length(),
|
||||
task.piece_length()
|
||||
);
|
||||
|
||||
Span::current().record("content_length", task.content_length());
|
||||
|
||||
// Initialize stream channel.
|
||||
let request_clone = request.clone();
|
||||
let task_manager_clone = self.persistent_cache_task.clone();
|
||||
|
@ -1240,11 +1314,16 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
}
|
||||
|
||||
/// update_persistent_cache_task update metadata of the persistent cache task.
|
||||
#[instrument(skip_all, fields(host_id, task_id))]
|
||||
#[instrument(skip_all, fields(host_id, task_id, remote_ip))]
|
||||
async fn update_persistent_cache_task(
|
||||
&self,
|
||||
request: Request<UpdatePersistentCacheTaskRequest>,
|
||||
) -> Result<Response<()>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Clone the request.
|
||||
let request = request.into_inner();
|
||||
|
||||
|
@ -1257,6 +1336,10 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
// Span record the host id and task id.
|
||||
Span::current().record("host_id", host_id.as_str());
|
||||
Span::current().record("task_id", task_id.as_str());
|
||||
Span::current().record(
|
||||
"remote_ip",
|
||||
request.remote_ip.clone().unwrap_or_default().as_str(),
|
||||
);
|
||||
info!("update persistent cache task in upload server");
|
||||
|
||||
// Collect the update task started metrics.
|
||||
|
@ -1278,11 +1361,16 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
}
|
||||
|
||||
/// stat_persistent_cache_task stats the persistent cache task.
|
||||
#[instrument(skip_all, fields(host_id, task_id))]
|
||||
#[instrument(skip_all, fields(host_id, task_id, remote_ip))]
|
||||
async fn stat_persistent_cache_task(
|
||||
&self,
|
||||
request: Request<StatPersistentCacheTaskRequest>,
|
||||
) -> Result<Response<PersistentCacheTask>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Clone the request.
|
||||
let request = request.into_inner();
|
||||
|
||||
|
@ -1295,6 +1383,10 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
// Span record the host id and task id.
|
||||
Span::current().record("host_id", host_id.as_str());
|
||||
Span::current().record("task_id", task_id.as_str());
|
||||
Span::current().record(
|
||||
"remote_ip",
|
||||
request.remote_ip.clone().unwrap_or_default().as_str(),
|
||||
);
|
||||
info!("stat persistent cache task in upload server");
|
||||
|
||||
// Collect the stat task started metrics.
|
||||
|
@ -1316,11 +1408,16 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
}
|
||||
|
||||
/// delete_persistent_cache_task deletes the persistent cache task.
|
||||
#[instrument(skip_all, fields(host_id, task_id))]
|
||||
#[instrument(skip_all, fields(host_id, task_id, remote_ip))]
|
||||
async fn delete_persistent_cache_task(
|
||||
&self,
|
||||
request: Request<DeletePersistentCacheTaskRequest>,
|
||||
) -> Result<Response<()>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Clone the request.
|
||||
let request = request.into_inner();
|
||||
|
||||
|
@ -1333,6 +1430,10 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
// Span record the host id and task id.
|
||||
Span::current().record("host_id", host_id.as_str());
|
||||
Span::current().record("task_id", task_id.as_str());
|
||||
Span::current().record(
|
||||
"remote_ip",
|
||||
request.remote_ip.clone().unwrap_or_default().as_str(),
|
||||
);
|
||||
info!("delete persistent cache task in upload server");
|
||||
|
||||
// Collect the delete task started metrics.
|
||||
|
@ -1351,6 +1452,11 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
&self,
|
||||
request: Request<SyncPersistentCachePiecesRequest>,
|
||||
) -> Result<Response<Self::SyncPersistentCachePiecesStream>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Clone the request.
|
||||
let request = request.into_inner();
|
||||
|
||||
|
@ -1380,7 +1486,6 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
tokio::spawn(
|
||||
async move {
|
||||
loop {
|
||||
let mut has_started_piece = false;
|
||||
let mut finished_piece_numbers = Vec::new();
|
||||
for interested_piece_number in interested_piece_numbers.iter() {
|
||||
let piece = match task_manager.piece.get(
|
||||
|
@ -1439,11 +1544,6 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
finished_piece_numbers.push(piece.number);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check whether the piece is started.
|
||||
if piece.is_started() {
|
||||
has_started_piece = true;
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the finished piece numbers from the interested piece numbers.
|
||||
|
@ -1457,13 +1557,6 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
return;
|
||||
}
|
||||
|
||||
// If there is no started piece, return.
|
||||
if !has_started_piece {
|
||||
info!("there is no started persistent cache piece");
|
||||
drop(out_stream_tx);
|
||||
return;
|
||||
}
|
||||
|
||||
// Wait for the piece to be finished.
|
||||
tokio::time::sleep(
|
||||
dragonfly_client_storage::DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL,
|
||||
|
@ -1483,6 +1576,11 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
&self,
|
||||
request: Request<DownloadPersistentCachePieceRequest>,
|
||||
) -> Result<Response<DownloadPersistentCachePieceResponse>, Status> {
|
||||
// If the parent context is set, use it as the parent context for the span.
|
||||
if let Some(parent_ctx) = request.extensions().get::<Context>() {
|
||||
Span::current().set_parent(parent_ctx.clone());
|
||||
};
|
||||
|
||||
// Clone the request.
|
||||
let request = request.into_inner();
|
||||
|
||||
|
@ -1560,6 +1658,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
error!("upload persistent cache piece content failed: {}", err);
|
||||
Status::internal(err.to_string())
|
||||
})?;
|
||||
drop(reader);
|
||||
|
||||
// Collect upload piece finished metrics.
|
||||
collect_upload_piece_finished_metrics();
|
||||
|
@ -1592,19 +1691,75 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
|||
) -> Result<Response<ExchangeIbVerbsQueuePairEndpointResponse>, Status> {
|
||||
unimplemented!()
|
||||
}
|
||||
|
||||
/// DownloadCacheTaskStream is the stream of the download cache task response.
|
||||
type DownloadCacheTaskStream = ReceiverStream<Result<DownloadCacheTaskResponse, Status>>;
|
||||
|
||||
/// download_cache_task downloads the cache task.
|
||||
#[instrument(
|
||||
skip_all,
|
||||
fields(host_id, task_id, peer_id, url, remote_ip, content_length)
|
||||
)]
|
||||
async fn download_cache_task(
|
||||
&self,
|
||||
_request: Request<DownloadCacheTaskRequest>,
|
||||
) -> Result<Response<Self::DownloadCacheTaskStream>, Status> {
|
||||
todo!();
|
||||
}
|
||||
|
||||
/// stat_cache_task stats the cache task.
|
||||
#[instrument(skip_all, fields(host_id, task_id, remote_ip, local_only))]
|
||||
async fn stat_cache_task(
|
||||
&self,
|
||||
_request: Request<StatCacheTaskRequest>,
|
||||
) -> Result<Response<CacheTask>, Status> {
|
||||
todo!();
|
||||
}
|
||||
|
||||
/// delete_cache_task deletes the cache task.
|
||||
#[instrument(skip_all, fields(host_id, task_id, remote_ip))]
|
||||
async fn delete_cache_task(
|
||||
&self,
|
||||
_request: Request<DeleteCacheTaskRequest>,
|
||||
) -> Result<Response<()>, Status> {
|
||||
todo!();
|
||||
}
|
||||
|
||||
/// SyncCachePiecesStream is the stream of the sync cache pieces response.
|
||||
type SyncCachePiecesStream = ReceiverStream<Result<SyncCachePiecesResponse, Status>>;
|
||||
|
||||
/// sync_cache_pieces provides the cache piece metadata for parent.
|
||||
#[instrument(skip_all, fields(host_id, remote_host_id, task_id))]
|
||||
async fn sync_cache_pieces(
|
||||
&self,
|
||||
_request: Request<SyncCachePiecesRequest>,
|
||||
) -> Result<Response<Self::SyncCachePiecesStream>, Status> {
|
||||
todo!();
|
||||
}
|
||||
|
||||
/// download_cache_piece provides the cache piece content for parent.
|
||||
#[instrument(
|
||||
skip_all,
|
||||
fields(host_id, remote_host_id, task_id, piece_id, piece_length)
|
||||
)]
|
||||
async fn download_cache_piece(
|
||||
&self,
|
||||
_request: Request<DownloadCachePieceRequest>,
|
||||
) -> Result<Response<DownloadCachePieceResponse>, Status> {
|
||||
todo!();
|
||||
}
|
||||
}
|
||||
|
||||
/// DfdaemonUploadClient is a wrapper of DfdaemonUploadGRPCClient.
|
||||
#[derive(Clone)]
|
||||
pub struct DfdaemonUploadClient {
|
||||
/// client is the grpc client of the dfdaemon upload.
|
||||
pub client: DfdaemonUploadGRPCClient<InterceptedService<Channel, TracingInterceptor>>,
|
||||
pub client: DfdaemonUploadGRPCClient<InterceptedService<Channel, InjectTracingInterceptor>>,
|
||||
}
|
||||
|
||||
/// DfdaemonUploadClient implements the dfdaemon upload grpc client.
|
||||
impl DfdaemonUploadClient {
|
||||
/// new creates a new DfdaemonUploadClient.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn new(
|
||||
config: Arc<Config>,
|
||||
addr: String,
|
||||
|
@ -1663,7 +1818,7 @@ impl DfdaemonUploadClient {
|
|||
.or_err(ErrorType::ConnectError)?,
|
||||
};
|
||||
|
||||
let client = DfdaemonUploadGRPCClient::with_interceptor(channel, TracingInterceptor)
|
||||
let client = DfdaemonUploadGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
|
||||
.max_decoding_message_size(usize::MAX)
|
||||
.max_encoding_message_size(usize::MAX);
|
||||
Ok(Self { client })
|
||||
|
@ -1791,6 +1946,7 @@ impl DfdaemonUploadClient {
|
|||
}
|
||||
|
||||
/// sync_persistent_cache_pieces provides the persistent cache piece metadata for parent.
|
||||
/// If the per-piece collection timeout is exceeded, the stream will be closed.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn sync_persistent_cache_pieces(
|
||||
&self,
|
||||
|
@ -1840,7 +1996,6 @@ impl DfdaemonUploadClient {
|
|||
}
|
||||
|
||||
/// make_request creates a new request with timeout.
|
||||
#[instrument(skip_all)]
|
||||
fn make_request<T>(request: T) -> tonic::Request<T> {
|
||||
let mut request = tonic::Request::new(request);
|
||||
request.set_timeout(super::REQUEST_TIMEOUT);
|
||||
|
|
|
@ -21,27 +21,27 @@ use dragonfly_client_core::{
|
|||
use hyper_util::rt::TokioIo;
|
||||
use std::path::PathBuf;
|
||||
use tokio::net::UnixStream;
|
||||
use tonic::service::interceptor::InterceptedService;
|
||||
use tonic::transport::ClientTlsConfig;
|
||||
use tonic::transport::{Channel, Endpoint, Uri};
|
||||
use tonic::{service::interceptor::InterceptedService, transport::ClientTlsConfig};
|
||||
use tonic_health::pb::{
|
||||
health_client::HealthClient as HealthGRPCClient, HealthCheckRequest, HealthCheckResponse,
|
||||
};
|
||||
use tower::service_fn;
|
||||
use tracing::{error, instrument};
|
||||
|
||||
use super::interceptor::TracingInterceptor;
|
||||
use super::interceptor::InjectTracingInterceptor;
|
||||
|
||||
/// HealthClient is a wrapper of HealthGRPCClient.
|
||||
#[derive(Clone)]
|
||||
pub struct HealthClient {
|
||||
/// client is the grpc client of the certificate.
|
||||
client: HealthGRPCClient<InterceptedService<Channel, TracingInterceptor>>,
|
||||
client: HealthGRPCClient<InterceptedService<Channel, InjectTracingInterceptor>>,
|
||||
}
|
||||
|
||||
/// HealthClient implements the grpc client of the health.
|
||||
impl HealthClient {
|
||||
/// new creates a new HealthClient.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn new(addr: &str, client_tls_config: Option<ClientTlsConfig>) -> Result<Self> {
|
||||
let channel = match client_tls_config {
|
||||
Some(client_tls_config) => Channel::from_shared(addr.to_string())
|
||||
|
@ -73,14 +73,13 @@ impl HealthClient {
|
|||
.or_err(ErrorType::ConnectError)?,
|
||||
};
|
||||
|
||||
let client = HealthGRPCClient::with_interceptor(channel, TracingInterceptor)
|
||||
let client = HealthGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
|
||||
.max_decoding_message_size(usize::MAX)
|
||||
.max_encoding_message_size(usize::MAX);
|
||||
Ok(Self { client })
|
||||
}
|
||||
|
||||
/// new_unix creates a new HealthClient with unix domain socket.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn new_unix(socket_path: PathBuf) -> Result<Self> {
|
||||
// Ignore the uri because it is not used.
|
||||
let channel = Endpoint::try_from("http://[::]:50051")
|
||||
|
@ -98,7 +97,8 @@ impl HealthClient {
|
|||
error!("connect failed: {}", err);
|
||||
})
|
||||
.or_err(ErrorType::ConnectError)?;
|
||||
let client = HealthGRPCClient::with_interceptor(channel, TracingInterceptor)
|
||||
|
||||
let client = HealthGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
|
||||
.max_decoding_message_size(usize::MAX)
|
||||
.max_encoding_message_size(usize::MAX);
|
||||
Ok(Self { client })
|
||||
|
@ -137,7 +137,6 @@ impl HealthClient {
|
|||
}
|
||||
|
||||
/// make_request creates a new request with timeout.
|
||||
#[instrument(skip_all)]
|
||||
fn make_request<T>(request: T) -> tonic::Request<T> {
|
||||
let mut request = tonic::Request::new(request);
|
||||
request.set_timeout(super::REQUEST_TIMEOUT);
|
||||
|
|
|
@ -17,9 +17,28 @@
|
|||
use tonic::{metadata, service::Interceptor, Request, Status};
|
||||
use tracing_opentelemetry::OpenTelemetrySpanExt;
|
||||
|
||||
/// MetadataMap is a tracing metadata map container.
|
||||
/// MetadataMap is a tracing metadata map container for span context.
|
||||
struct MetadataMap<'a>(&'a mut metadata::MetadataMap);
|
||||
|
||||
/// MetadataMap implements the otel tracing Extractor.
|
||||
impl opentelemetry::propagation::Extractor for MetadataMap<'_> {
|
||||
/// Get a value for a key from the `MetadataMap`. If the value can't be converted to &str, returns None
|
||||
fn get(&self, key: &str) -> Option<&str> {
|
||||
self.0.get(key).and_then(|metadata| metadata.to_str().ok())
|
||||
}
|
||||
|
||||
/// Collect all the keys from the `MetadataMap`.
|
||||
fn keys(&self) -> Vec<&str> {
|
||||
self.0
|
||||
.keys()
|
||||
.map(|key| match key {
|
||||
tonic::metadata::KeyRef::Ascii(v) => v.as_str(),
|
||||
tonic::metadata::KeyRef::Binary(v) => v.as_str(),
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
}
|
||||
|
||||
/// MetadataMap implements the otel tracing Injector.
|
||||
impl opentelemetry::propagation::Injector for MetadataMap<'_> {
|
||||
/// Set a key-value pair in the injector.
|
||||
|
@ -32,12 +51,12 @@ impl opentelemetry::propagation::Injector for MetadataMap<'_> {
|
|||
}
|
||||
}
|
||||
|
||||
/// TracingInterceptor is an auto-inject tracing gRPC interceptor.
|
||||
/// InjectTracingInterceptor is an auto-inject tracing gRPC interceptor.
|
||||
#[derive(Clone)]
|
||||
pub struct TracingInterceptor;
|
||||
pub struct InjectTracingInterceptor;
|
||||
|
||||
/// TracingInterceptor implements the tonic Interceptor interface.
|
||||
impl Interceptor for TracingInterceptor {
|
||||
/// InjectTracingInterceptor implements the tonic Interceptor interface.
|
||||
impl Interceptor for InjectTracingInterceptor {
|
||||
/// call and inject the tracing context into the global propagator.
|
||||
fn call(&mut self, mut request: Request<()>) -> std::result::Result<Request<()>, Status> {
|
||||
let context = tracing::Span::current().context();
|
||||
|
@ -48,3 +67,20 @@ impl Interceptor for TracingInterceptor {
|
|||
Ok(request)
|
||||
}
|
||||
}
|
||||
|
||||
/// ExtractTracingInterceptor is an auto-extract tracing gRPC interceptor.
|
||||
#[derive(Clone)]
|
||||
pub struct ExtractTracingInterceptor;
|
||||
|
||||
/// ExtractTracingInterceptor implements the tonic Interceptor interface.
|
||||
impl Interceptor for ExtractTracingInterceptor {
|
||||
/// call and extract tracing context from the global propagator.
|
||||
fn call(&mut self, mut request: Request<()>) -> std::result::Result<Request<()>, Status> {
|
||||
let parent_cx = opentelemetry::global::get_text_map_propagator(|prop| {
|
||||
prop.extract(&MetadataMap(request.metadata_mut()))
|
||||
});
|
||||
|
||||
request.extensions_mut().insert(parent_cx);
|
||||
Ok(request)
|
||||
}
|
||||
}
|
||||
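With the interceptor split into an inject half and an extract half, a server-side handler can read the propagated parent context back out of the request extensions. The following is a minimal sketch under that assumption; the handler function and span name are illustrative, not part of the crate.

```rust
// Hypothetical server-side use of the parent context that
// ExtractTracingInterceptor stores in the request extensions.
use opentelemetry::Context;
use tracing::info_span;
use tracing_opentelemetry::OpenTelemetrySpanExt;

fn attach_parent_context<T>(request: &tonic::Request<T>) -> tracing::Span {
    let span = info_span!("handle_request");
    if let Some(parent_cx) = request.extensions().get::<Context>() {
        // Link the local span to the caller's trace.
        span.set_parent(parent_cx.clone());
    }
    span
}
```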
|
|
|
@ -27,22 +27,21 @@ use dragonfly_client_core::{
|
|||
use std::sync::Arc;
|
||||
use tonic::{service::interceptor::InterceptedService, transport::Channel};
|
||||
use tonic_health::pb::health_check_response::ServingStatus;
|
||||
use tracing::{error, instrument, warn};
|
||||
use tracing::{error, instrument};
|
||||
use url::Url;
|
||||
|
||||
use super::interceptor::TracingInterceptor;
|
||||
use super::interceptor::InjectTracingInterceptor;
|
||||
|
||||
/// ManagerClient is a wrapper of ManagerGRPCClient.
|
||||
#[derive(Clone)]
|
||||
pub struct ManagerClient {
|
||||
/// client is the grpc client of the manager.
|
||||
pub client: ManagerGRPCClient<InterceptedService<Channel, TracingInterceptor>>,
|
||||
pub client: ManagerGRPCClient<InterceptedService<Channel, InjectTracingInterceptor>>,
|
||||
}
|
||||
|
||||
/// ManagerClient implements the grpc client of the manager.
|
||||
impl ManagerClient {
|
||||
/// new creates a new ManagerClient.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn new(config: Arc<Config>, addr: String) -> Result<Self> {
|
||||
let domain_name = Url::parse(addr.as_str())?
|
||||
.host_str()
|
||||
|
@ -99,7 +98,7 @@ impl ManagerClient {
|
|||
.or_err(ErrorType::ConnectError)?,
|
||||
};
|
||||
|
||||
let client = ManagerGRPCClient::with_interceptor(channel, TracingInterceptor)
|
||||
let client = ManagerGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
|
||||
.max_decoding_message_size(usize::MAX)
|
||||
.max_encoding_message_size(usize::MAX);
|
||||
Ok(Self { client })
|
||||
|
@ -133,7 +132,6 @@ impl ManagerClient {
|
|||
}
|
||||
|
||||
/// make_request creates a new request with timeout.
|
||||
#[instrument(skip_all)]
|
||||
fn make_request<T>(request: T) -> tonic::Request<T> {
|
||||
let mut request = tonic::Request::new(request);
|
||||
request.set_timeout(super::REQUEST_TIMEOUT);
|
||||
|
|
|
@ -34,8 +34,12 @@ pub mod scheduler;
|
|||
/// CONNECT_TIMEOUT is the timeout for GRPC connection.
|
||||
pub const CONNECT_TIMEOUT: Duration = Duration::from_secs(2);
|
||||
|
||||
/// REQUEST_TIMEOUT is the timeout for GRPC requests.
|
||||
pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
|
||||
/// REQUEST_TIMEOUT is the timeout for GRPC requests, default is 15 seconds.
|
||||
/// Note: This timeout is used for the whole request, including waiting for scheduler
|
||||
/// scheduling, refer to https://d7y.io/docs/next/reference/configuration/scheduler/.
|
||||
/// The scheduler's configuration options `scheduler.retryInterval`, `scheduler.retryBackToSourceLimit` and `scheduler.retryLimit`
|
||||
/// are used for the scheduler to schedule the task.
|
||||
pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(15);
|
||||
|
||||
/// TCP_KEEPALIVE is the keepalive duration for TCP connection.
|
||||
pub const TCP_KEEPALIVE: Duration = Duration::from_secs(3600);
|
||||
|
@ -46,11 +50,11 @@ pub const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(300);
|
|||
/// HTTP2_KEEP_ALIVE_TIMEOUT is the timeout for HTTP2 keep alive.
|
||||
pub const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(20);
|
||||
|
||||
/// MAX_FRAME_SIZE is the max frame size for GRPC, default is 12MB.
|
||||
pub const MAX_FRAME_SIZE: u32 = 12 * 1024 * 1024;
|
||||
/// MAX_FRAME_SIZE is the max frame size for GRPC, default is 4MB.
|
||||
pub const MAX_FRAME_SIZE: u32 = 4 * 1024 * 1024;
|
||||
|
||||
/// INITIAL_WINDOW_SIZE is the initial window size for GRPC, default is 12MB.
|
||||
pub const INITIAL_WINDOW_SIZE: u32 = 12 * 1024 * 1024;
|
||||
/// INITIAL_WINDOW_SIZE is the initial window size for GRPC, default is 512KB.
|
||||
pub const INITIAL_WINDOW_SIZE: u32 = 512 * 1024;
|
||||
|
||||
/// BUFFER_SIZE is the buffer size for GRPC, default is 64KB.
|
||||
pub const BUFFER_SIZE: usize = 64 * 1024;
|
||||
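To see how these tuned constants fit together, here is a hedged sketch of building a tonic endpoint with them; the address is a placeholder and only a subset of the crate's real connection options is shown.

```rust
// A minimal sketch, assuming tonic's Endpoint builder; the address is illustrative.
use std::time::Duration;
use tonic::transport::{Channel, Endpoint};

const CONNECT_TIMEOUT: Duration = Duration::from_secs(2);
const REQUEST_TIMEOUT: Duration = Duration::from_secs(15);
const TCP_KEEPALIVE: Duration = Duration::from_secs(3600);
const INITIAL_WINDOW_SIZE: u32 = 512 * 1024;
const BUFFER_SIZE: usize = 64 * 1024;

async fn connect() -> Result<Channel, tonic::transport::Error> {
    Endpoint::from_static("http://127.0.0.1:65001")
        .connect_timeout(CONNECT_TIMEOUT)
        .timeout(REQUEST_TIMEOUT)
        .tcp_keepalive(Some(TCP_KEEPALIVE))
        .initial_stream_window_size(INITIAL_WINDOW_SIZE)
        .buffer_size(BUFFER_SIZE)
        .connect()
        .await
}
```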
|
|
|
@ -40,7 +40,7 @@ use tonic::transport::Channel;
|
|||
use tracing::{error, info, instrument, Instrument};
|
||||
use url::Url;
|
||||
|
||||
use super::interceptor::TracingInterceptor;
|
||||
use super::interceptor::InjectTracingInterceptor;
|
||||
|
||||
/// VNode is the virtual node of the hashring.
|
||||
#[derive(Debug, Copy, Clone, Hash, PartialEq)]
|
||||
|
@ -79,7 +79,6 @@ pub struct SchedulerClient {
|
|||
/// SchedulerClient implements the grpc client of the scheduler.
|
||||
impl SchedulerClient {
|
||||
/// new creates a new SchedulerClient.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn new(config: Arc<Config>, dynconfig: Arc<Dynconfig>) -> Result<Self> {
|
||||
let client = Self {
|
||||
config,
|
||||
|
@ -192,9 +191,10 @@ impl SchedulerClient {
|
|||
})
|
||||
.or_err(ErrorType::ConnectError)?;
|
||||
|
||||
let mut client = SchedulerGRPCClient::with_interceptor(channel, TracingInterceptor)
|
||||
.max_decoding_message_size(usize::MAX)
|
||||
.max_encoding_message_size(usize::MAX);
|
||||
let mut client =
|
||||
SchedulerGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
|
||||
.max_decoding_message_size(usize::MAX)
|
||||
.max_encoding_message_size(usize::MAX);
|
||||
client.announce_host(request).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -245,9 +245,10 @@ impl SchedulerClient {
|
|||
})
|
||||
.or_err(ErrorType::ConnectError)?;
|
||||
|
||||
let mut client = SchedulerGRPCClient::with_interceptor(channel, TracingInterceptor)
|
||||
.max_decoding_message_size(usize::MAX)
|
||||
.max_encoding_message_size(usize::MAX);
|
||||
let mut client =
|
||||
SchedulerGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
|
||||
.max_decoding_message_size(usize::MAX)
|
||||
.max_encoding_message_size(usize::MAX);
|
||||
client.announce_host(request).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -303,9 +304,10 @@ impl SchedulerClient {
|
|||
})
|
||||
.or_err(ErrorType::ConnectError)?;
|
||||
|
||||
let mut client = SchedulerGRPCClient::with_interceptor(channel, TracingInterceptor)
|
||||
.max_decoding_message_size(usize::MAX)
|
||||
.max_encoding_message_size(usize::MAX);
|
||||
let mut client =
|
||||
SchedulerGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
|
||||
.max_decoding_message_size(usize::MAX)
|
||||
.max_encoding_message_size(usize::MAX);
|
||||
client.delete_host(request).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
@ -457,7 +459,7 @@ impl SchedulerClient {
|
|||
&self,
|
||||
task_id: &str,
|
||||
peer_id: Option<&str>,
|
||||
) -> Result<SchedulerGRPCClient<InterceptedService<Channel, TracingInterceptor>>> {
|
||||
) -> Result<SchedulerGRPCClient<InterceptedService<Channel, InjectTracingInterceptor>>> {
|
||||
// Update scheduler addresses of the client.
|
||||
self.update_available_scheduler_addrs().await?;
|
||||
|
||||
|
@ -516,7 +518,7 @@ impl SchedulerClient {
|
|||
};
|
||||
|
||||
Ok(
|
||||
SchedulerGRPCClient::with_interceptor(channel, TracingInterceptor)
|
||||
SchedulerGRPCClient::with_interceptor(channel, InjectTracingInterceptor)
|
||||
.max_decoding_message_size(usize::MAX)
|
||||
.max_encoding_message_size(usize::MAX),
|
||||
)
|
||||
|
@ -619,7 +621,6 @@ impl SchedulerClient {
|
|||
}
|
||||
|
||||
/// make_request creates a new request with timeout.
|
||||
#[instrument(skip_all)]
|
||||
fn make_request<T>(request: T) -> tonic::Request<T> {
|
||||
let mut request = tonic::Request::new(request);
|
||||
request.set_timeout(super::REQUEST_TIMEOUT);
|
||||
|
|
|
@ -36,7 +36,6 @@ pub struct Health {
|
|||
/// Health implements the health server.
|
||||
impl Health {
|
||||
/// new creates a new Health.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
addr: SocketAddr,
|
||||
shutdown: shutdown::Shutdown,
|
||||
|
@ -50,7 +49,6 @@ impl Health {
|
|||
}
|
||||
|
||||
/// run starts the health server.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn run(&self) {
|
||||
// Clone the shutdown channel.
|
||||
let mut shutdown = self.shutdown.clone();
|
||||
|
@ -71,7 +69,6 @@ impl Health {
|
|||
_ = shutdown.recv() => {
|
||||
// Health server shutting down with signals.
|
||||
info!("health server shutting down");
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -26,9 +26,8 @@ use prometheus::{
|
|||
};
|
||||
use std::net::SocketAddr;
|
||||
use std::path::Path;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use sysinfo::{ProcessRefreshKind, ProcessesToUpdate, RefreshKind, System, UpdateKind};
|
||||
use tokio::sync::mpsc;
|
||||
use tracing::{error, info, instrument, warn};
|
||||
use warp::{Filter, Rejection, Reply};
|
||||
|
@ -213,7 +212,21 @@ lazy_static! {
|
|||
&["type"]
|
||||
).expect("metric can be created");
|
||||
|
||||
/// DELETE_TASK_COUNT is used to count the number of delete tasks.
|
||||
/// LIST_TASK_ENTRIES_COUNT is used to count the number of list task entries.
|
||||
pub static ref LIST_TASK_ENTRIES_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("list_task_entries_total", "Counter of the number of the list task entries.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["type"]
|
||||
).expect("metric can be created");
|
||||
|
||||
/// LIST_TASK_ENTRIES_FAILURE_COUNT is used to count the failed number of list task entries.
|
||||
pub static ref LIST_TASK_ENTRIES_FAILURE_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("list_task_entries_failure_total", "Counter of the number of failed of the list task entries.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["type"]
|
||||
).expect("metric can be created");
|
||||
|
||||
/// DELETE_TASK_COUNT is used to count the number of delete tasks.
|
||||
pub static ref DELETE_TASK_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("delete_task_total", "Counter of the number of the delete task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
|
@ -254,24 +267,9 @@ lazy_static! {
|
|||
Opts::new("disk_usage_space_total", "Gauge of the disk usage space in bytes").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&[]
|
||||
).expect("metric can be created");
|
||||
|
||||
/// DISK_WRITTEN_BYTES is used to count of the disk written bytes.
|
||||
pub static ref DISK_WRITTEN_BYTES: IntGaugeVec =
|
||||
IntGaugeVec::new(
|
||||
Opts::new("disk_written_bytes", "Gauge of the disk written bytes.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&[]
|
||||
).expect("metric can be created");
|
||||
|
||||
/// DISK_READ_BYTES is used to count of the disk read bytes.
|
||||
pub static ref DISK_READ_BYTES: IntGaugeVec =
|
||||
IntGaugeVec::new(
|
||||
Opts::new("disk_read_bytes", "Gauge of the disk read bytes.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&[]
|
||||
).expect("metric can be created");
|
||||
}
|
||||
|
||||
/// register_custom_metrics registers all custom metrics.
|
||||
#[instrument(skip_all)]
|
||||
fn register_custom_metrics() {
|
||||
REGISTRY
|
||||
.register(Box::new(VERSION_GAUGE.clone()))
|
||||
|
@ -353,6 +351,14 @@ fn register_custom_metrics() {
|
|||
.register(Box::new(STAT_TASK_FAILURE_COUNT.clone()))
|
||||
.expect("metric can be registered");
|
||||
|
||||
REGISTRY
|
||||
.register(Box::new(LIST_TASK_ENTRIES_COUNT.clone()))
|
||||
.expect("metric can be registered");
|
||||
|
||||
REGISTRY
|
||||
.register(Box::new(LIST_TASK_ENTRIES_FAILURE_COUNT.clone()))
|
||||
.expect("metric can be registered");
|
||||
|
||||
REGISTRY
|
||||
.register(Box::new(DELETE_TASK_COUNT.clone()))
|
||||
.expect("metric can be registered");
|
||||
|
@ -376,18 +382,9 @@ fn register_custom_metrics() {
|
|||
REGISTRY
|
||||
.register(Box::new(DISK_USAGE_SPACE.clone()))
|
||||
.expect("metric can be registered");
|
||||
|
||||
REGISTRY
|
||||
.register(Box::new(DISK_WRITTEN_BYTES.clone()))
|
||||
.expect("metric can be registered");
|
||||
|
||||
REGISTRY
|
||||
.register(Box::new(DISK_READ_BYTES.clone()))
|
||||
.expect("metric can be registered");
|
||||
}
|
||||
|
||||
/// reset_custom_metrics resets all custom metrics.
|
||||
#[instrument(skip_all)]
|
||||
fn reset_custom_metrics() {
|
||||
VERSION_GAUGE.reset();
|
||||
DOWNLOAD_TASK_COUNT.reset();
|
||||
|
@ -409,14 +406,14 @@ fn reset_custom_metrics() {
|
|||
UPDATE_TASK_FAILURE_COUNT.reset();
|
||||
STAT_TASK_COUNT.reset();
|
||||
STAT_TASK_FAILURE_COUNT.reset();
|
||||
LIST_TASK_ENTRIES_COUNT.reset();
|
||||
LIST_TASK_ENTRIES_FAILURE_COUNT.reset();
|
||||
DELETE_TASK_COUNT.reset();
|
||||
DELETE_TASK_FAILURE_COUNT.reset();
|
||||
DELETE_HOST_COUNT.reset();
|
||||
DELETE_HOST_FAILURE_COUNT.reset();
|
||||
DISK_SPACE.reset();
|
||||
DISK_USAGE_SPACE.reset();
|
||||
DISK_WRITTEN_BYTES.reset();
|
||||
DISK_READ_BYTES.reset();
|
||||
}
|
||||
|
||||
/// TaskSize represents the size of the task.
|
||||
|
@ -775,6 +772,20 @@ pub fn collect_stat_task_failure_metrics(typ: i32) {
|
|||
.inc();
|
||||
}
|
||||
|
||||
/// collect_list_task_entries_started_metrics collects the list task entries started metrics.
|
||||
pub fn collect_list_task_entries_started_metrics(typ: i32) {
|
||||
LIST_TASK_ENTRIES_COUNT
|
||||
.with_label_values(&[typ.to_string().as_str()])
|
||||
.inc();
|
||||
}
|
||||
|
||||
/// collect_list_task_entries_failure_metrics collects the list task entries failure metrics.
|
||||
pub fn collect_list_task_entries_failure_metrics(typ: i32) {
|
||||
LIST_TASK_ENTRIES_FAILURE_COUNT
|
||||
.with_label_values(&[typ.to_string().as_str()])
|
||||
.inc();
|
||||
}
|
||||
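The two new counters bracket the list-task-entries path in the same started/failure pattern as the other task metrics. A sketch of that wiring follows; it assumes this module's collect_* functions, while `list_task_entries` is a placeholder for the real handler.

```rust
// Placeholder for the real listing logic.
async fn list_task_entries(_task_type: i32) -> Result<Vec<String>, Box<dyn std::error::Error>> {
    Ok(vec![])
}

// Hypothetical wrapper showing the started/failure counter pattern.
async fn list_task_entries_with_metrics(
    task_type: i32,
) -> Result<Vec<String>, Box<dyn std::error::Error>> {
    collect_list_task_entries_started_metrics(task_type);
    match list_task_entries(task_type).await {
        Ok(entries) => Ok(entries),
        Err(err) => {
            collect_list_task_entries_failure_metrics(task_type);
            Err(err)
        }
    }
}
```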
|
||||
/// collect_delete_task_started_metrics collects the delete task started metrics.
|
||||
pub fn collect_delete_task_started_metrics(typ: i32) {
|
||||
DELETE_TASK_COUNT
|
||||
|
@ -800,7 +811,7 @@ pub fn collect_delete_host_failure_metrics() {
|
|||
}
|
||||
|
||||
/// collect_disk_metrics collects the disk metrics.
|
||||
pub fn collect_disk_metrics(path: &Path, system: &Arc<Mutex<System>>) {
|
||||
pub fn collect_disk_metrics(path: &Path) {
|
||||
// Collect disk space metrics.
|
||||
let stats = match fs2::statvfs(path) {
|
||||
Ok(stats) => stats,
|
||||
|
@ -817,24 +828,6 @@ pub fn collect_disk_metrics(path: &Path, system: &Arc<Mutex<System>>) {
|
|||
DISK_USAGE_SPACE
|
||||
.with_label_values(&[])
|
||||
.set(usage_space as i64);
|
||||
|
||||
// Collect disk bandwidth metrics.
|
||||
let mut sys = system.lock().unwrap();
|
||||
sys.refresh_processes_specifics(
|
||||
ProcessesToUpdate::All,
|
||||
true,
|
||||
ProcessRefreshKind::new()
|
||||
.with_disk_usage()
|
||||
.with_exe(UpdateKind::Always),
|
||||
);
|
||||
|
||||
let process = sys.process(sysinfo::get_current_pid().unwrap()).unwrap();
|
||||
DISK_WRITTEN_BYTES
|
||||
.with_label_values(&[])
|
||||
.set(process.disk_usage().written_bytes as i64);
|
||||
DISK_READ_BYTES
|
||||
.with_label_values(&[])
|
||||
.set(process.disk_usage().read_bytes as i64);
|
||||
}
|
||||
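With the per-process disk bandwidth gauges dropped, the remaining disk collection reduces to a statvfs call. A small sketch of that computation, assuming the fs2 crate, follows; the exact usage formula is illustrative.

```rust
// Minimal sketch of the space calculation kept by collect_disk_metrics.
use std::path::Path;

fn disk_space_and_usage(path: &Path) -> std::io::Result<(u64, u64)> {
    let stats = fs2::statvfs(path)?;
    let total_space = stats.total_space();
    // Usage is total minus what is still available to callers.
    let usage_space = total_space - stats.available_space();
    Ok((total_space, usage_space))
}
```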
|
||||
/// Metrics is the metrics server.
|
||||
|
@ -843,9 +836,6 @@ pub struct Metrics {
|
|||
/// config is the configuration of the dfdaemon.
|
||||
config: Arc<Config>,
|
||||
|
||||
// system is the system information, only used for collecting disk metrics.
|
||||
system: Arc<Mutex<System>>,
|
||||
|
||||
/// shutdown is used to shutdown the metrics server.
|
||||
shutdown: shutdown::Shutdown,
|
||||
|
||||
|
@ -856,7 +846,6 @@ pub struct Metrics {
|
|||
/// Metrics implements the metrics server.
|
||||
impl Metrics {
|
||||
/// new creates a new Metrics.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
config: Arc<Config>,
|
||||
shutdown: shutdown::Shutdown,
|
||||
|
@ -864,20 +853,12 @@ impl Metrics {
|
|||
) -> Self {
|
||||
Self {
|
||||
config,
|
||||
system: Arc::new(Mutex::new(System::new_with_specifics(
|
||||
RefreshKind::new().with_processes(
|
||||
ProcessRefreshKind::new()
|
||||
.with_disk_usage()
|
||||
.with_exe(UpdateKind::Always),
|
||||
),
|
||||
))),
|
||||
shutdown,
|
||||
_shutdown_complete: shutdown_complete_tx,
|
||||
}
|
||||
}
|
||||
|
||||
/// run starts the metrics server.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn run(&self) {
|
||||
// Clone the shutdown channel.
|
||||
let mut shutdown = self.shutdown.clone();
|
||||
|
@ -898,7 +879,6 @@ impl Metrics {
|
|||
|
||||
// Clone the config.
|
||||
let config = self.config.clone();
|
||||
let system = self.system.clone();
|
||||
|
||||
// Create the metrics server address.
|
||||
let addr = SocketAddr::new(
|
||||
|
@ -910,7 +890,7 @@ impl Metrics {
|
|||
let get_metrics_route = warp::path!("metrics")
|
||||
.and(warp::get())
|
||||
.and(warp::path::end())
|
||||
.and_then(move || Self::get_metrics_handler(config.clone(), system.clone()));
|
||||
.and_then(move || Self::get_metrics_handler(config.clone()));
|
||||
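Since the handler no longer needs the shared System handle, the route only captures the config. The shape of such a warp route, in a minimal self-contained sketch with a placeholder handler body, looks like this.

```rust
// Minimal warp sketch mirroring the GET /metrics route above.
use std::net::SocketAddr;
use warp::Filter;

async fn serve_metrics(addr: SocketAddr) {
    let get_metrics_route = warp::path!("metrics")
        .and(warp::get())
        .and_then(|| async { Ok::<_, warp::Rejection>("metrics placeholder") });

    warp::serve(get_metrics_route).run(addr).await;
}
```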
|
||||
// Delete the metrics route.
|
||||
let delete_metrics_route = warp::path!("metrics")
|
||||
|
@ -929,19 +909,15 @@ impl Metrics {
|
|||
_ = shutdown.recv() => {
|
||||
// Metrics server shutting down with signals.
|
||||
info!("metrics server shutting down");
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// get_metrics_handler handles the metrics request of getting.
|
||||
#[instrument(skip_all)]
|
||||
async fn get_metrics_handler(
|
||||
config: Arc<Config>,
|
||||
system: Arc<Mutex<System>>,
|
||||
) -> Result<impl Reply, Rejection> {
|
||||
async fn get_metrics_handler(config: Arc<Config>) -> Result<impl Reply, Rejection> {
|
||||
// Collect the disk space metrics.
|
||||
collect_disk_metrics(config.storage.dir.as_path(), &system);
|
||||
collect_disk_metrics(config.storage.dir.as_path());
|
||||
|
||||
// Encode custom metrics.
|
||||
let encoder = TextEncoder::new();
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
use bytesize::ByteSize;
|
||||
use dragonfly_api::common::v2::Priority;
|
||||
use reqwest::header::HeaderMap;
|
||||
use tracing::{error, instrument};
|
||||
use tracing::error;
|
||||
|
||||
/// DRAGONFLY_TAG_HEADER is the header key of tag in http request.
|
||||
pub const DRAGONFLY_TAG_HEADER: &str = "X-Dragonfly-Tag";
|
||||
|
@ -66,15 +66,29 @@ pub const DRAGONFLY_OUTPUT_PATH_HEADER: &str = "X-Dragonfly-Output-Path";
|
|||
/// For more details refer to https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md.
|
||||
pub const DRAGONFLY_FORCE_HARD_LINK_HEADER: &str = "X-Dragonfly-Force-Hard-Link";
|
||||
|
||||
/// DRAGONFLY_PIECE_LENGTH is the header key of piece length in http request.
|
||||
/// DRAGONFLY_PIECE_LENGTH_HEADER is the header key of piece length in http request.
|
||||
/// If the value is set, the piece length will be used to download the file.
|
||||
/// Different piece lengths will generate different task ids. The value needs to
|
||||
/// be set in a human-readable format and must be greater than or equal
|
||||
/// to 4mib, for example: 4mib, 1gib.
|
||||
pub const DRAGONFLY_PIECE_LENGTH: &str = "X-Dragonfly-Piece-Length";
|
||||
pub const DRAGONFLY_PIECE_LENGTH_HEADER: &str = "X-Dragonfly-Piece-Length";
|
||||
|
||||
/// DRAGONFLY_CONTENT_FOR_CALCULATING_TASK_ID_HEADER is the header key of content for calculating task id.
|
||||
/// If DRAGONFLY_CONTENT_FOR_CALCULATING_TASK_ID_HEADER is set, use its value to calculate the task ID.
|
||||
/// Otherwise, calculate the task ID based on `url`, `piece_length`, `tag`, `application`, and `filtered_query_params`.
|
||||
pub const DRAGONFLY_CONTENT_FOR_CALCULATING_TASK_ID_HEADER: &str =
|
||||
"X-Dragonfly-Content-For-Calculating-Task-ID";
|
||||
|
||||
/// DRAGONFLY_TASK_DOWNLOAD_FINISHED_HEADER is the response header key to indicate whether the task download finished.
|
||||
/// When the task download is finished, the response will include this header with the value `"true"`,
|
||||
/// indicating that the download hit the local cache.
|
||||
pub const DRAGONFLY_TASK_DOWNLOAD_FINISHED_HEADER: &str = "X-Dragonfly-Task-Download-Finished";
|
||||
|
||||
/// DRAGONFLY_TASK_ID_HEADER is the response header key of task id. Client will calculate the task ID
|
||||
/// based on `url`, `piece_length`, `tag`, `application`, and `filtered_query_params`.
|
||||
pub const DRAGONFLY_TASK_ID_HEADER: &str = "X-Dragonfly-Task-ID";
|
||||
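For context, a hedged client-side sketch of the headers described above, assuming the request is routed through a dfdaemon HTTP proxy on a placeholder address: the request sets the piece length and the content used for task-id calculation, and the response carries the computed task id plus the cache-hit indicator.

```rust
// Illustrative reqwest usage; the proxy address and file path are placeholders.
async fn fetch_via_proxy() -> Result<(), reqwest::Error> {
    let response = reqwest::Client::new()
        .get("http://127.0.0.1:4001/path/to/file")
        .header("X-Dragonfly-Piece-Length", "4mib")
        .header(
            "X-Dragonfly-Content-For-Calculating-Task-ID",
            "custom-content",
        )
        .send()
        .await?;

    // Response headers added by the proxy.
    let task_id = response.headers().get("X-Dragonfly-Task-ID");
    let finished = response.headers().get("X-Dragonfly-Task-Download-Finished");
    println!("task_id={:?}, download_finished={:?}", task_id, finished);
    Ok(())
}
```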
|
||||
/// get_tag gets the tag from http header.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_tag(header: &HeaderMap) -> Option<String> {
|
||||
header
|
||||
.get(DRAGONFLY_TAG_HEADER)
|
||||
|
@ -83,7 +97,6 @@ pub fn get_tag(header: &HeaderMap) -> Option<String> {
|
|||
}
|
||||
|
||||
/// get_application gets the application from http header.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_application(header: &HeaderMap) -> Option<String> {
|
||||
header
|
||||
.get(DRAGONFLY_APPLICATION_HEADER)
|
||||
|
@ -92,7 +105,6 @@ pub fn get_application(header: &HeaderMap) -> Option<String> {
|
|||
}
|
||||
|
||||
/// get_priority gets the priority from http header.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_priority(header: &HeaderMap) -> i32 {
|
||||
let default_priority = Priority::Level6 as i32;
|
||||
match header.get(DRAGONFLY_PRIORITY_HEADER) {
|
||||
|
@ -114,7 +126,6 @@ pub fn get_priority(header: &HeaderMap) -> i32 {
|
|||
}
|
||||
|
||||
/// get_registry gets the custom address of container registry from http header.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_registry(header: &HeaderMap) -> Option<String> {
|
||||
header
|
||||
.get(DRAGONFLY_REGISTRY_HEADER)
|
||||
|
@ -123,7 +134,6 @@ pub fn get_registry(header: &HeaderMap) -> Option<String> {
|
|||
}
|
||||
|
||||
/// get_filters gets the filters from http header.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_filtered_query_params(
|
||||
header: &HeaderMap,
|
||||
default_filtered_query_params: Vec<String>,
|
||||
|
@ -141,7 +151,6 @@ pub fn get_filtered_query_params(
|
|||
}
|
||||
|
||||
/// get_use_p2p gets the use p2p from http header.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_use_p2p(header: &HeaderMap) -> bool {
|
||||
match header.get(DRAGONFLY_USE_P2P_HEADER) {
|
||||
Some(value) => match value.to_str() {
|
||||
|
@ -156,7 +165,6 @@ pub fn get_use_p2p(header: &HeaderMap) -> bool {
|
|||
}
|
||||
|
||||
/// get_prefetch gets the prefetch from http header.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_prefetch(header: &HeaderMap) -> Option<bool> {
|
||||
match header.get(DRAGONFLY_PREFETCH_HEADER) {
|
||||
Some(value) => match value.to_str() {
|
||||
|
@ -179,7 +187,6 @@ pub fn get_output_path(header: &HeaderMap) -> Option<String> {
|
|||
}
|
||||
|
||||
/// get_force_hard_link gets the force hard link from http header.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_force_hard_link(header: &HeaderMap) -> bool {
|
||||
match header.get(DRAGONFLY_FORCE_HARD_LINK_HEADER) {
|
||||
Some(value) => match value.to_str() {
|
||||
|
@ -195,7 +202,7 @@ pub fn get_force_hard_link(header: &HeaderMap) -> bool {
|
|||
|
||||
/// get_piece_length gets the piece length from http header.
|
||||
pub fn get_piece_length(header: &HeaderMap) -> Option<ByteSize> {
|
||||
match header.get(DRAGONFLY_PIECE_LENGTH) {
|
||||
match header.get(DRAGONFLY_PIECE_LENGTH_HEADER) {
|
||||
Some(piece_length) => match piece_length.to_str() {
|
||||
Ok(piece_length) => match piece_length.parse::<ByteSize>() {
|
||||
Ok(piece_length) => Some(piece_length),
|
||||
|
@ -213,6 +220,14 @@ pub fn get_piece_length(header: &HeaderMap) -> Option<ByteSize> {
|
|||
}
|
||||
}
|
||||
|
||||
/// get_content_for_calculating_task_id gets the content for calculating task id from http header.
|
||||
pub fn get_content_for_calculating_task_id(header: &HeaderMap) -> Option<String> {
|
||||
header
|
||||
.get(DRAGONFLY_CONTENT_FOR_CALCULATING_TASK_ID_HEADER)
|
||||
.and_then(|content| content.to_str().ok())
|
||||
.map(|content| content.to_string())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
@ -353,16 +368,38 @@ mod tests {
|
|||
#[test]
|
||||
fn test_get_piece_length() {
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert(DRAGONFLY_PIECE_LENGTH, HeaderValue::from_static("4mib"));
|
||||
headers.insert(
|
||||
DRAGONFLY_PIECE_LENGTH_HEADER,
|
||||
HeaderValue::from_static("4mib"),
|
||||
);
|
||||
assert_eq!(get_piece_length(&headers), Some(ByteSize::mib(4)));
|
||||
|
||||
let empty_headers = HeaderMap::new();
|
||||
assert_eq!(get_piece_length(&empty_headers), None);
|
||||
|
||||
headers.insert(DRAGONFLY_PIECE_LENGTH, HeaderValue::from_static("invalid"));
|
||||
headers.insert(
|
||||
DRAGONFLY_PIECE_LENGTH_HEADER,
|
||||
HeaderValue::from_static("invalid"),
|
||||
);
|
||||
assert_eq!(get_piece_length(&headers), None);
|
||||
|
||||
headers.insert(DRAGONFLY_PIECE_LENGTH, HeaderValue::from_static("0"));
|
||||
headers.insert(DRAGONFLY_PIECE_LENGTH_HEADER, HeaderValue::from_static("0"));
|
||||
assert_eq!(get_piece_length(&headers), Some(ByteSize::b(0)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_content_for_calculating_task_id() {
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert(
|
||||
DRAGONFLY_CONTENT_FOR_CALCULATING_TASK_ID_HEADER,
|
||||
HeaderValue::from_static("test-content"),
|
||||
);
|
||||
assert_eq!(
|
||||
get_content_for_calculating_task_id(&headers),
|
||||
Some("test-content".to_string())
|
||||
);
|
||||
|
||||
let empty_headers = HeaderMap::new();
|
||||
assert_eq!(get_registry(&empty_headers), None);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -99,7 +99,6 @@ pub struct Proxy {
|
|||
/// Proxy implements the proxy server.
|
||||
impl Proxy {
|
||||
/// new creates a new Proxy.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
config: Arc<Config>,
|
||||
task: Arc<Task>,
|
||||
|
@ -144,7 +143,6 @@ impl Proxy {
|
|||
}
|
||||
|
||||
/// run starts the proxy server.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn run(&self, grpc_server_started_barrier: Arc<Barrier>) -> ClientResult<()> {
|
||||
let mut shutdown = self.shutdown.clone();
|
||||
let read_buffer_size = self.config.proxy.read_buffer_size;
|
||||
|
@ -210,7 +208,7 @@ impl Proxy {
|
|||
service_fn(move |request|{
|
||||
let context = context.clone();
|
||||
async move {
|
||||
handler(context.config, context.task, request, context.dfdaemon_download_client, context.registry_cert, context.server_ca_cert).await
|
||||
handler(context.config, context.task, request, context.dfdaemon_download_client, context.registry_cert, context.server_ca_cert, remote_address.ip()).await
|
||||
}
|
||||
} ),
|
||||
)
|
||||
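The new `remote_address.ip()` argument comes from the accept loop: the peer address is captured when the TCP connection is accepted and then moved into the per-request closure. A hedged, self-contained sketch of that pattern (with illustrative names, not the crate's own types) is below.

```rust
// Capture the peer address at accept time so request handlers can log it.
use std::net::SocketAddr;
use tokio::net::TcpListener;

async fn accept_loop(addr: SocketAddr) -> std::io::Result<()> {
    let listener = TcpListener::bind(addr).await?;
    loop {
        let (stream, remote_address) = listener.accept().await?;
        tokio::spawn(async move {
            // remote_address.ip() is what the handler records as remote_ip and
            // forwards in DownloadTaskRequest.remote_ip.
            let remote_ip = remote_address.ip();
            tracing::info!("accepted connection from {}", remote_ip);
            drop(stream);
        });
    }
}
```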
|
@ -233,7 +231,7 @@ impl Proxy {
|
|||
}
|
||||
|
||||
/// handler handles the request from the client.
|
||||
#[instrument(skip_all, fields(uri, method))]
|
||||
#[instrument(skip_all, fields(url, method, remote_ip))]
|
||||
pub async fn handler(
|
||||
config: Arc<Config>,
|
||||
task: Arc<Task>,
|
||||
|
@ -241,7 +239,13 @@ pub async fn handler(
|
|||
dfdaemon_download_client: DfdaemonDownloadClient,
|
||||
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
|
||||
server_ca_cert: Arc<Option<Certificate>>,
|
||||
remote_ip: std::net::IpAddr,
|
||||
) -> ClientResult<Response> {
|
||||
// Span record the url and method.
|
||||
Span::current().record("url", request.uri().to_string().as_str());
|
||||
Span::current().record("method", request.method().as_str());
|
||||
Span::current().record("remote_ip", remote_ip.to_string().as_str());
|
||||
|
||||
// Record the proxy request started metrics. The metrics will be recorded
|
||||
// when the request is kept alive.
|
||||
collect_proxy_request_started_metrics();
|
||||
|
@ -254,6 +258,7 @@ pub async fn handler(
|
|||
config,
|
||||
task,
|
||||
request,
|
||||
remote_ip,
|
||||
dfdaemon_download_client,
|
||||
registry_cert,
|
||||
server_ca_cert,
|
||||
|
@ -265,22 +270,20 @@ pub async fn handler(
|
|||
config,
|
||||
task,
|
||||
request,
|
||||
remote_ip,
|
||||
dfdaemon_download_client,
|
||||
registry_cert,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
// Span record the uri and method.
|
||||
Span::current().record("uri", request.uri().to_string().as_str());
|
||||
Span::current().record("method", request.method().as_str());
|
||||
|
||||
// Handle CONNECT request.
|
||||
if Method::CONNECT == request.method() {
|
||||
return https_handler(
|
||||
config,
|
||||
task,
|
||||
request,
|
||||
remote_ip,
|
||||
dfdaemon_download_client,
|
||||
registry_cert,
|
||||
server_ca_cert,
|
||||
|
@ -292,6 +295,7 @@ pub async fn handler(
|
|||
config,
|
||||
task,
|
||||
request,
|
||||
remote_ip,
|
||||
dfdaemon_download_client,
|
||||
registry_cert,
|
||||
)
|
||||
|
@ -304,6 +308,7 @@ pub async fn registry_mirror_http_handler(
|
|||
config: Arc<Config>,
|
||||
task: Arc<Task>,
|
||||
request: Request<hyper::body::Incoming>,
|
||||
remote_ip: std::net::IpAddr,
|
||||
dfdaemon_download_client: DfdaemonDownloadClient,
|
||||
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
|
||||
) -> ClientResult<Response> {
|
||||
|
@ -312,6 +317,7 @@ pub async fn registry_mirror_http_handler(
|
|||
config,
|
||||
task,
|
||||
request,
|
||||
remote_ip,
|
||||
dfdaemon_download_client,
|
||||
registry_cert,
|
||||
)
|
||||
|
@ -324,6 +330,7 @@ pub async fn registry_mirror_https_handler(
|
|||
config: Arc<Config>,
|
||||
task: Arc<Task>,
|
||||
request: Request<hyper::body::Incoming>,
|
||||
remote_ip: std::net::IpAddr,
|
||||
dfdaemon_download_client: DfdaemonDownloadClient,
|
||||
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
|
||||
server_ca_cert: Arc<Option<Certificate>>,
|
||||
|
@ -333,6 +340,7 @@ pub async fn registry_mirror_https_handler(
|
|||
config,
|
||||
task,
|
||||
request,
|
||||
remote_ip,
|
||||
dfdaemon_download_client,
|
||||
registry_cert,
|
||||
server_ca_cert,
|
||||
|
@ -346,6 +354,7 @@ pub async fn http_handler(
|
|||
config: Arc<Config>,
|
||||
task: Arc<Task>,
|
||||
request: Request<hyper::body::Incoming>,
|
||||
remote_ip: std::net::IpAddr,
|
||||
dfdaemon_download_client: DfdaemonDownloadClient,
|
||||
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
|
||||
) -> ClientResult<Response> {
|
||||
|
@ -377,7 +386,15 @@ pub async fn http_handler(
|
|||
request.method(),
|
||||
request_uri
|
||||
);
|
||||
return proxy_via_dfdaemon(config, task, &rule, request, dfdaemon_download_client).await;
|
||||
return proxy_via_dfdaemon(
|
||||
config,
|
||||
task,
|
||||
&rule,
|
||||
request,
|
||||
remote_ip,
|
||||
dfdaemon_download_client,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
// If the request header contains the X-Dragonfly-Use-P2P header, proxy the request via the
|
||||
|
@ -393,6 +410,7 @@ pub async fn http_handler(
|
|||
task,
|
||||
&Rule::default(),
|
||||
request,
|
||||
remote_ip,
|
||||
dfdaemon_download_client,
|
||||
)
|
||||
.await;
|
||||
|
@ -421,6 +439,7 @@ pub async fn https_handler(
|
|||
config: Arc<Config>,
|
||||
task: Arc<Task>,
|
||||
request: Request<hyper::body::Incoming>,
|
||||
remote_ip: std::net::IpAddr,
|
||||
dfdaemon_download_client: DfdaemonDownloadClient,
|
||||
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
|
||||
server_ca_cert: Arc<Option<Certificate>>,
|
||||
|
@ -440,6 +459,7 @@ pub async fn https_handler(
|
|||
upgraded,
|
||||
host,
|
||||
port,
|
||||
remote_ip,
|
||||
dfdaemon_download_client,
|
||||
registry_cert,
|
||||
server_ca_cert,
|
||||
|
@ -470,6 +490,7 @@ async fn upgraded_tunnel(
|
|||
upgraded: Upgraded,
|
||||
host: String,
|
||||
port: u16,
|
||||
remote_ip: std::net::IpAddr,
|
||||
dfdaemon_download_client: DfdaemonDownloadClient,
|
||||
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
|
||||
server_ca_cert: Arc<Option<Certificate>>,
|
||||
|
@ -518,6 +539,7 @@ async fn upgraded_tunnel(
|
|||
host.clone(),
|
||||
port,
|
||||
request,
|
||||
remote_ip,
|
||||
dfdaemon_download_client.clone(),
|
||||
registry_cert.clone(),
|
||||
)
|
||||
|
@ -533,18 +555,20 @@ async fn upgraded_tunnel(
|
|||
}
|
||||
|
||||
/// upgraded_handler handles the upgraded https request from the client.
|
||||
#[instrument(skip_all, fields(uri, method))]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all, fields(url, method))]
|
||||
pub async fn upgraded_handler(
|
||||
config: Arc<Config>,
|
||||
task: Arc<Task>,
|
||||
host: String,
|
||||
port: u16,
|
||||
mut request: Request<hyper::body::Incoming>,
|
||||
remote_ip: std::net::IpAddr,
|
||||
dfdaemon_download_client: DfdaemonDownloadClient,
|
||||
registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
|
||||
) -> ClientResult<Response> {
|
||||
// Span record the uri and method.
|
||||
Span::current().record("uri", request.uri().to_string().as_str());
|
||||
// Span record the url and method.
|
||||
Span::current().record("url", request.uri().to_string().as_str());
|
||||
Span::current().record("method", request.method().as_str());
|
||||
|
||||
// Authenticate the request with the basic auth.
|
||||
|
@ -589,7 +613,15 @@ pub async fn upgraded_handler(
|
|||
request.method(),
|
||||
request_uri
|
||||
);
|
||||
return proxy_via_dfdaemon(config, task, &rule, request, dfdaemon_download_client).await;
|
||||
return proxy_via_dfdaemon(
|
||||
config,
|
||||
task,
|
||||
&rule,
|
||||
request,
|
||||
remote_ip,
|
||||
dfdaemon_download_client,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
// If the request header contains the X-Dragonfly-Use-P2P header, proxy the request via the
|
||||
|
@ -605,6 +637,7 @@ pub async fn upgraded_handler(
|
|||
task,
|
||||
&Rule::default(),
|
||||
request,
|
||||
remote_ip,
|
||||
dfdaemon_download_client,
|
||||
)
|
||||
.await;
|
||||
|
@ -634,22 +667,24 @@ async fn proxy_via_dfdaemon(
|
|||
task: Arc<Task>,
|
||||
rule: &Rule,
|
||||
request: Request<hyper::body::Incoming>,
|
||||
remote_ip: std::net::IpAddr,
|
||||
dfdaemon_download_client: DfdaemonDownloadClient,
|
||||
) -> ClientResult<Response> {
|
||||
// Collect the metrics for the proxy request via dfdaemon.
|
||||
collect_proxy_request_via_dfdaemon_metrics();
|
||||
|
||||
// Make the download task request.
|
||||
let download_task_request = match make_download_task_request(config.clone(), rule, request) {
|
||||
Ok(download_task_request) => download_task_request,
|
||||
Err(err) => {
|
||||
error!("make download task request failed: {}", err);
|
||||
return Ok(make_error_response(
|
||||
http::StatusCode::INTERNAL_SERVER_ERROR,
|
||||
None,
|
||||
));
|
||||
}
|
||||
};
|
||||
let download_task_request =
|
||||
match make_download_task_request(config.clone(), rule, request, remote_ip) {
|
||||
Ok(download_task_request) => download_task_request,
|
||||
Err(err) => {
|
||||
error!("make download task request failed: {}", err);
|
||||
return Ok(make_error_response(
|
||||
http::StatusCode::INTERNAL_SERVER_ERROR,
|
||||
None,
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
// Download the task by the dfdaemon download client.
|
||||
let response = match dfdaemon_download_client
|
||||
|
@ -733,7 +768,10 @@ async fn proxy_via_dfdaemon(
|
|||
|
||||
// Construct the response.
|
||||
let mut response = Response::new(boxed_body);
|
||||
*response.headers_mut() = make_response_headers(download_task_started_response.clone())?;
|
||||
*response.headers_mut() = make_response_headers(
|
||||
message.task_id.as_str(),
|
||||
download_task_started_response.clone(),
|
||||
)?;
|
||||
*response.status_mut() = http::StatusCode::OK;
|
||||
|
||||
// Return the response if the client return the first piece.
|
||||
|
@ -981,7 +1019,6 @@ async fn proxy_via_https(
|
|||
}
|
||||
|
||||
/// make_registry_mirror_request makes a registry mirror request by the request.
|
||||
#[instrument(skip_all)]
|
||||
fn make_registry_mirror_request(
|
||||
config: Arc<Config>,
|
||||
mut request: Request<hyper::body::Incoming>,
|
||||
|
@ -1015,11 +1052,11 @@ fn make_registry_mirror_request(
|
|||
}
|
||||
|
||||
/// make_download_task_request makes a download task request by the request.
|
||||
#[instrument(skip_all)]
|
||||
fn make_download_task_request(
|
||||
config: Arc<Config>,
|
||||
rule: &Rule,
|
||||
request: Request<hyper::body::Incoming>,
|
||||
remote_ip: std::net::IpAddr,
|
||||
) -> ClientResult<DownloadTaskRequest> {
|
||||
// Convert the Reqwest header to the Hyper header.
|
||||
let mut header = request.headers().clone();
|
||||
|
@ -1065,15 +1102,15 @@ fn make_download_task_request(
|
|||
hdfs: None,
|
||||
is_prefetch: false,
|
||||
need_piece_content: false,
|
||||
load_to_cache: false,
|
||||
force_hard_link: header::get_force_hard_link(&header),
|
||||
content_for_calculating_task_id: header::get_content_for_calculating_task_id(&header),
|
||||
remote_ip: Some(remote_ip.to_string()),
|
||||
}),
|
||||
})
|
||||
}
|
||||
|
||||
/// need_prefetch returns whether the prefetch is needed by the configuration and the request
|
||||
/// header.
|
||||
#[instrument(skip_all)]
|
||||
fn need_prefetch(config: Arc<Config>, header: &http::HeaderMap) -> bool {
|
||||
// If the header not contains the range header, the request does not need prefetch.
|
||||
if !header.contains_key(reqwest::header::RANGE) {
|
||||
|
@ -1087,11 +1124,10 @@ fn need_prefetch(config: Arc<Config>, header: &http::HeaderMap) -> bool {
|
|||
}
|
||||
|
||||
// Return the prefetch value from the configuration.
|
||||
return config.proxy.prefetch;
|
||||
config.proxy.prefetch
|
||||
}
|
||||
|
||||
/// make_download_url makes a download url by the given uri.
|
||||
#[instrument(skip_all)]
|
||||
fn make_download_url(
|
||||
uri: &hyper::Uri,
|
||||
use_tls: bool,
|
||||
|
@ -1116,8 +1152,8 @@ fn make_download_url(
|
|||
}
|
||||
|
||||
/// make_response_headers makes the response headers.
|
||||
#[instrument(skip_all)]
|
||||
fn make_response_headers(
|
||||
task_id: &str,
|
||||
mut download_task_started_response: DownloadTaskStartedResponse,
|
||||
) -> ClientResult<hyper::header::HeaderMap> {
|
||||
// Insert the content range header to the response header.
|
||||
|
@ -1138,18 +1174,28 @@ fn make_response_headers(
|
|||
);
|
||||
};
|
||||
|
||||
if download_task_started_response.is_finished {
|
||||
download_task_started_response.response_header.insert(
|
||||
header::DRAGONFLY_TASK_DOWNLOAD_FINISHED_HEADER.to_string(),
|
||||
"true".to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
download_task_started_response.response_header.insert(
|
||||
header::DRAGONFLY_TASK_ID_HEADER.to_string(),
|
||||
task_id.to_string(),
|
||||
);
|
||||
|
||||
hashmap_to_headermap(&download_task_started_response.response_header)
|
||||
}
|
||||
|
||||
/// find_matching_rule returns whether the dfdaemon should be used to download the task.
|
||||
/// If the dfdaemon should be used, return the matched rule.
|
||||
#[instrument(skip_all)]
|
||||
fn find_matching_rule(rules: Option<&[Rule]>, url: &str) -> Option<Rule> {
|
||||
rules?.iter().find(|rule| rule.regex.is_match(url)).cloned()
|
||||
}
|
||||
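A self-contained illustration of the rule matching above, assuming the regex crate and a stripped-down `Rule` with only the field used here:

```rust
use regex::Regex;

#[derive(Clone)]
struct Rule {
    regex: Regex,
}

fn find_matching_rule(rules: Option<&[Rule]>, url: &str) -> Option<Rule> {
    rules?.iter().find(|rule| rule.regex.is_match(url)).cloned()
}

fn main() {
    let rules = vec![Rule {
        regex: Regex::new(r"^https://registry\.example\.com/").unwrap(),
    }];

    // Matches, so the request would be proxied via dfdaemon.
    assert!(find_matching_rule(Some(&rules), "https://registry.example.com/v2/").is_some());
    // No match, so the request falls through to the plain proxy path.
    assert!(find_matching_rule(Some(&rules), "https://example.org/file").is_none());
}
```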
|
||||
/// make_error_response makes an error response with the given status and message.
|
||||
#[instrument(skip_all)]
|
||||
fn make_error_response(status: http::StatusCode, header: Option<http::HeaderMap>) -> Response {
|
||||
let mut response = Response::new(empty());
|
||||
*response.status_mut() = status;
|
||||
|
@ -1163,7 +1209,6 @@ fn make_error_response(status: http::StatusCode, header: Option<http::HeaderMap>
|
|||
}
|
||||
|
||||
/// empty returns an empty body.
|
||||
#[instrument(skip_all)]
|
||||
fn empty() -> BoxBody<Bytes, ClientError> {
|
||||
Empty::<Bytes>::new()
|
||||
.map_err(|never| match never {})
|
||||
|
|
|
@ -84,7 +84,6 @@ pub struct PersistentCacheTask {
|
|||
/// PersistentCacheTask is the implementation of PersistentCacheTask.
|
||||
impl PersistentCacheTask {
|
||||
/// new creates a new PersistentCacheTask.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
config: Arc<Config>,
|
||||
id_generator: Arc<IDGenerator>,
|
||||
|
@ -105,7 +104,7 @@ impl PersistentCacheTask {
|
|||
id_generator,
|
||||
storage,
|
||||
scheduler_client,
|
||||
piece: piece.clone(),
|
||||
piece,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -129,8 +128,9 @@ impl PersistentCacheTask {
|
|||
let ttl = Duration::try_from(request.ttl.ok_or(Error::UnexpectedResponse)?)
|
||||
.or_err(ErrorType::ParseError)?;
|
||||
|
||||
// Get the content length of the file.
|
||||
let content_length = std::fs::metadata(path.as_path())
|
||||
// Get the content length of the file asynchronously.
|
||||
let content_length = tokio::fs::metadata(path.as_path())
|
||||
.await
|
||||
.inspect_err(|err| {
|
||||
error!("get file metadata error: {}", err);
|
||||
})?
|
||||
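Switching from `std::fs::metadata` to `tokio::fs::metadata` keeps the content-length lookup from blocking the async runtime; reduced to a minimal helper, the lookup looks like this.

```rust
// Async file-length lookup, as used above for the persistent cache task.
use std::path::Path;

async fn content_length(path: &Path) -> std::io::Result<u64> {
    let metadata = tokio::fs::metadata(path).await?;
    Ok(metadata.len())
}
```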
|
@ -150,8 +150,7 @@ impl PersistentCacheTask {
|
|||
};
|
||||
|
||||
// Notify the scheduler that the persistent cache task is started.
|
||||
match self
|
||||
.scheduler_client
|
||||
self.scheduler_client
|
||||
.upload_persistent_cache_task_started(UploadPersistentCacheTaskStartedRequest {
|
||||
host_id: host_id.to_string(),
|
||||
task_id: task_id.to_string(),
|
||||
|
@ -167,13 +166,7 @@ impl PersistentCacheTask {
|
|||
ttl: request.ttl,
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
error!("upload persistent cache task started: {}", err);
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
.inspect_err(|err| error!("upload persistent cache task started: {}", err))?;
|
||||
|
||||
// Check if the storage has enough space to store the persistent cache task.
|
||||
let has_enough_space = self.storage.has_enough_space(content_length)?;
|
||||
|
@ -509,7 +502,6 @@ impl PersistentCacheTask {
|
|||
}
|
||||
|
||||
/// is_same_dev_inode checks if the persistent cache task is on the same device inode as the given path.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn is_same_dev_inode(&self, id: &str, to: &Path) -> ClientResult<bool> {
|
||||
self.storage
|
||||
.is_same_dev_inode_as_persistent_cache_task(id, to)
|
||||
|
@ -755,7 +747,7 @@ impl PersistentCacheTask {
|
|||
})? {
|
||||
// Check if the schedule count is exceeded.
|
||||
schedule_count += 1;
|
||||
if schedule_count >= self.config.scheduler.max_schedule_count {
|
||||
if schedule_count > self.config.scheduler.max_schedule_count {
|
||||
in_stream_tx
|
||||
.send_timeout(
|
||||
AnnouncePersistentCachePeerRequest {
|
||||
|
@ -1151,13 +1143,13 @@ impl PersistentCacheTask {
|
|||
REQUEST_TIMEOUT,
|
||||
)
|
||||
.await
|
||||
.inspect_err(|err| {
|
||||
.unwrap_or_else(|err| {
|
||||
error!(
|
||||
"send DownloadPieceFinishedRequest for piece {} failed: {:?}",
|
||||
piece_id, err
|
||||
);
|
||||
interrupt.store(true, Ordering::SeqCst);
|
||||
})?;
|
||||
});
|
||||
|
||||
// Send the download progress.
|
||||
download_progress_tx
|
||||
|
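The change from `inspect_err(...)?` to `unwrap_or_else` means a failed progress notification is now logged (and, for the in-stream case, flags the interrupt) instead of aborting the whole piece download. A small hedged sketch of that log-and-continue pattern on a tokio channel, with an illustrative message type:

```rust
use std::time::Duration;
use tokio::sync::mpsc::Sender;
use tracing::error;

async fn send_best_effort(tx: &Sender<String>, message: String) {
    tx.send_timeout(message, Duration::from_secs(15))
        .await
        .unwrap_or_else(|err| {
            // Log and continue instead of propagating the error.
            error!("send message failed: {:?}", err);
        });
}
```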
@ -1177,13 +1169,13 @@ impl PersistentCacheTask {
|
|||
REQUEST_TIMEOUT,
|
||||
)
|
||||
.await
|
||||
.inspect_err(|err| {
|
||||
.unwrap_or_else(|err| {
|
||||
error!(
|
||||
"send DownloadPieceFinishedResponse for piece {} failed: {:?}",
|
||||
piece_id, err
|
||||
);
|
||||
interrupt.store(true, Ordering::SeqCst);
|
||||
})?;
|
||||
});
|
||||
|
||||
info!(
|
||||
"finished persistent cache piece {} from parent {:?}",
|
||||
|
@ -1378,12 +1370,12 @@ impl PersistentCacheTask {
|
|||
REQUEST_TIMEOUT,
|
||||
)
|
||||
.await
|
||||
.inspect_err(|err| {
|
||||
.unwrap_or_else(|err| {
|
||||
error!(
|
||||
"send DownloadPieceFinishedResponse for piece {} failed: {:?}",
|
||||
piece_id, err
|
||||
);
|
||||
})?;
|
||||
});
|
||||
|
||||
// Store the finished piece.
|
||||
finished_pieces.push(interested_piece.clone());
|
||||
|
|
|
@ -46,7 +46,7 @@ pub const MAX_PIECE_COUNT: u64 = 500;
|
|||
pub const MIN_PIECE_LENGTH: u64 = 4 * 1024 * 1024;
|
||||
|
||||
/// MAX_PIECE_LENGTH is the maximum piece length.
|
||||
pub const MAX_PIECE_LENGTH: u64 = 16 * 1024 * 1024;
|
||||
pub const MAX_PIECE_LENGTH: u64 = 64 * 1024 * 1024;
|
||||
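With the maximum raised to 64 MiB, a requested piece length (for example from the X-Dragonfly-Piece-Length header) still has to land inside the supported range. A hedged sketch of such a bound check follows; the clamping itself is an assumption for illustration, not necessarily the crate's exact behavior.

```rust
const MIN_PIECE_LENGTH: u64 = 4 * 1024 * 1024;
const MAX_PIECE_LENGTH: u64 = 64 * 1024 * 1024;

/// Clamp a requested piece length into the supported range.
fn normalize_piece_length(requested: u64) -> u64 {
    requested.clamp(MIN_PIECE_LENGTH, MAX_PIECE_LENGTH)
}

fn main() {
    assert_eq!(normalize_piece_length(1024), MIN_PIECE_LENGTH);
    assert_eq!(normalize_piece_length(8 * 1024 * 1024), 8 * 1024 * 1024);
    assert_eq!(normalize_piece_length(512 * 1024 * 1024), MAX_PIECE_LENGTH);
}
```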
|
||||
/// PieceLengthStrategy sets the optimization strategy of piece length.
|
||||
pub enum PieceLengthStrategy {
|
||||
|
@ -87,7 +87,6 @@ pub struct Piece {
|
|||
/// Piece implements the piece manager.
|
||||
impl Piece {
|
||||
/// new returns a new Piece.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
config: Arc<Config>,
|
||||
id_generator: Arc<IDGenerator>,
|
||||
|
@ -136,17 +135,20 @@ impl Piece {
|
|||
|
||||
/// id generates a new piece id.
|
||||
#[inline]
|
||||
#[instrument(skip_all)]
|
||||
pub fn id(&self, task_id: &str, number: u32) -> String {
|
||||
self.storage.piece_id(task_id, number)
|
||||
}
|
||||
|
||||
/// get gets a piece from the local storage.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get(&self, piece_id: &str) -> Result<Option<metadata::Piece>> {
|
||||
self.storage.get_piece(piece_id)
|
||||
}
|
||||
|
||||
/// get_all gets all pieces of a task from the local storage.
|
||||
pub fn get_all(&self, task_id: &str) -> Result<Vec<metadata::Piece>> {
|
||||
self.storage.get_pieces(task_id)
|
||||
}
|
||||
|
||||
/// calculate_interested calculates the interested pieces by content_length and range.
|
||||
pub fn calculate_interested(
|
||||
&self,
|
||||
|
@ -338,6 +340,7 @@ impl Piece {
|
|||
) -> Result<impl AsyncRead> {
|
||||
// Span record the piece_id.
|
||||
Span::current().record("piece_id", piece_id);
|
||||
Span::current().record("piece_length", length);
|
||||
|
||||
// Acquire the upload rate limiter.
|
||||
if !disable_rate_limit {
|
||||
|
@ -369,6 +372,7 @@ impl Piece {
|
|||
) -> Result<impl AsyncRead> {
|
||||
// Span record the piece_id.
|
||||
Span::current().record("piece_id", piece_id);
|
||||
Span::current().record("piece_length", length);
|
||||
|
||||
// Acquire the download rate limiter.
|
||||
if !disable_rate_limit {
|
||||
|
@ -408,10 +412,10 @@ impl Piece {
|
|||
length: u64,
|
||||
parent: piece_collector::CollectedParent,
|
||||
is_prefetch: bool,
|
||||
load_to_cache: bool,
|
||||
) -> Result<metadata::Piece> {
|
||||
// Span record the piece_id.
|
||||
Span::current().record("piece_id", piece_id);
|
||||
Span::current().record("piece_length", length);
|
||||
|
||||
// Record the start of downloading piece.
|
||||
let piece = self
|
||||
|
@ -422,6 +426,7 @@ impl Piece {
|
|||
// If the piece is downloaded by the other thread,
|
||||
// return the piece directly.
|
||||
if piece.is_finished() {
|
||||
info!("finished piece {} from local", piece_id);
|
||||
return Ok(piece);
|
||||
}
|
||||
|
||||
|
@ -471,7 +476,7 @@ impl Piece {
|
|||
digest.as_str(),
|
||||
parent.id.as_str(),
|
||||
&mut reader,
|
||||
load_to_cache,
|
||||
self.config.storage.write_piece_timeout,
|
||||
)
|
||||
.await
|
||||
{
|
||||
|
@ -508,12 +513,12 @@ impl Piece {
|
|||
length: u64,
|
||||
request_header: HeaderMap,
|
||||
is_prefetch: bool,
|
||||
load_to_cache: bool,
|
||||
object_storage: Option<ObjectStorage>,
|
||||
hdfs: Option<Hdfs>,
|
||||
) -> Result<metadata::Piece> {
|
||||
// Span record the piece_id.
|
||||
Span::current().record("piece_id", piece_id);
|
||||
Span::current().record("piece_length", length);
|
||||
|
||||
// Record the start of downloading piece.
|
||||
let piece = self
|
||||
|
@ -524,6 +529,7 @@ impl Piece {
|
|||
// If the piece is downloaded by the other thread,
|
||||
// return the piece directly.
|
||||
if piece.is_finished() {
|
||||
info!("finished piece {} from local", piece_id);
|
||||
return Ok(piece);
|
||||
}
|
||||
|
||||
|
@ -632,7 +638,7 @@ impl Piece {
|
|||
offset,
|
||||
length,
|
||||
&mut response.reader,
|
||||
load_to_cache,
|
||||
self.config.storage.write_piece_timeout,
|
||||
)
|
||||
.await
|
||||
{
|
||||
|
@ -658,7 +664,6 @@ impl Piece {
|
|||
|
||||
/// persistent_cache_id generates a new persistent cache piece id.
|
||||
#[inline]
|
||||
#[instrument(skip_all)]
|
||||
pub fn persistent_cache_id(&self, task_id: &str, number: u32) -> String {
|
||||
self.storage.persistent_cache_piece_id(task_id, number)
|
||||
}
|
||||
|
@ -696,6 +701,7 @@ impl Piece {
|
|||
) -> Result<impl AsyncRead> {
|
||||
// Span record the piece_id.
|
||||
Span::current().record("piece_id", piece_id);
|
||||
Span::current().record("piece_length", length);
|
||||
|
||||
// Acquire the upload rate limiter.
|
||||
self.upload_rate_limiter.acquire(length as usize).await;
|
||||
|
@ -725,6 +731,7 @@ impl Piece {
|
|||
) -> Result<impl AsyncRead> {
|
||||
// Span record the piece_id.
|
||||
Span::current().record("piece_id", piece_id);
|
||||
Span::current().record("piece_length", length);
|
||||
|
||||
// Acquire the download rate limiter.
|
||||
if !disable_rate_limit {
|
||||
|
@ -769,6 +776,7 @@ impl Piece {
|
|||
) -> Result<metadata::Piece> {
|
||||
// Span record the piece_id.
|
||||
Span::current().record("piece_id", piece_id);
|
||||
Span::current().record("piece_length", length);
|
||||
|
||||
if is_prefetch {
|
||||
// Acquire the prefetch rate limiter.
|
||||
|
@ -787,6 +795,7 @@ impl Piece {
|
|||
// If the piece is downloaded by the other thread,
|
||||
// return the piece directly.
|
||||
if piece.is_finished() {
|
||||
info!("finished persistent cache piece {} from local", piece_id);
|
||||
return Ok(piece);
|
||||
}
|
||||
|
||||
|
@ -832,6 +841,7 @@ impl Piece {
|
|||
piece_id,
|
||||
task_id,
|
||||
offset,
|
||||
length,
|
||||
digest.as_str(),
|
||||
parent.id.as_str(),
|
||||
&mut reader,
|
||||
|
|
|
@ -15,20 +15,21 @@
|
|||
*/
|
||||
|
||||
use crate::grpc::dfdaemon_upload::DfdaemonUploadClient;
|
||||
use dashmap::DashMap;
|
||||
use dragonfly_api::common::v2::Host;
|
||||
use dragonfly_api::dfdaemon::v2::{SyncPersistentCachePiecesRequest, SyncPiecesRequest};
|
||||
use dragonfly_client_config::dfdaemon::Config;
|
||||
use dragonfly_client_core::{Error, Result};
|
||||
use dragonfly_client_storage::metadata;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::sync::mpsc::{self, Receiver, Sender};
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::task::JoinSet;
|
||||
use tokio_stream::StreamExt;
|
||||
use tracing::{error, info, instrument, Instrument};
|
||||
|
||||
const DEFAULT_WAIT_FOR_PIECE_FROM_DIFFERENT_PARENTS: Duration = Duration::from_millis(5);
|
||||
|
||||
/// CollectedParent is the parent peer collected from the parent.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct CollectedParent {
|
||||
|
@ -68,14 +69,13 @@ pub struct PieceCollector {
|
|||
/// interested_pieces is the pieces interested by the collector.
|
||||
interested_pieces: Vec<metadata::Piece>,
|
||||
|
||||
/// collected_pieces is the pieces collected from peers.
|
||||
collected_pieces: Arc<Mutex<HashMap<u32, String>>>,
|
||||
/// collected_pieces is a map to store the collected pieces from different parents.
|
||||
collected_pieces: Arc<DashMap<u32, Vec<CollectedParent>>>,
|
||||
}
|
||||
|
||||
/// PieceCollector is used to collect pieces from peers.
|
||||
impl PieceCollector {
|
||||
/// new creates a new PieceCollector.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn new(
|
||||
config: Arc<Config>,
|
||||
host_id: &str,
|
||||
|
@ -83,14 +83,10 @@ impl PieceCollector {
|
|||
interested_pieces: Vec<metadata::Piece>,
|
||||
parents: Vec<CollectedParent>,
|
||||
) -> Self {
|
||||
let collected_pieces =
|
||||
Arc::new(Mutex::new(HashMap::with_capacity(interested_pieces.len())));
|
||||
|
||||
let mut collected_pieces_guard = collected_pieces.lock().await;
|
||||
let collected_pieces = Arc::new(DashMap::with_capacity(interested_pieces.len()));
|
||||
for interested_piece in &interested_pieces {
|
||||
collected_pieces_guard.insert(interested_piece.number, String::new());
|
||||
collected_pieces.insert(interested_piece.number, Vec::new());
|
||||
}
|
||||
drop(collected_pieces_guard);
|
||||
|
||||
Self {
|
||||
config,
|
||||
|
@ -111,7 +107,7 @@ impl PieceCollector {
|
|||
let parents = self.parents.clone();
|
||||
let interested_pieces = self.interested_pieces.clone();
|
||||
let collected_pieces = self.collected_pieces.clone();
|
||||
let collected_piece_timeout = self.config.download.piece_timeout;
|
||||
let collected_piece_timeout = self.config.download.collected_piece_timeout;
|
||||
let (collected_piece_tx, collected_piece_rx) = mpsc::channel(128 * 1024);
|
||||
tokio::spawn(
|
||||
async move {
|
||||
|
@ -136,7 +132,25 @@ impl PieceCollector {
|
|||
collected_piece_rx
|
||||
}
|
||||
|
||||
/// collect_from_parents collects pieces from parents.
|
||||
/// collect_from_parents collects pieces from multiple parents with load balancing strategy.
|
||||
///
|
||||
/// The collection process works in two phases:
|
||||
/// 1. **Synchronization Phase**: Waits for a configured duration (DEFAULT_WAIT_FOR_PIECE_FROM_DIFFERENT_PARENTS)
|
||||
/// to collect the same piece information from different parents. This allows the collector
|
||||
/// to gather multiple sources for each piece.
|
||||
///
|
||||
/// 2. **Selection Phase**: After the wait period, randomly selects one parent from the available
|
||||
/// candidates for each piece and forwards it to the piece downloader.
|
||||
///
|
||||
/// **Load Balancing Strategy**:
|
||||
/// The random parent selection is designed to distribute download load across multiple parents
|
||||
/// during concurrent piece downloads. This approach ensures:
|
||||
/// - Optimal utilization of bandwidth from multiple parent nodes
|
||||
/// - Prevention of overwhelming any single parent with too many requests
|
||||
/// - Better overall download performance through parallel connections
|
||||
///
|
||||
/// This strategy is particularly effective when downloading multiple pieces simultaneously,
|
||||
/// as it naturally spreads the workload across the available parent pool.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all)]
|
||||
async fn collect_from_parents(
|
||||
|
@ -145,7 +159,7 @@ impl PieceCollector {
|
|||
task_id: &str,
|
||||
parents: Vec<CollectedParent>,
|
||||
interested_pieces: Vec<metadata::Piece>,
|
||||
collected_pieces: Arc<Mutex<HashMap<u32, String>>>,
|
||||
collected_pieces: Arc<DashMap<u32, Vec<CollectedParent>>>,
|
||||
collected_piece_tx: Sender<CollectedPiece>,
|
||||
collected_piece_timeout: Duration,
|
||||
) -> Result<()> {
|
||||
|
@ -159,7 +173,7 @@ impl PieceCollector {
|
|||
task_id: String,
|
||||
parent: CollectedParent,
|
||||
interested_pieces: Vec<metadata::Piece>,
|
||||
collected_pieces: Arc<Mutex<HashMap<u32, String>>>,
|
||||
collected_pieces: Arc<DashMap<u32, Vec<CollectedParent>>>,
|
||||
collected_piece_tx: Sender<CollectedPiece>,
|
||||
collected_piece_timeout: Duration,
|
||||
) -> Result<CollectedParent> {
|
||||
|
@ -207,18 +221,33 @@ impl PieceCollector {
|
|||
error!("sync pieces from parent {} failed: {}", parent.id, err);
|
||||
})? {
|
||||
let message = message?;
|
||||
|
||||
// Remove the piece from collected_pieces, avoid to collect the same piece from
|
||||
// different parents.
|
||||
{
|
||||
let mut collected_pieces_guard = collected_pieces.lock().await;
|
||||
if collected_pieces_guard.remove(&message.number).is_none() {
|
||||
continue;
|
||||
}
|
||||
if let Some(mut parents) = collected_pieces.get_mut(&message.number) {
|
||||
parents.push(parent.clone());
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Wait to collect the same piece from different parents after the first
|
||||
// parent reports it.
|
||||
tokio::time::sleep(DEFAULT_WAIT_FOR_PIECE_FROM_DIFFERENT_PARENTS).await;
|
||||
let parents = match collected_pieces.remove(&message.number) {
|
||||
Some((_, parents)) => parents,
|
||||
None => continue,
|
||||
};
|
||||
|
||||
let parent = match parents.get(fastrand::usize(..parents.len())) {
|
||||
Some(parent) => parent,
|
||||
None => {
|
||||
error!(
|
||||
"collected_pieces does not contain parent for piece {}",
|
||||
message.number
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
info!(
|
||||
"received piece {}-{} metadata from parent {}",
|
||||
"picked up piece {}-{} metadata from parent {}",
|
||||
task_id, message.number, parent.id
|
||||
);
|
||||
|
||||
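Condensed into a standalone form, the two phases above — record each parent that reports a piece, wait a short window, then take the entry and pick one candidate at random — can be sketched as follows, assuming the dashmap and fastrand crates and an illustrative `Parent` type in place of CollectedParent.

```rust
use dashmap::DashMap;
use std::sync::Arc;
use std::time::Duration;

#[derive(Clone, Debug)]
struct Parent {
    id: String,
}

async fn pick_parent(
    collected_pieces: Arc<DashMap<u32, Vec<Parent>>>,
    piece_number: u32,
    reporting_parent: Parent,
) -> Option<Parent> {
    // Synchronization phase: record this parent as a candidate for the piece.
    collected_pieces.get_mut(&piece_number)?.push(reporting_parent);

    // Give other parents a short window to report the same piece.
    tokio::time::sleep(Duration::from_millis(5)).await;

    // Selection phase: take the entry (so the piece is only handled once) and
    // pick one of the candidates at random to spread the download load.
    let (_, parents) = collected_pieces.remove(&piece_number)?;
    parents.get(fastrand::usize(..parents.len())).cloned()
}
```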
|
@@ -259,11 +288,7 @@ impl PieceCollector {
info!("peer {} sync pieces finished", peer.id);

// If all pieces are collected, abort all tasks.
let collected_pieces_guard = collected_pieces.lock().await;
let is_empty = collected_pieces_guard.is_empty();
drop(collected_pieces_guard);

if !is_empty {
if collected_pieces.is_empty() {
info!("all pieces are collected, abort all tasks");
join_set.abort_all();
}
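For reference, the emptiness check that replaces the old mutex guard reduces to the following fragment (assumed, simplified types): once the shared map of outstanding pieces is drained, the remaining collector tasks are aborted through the JoinSet.

use dashmap::DashMap;
use std::sync::Arc;
use tokio::task::JoinSet;

/// Aborts all collector tasks once no pieces remain to be collected.
fn abort_if_done(collected_pieces: &Arc<DashMap<u32, Vec<String>>>, join_set: &mut JoinSet<()>) {
    if collected_pieces.is_empty() {
        join_set.abort_all();
    }
}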
@@ -298,14 +323,13 @@ pub struct PersistentCachePieceCollector {
/// interested_pieces is the pieces interested by the collector.
interested_pieces: Vec<metadata::Piece>,

/// collected_pieces is the pieces collected from peers.
collected_pieces: Arc<Mutex<HashMap<u32, String>>>,
/// collected_pieces is a map to store the collected pieces from different parents.
collected_pieces: Arc<DashMap<u32, Vec<CollectedParent>>>,
}

/// PersistentCachePieceCollector is used to collect persistent cache pieces from peers.
impl PersistentCachePieceCollector {
/// new creates a new PieceCollector.
#[instrument(skip_all)]
pub async fn new(
config: Arc<Config>,
host_id: &str,

@@ -313,14 +337,10 @@ impl PersistentCachePieceCollector {
interested_pieces: Vec<metadata::Piece>,
parents: Vec<CollectedParent>,
) -> Self {
let collected_pieces =
Arc::new(Mutex::new(HashMap::with_capacity(interested_pieces.len())));

let mut collected_pieces_guard = collected_pieces.lock().await;
let collected_pieces = Arc::new(DashMap::with_capacity(interested_pieces.len()));
for interested_piece in &interested_pieces {
collected_pieces_guard.insert(interested_piece.number, String::new());
collected_pieces.insert(interested_piece.number, Vec::new());
}
drop(collected_pieces_guard);

Self {
config,

@@ -366,7 +386,25 @@ impl PersistentCachePieceCollector {
collected_piece_rx
}

/// collect_from_parents collects pieces from parents.
/// collect_from_parents collects pieces from multiple parents with load balancing strategy.
///
/// The collection process works in two phases:
/// 1. **Synchronization Phase**: Waits for a configured duration (DEFAULT_WAIT_FOR_PIECE_FROM_DIFFERENT_PARENTS)
/// to collect the same piece information from different parents. This allows the collector
/// to gather multiple sources for each piece.
///
/// 2. **Selection Phase**: After the wait period, randomly selects one parent from the available
/// candidates for each piece and forwards it to the piece downloader.
///
/// **Load Balancing Strategy**:
/// The random parent selection is designed to distribute download load across multiple parents
/// during concurrent piece downloads. This approach ensures:
/// - Optimal utilization of bandwidth from multiple parent nodes
/// - Prevention of overwhelming any single parent with too many requests
/// - Better overall download performance through parallel connections
///
/// This strategy is particularly effective when downloading multiple pieces simultaneously,
/// as it naturally spreads the workload across the available parent pool.
#[allow(clippy::too_many_arguments)]
#[instrument(skip_all)]
async fn collect_from_parents(

@@ -375,7 +413,7 @@ impl PersistentCachePieceCollector {
task_id: &str,
parents: Vec<CollectedParent>,
interested_pieces: Vec<metadata::Piece>,
collected_pieces: Arc<Mutex<HashMap<u32, String>>>,
collected_pieces: Arc<DashMap<u32, Vec<CollectedParent>>>,
collected_piece_tx: Sender<CollectedPiece>,
collected_piece_timeout: Duration,
) -> Result<()> {

@@ -389,7 +427,7 @@ impl PersistentCachePieceCollector {
task_id: String,
parent: CollectedParent,
interested_pieces: Vec<metadata::Piece>,
collected_pieces: Arc<Mutex<HashMap<u32, String>>>,
collected_pieces: Arc<DashMap<u32, Vec<CollectedParent>>>,
collected_piece_tx: Sender<CollectedPiece>,
collected_piece_timeout: Duration,
) -> Result<CollectedParent> {

@@ -443,18 +481,33 @@ impl PersistentCachePieceCollector {
);
})? {
let message = message?;

// Remove the piece from collected_pieces, avoid to collect the same piece from
// different parents.
{
let mut collected_pieces_guard = collected_pieces.lock().await;
if collected_pieces_guard.remove(&message.number).is_none() {
continue;
}
if let Some(mut parents) = collected_pieces.get_mut(&message.number) {
parents.push(parent.clone());
} else {
continue;
}

// Wait for collecting the piece from different parents when the first
// piece is collected.
tokio::time::sleep(DEFAULT_WAIT_FOR_PIECE_FROM_DIFFERENT_PARENTS).await;
let parents = match collected_pieces.remove(&message.number) {
Some((_, parents)) => parents,
None => continue,
};

let parent = match parents.get(fastrand::usize(..parents.len())) {
Some(parent) => parent,
None => {
error!(
"collected_pieces does not contain parent for piece {}",
message.number
);
continue;
}
};

info!(
"received persistent cache piece {}-{} metadata from parent {}",
"picked up piece {}-{} metadata from parent {}",
task_id, message.number, parent.id
);

@@ -495,11 +548,7 @@ impl PersistentCachePieceCollector {
info!("peer {} sync persistent cache pieces finished", peer.id);

// If all pieces are collected, abort all tasks.
let collected_pieces_guard = collected_pieces.lock().await;
let is_empty = collected_pieces_guard.is_empty();
drop(collected_pieces_guard);

if !is_empty {
if collected_pieces.is_empty() {
info!("all persistent cache pieces are collected, abort all tasks");
join_set.abort_all();
}
@@ -66,7 +66,6 @@ pub struct DownloaderFactory {
/// DownloadFactory implements the DownloadFactory trait.
impl DownloaderFactory {
/// new returns a new DownloadFactory.
#[instrument(skip_all)]
pub fn new(protocol: &str, config: Arc<Config>) -> Result<Self> {
let downloader = match protocol {
"grpc" => Arc::new(GRPCDownloader::new(

@@ -84,7 +83,6 @@ impl DownloaderFactory {
}

/// build returns the downloader.
#[instrument(skip_all)]
pub fn build(&self) -> Arc<dyn Downloader> {
self.downloader.clone()
}

@@ -151,7 +149,6 @@ pub struct GRPCDownloader {
/// GRPCDownloader implements the downloader with the gRPC protocol.
impl GRPCDownloader {
/// new returns a new GRPCDownloader.
#[instrument(skip_all)]
pub fn new(config: Arc<Config>, capacity: usize, idle_timeout: Duration) -> Self {
Self {
config,
@@ -20,7 +20,8 @@ use crate::metrics::{
collect_backend_request_started_metrics,
};
use dragonfly_api::common::v2::{
Download, Hdfs, ObjectStorage, Peer, Piece, Task as CommonTask, TrafficType,
Download, Hdfs, ObjectStorage, Peer, Piece, SizeScope, Task as CommonTask, TaskType,
TrafficType,
};
use dragonfly_api::dfdaemon::{
self,

@@ -48,6 +49,7 @@ use dragonfly_client_util::{
id_generator::IDGenerator,
};
use reqwest::header::HeaderMap;
use std::collections::HashMap;
use std::path::Path;
use std::sync::{
atomic::{AtomicBool, Ordering},

@@ -90,7 +92,6 @@ pub struct Task {
/// Task implements the task manager.
impl Task {
/// new returns a new Task.
#[instrument(skip_all)]
pub fn new(
config: Arc<Config>,
id_generator: Arc<IDGenerator>,

@@ -117,6 +118,7 @@ impl Task {
}

/// get gets the metadata of the task.
#[instrument(skip_all)]
pub fn get(&self, id: &str) -> ClientResult<Option<metadata::Task>> {
self.storage.get_task(id)
}

@@ -128,33 +130,30 @@ impl Task {
id: &str,
request: Download,
) -> ClientResult<metadata::Task> {
let task = self
.storage
.download_task_started(id, None, None, None, request.load_to_cache)
.await?;

// Attempt to create a hard link from the task file to the output path.
//
// Behavior based on force_hard_link setting:
// 1. force_hard_link is true:
// - Success: Continue processing
// - Failure: Return error immediately
// 2. force_hard_link is false:
// - Success: Continue processing
// - Failure: Fall back to copying the file instead
if let Some(output_path) = &request.output_path {
if let Err(err) = self
.storage
.hard_link_task(id, Path::new(output_path.as_str()))
.await
{
if request.force_hard_link {
return Err(err);
}
}
}
let task = self.storage.prepare_download_task_started(id).await?;

if task.content_length.is_some() && task.piece_length.is_some() {
// Attempt to create a hard link from the task file to the output path.
//
// Behavior based on force_hard_link setting:
// 1. force_hard_link is true:
// - Success: Continue processing
// - Failure: Return error immediately
// 2. force_hard_link is false:
// - Success: Continue processing
// - Failure: Fall back to copying the file instead
if let Some(output_path) = &request.output_path {
if let Err(err) = self
.storage
.hard_link_task(id, Path::new(output_path.as_str()))
.await
{
if request.force_hard_link {
return Err(err);
}
}
}

return Ok(task);
}
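The force_hard_link comment block above describes the intended fallback behavior. A minimal, self-contained sketch of that decision follows; it uses std::fs directly rather than the client's Storage API, and it collapses the decision into one function, whereas in the client itself the fallback copy happens later in the download flow. The names here are illustrative only.

use std::{fs, io, path::Path};

/// Try to hard link `src` to `dst`. When the hard link fails, either surface
/// the error (force_hard_link == true) or fall back to copying the file.
fn link_or_copy(src: &Path, dst: &Path, force_hard_link: bool) -> io::Result<()> {
    match fs::hard_link(src, dst) {
        Ok(()) => Ok(()),
        Err(err) if force_hard_link => Err(err),
        Err(_) => fs::copy(src, dst).map(|_| ()),
    }
}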
@@ -242,20 +241,38 @@ impl Task {
// store the task.
if !task.is_finished() && !self.storage.has_enough_space(content_length)? {
return Err(Error::NoSpace(format!(
"not enough space to store the persistent cache task: content_length={}",
"not enough space to store the task: content_length={}",
content_length
)));
}

self.storage
.download_task_started(
id,
Some(piece_length),
Some(content_length),
response.http_header,
request.load_to_cache,
)
.await
let task = self
.storage
.download_task_started(id, piece_length, content_length, response.http_header)
.await;

// Attempt to create a hard link from the task file to the output path.
//
// Behavior based on force_hard_link setting:
// 1. force_hard_link is true:
// - Success: Continue processing
// - Failure: Return error immediately
// 2. force_hard_link is false:
// - Success: Continue processing
// - Failure: Fall back to copying the file instead
if let Some(output_path) = &request.output_path {
if let Err(err) = self
.storage
.hard_link_task(id, Path::new(output_path.as_str()))
.await
{
if request.force_hard_link {
return Err(err);
}
}
}

task
}

/// download_finished updates the metadata of the task when the task downloads finished.

@@ -283,7 +300,6 @@ impl Task {
}

/// is_same_dev_inode checks if the task is on the same device inode as the given path.
#[instrument(skip_all)]
pub async fn is_same_dev_inode(&self, id: &str, to: &Path) -> ClientResult<bool> {
self.storage.is_same_dev_inode_as_task(id, to).await
}

@@ -372,6 +388,7 @@ impl Task {
range: request.range,
response_header: task.response_header.clone(),
pieces,
is_finished: task.is_finished(),
},
),
),

@@ -581,7 +598,7 @@ impl Task {
})? {
// Check if the schedule count is exceeded.
schedule_count += 1;
if schedule_count >= self.config.scheduler.max_schedule_count {
if schedule_count > self.config.scheduler.max_schedule_count {
in_stream_tx
.send_timeout(
AnnouncePeerRequest {
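The change from >= to > above shifts the retry boundary by one: with >, exactly max_schedule_count scheduling rounds are allowed before the failure branch runs. A tiny worked example of that boundary, using a hypothetical helper that is not part of the client:

fn exceeded(schedule_count: u32, max_schedule_count: u32) -> bool {
    schedule_count > max_schedule_count
}

fn main() {
    let max = 3;
    assert!(!exceeded(3, max)); // the third schedule is still within the limit
    assert!(exceeded(4, max)); // the fourth schedule triggers the failure path
}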
@@ -717,7 +734,6 @@ impl Task {
remaining_interested_pieces.clone(),
request.is_prefetch,
request.need_piece_content,
request.load_to_cache,
download_progress_tx.clone(),
in_stream_tx.clone(),
)

@@ -961,7 +977,6 @@ impl Task {
interested_pieces: Vec<metadata::Piece>,
is_prefetch: bool,
need_piece_content: bool,
load_to_cache: bool,
download_progress_tx: Sender<Result<DownloadTaskResponse, Status>>,
in_stream_tx: Sender<AnnouncePeerRequest>,
) -> ClientResult<Vec<metadata::Piece>> {

@@ -1022,7 +1037,6 @@ impl Task {
finished_pieces: Arc<Mutex<Vec<metadata::Piece>>>,
is_prefetch: bool,
need_piece_content: bool,
load_to_cache: bool,
) -> ClientResult<metadata::Piece> {
// Limit the concurrent piece count.
let _permit = semaphore.acquire().await.unwrap();

@@ -1043,7 +1057,6 @@ impl Task {
length,
parent.clone(),
is_prefetch,
load_to_cache,
)
.await
.map_err(|err| {

@@ -1116,13 +1129,13 @@ impl Task {
REQUEST_TIMEOUT,
)
.await
.inspect_err(|err| {
.unwrap_or_else(|err| {
error!(
"send DownloadPieceFinishedRequest for piece {} failed: {:?}",
piece_id, err
);
interrupt.store(true, Ordering::SeqCst);
})?;
});

// Send the download progress.
download_progress_tx

@@ -1142,13 +1155,13 @@ impl Task {
REQUEST_TIMEOUT,
)
.await
.inspect_err(|err| {
.unwrap_or_else(|err| {
error!(
"send DownloadPieceFinishedResponse for piece {} failed: {:?}",
piece_id, err
);
interrupt.store(true, Ordering::SeqCst);
})?;
});

info!(
"finished piece {} from parent {:?}",

@@ -1177,7 +1190,6 @@ impl Task {
finished_pieces.clone(),
is_prefetch,
need_piece_content,
load_to_cache,
)
.in_current_span(),
);

@@ -1291,7 +1303,6 @@ impl Task {
request_header: HeaderMap,
is_prefetch: bool,
need_piece_content: bool,
load_to_cache: bool,
piece_manager: Arc<piece::Piece>,
semaphore: Arc<Semaphore>,
download_progress_tx: Sender<Result<DownloadTaskResponse, Status>>,

@@ -1315,7 +1326,6 @@ impl Task {
length,
request_header,
is_prefetch,
load_to_cache,
object_storage,
hdfs,
)

@@ -1375,9 +1385,9 @@ impl Task {
},
REQUEST_TIMEOUT,
)
.await.inspect_err(|err| {
.await.unwrap_or_else(|err| {
error!("send DownloadPieceBackToSourceFinishedRequest for piece {} failed: {:?}", piece_id, err);
})?;
});

// Send the download progress.
download_progress_tx

@@ -1397,12 +1407,12 @@ impl Task {
REQUEST_TIMEOUT,
)
.await
.inspect_err(|err| {
.unwrap_or_else(|err| {
error!(
"send DownloadPieceFinishedResponse for piece {} failed: {:?}",
piece_id, err
);
})?;
});

info!("finished piece {} from source", piece_id);
Ok(metadata)

@@ -1420,7 +1430,6 @@ impl Task {
request_header.clone(),
request.is_prefetch,
request.need_piece_content,
request.load_to_cache,
self.piece.clone(),
semaphore.clone(),
download_progress_tx.clone(),

@@ -1568,6 +1577,11 @@ impl Task {
}
};

if !piece.is_finished() {
debug!("piece {} is not finished, skip it", piece_id);
continue;
}

// Fake the download from the local.
self.piece.download_from_local(task_id, piece.length);
info!("finished piece {} from local", piece_id,);

@@ -1628,12 +1642,12 @@ impl Task {
REQUEST_TIMEOUT,
)
.await
.inspect_err(|err| {
.unwrap_or_else(|err| {
error!(
"send DownloadPieceFinishedResponse for piece {} failed: {:?}",
piece_id, err
);
})?;
});

// Store the finished piece.
finished_pieces.push(interested_piece.clone());

@@ -1682,7 +1696,6 @@ impl Task {
length: u64,
request_header: HeaderMap,
is_prefetch: bool,
load_to_cache: bool,
piece_manager: Arc<piece::Piece>,
semaphore: Arc<Semaphore>,
download_progress_tx: Sender<Result<DownloadTaskResponse, Status>>,

@@ -1705,7 +1718,6 @@ impl Task {
length,
request_header,
is_prefetch,
load_to_cache,
object_storage,
hdfs,
)

@@ -1742,12 +1754,12 @@ impl Task {
REQUEST_TIMEOUT,
)
.await
.inspect_err(|err| {
.unwrap_or_else(|err| {
error!(
"send DownloadPieceFinishedResponse for piece {} failed: {:?}",
piece_id, err
);
})?;
});

info!("finished piece {} from source", piece_id);
Ok(metadata)

@@ -1764,7 +1776,6 @@ impl Task {
interested_piece.length,
request_header.clone(),
request.is_prefetch,
request.load_to_cache,
self.piece.clone(),
semaphore.clone(),
download_progress_tx.clone(),

@@ -1810,7 +1821,74 @@ impl Task {

/// stat_task returns the task metadata.
#[instrument(skip_all)]
pub async fn stat(&self, task_id: &str, host_id: &str) -> ClientResult<CommonTask> {
pub async fn stat(
&self,
task_id: &str,
host_id: &str,
local_only: bool,
) -> ClientResult<CommonTask> {
if local_only {
let Some(task_metadata) = self.storage.get_task(task_id).inspect_err(|err| {
error!("get task {} from local storage error: {:?}", task_id, err);
})?
else {
return Err(Error::TaskNotFound(task_id.to_owned()));
};

let piece_metadatas = self.piece.get_all(task_id).inspect_err(|err| {
error!(
"get pieces for task {} from local storage error: {:?}",
task_id, err
);
})?;

let pieces = piece_metadatas
.into_iter()
.filter(|piece| piece.is_finished())
.map(|piece| {
// The traffic_type indicates whether the first download was from the source or hit the remote peer cache.
// If the parent_id exists, the piece was downloaded from a remote peer. Otherwise, it was
// downloaded from the source.
let traffic_type = match piece.parent_id {
None => TrafficType::BackToSource,
Some(_) => TrafficType::RemotePeer,
};

Piece {
number: piece.number,
parent_id: piece.parent_id.clone(),
offset: piece.offset,
length: piece.length,
digest: piece.digest.clone(),
content: None,
traffic_type: Some(traffic_type as i32),
cost: piece.prost_cost(),
created_at: Some(prost_wkt_types::Timestamp::from(piece.created_at)),
}
})
.collect::<Vec<Piece>>();

return Ok(CommonTask {
id: task_metadata.id,
r#type: TaskType::Standard as i32,
url: String::new(),
digest: None,
tag: None,
application: None,
filtered_query_params: Vec::new(),
request_header: HashMap::new(),
content_length: task_metadata.content_length.unwrap_or(0),
piece_count: pieces.len() as u32,
size_scope: SizeScope::Normal as i32,
pieces,
state: String::new(),
peer_count: 0,
has_available_peer: false,
created_at: Some(prost_wkt_types::Timestamp::from(task_metadata.created_at)),
updated_at: Some(prost_wkt_types::Timestamp::from(task_metadata.updated_at)),
});
}

let task = self
.scheduler_client
.stat_task(StatTaskRequest {
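The local_only branch above derives each piece's traffic type purely from its stored metadata. The rule in the comment can be checked in isolation with a small sketch; the enum below is a local stand-in mirroring the dragonfly_api TrafficType variants used here, for illustration only.

#[derive(Debug, PartialEq)]
enum TrafficType {
    RemotePeer,
    BackToSource,
}

/// A piece with a recorded parent_id was served by a remote peer; a piece
/// without one was downloaded back-to-source.
fn traffic_type(parent_id: Option<&str>) -> TrafficType {
    match parent_id {
        Some(_) => TrafficType::RemotePeer,
        None => TrafficType::BackToSource,
    }
}

fn main() {
    assert_eq!(traffic_type(Some("parent-1")), TrafficType::RemotePeer);
    assert_eq!(traffic_type(None), TrafficType::BackToSource);
}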
@@ -1856,3 +1934,54 @@ impl Task {
}
}
}

#[cfg(test)]
mod tests {
use super::*;
use std::sync::Arc;
use tempfile::tempdir;

// test_delete_task_not_found tests the Task.delete method when the task does not exist.
#[tokio::test]
async fn test_delete_task_not_found() {
// Create a temporary directory for testing.
let temp_dir = tempdir().unwrap();
let log_dir = temp_dir.path().join("log");
std::fs::create_dir_all(&log_dir).unwrap();

// Create configuration.
let config = Config::default();
let config = Arc::new(config);

// Create storage.
let storage = Storage::new(config.clone(), temp_dir.path(), log_dir)
.await
.unwrap();
let storage = Arc::new(storage);

// Test Storage.get_task and Error::TaskNotFound.
let task_id = "non-existent-task-id";

// Verify that non-existent tasks return None.
let task = storage.get_task(task_id).unwrap();
assert!(task.is_none(), "non-existent tasks should return None");

// Create a task and save it to storage.
let task_id = "test-task-id";
storage
.download_task_started(task_id, 1024, 4096, None)
.await
.unwrap();

// Verify that the task exists.
let task = storage.get_task(task_id).unwrap();
assert!(task.is_some(), "task should exist");

// Delete the task from storage.
storage.delete_task(task_id).await;

// Verify that the task has been deleted.
let task = storage.get_task(task_id).unwrap();
assert!(task.is_none(), "task should be deleted");
}
}
@@ -109,3 +109,100 @@ pub async fn shutdown_signal() {
}
}
}

#[cfg(test)]
mod tests {
use super::*;
use tokio::time::{sleep, Duration};

#[tokio::test]
async fn test_shutdown_trigger_and_recv() {
// Create a new shutdown instance.
let mut shutdown = Shutdown::new();

// Trigger the shutdown signal in a separate task.
let shutdown_clone = shutdown.clone();
tokio::spawn(async move {
// Small delay to ensure the receiver is waiting.
sleep(Duration::from_millis(10)).await;
shutdown_clone.trigger();
});

// Wait for the shutdown signal.
shutdown.recv().await;

// Verify that is_shutdown is set to true.
assert!(shutdown.is_shutdown());
}

#[tokio::test]
async fn test_shutdown_multiple_receivers() {
// Create a new shutdown instance.
let mut shutdown1 = Shutdown::new();
let mut shutdown2 = shutdown1.clone();
let mut shutdown3 = shutdown1.clone();

// Trigger the shutdown signal.
shutdown1.trigger();

// All receivers should receive the signal.
shutdown1.recv().await;
shutdown2.recv().await;
shutdown3.recv().await;

// Verify that all instances have is_shutdown set to true.
assert!(shutdown1.is_shutdown());
assert!(shutdown2.is_shutdown());
assert!(shutdown3.is_shutdown());
}

#[tokio::test]
async fn test_shutdown_clone_behavior() {
// Create a new shutdown instance.
let mut shutdown1 = Shutdown::new();

// Set is_shutdown to true.
shutdown1.trigger();
shutdown1.recv().await;
assert!(shutdown1.is_shutdown());

// Clone the instance.
let shutdown2 = shutdown1.clone();

// Verify that the clone has the same is_shutdown value.
assert_eq!(shutdown1.is_shutdown(), shutdown2.is_shutdown());

// Create a new instance before triggering.
let mut shutdown3 = Shutdown::new();
let mut shutdown4 = shutdown3.clone();

// Trigger after cloning.
shutdown3.trigger();

// Both should receive the signal.
shutdown3.recv().await;
shutdown4.recv().await;

assert!(shutdown3.is_shutdown());
assert!(shutdown4.is_shutdown());
}

#[tokio::test]
async fn test_shutdown_already_triggered() {
// Create a new shutdown instance.
let mut shutdown = Shutdown::new();

// Trigger and receive.
shutdown.trigger();
shutdown.recv().await;
assert!(shutdown.is_shutdown());

// Call recv again, should return immediately.
let start = std::time::Instant::now();
shutdown.recv().await;
let elapsed = start.elapsed();

// Verify that recv returned immediately (less than 5ms).
assert!(elapsed < Duration::from_millis(5));
}
}
@@ -67,7 +67,6 @@ pub struct Stats {
/// Stats implements the stats server.
impl Stats {
/// new creates a new Stats.
#[instrument(skip_all)]
pub fn new(
addr: SocketAddr,
shutdown: shutdown::Shutdown,

@@ -81,7 +80,6 @@ impl Stats {
}

/// run starts the stats server.
#[instrument(skip_all)]
pub async fn run(&self) {
// Clone the shutdown channel.
let mut shutdown = self.shutdown.clone();

@@ -110,7 +108,6 @@ impl Stats {
_ = shutdown.recv() => {
// Stats server shutting down with signals.
info!("stats server shutting down");
return
}
}
}
@@ -14,13 +14,19 @@
* limitations under the License.
*/

use opentelemetry::sdk::propagation::TraceContextPropagator;
use dragonfly_client_config::dfdaemon::Host;
use opentelemetry::{global, trace::TracerProvider, KeyValue};
use opentelemetry_otlp::{WithExportConfig, WithTonicConfig};
use opentelemetry_sdk::{propagation::TraceContextPropagator, Resource};
use rolling_file::*;
use std::fs;
use std::path::PathBuf;
use std::str::FromStr;
use std::time::Duration;
use tonic::metadata::{MetadataKey, MetadataMap, MetadataValue};
use tracing::{info, Level};
use tracing_appender::non_blocking::WorkerGuard;
use tracing_log::LogTracer;
use tracing_opentelemetry::OpenTelemetryLayer;
use tracing_subscriber::{
filter::LevelFilter,
fmt::{time::ChronoLocal, Layer},

@@ -28,6 +34,9 @@ use tracing_subscriber::{
EnvFilter, Registry,
};

/// SPAN_EXPORTER_TIMEOUT is the timeout for the span exporter.
const SPAN_EXPORTER_TIMEOUT: Duration = Duration::from_secs(10);

/// init_tracing initializes the tracing system.
#[allow(clippy::too_many_arguments)]
pub fn init_tracing(

@@ -35,8 +44,13 @@ pub fn init_tracing(
log_dir: PathBuf,
log_level: Level,
log_max_files: usize,
jaeger_addr: Option<String>,
verbose: bool,
otel_protocol: Option<String>,
otel_endpoint: Option<String>,
otel_path: Option<PathBuf>,
otel_headers: Option<reqwest::header::HeaderMap>,
host: Option<Host>,
is_seed_peer: bool,
console: bool,
) -> Vec<WorkerGuard> {
let mut guards = vec![];

@@ -45,7 +59,7 @@ pub fn init_tracing(
guards.push(stdout_guard);

// Initialize stdout layer.
let stdout_filter = if verbose {
let stdout_filter = if console {
LevelFilter::DEBUG
} else {
LevelFilter::OFF

@@ -88,31 +102,116 @@ pub fn init_tracing(
let env_filter = EnvFilter::try_from_default_env()
.unwrap_or_else(|_| EnvFilter::default().add_directive(log_level.into()));

// Enable console subscriber layer for tracing spawn tasks on `127.0.0.1:6669` when log level is TRACE.
let console_subscriber_layer = if log_level == Level::TRACE {
Some(console_subscriber::spawn())
} else {
None
};

let subscriber = Registry::default()
.with(env_filter)
.with(console_subscriber_layer)
.with(file_logging_layer)
.with(stdout_logging_layer);

// Setup jaeger layer.
if let Some(jaeger_addr) = jaeger_addr {
opentelemetry::global::set_text_map_propagator(TraceContextPropagator::new());
let tracer = opentelemetry_jaeger::new_agent_pipeline()
.with_service_name(name)
.with_endpoint(jaeger_addr)
.install_batch(opentelemetry::runtime::Tokio)
.expect("install");
let jaeger_layer = tracing_opentelemetry::layer().with_tracer(tracer);
let subscriber = subscriber.with(jaeger_layer);
// If OTLP protocol and endpoint are provided, set up OpenTelemetry tracing.
if let (Some(protocol), Some(endpoint)) = (otel_protocol, otel_endpoint) {
let otlp_exporter = match protocol.as_str() {
"grpc" => {
let mut metadata = MetadataMap::new();
if let Some(headers) = otel_headers {
for (key, value) in headers.iter() {
metadata.insert(
MetadataKey::from_str(key.as_str())
.expect("failed to create metadata key"),
MetadataValue::from_str(value.to_str().unwrap())
.expect("failed to create metadata value"),
);
}
}

tracing::subscriber::set_global_default(subscriber)
.expect("failed to set global subscriber");
let endpoint_url = url::Url::parse(&format!("http://{}", endpoint))
.expect("failed to parse OTLP endpoint URL");

opentelemetry_otlp::SpanExporter::builder()
.with_tonic()
.with_endpoint(endpoint_url)
.with_timeout(SPAN_EXPORTER_TIMEOUT)
.with_metadata(metadata)
.build()
.expect("failed to create OTLP exporter")
}
"http" | "https" => {
let mut endpoint_url = url::Url::parse(&format!("{}://{}", protocol, endpoint))
.expect("failed to parse OTLP endpoint URL");

if let Some(path) = otel_path {
endpoint_url = endpoint_url
.join(path.to_str().unwrap())
.expect("failed to join OTLP endpoint path");
}

opentelemetry_otlp::SpanExporter::builder()
.with_http()
.with_endpoint(endpoint_url.as_str())
.with_protocol(opentelemetry_otlp::Protocol::HttpJson)
.with_timeout(SPAN_EXPORTER_TIMEOUT)
.build()
.expect("failed to create OTLP exporter")
}
_ => {
panic!("unsupported OTLP protocol: {}", protocol);
}
};

let host = host.unwrap();
let provider = opentelemetry_sdk::trace::SdkTracerProvider::builder()
.with_batch_exporter(otlp_exporter)
.with_resource(
Resource::builder()
.with_service_name(format!("{}-{}", name, host.ip.unwrap()))
.with_schema_url(
[
KeyValue::new(
opentelemetry_semantic_conventions::attribute::SERVICE_NAMESPACE,
"dragonfly",
),
KeyValue::new(
opentelemetry_semantic_conventions::attribute::HOST_NAME,
host.hostname,
),
KeyValue::new(
opentelemetry_semantic_conventions::attribute::HOST_IP,
host.ip.unwrap().to_string(),
),
],
opentelemetry_semantic_conventions::SCHEMA_URL,
)
.with_attribute(opentelemetry::KeyValue::new(
"host.idc",
host.idc.unwrap_or_default(),
))
.with_attribute(opentelemetry::KeyValue::new(
"host.location",
host.location.unwrap_or_default(),
))
.with_attribute(opentelemetry::KeyValue::new("host.seed_peer", is_seed_peer))
.build(),
)
.build();

let tracer = provider.tracer(name.to_string());
global::set_tracer_provider(provider.clone());
global::set_text_map_propagator(TraceContextPropagator::new());

let jaeger_layer = OpenTelemetryLayer::new(tracer);
subscriber.with(jaeger_layer).init();
} else {
tracing::subscriber::set_global_default(subscriber)
.expect("failed to set global subscriber");
subscriber.init();
}

LogTracer::init().expect("failed to init LogTracer");

std::panic::set_hook(Box::new(tracing_panic::panic_hook));
info!(
"tracing initialized directory: {}, level: {}",
log_dir.as_path().display(),