Compare commits
No commits in common. "main" and "v1.0.7" have entirely different histories.
Changes below read from "main" (- lines) to "v1.0.7" (+ lines).

@@ -1,2 +0,0 @@
-[build]
-rustflags = ["--cfg", "tokio_unstable"]
@@ -8,4 +8,4 @@ jobs:
   add-assignee:
     runs-on: ubuntu-latest
     steps:
-      - uses: kentaro-m/auto-assign-action@9f6dbe84a80c6e7639d1b9698048b201052a2a94
+      - uses: kentaro-m/auto-assign-action@586b61c136c65d09c1775da39cc4a80e026834f4
@@ -94,7 +94,7 @@ jobs:
           output: 'trivy-results.sarif'
 
       - name: Upload Trivy scan results to GitHub Security tab
-        uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed
+        uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e
         with:
           sarif_file: 'trivy-results.sarif'
 
@@ -189,7 +189,7 @@ jobs:
           output: 'trivy-results.sarif'
 
       - name: Upload Trivy scan results to GitHub Security tab
-        uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed
+        uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e
         with:
           sarif_file: 'trivy-results.sarif'
 
@@ -284,7 +284,7 @@ jobs:
           output: 'trivy-results.sarif'
 
       - name: Upload Trivy scan results to GitHub Security tab
-        uses: github/codeql-action/upload-sarif@76621b61decf072c1cee8dd1ce2d2a82d33c17ed
+        uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e
         with:
           sarif_file: 'trivy-results.sarif'
 
@@ -52,7 +52,7 @@ jobs:
           target: ${{ matrix.target }}
 
       - name: Install cargo-deb
-        uses: taiki-e/cache-cargo-install-action@b33c63d3b3c85540f4eba8a4f71a5cc0ce030855
+        uses: taiki-e/cache-cargo-install-action@1bb5728d7988b14bfdd9690a8e5399fc8a3f75ab
         with:
           # Don't upgrade cargo-deb, refer to https://github.com/kornelski/cargo-deb/issues/169.
           tool: cargo-deb@2.10.0
@@ -119,7 +119,7 @@ jobs:
       contents: write
     steps:
       - name: Download Release Artifacts
-        uses: actions/download-artifact@v5
+        uses: actions/download-artifact@v4
         with:
           path: releases
           pattern: release-*
Cargo.lock

@@ -603,9 +603,9 @@ dependencies = [
 
 [[package]]
 name = "clap"
-version = "4.5.45"
+version = "4.5.41"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fc0e74a703892159f5ae7d3aac52c8e6c392f5ae5f359c70b5881d60aaac318"
+checksum = "be92d32e80243a54711e5d7ce823c35c41c9d929dc4ab58e1276f625841aadf9"
 dependencies = [
  "clap_builder",
  "clap_derive",
@@ -613,9 +613,9 @@ dependencies = [
 
 [[package]]
 name = "clap_builder"
-version = "4.5.44"
+version = "4.5.41"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3e7f4214277f3c7aa526a59dd3fbe306a370daee1f8b7b8c987069cd8e888a8"
+checksum = "707eab41e9622f9139419d573eca0900137718000c517d47da73045f54331c3d"
 dependencies = [
  "anstream",
  "anstyle",
@@ -625,9 +625,9 @@ dependencies = [
 
 [[package]]
 name = "clap_derive"
-version = "4.5.45"
+version = "4.5.41"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14cb31bb0a7d536caef2639baa7fad459e15c3144efefa6dbd1c84562c4739f6"
+checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491"
 dependencies = [
  "heck",
  "proc-macro2",
@@ -660,45 +660,6 @@ dependencies = [
  "windows-sys 0.60.2",
 ]
 
-[[package]]
-name = "console-api"
-version = "0.8.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8030735ecb0d128428b64cd379809817e620a40e5001c54465b99ec5feec2857"
-dependencies = [
- "futures-core",
- "prost 0.13.5",
- "prost-types 0.13.5",
- "tonic",
- "tracing-core",
-]
-
-[[package]]
-name = "console-subscriber"
-version = "0.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6539aa9c6a4cd31f4b1c040f860a1eac9aa80e7df6b05d506a6e7179936d6a01"
-dependencies = [
- "console-api",
- "crossbeam-channel",
- "crossbeam-utils",
- "futures-task",
- "hdrhistogram",
- "humantime",
- "hyper-util",
- "prost 0.13.5",
- "prost-types 0.13.5",
- "serde",
- "serde_json",
- "thread_local",
- "tokio",
- "tokio-stream",
- "tonic",
- "tracing",
- "tracing-core",
- "tracing-subscriber",
-]
-
 [[package]]
 name = "const-oid"
 version = "0.9.6"
@@ -978,9 +939,9 @@ dependencies = [
 
 [[package]]
 name = "dragonfly-api"
-version = "2.1.57"
+version = "2.1.49"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d07e740a105d6dd2ce968318897beaf37ef8b8f581fbae3d0e227722857786b"
+checksum = "71caaf1841c95fdffc19943e413db6f1e8f88068b381f148e0645dba9a722841"
 dependencies = [
  "prost 0.13.5",
  "prost-types 0.14.1",
@@ -993,14 +954,13 @@ dependencies = [
 
 [[package]]
 name = "dragonfly-client"
-version = "1.0.10"
+version = "1.0.7"
 dependencies = [
  "anyhow",
  "bytes",
  "bytesize",
  "chrono",
  "clap",
- "console-subscriber",
  "dashmap",
  "dragonfly-api",
  "dragonfly-client-backend",
@@ -1010,6 +970,7 @@ dependencies = [
  "dragonfly-client-util",
  "fastrand",
  "fs2",
+ "fslock",
  "futures",
  "glob",
  "hashring",
@@ -1024,6 +985,7 @@ dependencies = [
  "lazy_static",
  "leaky-bucket",
  "local-ip-address",
+ "lru",
  "openssl",
  "opentelemetry",
  "opentelemetry-otlp",
@@ -1061,12 +1023,13 @@ dependencies = [
  "tracing-subscriber",
  "url",
  "uuid",
+ "validator",
  "warp",
 ]
 
 [[package]]
 name = "dragonfly-client-backend"
-version = "1.0.10"
+version = "1.0.7"
 dependencies = [
  "dragonfly-api",
  "dragonfly-client-core",
@@ -1097,7 +1060,7 @@ dependencies = [
 
 [[package]]
 name = "dragonfly-client-config"
-version = "1.0.10"
+version = "1.0.7"
 dependencies = [
  "bytesize",
  "bytesize-serde",
@@ -1127,7 +1090,7 @@ dependencies = [
 
 [[package]]
 name = "dragonfly-client-core"
-version = "1.0.10"
+version = "1.0.7"
 dependencies = [
  "headers 0.4.1",
  "hyper 1.6.0",
@@ -1145,7 +1108,7 @@ dependencies = [
 
 [[package]]
 name = "dragonfly-client-init"
-version = "1.0.10"
+version = "1.0.7"
 dependencies = [
  "anyhow",
  "clap",
@@ -1155,6 +1118,7 @@ dependencies = [
  "serde_json",
  "tempfile",
  "tokio",
+ "toml",
  "toml_edit",
  "tracing",
  "url",
@@ -1162,7 +1126,7 @@ dependencies = [
 
 [[package]]
 name = "dragonfly-client-storage"
-version = "1.0.10"
+version = "1.0.7"
 dependencies = [
  "bincode",
  "bytes",
@@ -1175,11 +1139,13 @@ dependencies = [
  "dragonfly-client-core",
  "dragonfly-client-util",
  "fs2",
+ "lru",
  "num_cpus",
  "prost-wkt-types",
  "reqwest",
  "rocksdb",
  "serde",
+ "sha2",
  "tempfile",
  "tokio",
  "tokio-util",
@@ -1189,7 +1155,7 @@ dependencies = [
 
 [[package]]
 name = "dragonfly-client-util"
-version = "1.0.10"
+version = "1.0.7"
 dependencies = [
  "base64 0.22.1",
  "bytesize",
@@ -1199,6 +1165,7 @@ dependencies = [
  "hex",
  "http 1.3.1",
  "http-range-header",
+ "hyper 1.6.0",
  "lazy_static",
  "lru",
  "openssl",
@@ -1210,7 +1177,6 @@ dependencies = [
  "rustls-pemfile 2.2.0",
  "rustls-pki-types",
  "sha2",
- "sysinfo",
  "tempfile",
  "tokio",
  "tracing",
@@ -1370,6 +1336,16 @@ dependencies = [
  "winapi",
 ]
 
+[[package]]
+name = "fslock"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04412b8935272e3a9bae6f48c7bfff74c2911f60525404edfdd28e49884c3bfb"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
 [[package]]
 name = "futures"
 version = "0.3.31"
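v1.0.7 adds `fslock` to `dragonfly-client`'s dependencies (the `+ "fslock",` entry earlier and the new package block above). As a hedged illustration of what the crate provides, here is a minimal sketch of its advisory file-locking API; the lock path is hypothetical and not taken from this diff.

```rust
use fslock::LockFile;

fn main() -> Result<(), fslock::Error> {
    // Hypothetical path: the real client would use its own configured location.
    let mut lock = LockFile::open("/tmp/dfdaemon.lock")?;

    // try_lock returns Ok(false) instead of blocking when another process
    // already holds the lock, which suits a single-instance daemon check.
    if !lock.try_lock()? {
        eprintln!("another instance is already running");
        std::process::exit(1);
    }

    // ... run the daemon; the lock is released when `lock` is dropped.
    Ok(())
}
```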
@@ -1502,9 +1478,9 @@ checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
 
 [[package]]
 name = "glob"
-version = "0.3.3"
+version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280"
+checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2"
 
 [[package]]
 name = "h2"
@@ -1588,26 +1564,13 @@ dependencies = [
 
 [[package]]
 name = "hdfs"
-version = "1.0.10"
+version = "1.0.7"
 dependencies = [
  "dragonfly-client-backend",
  "dragonfly-client-core",
  "tonic",
 ]
 
-[[package]]
-name = "hdrhistogram"
-version = "7.5.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d"
-dependencies = [
- "base64 0.21.7",
- "byteorder",
- "flate2",
- "nom",
- "num-traits",
-]
-
 [[package]]
 name = "headers"
 version = "0.3.9"
@@ -1826,7 +1789,7 @@ dependencies = [
  "httpdate",
  "itoa",
  "pin-project-lite",
- "socket2 0.5.9",
+ "socket2",
  "tokio",
  "tower-service",
  "tracing",
@@ -1904,9 +1867,9 @@ dependencies = [
 
 [[package]]
 name = "hyper-util"
-version = "0.1.16"
+version = "0.1.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e"
+checksum = "7f66d5bd4c6f02bf0542fad85d626775bab9258cf795a4256dcaf3161114d1df"
 dependencies = [
  "bytes",
  "futures-channel",
@@ -1917,7 +1880,7 @@ dependencies = [
  "hyper 1.6.0",
  "libc",
  "pin-project-lite",
- "socket2 0.6.0",
+ "socket2",
  "tokio",
  "tower-service",
  "tracing",
@@ -2868,9 +2831,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
 
 [[package]]
 name = "openssl-src"
-version = "300.5.1+3.5.1"
+version = "300.2.1+3.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "735230c832b28c000e3bc117119e6466a663ec73506bc0a9907ea4187508e42a"
+checksum = "3fe476c29791a5ca0d1273c697e96085bbabbbea2ef7afd5617e78a4b40332d3"
 dependencies = [
  "cc",
 ]
@@ -4370,9 +4333,9 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.142"
+version = "1.0.141"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7"
+checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3"
 dependencies = [
  "itoa",
  "memchr",
@@ -4390,6 +4353,15 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "serde_spanned"
+version = "0.6.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3"
+dependencies = [
+ "serde",
+]
+
 [[package]]
 name = "serde_urlencoded"
 version = "0.7.1"
@@ -4514,16 +4486,6 @@ dependencies = [
  "windows-sys 0.52.0",
 ]
 
-[[package]]
-name = "socket2"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807"
-dependencies = [
- "libc",
- "windows-sys 0.59.0",
-]
-
 [[package]]
 name = "spin"
 version = "0.5.2"
@@ -4909,9 +4871,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
 
 [[package]]
 name = "tokio"
-version = "1.47.1"
+version = "1.46.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038"
+checksum = "0cc3a2344dafbe23a245241fe8b09735b521110d30fcefbbd5feb1797ca35d17"
 dependencies = [
  "backtrace",
  "bytes",
@@ -4922,10 +4884,9 @@ dependencies = [
  "pin-project-lite",
  "signal-hook-registry",
  "slab",
- "socket2 0.6.0",
+ "socket2",
  "tokio-macros",
- "tracing",
- "windows-sys 0.59.0",
+ "windows-sys 0.52.0",
 ]
 
 [[package]]
@@ -4996,9 +4957,9 @@ dependencies = [
 
 [[package]]
 name = "tokio-util"
-version = "0.7.16"
+version = "0.7.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5"
+checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df"
 dependencies = [
  "bytes",
  "futures-core",
@@ -5011,11 +4972,26 @@ dependencies = [
  "tokio",
 ]
 
+[[package]]
+name = "toml"
+version = "0.8.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362"
+dependencies = [
+ "serde",
+ "serde_spanned",
+ "toml_datetime",
+ "toml_edit",
+]
+
 [[package]]
 name = "toml_datetime"
 version = "0.6.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c"
+dependencies = [
+ "serde",
+]
 
 [[package]]
 name = "toml_edit"
@@ -5024,6 +5000,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
 dependencies = [
  "indexmap 2.5.0",
+ "serde",
+ "serde_spanned",
  "toml_datetime",
  "toml_write",
  "winnow",
@@ -5057,7 +5035,7 @@ dependencies = [
  "pin-project",
  "prost 0.13.5",
  "rustls-pemfile 2.2.0",
- "socket2 0.5.9",
+ "socket2",
  "tokio",
  "tokio-rustls 0.26.0",
  "tokio-stream",
Cargo.toml (29 changed lines)
@@ -12,7 +12,7 @@ members = [
 ]
 
 [workspace.package]
-version = "1.0.10"
+version = "1.0.7"
 authors = ["The Dragonfly Developers"]
 homepage = "https://d7y.io/"
 repository = "https://github.com/dragonflyoss/client.git"
@@ -22,14 +22,14 @@ readme = "README.md"
 edition = "2021"
 
 [workspace.dependencies]
-dragonfly-client = { path = "dragonfly-client", version = "1.0.10" }
-dragonfly-client-core = { path = "dragonfly-client-core", version = "1.0.10" }
-dragonfly-client-config = { path = "dragonfly-client-config", version = "1.0.10" }
-dragonfly-client-storage = { path = "dragonfly-client-storage", version = "1.0.10" }
-dragonfly-client-backend = { path = "dragonfly-client-backend", version = "1.0.10" }
-dragonfly-client-util = { path = "dragonfly-client-util", version = "1.0.10" }
-dragonfly-client-init = { path = "dragonfly-client-init", version = "1.0.10" }
-dragonfly-api = "2.1.57"
+dragonfly-client = { path = "dragonfly-client", version = "1.0.7" }
+dragonfly-client-core = { path = "dragonfly-client-core", version = "1.0.7" }
+dragonfly-client-config = { path = "dragonfly-client-config", version = "1.0.7" }
+dragonfly-client-storage = { path = "dragonfly-client-storage", version = "1.0.7" }
+dragonfly-client-backend = { path = "dragonfly-client-backend", version = "1.0.7" }
+dragonfly-client-util = { path = "dragonfly-client-util", version = "1.0.7" }
+dragonfly-client-init = { path = "dragonfly-client-init", version = "1.0.7" }
+dragonfly-api = "=2.1.49"
 thiserror = "2.0"
 futures = "0.3.31"
 reqwest = { version = "0.12.4", features = [
@@ -46,7 +46,7 @@ reqwest = { version = "0.12.4", features = [
 reqwest-middleware = "0.4"
 rcgen = { version = "0.12.1", features = ["x509-parser"] }
 hyper = { version = "1.6", features = ["full"] }
-hyper-util = { version = "0.1.16", features = [
+hyper-util = { version = "0.1.15", features = [
   "client",
   "client-legacy",
   "tokio",
@@ -71,8 +71,8 @@ serde_yaml = "0.9"
 http = "1"
 tonic = { version = "0.12.2", features = ["tls"] }
 tonic-reflection = "0.12.3"
-tokio = { version = "1.47.1", features = ["full", "tracing"] }
-tokio-util = { version = "0.7.16", features = ["full"] }
+tokio = { version = "1.46.1", features = ["full"] }
+tokio-util = { version = "0.7.15", features = ["full"] }
 tokio-stream = "0.1.17"
 validator = { version = "0.16", features = ["derive"] }
 warp = "0.3.5"
@@ -91,7 +91,7 @@ opendal = { version = "0.48.0", features = [
   "services-cos",
   "services-webhdfs",
 ] }
-clap = { version = "4.5.45", features = ["derive"] }
+clap = { version = "4.5.41", features = ["derive"] }
 anyhow = "1.0.98"
 toml_edit = "0.22.26"
 toml = "0.8.23"
@@ -100,13 +100,12 @@ bytesize-serde = "0.2.1"
 percent-encoding = "2.3.1"
 tempfile = "3.20.0"
 tokio-rustls = "0.25.0-alpha.4"
-serde_json = "1.0.142"
+serde_json = "1.0.141"
 lru = "0.12.5"
 fs2 = "0.4.3"
 lazy_static = "1.5"
 bytes = "1.10"
 local-ip-address = "0.6.5"
-sysinfo = { version = "0.32.1", default-features = false, features = ["component", "disk", "network", "system", "user"] }
 
 [profile.release]
 opt-level = 3
@@ -7,7 +7,6 @@ RUN apt-get update && apt-get install -y \
     && rm -rf /var/lib/apt/lists/*
 
 COPY Cargo.toml Cargo.lock ./
-COPY .cargo ./cargo
 
 COPY dragonfly-client/Cargo.toml ./dragonfly-client/Cargo.toml
 COPY dragonfly-client/src ./dragonfly-client/src
@@ -41,8 +40,6 @@ RUN case "${TARGETPLATFORM}" in \
     esac && \
     cargo build --release --verbose --bin dfget --bin dfdaemon --bin dfcache
 
-RUN cargo install tokio-console --locked --root /usr/local
-
 FROM public.ecr.aws/docker/library/alpine:3.20 AS health
 
 ENV GRPC_HEALTH_PROBE_VERSION=v0.4.24
@@ -59,7 +56,6 @@ RUN if [ "$(uname -m)" = "ppc64le" ]; then \
 FROM public.ecr.aws/docker/library/golang:1.23.0-alpine3.20 AS pprof
 
 RUN go install github.com/google/pprof@latest
-RUN go install github.com/fullstorydev/grpcurl/cmd/grpcurl@latest
 
 FROM public.ecr.aws/debian/debian:bookworm-slim
 
@@ -71,9 +67,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends iperf3 fio curl
 COPY --from=builder /app/client/target/release/dfget /usr/local/bin/dfget
 COPY --from=builder /app/client/target/release/dfdaemon /usr/local/bin/dfdaemon
 COPY --from=builder /app/client/target/release/dfcache /usr/local/bin/dfcache
-COPY --from=builder /usr/local/bin/tokio-console /usr/local/bin/
 COPY --from=pprof /go/bin/pprof /bin/pprof
-COPY --from=pprof /go/bin/grpcurl /bin/grpcurl
 COPY --from=health /bin/grpc_health_probe /bin/grpc_health_probe
 
 ENTRYPOINT ["/usr/local/bin/dfdaemon"]
@@ -7,7 +7,6 @@ RUN apt-get update && apt-get install -y \
     && rm -rf /var/lib/apt/lists/*
 
 COPY Cargo.toml Cargo.lock ./
-COPY .cargo ./cargo
 
 COPY dragonfly-client/Cargo.toml ./dragonfly-client/Cargo.toml
 COPY dragonfly-client/src ./dragonfly-client/src
@@ -43,7 +42,6 @@ RUN case "${TARGETPLATFORM}" in \
 
 RUN cargo install flamegraph --root /usr/local
 RUN cargo install bottom --locked --root /usr/local
-RUN cargo install tokio-console --locked --root /usr/local
 
 FROM public.ecr.aws/docker/library/alpine:3.20 AS health
 
@@ -61,7 +59,6 @@ RUN if [ "$(uname -m)" = "ppc64le" ]; then \
 FROM public.ecr.aws/docker/library/golang:1.23.0-alpine3.20 AS pprof
 
 RUN go install github.com/google/pprof@latest
-RUN go install github.com/fullstorydev/grpcurl/cmd/grpcurl@latest
 
 FROM public.ecr.aws/debian/debian:bookworm-slim
 
@@ -75,9 +72,7 @@ COPY --from=builder /app/client/target/debug/dfdaemon /usr/local/bin/dfdaemon
 COPY --from=builder /app/client/target/debug/dfcache /usr/local/bin/dfcache
 COPY --from=builder /usr/local/bin/flamegraph /usr/local/bin/
 COPY --from=builder /usr/local/bin/btm /usr/local/bin/
-COPY --from=builder /usr/local/bin/tokio-console /usr/local/bin/
 COPY --from=pprof /go/bin/pprof /bin/pprof
-COPY --from=pprof /go/bin/grpcurl /bin/grpcurl
 COPY --from=health /bin/grpc_health_probe /bin/grpc_health_probe
 
 ENTRYPOINT ["/usr/local/bin/dfdaemon"]
@@ -7,7 +7,6 @@ RUN apt-get update && apt-get install -y \
 WORKDIR /app/client
 
 COPY Cargo.toml Cargo.lock ./
-COPY .cargo ./cargo
 
 COPY dragonfly-client/Cargo.toml ./dragonfly-client/Cargo.toml
 COPY dragonfly-client/src ./dragonfly-client/src
@@ -50,22 +50,11 @@ impl HTTP {
             .with_custom_certificate_verifier(NoVerifier::new())
             .with_no_client_auth();
 
-        // Disable automatic compression to prevent double-decompression issues.
-        //
-        // Problem scenario:
-        // 1. Origin server supports gzip and returns "content-encoding: gzip" header.
-        // 2. Backend decompresses the response and stores uncompressed content to disk.
-        // 3. When user's client downloads via dfdaemon proxy, the original "content-encoding: gzip".
-        // header is forwarded to it.
-        // 4. User's client attempts to decompress the already-decompressed content, causing errors.
-        //
-        // Solution: Disable all compression formats (gzip, brotli, zstd, deflate) to ensure
-        // we receive and store uncompressed content, eliminating the double-decompression issue.
         let client = reqwest::Client::builder()
-            .no_gzip()
-            .no_brotli()
-            .no_zstd()
-            .no_deflate()
+            .gzip(true)
+            .brotli(true)
+            .zstd(true)
+            .deflate(true)
             .use_preconfigured_tls(client_config_builder)
             .pool_max_idle_per_host(super::POOL_MAX_IDLE_PER_HOST)
             .tcp_keepalive(super::KEEP_ALIVE_INTERVAL)
@@ -99,22 +88,11 @@ impl HTTP {
             .with_root_certificates(root_cert_store)
             .with_no_client_auth();
 
-        // Disable automatic compression to prevent double-decompression issues.
-        //
-        // Problem scenario:
-        // 1. Origin server supports gzip and returns "content-encoding: gzip" header.
-        // 2. Backend decompresses the response and stores uncompressed content to disk.
-        // 3. When user's client downloads via dfdaemon proxy, the original "content-encoding: gzip".
-        // header is forwarded to it.
-        // 4. User's client attempts to decompress the already-decompressed content, causing errors.
-        //
-        // Solution: Disable all compression formats (gzip, brotli, zstd, deflate) to ensure
-        // we receive and store uncompressed content, eliminating the double-decompression issue.
         let client = reqwest::Client::builder()
-            .no_gzip()
-            .no_brotli()
-            .no_zstd()
-            .no_deflate()
+            .gzip(true)
+            .brotli(true)
+            .zstd(true)
+            .deflate(true)
             .use_preconfigured_tls(client_config_builder)
             .build()?;
 
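The two hunks above invert reqwest's decompression behavior: main disables every format so the backend stores bytes exactly as the origin sent them (the removed comment explains the double-decompression problem), while v1.0.7 lets reqwest decompress transparently. A minimal sketch contrasting the two configurations, assuming reqwest's gzip/brotli/zstd/deflate features are enabled; TLS and pooling options are omitted.

```rust
// Sketch only: contrasts the two builder configurations seen in this diff.
fn build_clients() -> reqwest::Result<(reqwest::Client, reqwest::Client)> {
    // main: no automatic decompression, so a body stored to disk still matches
    // the content-encoding header later forwarded to downstream clients.
    let store_raw = reqwest::Client::builder()
        .no_gzip()
        .no_brotli()
        .no_zstd()
        .no_deflate()
        .build()?;

    // v1.0.7: reqwest advertises and transparently decodes compressed bodies.
    let transparent = reqwest::Client::builder()
        .gzip(true)
        .brotli(true)
        .zstd(true)
        .deflate(true)
        .build()?;

    Ok((store_raw, transparent))
}
```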
@@ -160,13 +138,6 @@ impl super::Backend for HTTP {
             .client(request.client_cert)?
             .get(&request.url)
             .headers(header)
-            // Add Range header to ensure Content-Length is returned in response headers.
-            // Some servers (especially when using Transfer-Encoding: chunked,
-            // refer to https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Transfer-Encoding.) may not
-            // include Content-Length in HEAD requests. Using "bytes=0-" requests the
-            // entire file starting from byte 0, forcing the server to include file size
-            // information in the response headers.
-            .header(reqwest::header::RANGE, "bytes=0-")
             .timeout(request.timeout)
             .send()
             .await
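The removed `Range: bytes=0-` header was a probing trick: servers replying with `Transfer-Encoding: chunked` may omit `Content-Length`, but a ranged request forces them to reveal the total size. A minimal sketch of that pattern, assuming a plain reqwest client; the `Content-Range` parsing is illustrative and not code from this repository.

```rust
use reqwest::header::{CONTENT_LENGTH, CONTENT_RANGE, RANGE};

// Sketch: ask for "bytes=0-" so the size shows up even for chunked responses.
async fn probe_size(client: &reqwest::Client, url: &str) -> reqwest::Result<Option<u64>> {
    let response = client.get(url).header(RANGE, "bytes=0-").send().await?;

    // A 206 reply carries "Content-Range: bytes 0-<end>/<total>".
    if let Some(total) = response
        .headers()
        .get(CONTENT_RANGE)
        .and_then(|value| value.to_str().ok())
        .and_then(|value| value.rsplit('/').next())
        .and_then(|total| total.parse::<u64>().ok())
    {
        return Ok(Some(total));
    }

    // Plain 200 replies may still expose Content-Length directly.
    Ok(response
        .headers()
        .get(CONTENT_LENGTH)
        .and_then(|value| value.to_str().ok())
        .and_then(|value| value.parse::<u64>().ok()))
}
```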
@@ -226,6 +226,18 @@ fn default_storage_cache_capacity() -> ByteSize {
     ByteSize::mib(64)
 }
 
+/// default_seed_peer_cluster_id is the default cluster id of seed peer.
+#[inline]
+fn default_seed_peer_cluster_id() -> u64 {
+    1
+}
+
+/// default_seed_peer_keepalive_interval is the default interval to keepalive with manager.
+#[inline]
+fn default_seed_peer_keepalive_interval() -> Duration {
+    Duration::from_secs(15)
+}
+
 /// default_gc_interval is the default interval to do gc.
 #[inline]
 fn default_gc_interval() -> Duration {
@@ -912,6 +924,18 @@ pub struct SeedPeer {
     /// kind is the type of seed peer.
     #[serde(default, rename = "type")]
     pub kind: HostType,
+
+    /// cluster_id is the cluster id of the seed peer cluster.
+    #[serde(default = "default_seed_peer_cluster_id", rename = "clusterID")]
+    #[validate(range(min = 1))]
+    pub cluster_id: u64,
+
+    /// keepalive_interval is the interval to keep alive with manager.
+    #[serde(
+        default = "default_seed_peer_keepalive_interval",
+        with = "humantime_serde"
+    )]
+    pub keepalive_interval: Duration,
 }
 
 /// SeedPeer implements Default.
@@ -920,6 +944,8 @@ impl Default for SeedPeer {
         SeedPeer {
             enable: false,
             kind: HostType::Normal,
+            cluster_id: default_seed_peer_cluster_id(),
+            keepalive_interval: default_seed_peer_keepalive_interval(),
         }
     }
 }
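The new `cluster_id` and `keepalive_interval` fields combine three mechanisms visible above: a serde rename to `clusterID`, a `validator` range check, and `humantime_serde` for human-readable durations. A minimal self-contained sketch of how such a struct deserializes and validates; it assumes serde's default snake_case key for `keepalive_interval` and `serde_yaml` for parsing, both of which may differ in the real config.

```rust
use serde::Deserialize;
use std::time::Duration;
use validator::Validate;

// Sketch mirroring the fields added in this hunk; HostType and the rest of
// the SeedPeer config are omitted.
#[derive(Debug, Deserialize, Validate)]
pub struct SeedPeer {
    #[serde(default)]
    pub enable: bool,

    // Defaults to 1 and must be >= 1, like default_seed_peer_cluster_id.
    #[serde(default = "default_cluster_id", rename = "clusterID")]
    #[validate(range(min = 1))]
    pub cluster_id: u64,

    // humantime_serde parses strings such as "15s" or "1m30s".
    #[serde(default = "default_keepalive", with = "humantime_serde")]
    pub keepalive_interval: Duration,
}

fn default_cluster_id() -> u64 {
    1
}

fn default_keepalive() -> Duration {
    Duration::from_secs(15)
}

fn main() {
    let yaml = "enable: true\nclusterID: 2\nkeepalive_interval: 60s\n";
    let seed_peer: SeedPeer = serde_yaml::from_str(yaml).unwrap();
    assert_eq!(seed_peer.cluster_id, 2);
    assert_eq!(seed_peer.keepalive_interval, Duration::from_secs(60));
    assert!(seed_peer.validate().is_ok()); // cluster_id 0 would fail here
}
```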
@@ -1001,33 +1027,31 @@ pub struct Storage {
     /// cache_capacity is the cache capacity for downloading, default is 100.
     ///
     /// Cache storage:
-    /// 1. Users can preheat task by caching to memory (via CacheTask) or to disk (via Task).
-    ///    For more details, refer to https://github.com/dragonflyoss/api/blob/main/proto/dfdaemon.proto#L174.
+    /// 1. Users can create preheating jobs and preheat tasks to memory and disk by setting `load_to_cache` to `true`.
+    ///    For more details, refer to https://github.com/dragonflyoss/api/blob/main/proto/common.proto#L443.
     /// 2. If the download hits the memory cache, it will be faster than reading from the disk, because there is no
     ///    page cache for the first read.
     ///
-    ///```text
-    /// +--------+
-    /// │ Source │
-    /// +--------+
-    /// ^ ^ Preheat
-    /// │ │ |
-    /// +-----------------+ │ │ +----------------------------+
-    /// │ Other Peers │ │ │ │ Peer | │
-    /// │ │ │ │ │ v │
-    /// │ +----------+ │ │ │ │ +----------+ │
-    /// │ │ Cache |<--|----------|<-Miss--| Cache |--Hit-->|<----Download CacheTask
-    /// │ +----------+ │ │ │ +----------+ │
-    /// │ │ │ │ │
-    /// │ +----------+ │ │ │ +----------+ │
-    /// │ │ Disk |<--|----------|<-Miss--| Disk |--Hit-->|<----Download Task
-    /// │ +----------+ │ │ +----------+ │
-    /// │ │ │ ^ │
-    /// │ │ │ | │
-    /// +-----------------+ +----------------------------+
-    /// |
-    /// Preheat
-    ///```
+    /// ```text
+    /// 1.Preheat
+    /// |
+    /// |
+    /// +--------------------------------------------------+
+    /// | | Peer |
+    /// | | +-----------+ |
+    /// | | -- Partial -->| Cache | |
+    /// | | | +-----------+ |
+    /// | v | | | |
+    /// | Download | Miss | |
+    /// | Task -->| | --- Hit ------>|<-- 2.Download
+    /// | | | ^ |
+    /// | | v | |
+    /// | | +-----------+ | |
+    /// | -- Full -->| Disk |---------- |
+    /// | +-----------+ |
+    /// | |
+    /// +--------------------------------------------------+
+    /// ```
     #[serde(with = "bytesize_serde", default = "default_storage_cache_capacity")]
     pub cache_capacity: ByteSize,
 }
@@ -1989,6 +2013,11 @@ key: /etc/ssl/private/client.pem
         let default_seed_peer = SeedPeer::default();
         assert!(!default_seed_peer.enable);
         assert_eq!(default_seed_peer.kind, HostType::Normal);
+        assert_eq!(default_seed_peer.cluster_id, 1);
+        assert_eq!(
+            default_seed_peer.keepalive_interval,
+            default_seed_peer_keepalive_interval()
+        );
     }
 
     #[test]
@@ -1996,9 +2025,20 @@ key: /etc/ssl/private/client.pem
         let valid_seed_peer = SeedPeer {
             enable: true,
             kind: HostType::Weak,
+            cluster_id: 5,
+            keepalive_interval: Duration::from_secs(90),
         };
 
         assert!(valid_seed_peer.validate().is_ok());
+
+        let invalid_seed_peer = SeedPeer {
+            enable: true,
+            kind: HostType::Weak,
+            cluster_id: 0,
+            keepalive_interval: Duration::from_secs(90),
+        };
+
+        assert!(invalid_seed_peer.validate().is_err());
     }
 
     #[test]
@@ -2015,6 +2055,8 @@ key: /etc/ssl/private/client.pem
 
         assert!(seed_peer.enable);
         assert_eq!(seed_peer.kind, HostType::Super);
+        assert_eq!(seed_peer.cluster_id, 2);
+        assert_eq!(seed_peer.keepalive_interval, Duration::from_secs(60));
     }
 
     #[test]
@@ -23,6 +23,7 @@ tokio.workspace = true
 anyhow.workspace = true
 tracing.workspace = true
 toml_edit.workspace = true
+toml.workspace = true
 url.workspace = true
 tempfile.workspace = true
 serde_json.workspace = true
@@ -22,8 +22,10 @@ tracing.workspace = true
 prost-wkt-types.workspace = true
 tokio.workspace = true
 tokio-util.workspace = true
+sha2.workspace = true
 crc32fast.workspace = true
 fs2.workspace = true
+lru.workspace = true
 bytes.workspace = true
 bytesize.workspace = true
 num_cpus = "1.17"
@@ -76,33 +76,31 @@ impl Task {
 /// Cache is the cache for storing piece content by LRU algorithm.
 ///
 /// Cache storage:
-/// 1. Users can preheat task by caching to memory (via CacheTask) or to disk (via Task).
-///    For more details, refer to https://github.com/dragonflyoss/api/blob/main/proto/dfdaemon.proto#L174.
+/// 1. Users can create preheating jobs and preheat tasks to memory and disk by setting `load_to_cache` to `true`.
+///    For more details, refer to https://github.com/dragonflyoss/api/blob/main/proto/common.proto#L443.
 /// 2. If the download hits the memory cache, it will be faster than reading from the disk, because there is no
 ///    page cache for the first read.
 ///
-///```text
-/// +--------+
-/// │ Source │
-/// +--------+
-/// ^ ^ Preheat
-/// │ │ |
-/// +-----------------+ │ │ +----------------------------+
-/// │ Other Peers │ │ │ │ Peer | │
-/// │ │ │ │ │ v │
-/// │ +----------+ │ │ │ │ +----------+ │
-/// │ │ Cache |<--|----------|<-Miss--| Cache |--Hit-->|<----Download CacheTask
-/// │ +----------+ │ │ │ +----------+ │
-/// │ │ │ │ │
-/// │ +----------+ │ │ │ +----------+ │
-/// │ │ Disk |<--|----------|<-Miss--| Disk |--Hit-->|<----Download Task
-/// │ +----------+ │ │ +----------+ │
-/// │ │ │ ^ │
-/// │ │ │ | │
-/// +-----------------+ +----------------------------+
-/// |
-/// Preheat
-///```
+/// ```text
+/// 1.Preheat
+/// |
+/// |
+/// +--------------------------------------------------+
+/// | | Peer |
+/// | | +-----------+ |
+/// | | -- Partial -->| Cache | |
+/// | | | +-----------+ |
+/// | v | | | |
+/// | Download | Miss | |
+/// | Task -->| | --- Hit ------>|<-- 2.Download
+/// | | | ^ |
+/// | | v | |
+/// | | +-----------+ | |
+/// | -- Full -->| Disk |---------- |
+/// | +-----------+ |
+/// | |
+/// +--------------------------------------------------+
+/// ```
 /// Task is the metadata of the task.
 #[derive(Clone)]
 pub struct Cache {
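This doc comment (the same diagram also annotates `Storage::cache_capacity` earlier in the diff) describes an LRU memory cache consulted before disk. A minimal sketch of the eviction behavior using the `lru` crate that v1.0.7 adds to `dragonfly-client-storage`; note the real cache budgets by bytes rather than entry count as here.

```rust
use lru::LruCache;
use std::num::NonZeroUsize;

fn main() {
    // Capacity counts entries in this sketch; the real cache tracks byte sizes.
    let mut cache: LruCache<String, bytes::Bytes> =
        LruCache::new(NonZeroUsize::new(2).unwrap());

    cache.put("piece-0".to_string(), bytes::Bytes::from_static(b"aaaa"));
    cache.put("piece-1".to_string(), bytes::Bytes::from_static(b"bbbb"));

    // Hitting piece-0 marks it most recently used...
    assert!(cache.get("piece-0").is_some());

    // ...so a third insert evicts piece-1, the least recently used entry.
    cache.put("piece-2".to_string(), bytes::Bytes::from_static(b"cccc"));
    assert!(cache.get("piece-1").is_none());
}
```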
@@ -27,6 +27,7 @@ use std::time::Duration;
 use tokio::io::AsyncRead;
 use tokio::time::sleep;
 use tokio_util::either::Either;
+use tokio_util::io::InspectReader;
 use tracing::{debug, error, info, instrument, warn};
 
 pub mod cache;
@@ -116,8 +117,14 @@ impl Storage {
         piece_length: u64,
         content_length: u64,
         response_header: Option<HeaderMap>,
+        load_to_cache: bool,
     ) -> Result<metadata::Task> {
         self.content.create_task(id, content_length).await?;
+        if load_to_cache {
+            let mut cache = self.cache.clone();
+            cache.put_task(id, content_length).await;
+            debug!("put task to cache: {}", id);
+        }
 
         self.metadata.download_task_started(
             id,
@@ -415,10 +422,11 @@ impl Storage {
         offset: u64,
         length: u64,
         reader: &mut R,
+        load_to_cache: bool,
         timeout: Duration,
     ) -> Result<metadata::Piece> {
         tokio::select! {
-            piece = self.handle_downloaded_from_source_finished(piece_id, task_id, offset, length, reader) => {
+            piece = self.handle_downloaded_from_source_finished(piece_id, task_id, offset, length, reader, load_to_cache) => {
                 piece
             }
             _ = sleep(timeout) => {
@@ -436,11 +444,30 @@ impl Storage {
         offset: u64,
         length: u64,
         reader: &mut R,
+        load_to_cache: bool,
     ) -> Result<metadata::Piece> {
-        let response = self
-            .content
-            .write_piece(task_id, offset, length, reader)
-            .await?;
+        let response = if load_to_cache {
+            let mut buffer = Vec::with_capacity(length as usize);
+            let mut tee = InspectReader::new(reader, |bytes| {
+                buffer.extend_from_slice(bytes);
+            });
+
+            let response = self
+                .content
+                .write_piece(task_id, offset, length, &mut tee)
+                .await?;
+
+            self.cache
+                .write_piece(task_id, piece_id, bytes::Bytes::from(buffer))
+                .await?;
+            debug!("put piece to cache: {}", piece_id);
+
+            response
+        } else {
+            self.content
+                .write_piece(task_id, offset, length, reader)
+                .await?
+        };
 
         let digest = Digest::new(Algorithm::Crc32, response.hash);
         self.metadata.download_piece_finished(
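The hunk above tees each piece through `tokio_util::io::InspectReader`: a closure observes every chunk on its way to disk, filling an in-memory copy for the cache in the same pass. A minimal runnable sketch of that pattern, with `tokio::io::copy` standing in for `content.write_piece`.

```rust
use tokio_util::io::InspectReader;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let source: &[u8] = b"piece content";
    let mut buffer = Vec::with_capacity(source.len());

    // Every chunk read through `tee` is also appended to `buffer`.
    let mut tee = InspectReader::new(source, |bytes: &[u8]| {
        buffer.extend_from_slice(bytes);
    });

    // Stand-in for the disk write that consumes the reader exactly once.
    let mut written = Vec::new();
    tokio::io::copy(&mut tee, &mut written).await?;
    drop(tee); // release the closure's borrow of `buffer`

    assert_eq!(written, buffer); // same bytes reached "disk" and the cache copy
    Ok(())
}
```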
@@ -464,10 +491,11 @@ impl Storage {
         expected_digest: &str,
         parent_id: &str,
         reader: &mut R,
+        load_to_cache: bool,
         timeout: Duration,
     ) -> Result<metadata::Piece> {
         tokio::select! {
-            piece = self.handle_downloaded_piece_from_parent_finished(piece_id, task_id, offset, length, expected_digest, parent_id, reader) => {
+            piece = self.handle_downloaded_piece_from_parent_finished(piece_id, task_id, offset, length, expected_digest, parent_id, reader, load_to_cache) => {
                 piece
             }
             _ = sleep(timeout) => {
@@ -488,11 +516,30 @@ impl Storage {
         expected_digest: &str,
         parent_id: &str,
         reader: &mut R,
+        load_to_cache: bool,
     ) -> Result<metadata::Piece> {
-        let response = self
-            .content
-            .write_piece(task_id, offset, length, reader)
-            .await?;
+        let response = if load_to_cache {
+            let mut buffer = Vec::with_capacity(length as usize);
+            let mut tee = InspectReader::new(reader, |bytes| {
+                buffer.extend_from_slice(bytes);
+            });
+
+            let response = self
+                .content
+                .write_piece(task_id, offset, length, &mut tee)
+                .await?;
+
+            self.cache
+                .write_piece(task_id, piece_id, bytes::Bytes::from(buffer))
+                .await?;
+            debug!("put piece to cache: {}", piece_id);
+
+            response
+        } else {
+            self.content
+                .write_piece(task_id, offset, length, reader)
+                .await?
+        };
 
         let length = response.length;
         let digest = Digest::new(Algorithm::Crc32, response.hash);
@@ -13,6 +13,7 @@ edition.workspace = true
 dragonfly-client-core.workspace = true
 dragonfly-api.workspace = true
 reqwest.workspace = true
+hyper.workspace = true
 http-range-header.workspace = true
 http.workspace = true
 tracing.workspace = true
@@ -23,10 +24,9 @@ rustls-pki-types.workspace = true
 rustls-pemfile.workspace = true
 sha2.workspace = true
 uuid.workspace = true
-sysinfo.workspace = true
 hex.workspace = true
-crc32fast.workspace = true
 openssl.workspace = true
+crc32fast.workspace = true
 lazy_static.workspace = true
 bytesize.workspace = true
 lru.workspace = true
@@ -14,15 +14,48 @@
  * limitations under the License.
  */
 
-use bytesize::ByteSize;
+use bytesize::{ByteSize, MB};
 use pnet::datalink::{self, NetworkInterface};
 use std::cmp::min;
 use std::net::IpAddr;
-use std::sync::Arc;
-use std::time::Duration;
-use sysinfo::Networks;
-use tokio::sync::Mutex;
-use tracing::{info, warn};
+#[cfg(not(target_os = "linux"))]
+use tracing::warn;
+
+/// get_interface_by_ip returns the name of the network interface that has the specified IP
+/// address.
+pub fn get_interface_by_ip(ip: IpAddr) -> Option<NetworkInterface> {
+    for interface in datalink::interfaces() {
+        for ip_network in interface.ips.iter() {
+            if ip_network.ip() == ip {
+                return Some(interface);
+            }
+        }
+    }
+
+    None
+}
+
+/// get_interface_speed_by_ip returns the speed of the network interface that has the specified IP
+/// address in Mbps.
+pub fn get_interface_speed(interface_name: &str) -> Option<u64> {
+    #[cfg(target_os = "linux")]
+    {
+        let speed_path = format!("/sys/class/net/{}/speed", interface_name);
+        std::fs::read_to_string(&speed_path)
+            .ok()
+            .and_then(|speed_str| speed_str.trim().parse::<u64>().ok())
+    }
+
+    #[cfg(not(target_os = "linux"))]
+    {
+        warn!(
+            "can not get interface {} speed on non-linux platform",
+            interface_name
+        );
+        None
+    }
+}
 
 /// Interface represents a network interface with its information.
 #[derive(Debug, Clone, Default)]
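`get_interface_info` (in the next hunk) combines these helpers: it converts the configured rate limit to Mbps and caps it with the link speed read from `/sys/class/net/<name>/speed`. A minimal sketch of just that arithmetic, using the same `bytesize::MB` conversion as the diff.

```rust
use bytesize::{ByteSize, MB};

// Effective bandwidth in Mbps: the rate limit (bytes/s) converted to megabits,
// capped by the link speed when one is available.
fn effective_bandwidth_mbps(rate_limit: ByteSize, link_speed_mbps: Option<u64>) -> u64 {
    let rate_limit_mbps = rate_limit.as_u64() / MB * 8; // same conversion as the diff
    match link_speed_mbps {
        Some(speed) => speed.min(rate_limit_mbps),
        None => rate_limit_mbps,
    }
}

fn main() {
    // A 1250 MB/s (10 Gbps) limit on a 1 Gbps NIC is capped at 1000 Mbps.
    assert_eq!(effective_bandwidth_mbps(ByteSize::mb(1250), Some(1_000)), 1_000);

    // Without a readable link speed, the rate limit alone is used.
    assert_eq!(effective_bandwidth_mbps(ByteSize::mb(1250), None), 10_000);
}
```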
@@ -30,201 +63,23 @@ pub struct Interface {
     /// name is the name of the network interface.
     pub name: String,
 
-    /// bandwidth is the bandwidth of the network interface in bps.
+    // bandwidth is the bandwidth of the network interface in Mbps.
     pub bandwidth: u64,
-
-    // network_data_mutex is a mutex to protect access to network data.
-    network_data_mutex: Arc<Mutex<()>>,
 }
 
+/// get_interface_info returns the network interface information for the specified IP address.
+pub fn get_interface_info(ip: IpAddr, rate_limit: ByteSize) -> Option<Interface> {
+    let rate_limit = rate_limit.as_u64() / MB * 8; // convert to Mbps
+
+    let interface = get_interface_by_ip(ip)?;
+    match get_interface_speed(&interface.name) {
+        Some(speed) => Some(Interface {
+            name: interface.name,
+            bandwidth: min(speed, rate_limit),
+        }),
+        None => Some(Interface {
+            name: interface.name,
+            bandwidth: rate_limit,
+        }),
+    }
+}
-/// NetworkData represents the network data for a specific interface,
-#[derive(Debug, Clone, Default)]
-pub struct NetworkData {
-    /// max_rx_bandwidth is the maximum receive bandwidth of the interface in bps.
-    pub max_rx_bandwidth: u64,
-
-    /// rx_bandwidth is the current receive bandwidth of the interface in bps.
-    pub rx_bandwidth: Option<u64>,
-
-    /// max_tx_bandwidth is the maximum transmit bandwidth of the interface in bps.
-    pub max_tx_bandwidth: u64,
-
-    /// tx_bandwidth is the current transmit bandwidth of the interface in bps.
-    pub tx_bandwidth: Option<u64>,
-}
-
-/// Interface methods provide functionality to get network interface information.
-impl Interface {
-    /// DEFAULT_NETWORKS_REFRESH_INTERVAL is the default interval for refreshing network data.
-    const DEFAULT_NETWORKS_REFRESH_INTERVAL: Duration = Duration::from_secs(2);
-
-    /// new creates a new Interface instance based on the provided IP address and rate limit.
-    pub fn new(ip: IpAddr, rate_limit: ByteSize) -> Interface {
-        let rate_limit = Self::byte_size_to_bits(rate_limit); // convert to bps
-        let Some(interface) = Self::get_network_interface_by_ip(ip) else {
-            warn!(
-                "can not find interface for IP address {}, network interface unknown with bandwidth {} bps",
-                ip, rate_limit
-            );
-            return Interface {
-                name: "unknown".to_string(),
-                bandwidth: rate_limit,
-                network_data_mutex: Arc::new(Mutex::new(())),
-            };
-        };
-
-        match Self::get_speed(&interface.name) {
-            Some(speed) => {
-                let bandwidth = min(Self::megabits_to_bits(speed), rate_limit);
-                info!(
-                    "network interface {} with bandwidth {} bps",
-                    interface.name, bandwidth
-                );
-
-                Interface {
-                    name: interface.name,
-                    bandwidth,
-                    network_data_mutex: Arc::new(Mutex::new(())),
-                }
-            }
-            None => {
-                warn!(
-                    "can not get speed, network interface {} with bandwidth {} bps",
-                    interface.name, rate_limit
-                );
-
-                Interface {
-                    name: interface.name,
-                    bandwidth: rate_limit,
-                    network_data_mutex: Arc::new(Mutex::new(())),
-                }
-            }
-        }
-    }
-
-    /// get_network_data retrieves the network data for the interface.
-    pub async fn get_network_data(&self) -> NetworkData {
-        // Lock the mutex to ensure exclusive access to network data.
-        let _guard = self.network_data_mutex.lock().await;
-
-        // Initialize sysinfo network.
-        let mut networks = Networks::new_with_refreshed_list();
-
-        // Sleep to calculate the network traffic difference over
-        // the DEFAULT_NETWORKS_REFRESH_INTERVAL.
-        tokio::time::sleep(Self::DEFAULT_NETWORKS_REFRESH_INTERVAL).await;
-
-        // Refresh network information.
-        networks.refresh();
-        let Some(network_data) = networks.get(self.name.as_str()) else {
-            warn!("can not find network data for interface {}", self.name);
-            return NetworkData {
-                max_rx_bandwidth: self.bandwidth,
-                max_tx_bandwidth: self.bandwidth,
-                ..Default::default()
-            };
-        };
-
-        // Calculate the receive and transmit bandwidth in bits per second.
-        let rx_bandwidth = (Self::bytes_to_bits(network_data.received()) as f64
-            / Self::DEFAULT_NETWORKS_REFRESH_INTERVAL.as_secs_f64())
-        .round() as u64;
-
-        // Calculate the transmit bandwidth in bits per second.
-        let tx_bandwidth = (Self::bytes_to_bits(network_data.transmitted()) as f64
-            / Self::DEFAULT_NETWORKS_REFRESH_INTERVAL.as_secs_f64())
-        .round() as u64;
-
-        NetworkData {
-            max_rx_bandwidth: self.bandwidth,
-            rx_bandwidth: Some(rx_bandwidth),
-            max_tx_bandwidth: self.bandwidth,
-            tx_bandwidth: Some(tx_bandwidth),
-        }
-    }
-
-    /// get_speed returns the speed of the network interface in Mbps.
-    pub fn get_speed(name: &str) -> Option<u64> {
-        #[cfg(target_os = "linux")]
-        {
-            let speed_path = format!("/sys/class/net/{}/speed", name);
-            std::fs::read_to_string(&speed_path)
-                .ok()
-                .and_then(|speed_str| speed_str.trim().parse::<u64>().ok())
-        }
-
-        #[cfg(not(target_os = "linux"))]
-        {
-            warn!("can not get interface {} speed on non-linux platform", name);
-            None
-        }
-    }
-
-    /// get_network_interface_by_ip returns the network interface that has the specified
|
|
||||||
/// IP address.
|
|
||||||
pub fn get_network_interface_by_ip(ip: IpAddr) -> Option<NetworkInterface> {
|
|
||||||
datalink::interfaces()
|
|
||||||
.into_iter()
|
|
||||||
.find(|interface| interface.ips.iter().any(|ip_net| ip_net.ip() == ip))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// byte_size_to_bits converts a ByteSize to bits.
|
|
||||||
pub fn byte_size_to_bits(size: ByteSize) -> u64 {
|
|
||||||
size.as_u64() * 8
|
|
||||||
}
|
|
||||||
|
|
||||||
/// megabits_to_bit converts megabits to bits.
|
|
||||||
pub fn megabits_to_bits(size: u64) -> u64 {
|
|
||||||
size * 1_000_000 // 1 Mbit = 1,000,000 bits
|
|
||||||
}
|
|
||||||
|
|
||||||
/// bytes_to_bits converts bytes to bits.
|
|
||||||
pub fn bytes_to_bits(size: u64) -> u64 {
|
|
||||||
size * 8 // 1 byte = 8 bits
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
use bytesize::ByteSize;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_byte_size_to_bits() {
|
|
||||||
let test_cases = vec![
|
|
||||||
(ByteSize::kb(1), 8_000u64),
|
|
||||||
(ByteSize::mb(1), 8_000_000u64),
|
|
||||||
(ByteSize::gb(1), 8_000_000_000u64),
|
|
||||||
(ByteSize::b(0), 0u64),
|
|
||||||
];
|
|
||||||
|
|
||||||
for (input, expected) in test_cases {
|
|
||||||
let result = Interface::byte_size_to_bits(input);
|
|
||||||
assert_eq!(result, expected);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_megabits_to_bits() {
|
|
||||||
let test_cases = vec![
|
|
||||||
(1u64, 1_000_000u64),
|
|
||||||
(1000u64, 1_000_000_000u64),
|
|
||||||
(0u64, 0u64),
|
|
||||||
];
|
|
||||||
|
|
||||||
for (input, expected) in test_cases {
|
|
||||||
let result = Interface::megabits_to_bits(input);
|
|
||||||
assert_eq!(result, expected);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_bytes_to_bits() {
|
|
||||||
let test_cases = vec![(1u64, 8u64), (1000u64, 8_000u64), (0u64, 0u64)];
|
|
||||||
|
|
||||||
for (input, expected) in test_cases {
|
|
||||||
let result = Interface::bytes_to_bits(input);
|
|
||||||
assert_eq!(result, expected);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
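
The two sides size bandwidth differently: main converts everything to bits per second and samples traffic through sysinfo, while v1.0.7 works in Mbps and simply clamps the advertised link speed with the configured rate limit. A minimal sketch of that clamping, assuming the bytesize crate; rate_limit_to_mbps and effective_bandwidth are hypothetical helper names, not functions from the diff:

    use bytesize::ByteSize;
    use std::cmp::min;

    const MB: u64 = 1_000_000;

    // Mirror of the diff's conversion: a ByteSize rate limit (bytes per
    // second) becomes a bandwidth cap in Mbps.
    fn rate_limit_to_mbps(rate_limit: ByteSize) -> u64 {
        rate_limit.as_u64() / MB * 8
    }

    // The advertised link speed (if readable) still obeys the configured cap;
    // without a speed, e.g. on non-linux platforms, fall back to the cap.
    fn effective_bandwidth(link_speed_mbps: Option<u64>, rate_limit: ByteSize) -> u64 {
        let cap = rate_limit_to_mbps(rate_limit);
        match link_speed_mbps {
            Some(speed) => min(speed, cap),
            None => cap,
        }
    }

    fn main() {
        // 125 MB/s is 1000 Mbps, so a 10 Gbps NIC is clamped to the cap.
        assert_eq!(effective_bandwidth(Some(10_000), ByteSize::mb(125)), 1_000);
        assert_eq!(effective_bandwidth(None, ByteSize::mb(125)), 1_000);
    }
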
@@ -34,6 +34,8 @@ hyper.workspace = true
 hyper-util.workspace = true
 hyper-rustls.workspace = true
 tracing.workspace = true
+validator.workspace = true
+humantime.workspace = true
 serde.workspace = true
 chrono.workspace = true
 prost-wkt-types.workspace = true
@@ -53,16 +55,15 @@ clap.workspace = true
 anyhow.workspace = true
 bytes.workspace = true
 bytesize.workspace = true
-humantime.workspace = true
 uuid.workspace = true
 percent-encoding.workspace = true
 tokio-rustls.workspace = true
 serde_json.workspace = true
+lru.workspace = true
 fs2.workspace = true
 lazy_static.workspace = true
 futures.workspace = true
 local-ip-address.workspace = true
-sysinfo.workspace = true
 tracing-appender = "0.2.3"
 tracing-subscriber = { version = "0.3", features = ["env-filter", "time", "chrono"] }
 tracing-panic = "0.1.2"
@@ -75,9 +76,11 @@ rolling-file = "0.2.0"
 pprof = { version = "0.15", features = ["flamegraph", "protobuf-codec"] }
 prometheus = { version = "0.13", features = ["process"] }
 tonic-health = "0.12.3"
+sysinfo = { version = "0.32.1", default-features = false, features = ["component", "disk", "network", "system", "user"] }
 tower = { version = "0.4.13", features = ["limit", "load-shed", "buffer"] }
 indicatif = "0.18.0"
 hashring = "0.3.6"
+fslock = "0.2.1"
 leaky-bucket = "1.1.2"
 http-body-util = "0.1.3"
 termion = "4.0.5"
@@ -85,8 +88,7 @@ tabled = "0.20.0"
 path-absolutize = "3.1.1"
 dashmap = "6.1.0"
 fastrand = "2.3.0"
-glob = "0.3.3"
-console-subscriber = "0.4.1"
+glob = "0.3.2"

 [dev-dependencies]
 tempfile.workspace = true

@@ -14,9 +14,10 @@
  * limitations under the License.
  */

-use crate::grpc::scheduler::SchedulerClient;
+use crate::grpc::{manager::ManagerClient, scheduler::SchedulerClient};
 use crate::shutdown;
 use dragonfly_api::common::v2::{Build, Cpu, Disk, Host, Memory, Network};
+use dragonfly_api::manager::v2::{DeleteSeedPeerRequest, SourceType, UpdateSeedPeerRequest};
 use dragonfly_api::scheduler::v2::{AnnounceHostRequest, DeleteHostRequest};
 use dragonfly_client_config::{
     dfdaemon::{Config, HostType},
@@ -24,13 +25,89 @@ use dragonfly_client_config::{
 };
 use dragonfly_client_core::error::{ErrorType, OrErr};
 use dragonfly_client_core::Result;
-use dragonfly_client_util::net::Interface;
 use std::env;
 use std::sync::Arc;
 use std::time::Duration;
 use sysinfo::System;
 use tokio::sync::mpsc;
-use tracing::{debug, error, info, instrument};
+use tracing::{error, info, instrument};

+/// ManagerAnnouncer is used to announce the dfdaemon information to the manager.
+pub struct ManagerAnnouncer {
+    /// config is the configuration of the dfdaemon.
+    config: Arc<Config>,
+
+    /// manager_client is the grpc client of the manager.
+    manager_client: Arc<ManagerClient>,
+
+    /// shutdown is used to shutdown the announcer.
+    shutdown: shutdown::Shutdown,
+
+    /// _shutdown_complete is used to notify the announcer is shutdown.
+    _shutdown_complete: mpsc::UnboundedSender<()>,
+}
+
+/// ManagerAnnouncer implements the manager announcer of the dfdaemon.
+impl ManagerAnnouncer {
+    /// new creates a new manager announcer.
+    pub fn new(
+        config: Arc<Config>,
+        manager_client: Arc<ManagerClient>,
+        shutdown: shutdown::Shutdown,
+        shutdown_complete_tx: mpsc::UnboundedSender<()>,
+    ) -> Self {
+        Self {
+            config,
+            manager_client,
+            shutdown,
+            _shutdown_complete: shutdown_complete_tx,
+        }
+    }
+
+    /// run announces the dfdaemon information to the manager.
+    pub async fn run(&self) -> Result<()> {
+        // Clone the shutdown channel.
+        let mut shutdown = self.shutdown.clone();
+
+        // If the seed peer is enabled, we should announce the seed peer to the manager.
+        if self.config.seed_peer.enable {
+            // Register the seed peer to the manager.
+            self.manager_client
+                .update_seed_peer(UpdateSeedPeerRequest {
+                    source_type: SourceType::SeedPeerSource.into(),
+                    hostname: self.config.host.hostname.clone(),
+                    r#type: self.config.seed_peer.kind.to_string(),
+                    idc: self.config.host.idc.clone(),
+                    location: self.config.host.location.clone(),
+                    ip: self.config.host.ip.unwrap().to_string(),
+                    port: self.config.upload.server.port as i32,
+                    download_port: self.config.upload.server.port as i32,
+                    seed_peer_cluster_id: self.config.seed_peer.cluster_id,
+                })
+                .await?;
+
+            // Announce to scheduler shutting down with signals.
+            shutdown.recv().await;
+
+            // Delete the seed peer from the manager.
+            self.manager_client
+                .delete_seed_peer(DeleteSeedPeerRequest {
+                    source_type: SourceType::SeedPeerSource.into(),
+                    hostname: self.config.host.hostname.clone(),
+                    ip: self.config.host.ip.unwrap().to_string(),
+                    seed_peer_cluster_id: self.config.seed_peer.cluster_id,
+                })
+                .await?;
+
+            info!("announce to manager shutting down");
+        } else {
+            shutdown.recv().await;
+            info!("announce to manager shutting down");
+        }
+
+        Ok(())
+    }
+}
+
 /// Announcer is used to announce the dfdaemon information to the manager and scheduler.
 pub struct SchedulerAnnouncer {
@@ -43,9 +120,6 @@ pub struct SchedulerAnnouncer {
     /// scheduler_client is the grpc client of the scheduler.
     scheduler_client: Arc<SchedulerClient>,

-    /// interface is the network interface.
-    interface: Arc<Interface>,
-
     /// shutdown is used to shutdown the announcer.
     shutdown: shutdown::Shutdown,

@@ -60,7 +134,6 @@ impl SchedulerAnnouncer {
         config: Arc<Config>,
         host_id: String,
         scheduler_client: Arc<SchedulerClient>,
-        interface: Arc<Interface>,
         shutdown: shutdown::Shutdown,
         shutdown_complete_tx: mpsc::UnboundedSender<()>,
     ) -> Result<Self> {
@@ -68,7 +141,6 @@ impl SchedulerAnnouncer {
             config,
             host_id,
             scheduler_client,
-            interface,
             shutdown,
             _shutdown_complete: shutdown_complete_tx,
         };
@@ -76,7 +148,7 @@ impl SchedulerAnnouncer {
         // Initialize the scheduler announcer.
         announcer
             .scheduler_client
-            .init_announce_host(announcer.make_announce_host_request(Duration::ZERO).await?)
+            .init_announce_host(announcer.make_announce_host_request(Duration::ZERO)?)
             .await?;
         Ok(announcer)
     }
@@ -91,7 +163,7 @@ impl SchedulerAnnouncer {
         loop {
             tokio::select! {
                 _ = interval.tick() => {
-                    let request = match self.make_announce_host_request(interval.period()).await {
+                    let request = match self.make_announce_host_request(interval.period()) {
                         Ok(request) => request,
                         Err(err) => {
                             error!("make announce host request failed: {}", err);
@@ -120,7 +192,7 @@ impl SchedulerAnnouncer {

     /// make_announce_host_request makes the announce host request.
     #[instrument(skip_all)]
-    async fn make_announce_host_request(&self, interval: Duration) -> Result<AnnounceHostRequest> {
+    fn make_announce_host_request(&self, interval: Duration) -> Result<AnnounceHostRequest> {
         // If the seed peer is enabled, we should announce the seed peer to the scheduler.
         let host_type = if self.config.seed_peer.enable {
             self.config.seed_peer.kind
@@ -156,25 +228,25 @@ impl SchedulerAnnouncer {
             free: sys.free_memory(),
         };

-        // Wait for getting the network data.
-        let network_data = self.interface.get_network_data().await;
-        debug!(
-            "network data: rx bandwidth {}/{} bps, tx bandwidth {}/{} bps",
-            network_data.rx_bandwidth.unwrap_or(0),
-            network_data.max_rx_bandwidth,
-            network_data.tx_bandwidth.unwrap_or(0),
-            network_data.max_tx_bandwidth
-        );
-
         // Get the network information.
         let network = Network {
+            // TODO: Get the count of the tcp connection.
+            tcp_connection_count: 0,
+
+            // TODO: Get the count of the upload tcp connection.
+            upload_tcp_connection_count: 0,
             idc: self.config.host.idc.clone(),
             location: self.config.host.location.clone(),
-            max_rx_bandwidth: network_data.max_rx_bandwidth,
-            rx_bandwidth: network_data.rx_bandwidth,
-            max_tx_bandwidth: network_data.max_tx_bandwidth,
-            tx_bandwidth: network_data.tx_bandwidth,
-            ..Default::default()
+            // TODO: Get the network download rate, refer to
+            // https://docs.rs/sysinfo/latest/sysinfo/struct.NetworkData.html#method.received.
+            download_rate: 0,
+            download_rate_limit: self.config.download.rate_limit.as_u64(),
+
+            // TODO: Get the network upload rate, refer to
+            // https://docs.rs/sysinfo/latest/sysinfo/struct.NetworkData.html#method.transmitted
+            upload_rate: 0,
+            upload_rate_limit: self.config.upload.rate_limit.as_u64(),
         };

         // Get the disk information.
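
The restored ManagerAnnouncer follows a register-then-withdraw lifecycle: announce the seed peer on startup, block on the shutdown channel, then delete the registration on the way out. A runnable sketch of that shape using a tokio broadcast channel; register and deregister are hypothetical stand-ins for the UpdateSeedPeerRequest and DeleteSeedPeerRequest calls:

    use tokio::sync::broadcast;

    async fn register() {
        println!("registered seed peer with manager");
    }

    async fn deregister() {
        println!("deleted seed peer from manager");
    }

    // Register on startup, hold until a shutdown signal arrives, then withdraw.
    async fn announce_lifecycle(mut shutdown: broadcast::Receiver<()>) {
        register().await;
        let _ = shutdown.recv().await;
        deregister().await;
    }

    #[tokio::main]
    async fn main() {
        let (tx, rx) = broadcast::channel(1);
        let announcer = tokio::spawn(announce_lifecycle(rx));
        tx.send(()).unwrap();
        announcer.await.unwrap();
    }
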
@@ -15,7 +15,7 @@
  */

 use clap::Parser;
-use dragonfly_client::announcer::SchedulerAnnouncer;
+use dragonfly_client::announcer::{ManagerAnnouncer, SchedulerAnnouncer};
 use dragonfly_client::dynconfig::Dynconfig;
 use dragonfly_client::gc::GC;
 use dragonfly_client::grpc::{
@@ -32,7 +32,7 @@ use dragonfly_client::tracing::init_tracing;
 use dragonfly_client_backend::BackendFactory;
 use dragonfly_client_config::{dfdaemon, VersionValueParser};
 use dragonfly_client_storage::Storage;
-use dragonfly_client_util::{id_generator::IDGenerator, net::Interface};
+use dragonfly_client_util::id_generator::IDGenerator;
 use std::net::SocketAddr;
 use std::path::PathBuf;
 use std::sync::Arc;
@@ -229,9 +229,6 @@ async fn main() -> Result<(), anyhow::Error> {
     )?;
     let persistent_cache_task = Arc::new(persistent_cache_task);

-    let interface = Interface::new(config.host.ip.unwrap(), config.upload.rate_limit);
-    let interface = Arc::new(interface);
-
     // Initialize health server.
     let health = Health::new(
         SocketAddr::new(config.health.server.ip.unwrap(), config.health.server.port),
@@ -261,12 +258,19 @@ async fn main() -> Result<(), anyhow::Error> {
         shutdown_complete_tx.clone(),
     );

+    // Initialize manager announcer.
+    let manager_announcer = ManagerAnnouncer::new(
+        config.clone(),
+        manager_client.clone(),
+        shutdown.clone(),
+        shutdown_complete_tx.clone(),
+    );
+
     // Initialize scheduler announcer.
     let scheduler_announcer = SchedulerAnnouncer::new(
         config.clone(),
         id_generator.host_id(),
         scheduler_client.clone(),
-        interface.clone(),
         shutdown.clone(),
         shutdown_complete_tx.clone(),
     )
@@ -281,7 +285,6 @@ async fn main() -> Result<(), anyhow::Error> {
         SocketAddr::new(config.upload.server.ip.unwrap(), config.upload.server.port),
         task.clone(),
         persistent_cache_task.clone(),
-        interface.clone(),
         shutdown.clone(),
         shutdown_complete_tx.clone(),
     );
@@ -330,6 +333,10 @@ async fn main() -> Result<(), anyhow::Error> {
             info!("stats server exited");
         },

+        _ = tokio::spawn(async move { manager_announcer.run().await.unwrap_or_else(|err| error!("announcer manager failed: {}", err))} ) => {
+            info!("announcer manager exited");
+        },
+
         _ = tokio::spawn(async move { scheduler_announcer.run().await }) => {
             info!("announcer scheduler exited");
         },
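
Both announcers are supervised the same way in the main select loop: each runs as a spawned task, and whichever exits first unblocks the daemon so it can begin shutdown. A reduced sketch of that pattern; the two sleeps stand in for the announcer bodies:

    use tokio::time::{sleep, Duration};

    #[tokio::main]
    async fn main() {
        let manager_announcer = tokio::spawn(async {
            sleep(Duration::from_millis(10)).await;
        });
        let scheduler_announcer = tokio::spawn(async {
            sleep(Duration::from_millis(20)).await;
        });

        // Whichever task finishes first ends the select, mirroring the
        // daemon's "exit when any component exits" shutdown trigger.
        tokio::select! {
            _ = manager_announcer => println!("announcer manager exited"),
            _ = scheduler_announcer => println!("announcer scheduler exited"),
        }
    }
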
@@ -808,6 +808,7 @@ async fn download(
         need_piece_content,
         object_storage,
         hdfs,
+        load_to_cache: false,
         force_hard_link: args.force_hard_link,
         content_for_calculating_task_id: args.content_for_calculating_task_id,
         remote_ip: Some(local_ip().unwrap().to_string()),
@@ -1310,343 +1311,157 @@ mod tests {

     #[test]
     fn should_filter_entries() {
-        let test_cases = vec![
-            (
-                Url::parse("http://example.com/root/").unwrap(),
-                vec![
-                    DirEntry {
-                        url: "http://example.com/root/dir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file2.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file4.png".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                ],
-                vec!["dir/file.txt".to_string()],
-                vec![
-                    DirEntry {
-                        url: "http://example.com/root/dir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                ],
-            ),
-            (
-                Url::parse("http://example.com/root/").unwrap(),
-                vec![
-                    DirEntry {
-                        url: "http://example.com/root/dir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file2.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file4.png".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                ],
-                vec![
-                    "dir/file.txt".to_string(),
-                    "dir/subdir/file4.png".to_string(),
-                ],
-                vec![
-                    DirEntry {
-                        url: "http://example.com/root/dir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file4.png".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                ],
-            ),
-            (
-                Url::parse("http://example.com/root/").unwrap(),
-                vec![
-                    DirEntry {
-                        url: "http://example.com/root/dir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file2.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file4.png".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                ],
-                vec!["dir/subdir/*.png".to_string()],
-                vec![
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file4.png".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                ],
-            ),
-            (
-                Url::parse("http://example.com/root/").unwrap(),
-                vec![
-                    DirEntry {
-                        url: "http://example.com/root/dir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file2.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file4.png".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                ],
-                vec!["dir/*".to_string()],
-                vec![
-                    DirEntry {
-                        url: "http://example.com/root/dir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file2.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file4.png".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                ],
-            ),
-            (
-                Url::parse("http://example.com/root/").unwrap(),
-                vec![
-                    DirEntry {
-                        url: "http://example.com/root/dir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file2.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file4.png".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                ],
-                vec!["dir/".to_string()],
-                vec![DirEntry {
-                    url: "http://example.com/root/dir/".to_string(),
-                    content_length: 10,
-                    is_dir: true,
-                }],
-            ),
-            (
-                Url::parse("http://example.com/root/").unwrap(),
-                vec![
-                    DirEntry {
-                        url: "http://example.com/root/dir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/file2.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/".to_string(),
-                        content_length: 10,
-                        is_dir: true,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: "http://example.com/root/dir/subdir/file4.png".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                ],
-                vec!["test".to_string()],
-                vec![],
-            ),
-            (
-                Url::parse("http://example.com/root/").unwrap(),
-                vec![
-                    DirEntry {
-                        url: "http://example.com/root/dir/file.txt".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                    DirEntry {
-                        url: " ".to_string(),
-                        content_length: 100,
-                        is_dir: false,
-                    },
-                ],
-                vec!["dir/file.txt".to_string()],
-                vec![],
-            ),
+        let url = Url::parse("http://example.com/root/").unwrap();
+        let entries = vec![
+            DirEntry {
+                url: "http://example.com/root/dir/".to_string(),
+                content_length: 10,
+                is_dir: true,
+            },
+            DirEntry {
+                url: "http://example.com/root/dir/file.txt".to_string(),
+                content_length: 100,
+                is_dir: false,
+            },
+            DirEntry {
+                url: "http://example.com/root/dir/file2.txt".to_string(),
+                content_length: 100,
+                is_dir: false,
+            },
+            DirEntry {
+                url: "http://example.com/root/dir/subdir/".to_string(),
+                content_length: 10,
+                is_dir: true,
+            },
+            DirEntry {
+                url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
+                content_length: 100,
+                is_dir: false,
+            },
+            DirEntry {
+                url: "http://example.com/root/dir/subdir/file4.png".to_string(),
+                content_length: 100,
+                is_dir: false,
+            },
         ];

-        for (url, entries, include_files, expected_entries) in test_cases {
-            let result = filter_entries(&url, entries, &include_files);
-            if result.is_err() {
-                assert!(matches!(result, Err(Error::ValidationError(_))));
-            } else {
-                let filtered_entries = result.unwrap();
-                assert_eq!(filtered_entries.len(), expected_entries.len());
-
-                for filtered_entry in &filtered_entries {
-                    assert!(expected_entries
-                        .iter()
-                        .any(|expected_entry| { expected_entry.url == filtered_entry.url }));
-                }
-            }
-        }
+        let filtered_entries =
+            filter_entries(&url, entries, &["dir/file.txt".to_string()]).unwrap();
+        assert_eq!(filtered_entries.len(), 2);
+        assert!(filtered_entries
+            .iter()
+            .any(|entry| entry.url == "http://example.com/root/dir/file.txt"));
+        assert!(filtered_entries
+            .iter()
+            .any(|entry| entry.url == "http://example.com/root/dir/"));
+    }
+
+    #[test]
+    fn should_filter_entries_with_regex() {
+        let url = Url::parse("http://example.com/root/").unwrap();
+        let entries = vec![
+            DirEntry {
+                url: "http://example.com/root/dir/".to_string(),
+                content_length: 10,
+                is_dir: true,
+            },
+            DirEntry {
+                url: "http://example.com/root/dir/file.txt".to_string(),
+                content_length: 100,
+                is_dir: false,
+            },
+            DirEntry {
+                url: "http://example.com/root/dir/file2.txt".to_string(),
+                content_length: 100,
+                is_dir: false,
+            },
+            DirEntry {
+                url: "http://example.com/root/dir/subdir/".to_string(),
+                content_length: 10,
+                is_dir: true,
+            },
+            DirEntry {
+                url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
+                content_length: 100,
+                is_dir: false,
+            },
+            DirEntry {
+                url: "http://example.com/root/dir/subdir/file4.png".to_string(),
+                content_length: 100,
+                is_dir: false,
+            },
+        ];
+
+        let filtered_entries =
+            filter_entries(&url, entries, &["dir/subdir/*.txt".to_string()]).unwrap();
+        assert_eq!(filtered_entries.len(), 2);
+        assert!(filtered_entries
+            .iter()
+            .any(|entry| entry.url == "http://example.com/root/dir/subdir/file3.txt"));
+        assert!(filtered_entries
+            .iter()
+            .any(|entry| entry.url == "http://example.com/root/dir/subdir/"));
+    }
+
+    #[test]
+    fn should_filter_entries_with_dir_path() {
+        let url = Url::parse("http://example.com/root/").unwrap();
+        let entries = vec![
+            DirEntry {
+                url: "http://example.com/root/dir/".to_string(),
+                content_length: 10,
+                is_dir: true,
+            },
+            DirEntry {
+                url: "http://example.com/root/dir/file.txt".to_string(),
+                content_length: 100,
+                is_dir: false,
+            },
+            DirEntry {
+                url: "http://example.com/root/dir/file2.txt".to_string(),
+                content_length: 100,
+                is_dir: false,
+            },
+            DirEntry {
+                url: "http://example.com/root/dir/subdir/".to_string(),
+                content_length: 10,
+                is_dir: true,
+            },
+            DirEntry {
+                url: "http://example.com/root/dir/subdir/file3.txt".to_string(),
+                content_length: 100,
+                is_dir: false,
+            },
+            DirEntry {
+                url: "http://example.com/root/dir/subdir/file4.png".to_string(),
+                content_length: 100,
+                is_dir: false,
+            },
+        ];
+
+        let filtered_entries =
+            filter_entries(&url, entries, &["dir/subdir/*".to_string()]).unwrap();
+        println!("{:?}", filtered_entries);
+        assert_eq!(filtered_entries.len(), 4);
+    }
+
+    #[test]
+    fn should_filter_entries_with_empty() {
+        let url = Url::parse("http://example.com/root/").unwrap();
+        let entries = vec![
+            DirEntry {
+                url: "http://example.com/root/dir/file.txt".to_string(),
+                content_length: 100,
+                is_dir: false,
+            },
+            DirEntry {
+                url: " ".to_string(),
+                content_length: 100,
+                is_dir: false,
+            },
+        ];
+
+        let filtered_entries = filter_entries(&url, entries, &["dir/subdir/*".to_string()]);
+        assert!(matches!(filtered_entries, Err(Error::ValidationError(_))));
     }
 }
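
Both versions of the tests exercise the same contract: include_files patterns are matched against entry paths relative to the root URL, directories leading to a match are kept, and an entry whose URL cannot be parsed yields a ValidationError. A small sketch of the pattern-matching half, assuming the glob crate pinned in the Cargo.toml hunk; matches_any is a hypothetical helper, and the real filter_entries additionally retains the parent directories of matches:

    use glob::Pattern;

    fn matches_any(relative_path: &str, include_files: &[String]) -> bool {
        include_files.iter().any(|pattern| {
            // Invalid patterns simply fail to match rather than erroring out.
            Pattern::new(pattern)
                .map(|p| p.matches(relative_path))
                .unwrap_or(false)
        })
    }

    fn main() {
        let include = vec!["dir/subdir/*.png".to_string()];
        assert!(matches_any("dir/subdir/file4.png", &include));
        assert!(!matches_any("dir/subdir/file3.txt", &include));
        assert!(matches_any("dir/file.txt", &["dir/file.txt".to_string()]));
    }
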
@@ -25,16 +25,15 @@ use crate::metrics::{
 };
 use crate::resource::{persistent_cache_task, task};
 use crate::shutdown;
-use dragonfly_api::common::v2::{CacheTask, PersistentCacheTask, Priority, Task, TaskType};
+use dragonfly_api::common::v2::{PersistentCacheTask, Priority, Task, TaskType};
 use dragonfly_api::dfdaemon::v2::{
     dfdaemon_download_client::DfdaemonDownloadClient as DfdaemonDownloadGRPCClient,
     dfdaemon_download_server::{
         DfdaemonDownload, DfdaemonDownloadServer as DfdaemonDownloadGRPCServer,
     },
-    DeleteCacheTaskRequest, DeleteTaskRequest, DownloadCacheTaskRequest, DownloadCacheTaskResponse,
-    DownloadPersistentCacheTaskRequest, DownloadPersistentCacheTaskResponse, DownloadTaskRequest,
-    DownloadTaskResponse, Entry, ListTaskEntriesRequest, ListTaskEntriesResponse,
-    StatCacheTaskRequest as DfdaemonStatCacheTaskRequest, StatPersistentCacheTaskRequest,
+    DeleteTaskRequest, DownloadPersistentCacheTaskRequest, DownloadPersistentCacheTaskResponse,
+    DownloadTaskRequest, DownloadTaskResponse, Entry, ListTaskEntriesRequest,
+    ListTaskEntriesResponse, StatPersistentCacheTaskRequest,
     StatTaskRequest as DfdaemonStatTaskRequest, UploadPersistentCacheTaskRequest,
 };
 use dragonfly_api::errordetails::v2::Backend;
@@ -1298,39 +1297,6 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {

         Ok(Response::new(task))
     }
-
-    /// DownloadCacheTaskStream is the stream of the download cache task response.
-    type DownloadCacheTaskStream = ReceiverStream<Result<DownloadCacheTaskResponse, Status>>;
-
-    /// download_cache_task tells the dfdaemon to download the cache task.
-    #[instrument(
-        skip_all,
-        fields(host_id, task_id, peer_id, url, remote_ip, content_length)
-    )]
-    async fn download_cache_task(
-        &self,
-        _request: Request<DownloadCacheTaskRequest>,
-    ) -> Result<Response<Self::DownloadCacheTaskStream>, Status> {
-        todo!();
-    }
-
-    /// stat_cache_task gets the status of the cache task.
-    #[instrument(skip_all, fields(host_id, task_id, remote_pi, local_only))]
-    async fn stat_cache_task(
-        &self,
-        _request: Request<DfdaemonStatCacheTaskRequest>,
-    ) -> Result<Response<CacheTask>, Status> {
-        todo!();
-    }
-
-    /// delete_cache_task calls the dfdaemon to delete the cache task.
-    #[instrument(skip_all, fields(host_id, task_id, remote_ip))]
-    async fn delete_cache_task(
-        &self,
-        _request: Request<DeleteCacheTaskRequest>,
-    ) -> Result<Response<()>, Status> {
-        todo!();
-    }
 }

 /// DfdaemonDownloadClient is a wrapper of DfdaemonDownloadGRPCClient.
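
The removed stubs above all bottom out in todo!(), which panics the serving task if a client ever calls them. A non-panicking placeholder for an RPC that is intentionally unsupported would normally return a gRPC status instead; this is a general tonic idiom, not something the diff itself does:

    use tonic::Status;

    // Return an explicit UNIMPLEMENTED status instead of panicking via todo!().
    fn unimplemented_rpc<T>() -> Result<T, Status> {
        Err(Status::unimplemented("cache task API is not supported"))
    }

    fn main() {
        let result: Result<(), Status> = unimplemented_rpc();
        assert_eq!(result.unwrap_err().code(), tonic::Code::Unimplemented);
    }
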
@ -24,22 +24,20 @@ use crate::metrics::{
|
||||||
};
|
};
|
||||||
use crate::resource::{persistent_cache_task, task};
|
use crate::resource::{persistent_cache_task, task};
|
||||||
use crate::shutdown;
|
use crate::shutdown;
|
||||||
|
use bytesize::MB;
|
||||||
use dragonfly_api::common::v2::{
|
use dragonfly_api::common::v2::{
|
||||||
CacheTask, Host, Network, PersistentCacheTask, Piece, Priority, Task, TaskType,
|
Host, Network, PersistentCacheTask, Piece, Priority, Task, TaskType,
|
||||||
};
|
};
|
||||||
use dragonfly_api::dfdaemon::v2::{
|
use dragonfly_api::dfdaemon::v2::{
|
||||||
dfdaemon_upload_client::DfdaemonUploadClient as DfdaemonUploadGRPCClient,
|
dfdaemon_upload_client::DfdaemonUploadClient as DfdaemonUploadGRPCClient,
|
||||||
dfdaemon_upload_server::{DfdaemonUpload, DfdaemonUploadServer as DfdaemonUploadGRPCServer},
|
dfdaemon_upload_server::{DfdaemonUpload, DfdaemonUploadServer as DfdaemonUploadGRPCServer},
|
||||||
DeleteCacheTaskRequest, DeletePersistentCacheTaskRequest, DeleteTaskRequest,
|
DeletePersistentCacheTaskRequest, DeleteTaskRequest, DownloadPersistentCachePieceRequest,
|
||||||
DownloadCachePieceRequest, DownloadCachePieceResponse, DownloadCacheTaskRequest,
|
|
||||||
DownloadCacheTaskResponse, DownloadPersistentCachePieceRequest,
|
|
||||||
DownloadPersistentCachePieceResponse, DownloadPersistentCacheTaskRequest,
|
DownloadPersistentCachePieceResponse, DownloadPersistentCacheTaskRequest,
|
||||||
DownloadPersistentCacheTaskResponse, DownloadPieceRequest, DownloadPieceResponse,
|
DownloadPersistentCacheTaskResponse, DownloadPieceRequest, DownloadPieceResponse,
|
||||||
DownloadTaskRequest, DownloadTaskResponse, ExchangeIbVerbsQueuePairEndpointRequest,
|
DownloadTaskRequest, DownloadTaskResponse, ExchangeIbVerbsQueuePairEndpointRequest,
|
||||||
ExchangeIbVerbsQueuePairEndpointResponse, StatCacheTaskRequest, StatPersistentCacheTaskRequest,
|
ExchangeIbVerbsQueuePairEndpointResponse, StatPersistentCacheTaskRequest, StatTaskRequest,
|
||||||
StatTaskRequest, SyncCachePiecesRequest, SyncCachePiecesResponse, SyncHostRequest,
|
SyncHostRequest, SyncPersistentCachePiecesRequest, SyncPersistentCachePiecesResponse,
|
||||||
SyncPersistentCachePiecesRequest, SyncPersistentCachePiecesResponse, SyncPiecesRequest,
|
SyncPiecesRequest, SyncPiecesResponse, UpdatePersistentCacheTaskRequest,
|
||||||
SyncPiecesResponse, UpdatePersistentCacheTaskRequest,
|
|
||||||
};
|
};
|
||||||
use dragonfly_api::errordetails::v2::Backend;
|
use dragonfly_api::errordetails::v2::Backend;
|
||||||
use dragonfly_client_config::dfdaemon::Config;
|
use dragonfly_client_config::dfdaemon::Config;
|
||||||
|
@ -50,13 +48,14 @@ use dragonfly_client_core::{
|
||||||
use dragonfly_client_util::{
|
use dragonfly_client_util::{
|
||||||
http::{get_range, hashmap_to_headermap, headermap_to_hashmap},
|
http::{get_range, hashmap_to_headermap, headermap_to_hashmap},
|
||||||
id_generator::TaskIDParameter,
|
id_generator::TaskIDParameter,
|
||||||
net::Interface,
|
net::{get_interface_info, Interface},
|
||||||
};
|
};
|
||||||
use opentelemetry::Context;
|
use opentelemetry::Context;
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
|
use sysinfo::Networks;
|
||||||
use tokio::io::AsyncReadExt;
|
use tokio::io::AsyncReadExt;
|
||||||
use tokio::sync::mpsc;
|
use tokio::sync::mpsc;
|
||||||
use tokio::sync::mpsc::Sender;
|
use tokio::sync::mpsc::Sender;
|
||||||
|
@ -68,7 +67,7 @@ use tonic::{
|
||||||
Code, Request, Response, Status,
|
Code, Request, Response, Status,
|
||||||
};
|
};
|
||||||
use tower::ServiceBuilder;
|
use tower::ServiceBuilder;
|
||||||
use tracing::{debug, error, info, instrument, Instrument, Span};
|
use tracing::{error, info, instrument, Instrument, Span};
|
||||||
use tracing_opentelemetry::OpenTelemetrySpanExt;
|
use tracing_opentelemetry::OpenTelemetrySpanExt;
|
||||||
use url::Url;
|
use url::Url;
|
||||||
|
|
||||||
|
@ -88,9 +87,6 @@ pub struct DfdaemonUploadServer {
|
||||||
/// persistent_cache_task is the persistent cache task manager.
|
/// persistent_cache_task is the persistent cache task manager.
|
||||||
persistent_cache_task: Arc<persistent_cache_task::PersistentCacheTask>,
|
persistent_cache_task: Arc<persistent_cache_task::PersistentCacheTask>,
|
||||||
|
|
||||||
/// interface is the network interface.
|
|
||||||
interface: Arc<Interface>,
|
|
||||||
|
|
||||||
/// shutdown is used to shutdown the grpc server.
|
/// shutdown is used to shutdown the grpc server.
|
||||||
shutdown: shutdown::Shutdown,
|
shutdown: shutdown::Shutdown,
|
||||||
|
|
||||||
|
@ -106,7 +102,6 @@ impl DfdaemonUploadServer {
|
||||||
addr: SocketAddr,
|
addr: SocketAddr,
|
||||||
task: Arc<task::Task>,
|
task: Arc<task::Task>,
|
||||||
persistent_cache_task: Arc<persistent_cache_task::PersistentCacheTask>,
|
persistent_cache_task: Arc<persistent_cache_task::PersistentCacheTask>,
|
||||||
interface: Arc<Interface>,
|
|
||||||
shutdown: shutdown::Shutdown,
|
shutdown: shutdown::Shutdown,
|
||||||
shutdown_complete_tx: mpsc::UnboundedSender<()>,
|
shutdown_complete_tx: mpsc::UnboundedSender<()>,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
|
@ -114,7 +109,6 @@ impl DfdaemonUploadServer {
|
||||||
config,
|
config,
|
||||||
addr,
|
addr,
|
||||||
task,
|
task,
|
||||||
interface,
|
|
||||||
persistent_cache_task,
|
persistent_cache_task,
|
||||||
shutdown,
|
shutdown,
|
||||||
_shutdown_complete: shutdown_complete_tx,
|
_shutdown_complete: shutdown_complete_tx,
|
||||||
|
@ -123,12 +117,16 @@ impl DfdaemonUploadServer {
|
||||||
|
|
||||||
/// run starts the upload server.
|
/// run starts the upload server.
|
||||||
pub async fn run(&mut self, grpc_server_started_barrier: Arc<Barrier>) -> ClientResult<()> {
|
pub async fn run(&mut self, grpc_server_started_barrier: Arc<Barrier>) -> ClientResult<()> {
|
||||||
|
// Initialize the grpc service.
|
||||||
|
let interface =
|
||||||
|
get_interface_info(self.config.host.ip.unwrap(), self.config.upload.rate_limit);
|
||||||
|
|
||||||
let service = DfdaemonUploadGRPCServer::with_interceptor(
|
let service = DfdaemonUploadGRPCServer::with_interceptor(
|
||||||
DfdaemonUploadServerHandler {
|
DfdaemonUploadServerHandler {
|
||||||
|
interface,
|
||||||
socket_path: self.config.download.server.socket_path.clone(),
|
socket_path: self.config.download.server.socket_path.clone(),
|
||||||
task: self.task.clone(),
|
task: self.task.clone(),
|
||||||
persistent_cache_task: self.persistent_cache_task.clone(),
|
persistent_cache_task: self.persistent_cache_task.clone(),
|
||||||
interface: self.interface.clone(),
|
|
||||||
},
|
},
|
||||||
ExtractTracingInterceptor,
|
ExtractTracingInterceptor,
|
||||||
);
|
);
|
||||||
|
@ -204,6 +202,9 @@ impl DfdaemonUploadServer {
|
||||||
|
|
||||||
/// DfdaemonUploadServerHandler is the handler of the dfdaemon upload grpc service.
|
/// DfdaemonUploadServerHandler is the handler of the dfdaemon upload grpc service.
|
||||||
pub struct DfdaemonUploadServerHandler {
|
pub struct DfdaemonUploadServerHandler {
|
||||||
|
/// interface is the network interface.
|
||||||
|
interface: Option<Interface>,
|
||||||
|
|
||||||
/// socket_path is the path of the unix domain socket.
|
/// socket_path is the path of the unix domain socket.
|
||||||
socket_path: PathBuf,
|
socket_path: PathBuf,
|
||||||
|
|
||||||
|
@ -212,9 +213,6 @@ pub struct DfdaemonUploadServerHandler {
|
||||||
|
|
||||||
/// persistent_cache_task is the persistent cache task manager.
|
/// persistent_cache_task is the persistent cache task manager.
|
||||||
persistent_cache_task: Arc<persistent_cache_task::PersistentCacheTask>,
|
persistent_cache_task: Arc<persistent_cache_task::PersistentCacheTask>,
|
||||||
|
|
||||||
/// interface is the network interface.
|
|
||||||
interface: Arc<Interface>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// DfdaemonUploadServerHandler implements the dfdaemon upload grpc service.
|
/// DfdaemonUploadServerHandler implements the dfdaemon upload grpc service.
|
||||||
|
@ -1002,6 +1000,9 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
||||||
Span::current().set_parent(parent_ctx.clone());
|
Span::current().set_parent(parent_ctx.clone());
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// DEFAULT_HOST_INFO_REFRESH_INTERVAL is the default interval for refreshing the host info.
|
||||||
|
const DEFAULT_HOST_INFO_REFRESH_INTERVAL: Duration = Duration::from_millis(500);
|
||||||
|
|
||||||
// Clone the request.
|
// Clone the request.
|
||||||
let request = request.into_inner();
|
let request = request.into_inner();
|
||||||
|
|
||||||
|
@ -1023,42 +1024,42 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
||||||
// Get local interface.
|
// Get local interface.
|
||||||
let interface = self.interface.clone();
|
let interface = self.interface.clone();
|
||||||
|
|
||||||
// DEFAULT_HOST_INFO_REFRESH_INTERVAL is the default interval for refreshing the host info.
|
|
||||||
const DEFAULT_HOST_INFO_REFRESH_INTERVAL: Duration = Duration::from_millis(500);
|
|
||||||
|
|
||||||
// Initialize stream channel.
|
// Initialize stream channel.
|
||||||
let (out_stream_tx, out_stream_rx) = mpsc::channel(10 * 1024);
|
let (out_stream_tx, out_stream_rx) = mpsc::channel(10 * 1024);
|
||||||
tokio::spawn(
|
tokio::spawn(
|
||||||
async move {
|
async move {
|
||||||
|
// Initialize sysinfo network.
|
||||||
|
let mut networks = Networks::new_with_refreshed_list();
|
||||||
|
|
||||||
// Start the host info update loop.
|
// Start the host info update loop.
|
||||||
loop {
|
loop {
|
||||||
// Wait for the host info refresh interval.
|
// Sleep to calculate the network traffic difference over
|
||||||
|
// the DEFAULT_HOST_INFO_REFRESH_INTERVAL.
|
||||||
tokio::time::sleep(DEFAULT_HOST_INFO_REFRESH_INTERVAL).await;
|
tokio::time::sleep(DEFAULT_HOST_INFO_REFRESH_INTERVAL).await;
|
||||||
|
|
||||||
// Wait for getting the network data.
|
// Refresh network information.
|
||||||
let network_data = interface.get_network_data().await;
|
networks.refresh();
|
||||||
debug!(
|
|
||||||
"network data: rx bandwidth {}/{} bps, tx bandwidth {}/{} bps",
|
// Init response.
|
||||||
network_data.rx_bandwidth.unwrap_or(0),
|
let mut host = Host::default();
|
||||||
network_data.max_rx_bandwidth,
|
if let Some(interface) = &interface {
|
||||||
network_data.tx_bandwidth.unwrap_or(0),
|
if let Some(network_data) = networks.get(&interface.name) {
|
||||||
network_data.max_tx_bandwidth
|
host.network = Some(Network {
|
||||||
);
|
download_rate: network_data.received()
|
||||||
|
/ DEFAULT_HOST_INFO_REFRESH_INTERVAL.as_secs(),
|
||||||
|
// Convert bandwidth to bytes per second.
|
||||||
|
download_rate_limit: interface.bandwidth / 8 * MB,
|
||||||
|
upload_rate: network_data.transmitted()
|
||||||
|
/ DEFAULT_HOST_INFO_REFRESH_INTERVAL.as_secs(),
|
||||||
|
// Convert bandwidth to bytes per second.
|
||||||
|
upload_rate_limit: interface.bandwidth / 8 * MB,
|
||||||
|
..Default::default()
|
||||||
|
});
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
// Send host info.
|
// Send host info.
|
||||||
match out_stream_tx
|
match out_stream_tx.send(Ok(host.clone())).await {
|
||||||
.send(Ok(Host {
|
|
||||||
network: Some(Network {
|
|
||||||
max_rx_bandwidth: network_data.max_rx_bandwidth,
|
|
||||||
rx_bandwidth: network_data.rx_bandwidth,
|
|
||||||
max_tx_bandwidth: network_data.max_tx_bandwidth,
|
|
||||||
tx_bandwidth: network_data.tx_bandwidth,
|
|
||||||
..Default::default()
|
|
||||||
}),
|
|
||||||
..Default::default()
|
|
||||||
}))
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(_) => {}
|
Ok(_) => {}
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
error!(
|
error!(
|
||||||
|
@ -1066,7 +1067,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
|
||||||
remote_host_id, err
|
remote_host_id, err
|
||||||
);
|
);
|
||||||
|
|
||||||
return;
|
break;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
@@ -1691,63 +1692,6 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
    ) -> Result<Response<ExchangeIbVerbsQueuePairEndpointResponse>, Status> {
        unimplemented!()
    }
-
-    /// DownloadCacheTaskStream is the stream of the download cache task response.
-    type DownloadCacheTaskStream = ReceiverStream<Result<DownloadCacheTaskResponse, Status>>;
-
-    /// download_cache_task downloads the cache task.
-    #[instrument(
-        skip_all,
-        fields(host_id, task_id, peer_id, url, remote_ip, content_length)
-    )]
-    async fn download_cache_task(
-        &self,
-        _request: Request<DownloadCacheTaskRequest>,
-    ) -> Result<Response<Self::DownloadCacheTaskStream>, Status> {
-        todo!();
-    }
-
-    /// stat_cache_task stats the cache task.
-    #[instrument(skip_all, fields(host_id, task_id, remote_ip, local_only))]
-    async fn stat_cache_task(
-        &self,
-        _request: Request<StatCacheTaskRequest>,
-    ) -> Result<Response<CacheTask>, Status> {
-        todo!();
-    }
-
-    /// delete_cache_task deletes the cache task.
-    #[instrument(skip_all, fields(host_id, task_id, remote_ip))]
-    async fn delete_cache_task(
-        &self,
-        _request: Request<DeleteCacheTaskRequest>,
-    ) -> Result<Response<()>, Status> {
-        todo!();
-    }
-
-    /// SyncCachePiecesStream is the stream of the sync cache pieces response.
-    type SyncCachePiecesStream = ReceiverStream<Result<SyncCachePiecesResponse, Status>>;
-
-    /// sync_cache_pieces provides the cache piece metadata for parent.
-    #[instrument(skip_all, fields(host_id, remote_host_id, task_id))]
-    async fn sync_cache_pieces(
-        &self,
-        _request: Request<SyncCachePiecesRequest>,
-    ) -> Result<Response<Self::SyncCachePiecesStream>, Status> {
-        todo!();
-    }
-
-    /// download_cache_piece provides the cache piece content for parent.
-    #[instrument(
-        skip_all,
-        fields(host_id, remote_host_id, task_id, piece_id, piece_length)
-    )]
-    async fn download_cache_piece(
-        &self,
-        _request: Request<DownloadCachePieceRequest>,
-    ) -> Result<Response<DownloadCachePieceResponse>, Status> {
-        todo!();
-    }
 }

 /// DfdaemonUploadClient is a wrapper of DfdaemonUploadGRPCClient.
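Every stub deleted above ends in todo!(), and the surviving context method in unimplemented!(); both macros panic at runtime, which in a tonic service tears down the task serving that call. A non-panicking alternative, sketched here with hypothetical stand-in types rather than the generated protobuf ones, is to answer with gRPC's UNIMPLEMENTED status:

use tonic::{Request, Response, Status};

// Hypothetical stand-ins for the generated request/response types.
pub struct StatCacheTaskRequest;
pub struct CacheTask;

/// Sketch: report an unfinished RPC as UNIMPLEMENTED so the caller sees a
/// well-formed gRPC error instead of an aborted stream.
pub async fn stat_cache_task(
    _request: Request<StatCacheTaskRequest>,
) -> Result<Response<CacheTask>, Status> {
    Err(Status::unimplemented("stat_cache_task is not implemented"))
}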
@@ -231,7 +231,7 @@ impl Proxy {
    }

    /// handler handles the request from the client.
-    #[instrument(skip_all, fields(url, method, remote_ip))]
+    #[instrument(skip_all, fields(uri, method, remote_ip))]
    pub async fn handler(
        config: Arc<Config>,
        task: Arc<Task>,

@@ -241,8 +241,8 @@ pub async fn handler(
    server_ca_cert: Arc<Option<Certificate>>,
    remote_ip: std::net::IpAddr,
) -> ClientResult<Response> {
-    // Span record the url and method.
-    Span::current().record("url", request.uri().to_string().as_str());
+    // Span record the uri and method.
+    Span::current().record("uri", request.uri().to_string().as_str());
    Span::current().record("method", request.method().as_str());
    Span::current().record("remote_ip", remote_ip.to_string().as_str());

@@ -556,7 +556,7 @@ async fn upgraded_tunnel(

/// upgraded_handler handles the upgraded https request from the client.
#[allow(clippy::too_many_arguments)]
-#[instrument(skip_all, fields(url, method))]
+#[instrument(skip_all, fields(uri, method))]
pub async fn upgraded_handler(
    config: Arc<Config>,
    task: Arc<Task>,

@@ -567,8 +567,8 @@ pub async fn upgraded_handler(
    dfdaemon_download_client: DfdaemonDownloadClient,
    registry_cert: Arc<Option<Vec<CertificateDer<'static>>>>,
) -> ClientResult<Response> {
-    // Span record the url and method.
-    Span::current().record("url", request.uri().to_string().as_str());
+    // Span record the uri and method.
+    Span::current().record("uri", request.uri().to_string().as_str());
    Span::current().record("method", request.method().as_str());

    // Authenticate the request with the basic auth.
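In both handler hunks the rename keeps the field list declared in #[instrument(fields(...))] aligned with the names later passed to Span::current().record(...). That alignment matters with the tracing crate: a span only carries fields declared up front, and recording a name the span never declared is silently dropped, so a url/uri mismatch would lose the value from every emitted event. A minimal self-contained sketch of the pattern (hypothetical function, real tracing API):

use tracing::{info, instrument, Span};

/// Declared-but-unset fields stay empty until `record` fills them in; the
/// names passed to `record` must match the declarations exactly.
#[instrument(skip_all, fields(uri, method))]
fn handle(uri: &str, method: &str) {
    Span::current().record("uri", uri);
    Span::current().record("method", method);
    info!("handling request");
}

fn main() {
    tracing_subscriber::fmt().init();
    handle("https://example.com/", "GET");
}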
@@ -1102,6 +1102,7 @@ fn make_download_task_request(
        hdfs: None,
        is_prefetch: false,
        need_piece_content: false,
+        load_to_cache: false,
        force_hard_link: header::get_force_hard_link(&header),
        content_for_calculating_task_id: header::get_content_for_calculating_task_id(&header),
        remote_ip: Some(remote_ip.to_string()),

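On both sides of this hunk, most request fields are constants while a few are derived from HTTP headers through helpers such as header::get_force_hard_link(&header). A hedged sketch of what such a helper typically looks like; the header name is hypothetical, only the http crate API is real:

use http::HeaderMap;

// Hypothetical header name; the real one lives in the client's `header` module.
const FORCE_HARD_LINK: &str = "x-hypothetical-force-hard-link";

/// Sketch: parse a boolean flag out of a request header, defaulting to false
/// when the header is absent or malformed.
fn get_force_hard_link(header: &HeaderMap) -> bool {
    header
        .get(FORCE_HARD_LINK)
        .and_then(|value| value.to_str().ok())
        .map(|value| value.eq_ignore_ascii_case("true"))
        .unwrap_or(false)
}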
@@ -412,6 +412,7 @@ impl Piece {
        length: u64,
        parent: piece_collector::CollectedParent,
        is_prefetch: bool,
+        load_to_cache: bool,
    ) -> Result<metadata::Piece> {
        // Span record the piece_id.
        Span::current().record("piece_id", piece_id);

@@ -476,6 +477,7 @@ impl Piece {
                digest.as_str(),
                parent.id.as_str(),
                &mut reader,
+                load_to_cache,
                self.config.storage.write_piece_timeout,
            )
            .await

@@ -513,6 +515,7 @@ impl Piece {
        length: u64,
        request_header: HeaderMap,
        is_prefetch: bool,
+        load_to_cache: bool,
        object_storage: Option<ObjectStorage>,
        hdfs: Option<Hdfs>,
    ) -> Result<metadata::Piece> {

@@ -638,6 +641,7 @@ impl Piece {
                offset,
                length,
                &mut response.reader,
+                load_to_cache,
                self.config.storage.write_piece_timeout,
            )
            .await

@@ -248,7 +248,13 @@ impl Task {

        let task = self
            .storage
-            .download_task_started(id, piece_length, content_length, response.http_header)
+            .download_task_started(
+                id,
+                piece_length,
+                content_length,
+                response.http_header,
+                request.load_to_cache,
+            )
            .await;

        // Attempt to create a hard link from the task file to the output path.

@@ -734,6 +740,7 @@ impl Task {
            remaining_interested_pieces.clone(),
            request.is_prefetch,
            request.need_piece_content,
+            request.load_to_cache,
            download_progress_tx.clone(),
            in_stream_tx.clone(),
        )

@@ -977,6 +984,7 @@ impl Task {
        interested_pieces: Vec<metadata::Piece>,
        is_prefetch: bool,
        need_piece_content: bool,
+        load_to_cache: bool,
        download_progress_tx: Sender<Result<DownloadTaskResponse, Status>>,
        in_stream_tx: Sender<AnnouncePeerRequest>,
    ) -> ClientResult<Vec<metadata::Piece>> {

@@ -1037,6 +1045,7 @@ impl Task {
        finished_pieces: Arc<Mutex<Vec<metadata::Piece>>>,
        is_prefetch: bool,
        need_piece_content: bool,
+        load_to_cache: bool,
    ) -> ClientResult<metadata::Piece> {
        // Limit the concurrent piece count.
        let _permit = semaphore.acquire().await.unwrap();

@@ -1057,6 +1066,7 @@ impl Task {
            length,
            parent.clone(),
            is_prefetch,
+            load_to_cache,
        )
        .await
        .map_err(|err| {

@@ -1190,6 +1200,7 @@ impl Task {
            finished_pieces.clone(),
            is_prefetch,
            need_piece_content,
+            load_to_cache,
        )
        .in_current_span(),
        );

@@ -1303,6 +1314,7 @@ impl Task {
        request_header: HeaderMap,
        is_prefetch: bool,
        need_piece_content: bool,
+        load_to_cache: bool,
        piece_manager: Arc<piece::Piece>,
        semaphore: Arc<Semaphore>,
        download_progress_tx: Sender<Result<DownloadTaskResponse, Status>>,

@@ -1326,6 +1338,7 @@ impl Task {
            length,
            request_header,
            is_prefetch,
+            load_to_cache,
            object_storage,
            hdfs,
        )

@@ -1430,6 +1443,7 @@ impl Task {
            request_header.clone(),
            request.is_prefetch,
            request.need_piece_content,
+            request.load_to_cache,
            self.piece.clone(),
            semaphore.clone(),
            download_progress_tx.clone(),

@@ -1696,6 +1710,7 @@ impl Task {
        length: u64,
        request_header: HeaderMap,
        is_prefetch: bool,
+        load_to_cache: bool,
        piece_manager: Arc<piece::Piece>,
        semaphore: Arc<Semaphore>,
        download_progress_tx: Sender<Result<DownloadTaskResponse, Status>>,

@@ -1718,6 +1733,7 @@ impl Task {
            length,
            request_header,
            is_prefetch,
+            load_to_cache,
            object_storage,
            hdfs,
        )

@@ -1776,6 +1792,7 @@ impl Task {
            interested_piece.length,
            request_header.clone(),
            request.is_prefetch,
+            request.load_to_cache,
            self.piece.clone(),
            semaphore.clone(),
            download_progress_tx.clone(),
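Together with the piece.rs hunks above, the v1.0.7 side threads one request-level flag, load_to_cache, unchanged through every layer: the download request, the task download, the per-piece download, and finally the storage write. A compressed sketch of that shape, with every name a hypothetical stand-in for the real call chain:

/// The flag originates on the request...
struct DownloadRequest {
    load_to_cache: bool,
}

async fn download_task(request: &DownloadRequest) {
    // ...is forwarded verbatim by each intermediate layer...
    download_piece(request.load_to_cache).await;
}

async fn download_piece(load_to_cache: bool) {
    write_piece(load_to_cache).await;
}

async fn write_piece(load_to_cache: bool) {
    // ...and, in the hunks shown here, is only consumed at the storage layer.
    let _ = load_to_cache;
}

That pass-through structure is why the diff touches so many signatures while changing almost no logic.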
@@ -1969,7 +1986,7 @@ mod tests {
        // Create a task and save it to storage.
        let task_id = "test-task-id";
        storage
-            .download_task_started(task_id, 1024, 4096, None)
+            .download_task_started(task_id, 1024, 4096, None, false)
            .await
            .unwrap();

@@ -102,16 +102,8 @@ pub fn init_tracing(
    let env_filter = EnvFilter::try_from_default_env()
        .unwrap_or_else(|_| EnvFilter::default().add_directive(log_level.into()));

-    // Enable console subscriber layer for tracing spawn tasks on `127.0.0.1:6669` when log level is TRACE.
-    let console_subscriber_layer = if log_level == Level::TRACE {
-        Some(console_subscriber::spawn())
-    } else {
-        None
-    };
-
    let subscriber = Registry::default()
        .with(env_filter)
-        .with(console_subscriber_layer)
        .with(file_logging_layer)
        .with(stdout_logging_layer);

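The layer removed here leans on two details. First, console_subscriber::spawn() starts a tokio-console endpoint (127.0.0.1:6669 by default, per the deleted comment), which is only useful when the binary is built with the tokio_unstable cfg, since stable tokio does not emit the instrumentation tokio-console consumes. Second, tracing_subscriber implements Layer for Option<L>, so a None slot in the .with(...) chain is a clean no-op; that is what lets the layer be toggled by log level without branching the whole builder. A minimal sketch of the same toggle, assuming the console-subscriber and tracing-subscriber crates and a tokio_unstable build:

use tracing::Level;
use tracing_subscriber::{layer::SubscriberExt, Registry};

fn main() {
    let log_level = Level::TRACE;

    // `Option<Layer>` is itself a layer; `None` simply disables it.
    let console_subscriber_layer =
        (log_level == Level::TRACE).then(console_subscriber::spawn);

    let subscriber = Registry::default().with(console_subscriber_layer);
    tracing::subscriber::set_global_default(subscriber).expect("set global subscriber");
}

To inspect the exported task data, run the tokio-console CLI against the default endpoint (cargo install tokio-console, then tokio-console).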