refactor(downloader): streamline download concurrency handling (#1341)
* style(error): update error message formatting
* refactor(downloader): streamline download concurrency handling

Signed-off-by: Gaius <gaius.qi@gmail.com>
Parent: af354b7e59
Commit: 79fa36ca97
Cargo.lock

@@ -986,7 +986,7 @@ dependencies = [
 [[package]]
 name = "dragonfly-client"
-version = "1.0.17"
+version = "1.0.18"
 dependencies = [
  "anyhow",
  "bytes",
@@ -1059,7 +1059,7 @@ dependencies = [
 [[package]]
 name = "dragonfly-client-backend"
-version = "1.0.17"
+version = "1.0.18"
 dependencies = [
  "dragonfly-api",
  "dragonfly-client-core",
@@ -1090,7 +1090,7 @@ dependencies = [
 [[package]]
 name = "dragonfly-client-config"
-version = "1.0.17"
+version = "1.0.18"
 dependencies = [
  "bytesize",
  "bytesize-serde",
@@ -1120,7 +1120,7 @@ dependencies = [
 [[package]]
 name = "dragonfly-client-core"
-version = "1.0.17"
+version = "1.0.18"
 dependencies = [
  "headers 0.4.1",
  "hyper 1.6.0",
@@ -1138,7 +1138,7 @@ dependencies = [
 [[package]]
 name = "dragonfly-client-init"
-version = "1.0.17"
+version = "1.0.18"
 dependencies = [
  "anyhow",
  "clap",
@@ -1155,7 +1155,7 @@ dependencies = [
 [[package]]
 name = "dragonfly-client-storage"
-version = "1.0.17"
+version = "1.0.18"
 dependencies = [
  "bincode",
  "bytes",
@@ -1182,7 +1182,7 @@ dependencies = [
 [[package]]
 name = "dragonfly-client-util"
-version = "1.0.17"
+version = "1.0.18"
 dependencies = [
  "base64 0.22.1",
  "bytesize",
@@ -1593,7 +1593,7 @@ dependencies = [
 [[package]]
 name = "hdfs"
-version = "1.0.17"
+version = "1.0.18"
 dependencies = [
  "dragonfly-client-backend",
  "dragonfly-client-core",
Cargo.toml (16 changed lines)

@@ -12,7 +12,7 @@ members = [
 ]
 
 [workspace.package]
-version = "1.0.17"
+version = "1.0.18"
 authors = ["The Dragonfly Developers"]
 homepage = "https://d7y.io/"
 repository = "https://github.com/dragonflyoss/client.git"
@@ -22,13 +22,13 @@ readme = "README.md"
 edition = "2021"
 
 [workspace.dependencies]
-dragonfly-client = { path = "dragonfly-client", version = "1.0.17" }
-dragonfly-client-core = { path = "dragonfly-client-core", version = "1.0.17" }
-dragonfly-client-config = { path = "dragonfly-client-config", version = "1.0.17" }
-dragonfly-client-storage = { path = "dragonfly-client-storage", version = "1.0.17" }
-dragonfly-client-backend = { path = "dragonfly-client-backend", version = "1.0.17" }
-dragonfly-client-util = { path = "dragonfly-client-util", version = "1.0.17" }
-dragonfly-client-init = { path = "dragonfly-client-init", version = "1.0.17" }
+dragonfly-client = { path = "dragonfly-client", version = "1.0.18" }
+dragonfly-client-core = { path = "dragonfly-client-core", version = "1.0.18" }
+dragonfly-client-config = { path = "dragonfly-client-config", version = "1.0.18" }
+dragonfly-client-storage = { path = "dragonfly-client-storage", version = "1.0.18" }
+dragonfly-client-backend = { path = "dragonfly-client-backend", version = "1.0.18" }
+dragonfly-client-util = { path = "dragonfly-client-util", version = "1.0.18" }
+dragonfly-client-init = { path = "dragonfly-client-init", version = "1.0.18" }
 dragonfly-api = "=2.1.62"
 thiserror = "2.0"
 futures = "0.3.31"
@@ -926,7 +926,7 @@ mod tests {
         assert!(result.is_err());
         assert_eq!(
             result.unwrap_err().to_string(),
-            "backend error s3 need object_storage parameter"
+            "backend error: s3 need object_storage parameter"
         )
     }
 
@@ -935,28 +935,28 @@ mod tests {
         let test_cases = vec![
             (
                 ObjectStorageInfo::default(),
-                "backend error s3 need access_key_id, access_key_secret, region",
+                "backend error: s3 need access_key_id, access_key_secret, region",
             ),
             (
                 ObjectStorageInfo {
                     access_key_id: Some("access_key_id".into()),
                     ..Default::default()
                 },
-                "backend error s3 need access_key_secret, region",
+                "backend error: s3 need access_key_secret, region",
             ),
             (
                 ObjectStorageInfo {
                     access_key_secret: Some("access_key_secret".into()),
                     ..Default::default()
                 },
-                "backend error s3 need access_key_id, region",
+                "backend error: s3 need access_key_id, region",
             ),
             (
                 ObjectStorageInfo {
                     region: Some("test-region".into()),
                     ..Default::default()
                 },
-                "backend error s3 need access_key_id, access_key_secret",
+                "backend error: s3 need access_key_id, access_key_secret",
             ),
             (
                 ObjectStorageInfo {
@@ -964,7 +964,7 @@ mod tests {
                     access_key_secret: Some("access_key_secret".into()),
                     ..Default::default()
                 },
-                "backend error s3 need region",
+                "backend error: s3 need region",
             ),
             (
                 ObjectStorageInfo {
@@ -972,7 +972,7 @@ mod tests {
                     region: Some("test-region".into()),
                     ..Default::default()
                 },
-                "backend error s3 need access_key_secret",
+                "backend error: s3 need access_key_secret",
             ),
             (
                 ObjectStorageInfo {
@@ -980,7 +980,7 @@ mod tests {
                     region: Some("test-region".into()),
                     ..Default::default()
                 },
-                "backend error s3 need access_key_id",
+                "backend error: s3 need access_key_id",
             ),
         ];
 
@@ -1004,28 +1004,28 @@ mod tests {
         let test_cases = vec![
             (
                 ObjectStorageInfo::default(),
-                "backend error abs need access_key_id, access_key_secret, endpoint",
+                "backend error: abs need access_key_id, access_key_secret, endpoint",
             ),
             (
                 ObjectStorageInfo {
                     access_key_id: Some("access_key_id".into()),
                     ..Default::default()
                 },
-                "backend error abs need access_key_secret, endpoint",
+                "backend error: abs need access_key_secret, endpoint",
             ),
             (
                 ObjectStorageInfo {
                     access_key_secret: Some("access_key_secret".into()),
                     ..Default::default()
                 },
-                "backend error abs need access_key_id, endpoint",
+                "backend error: abs need access_key_id, endpoint",
             ),
             (
                 ObjectStorageInfo {
                     endpoint: Some("test-endpoint.local".into()),
                     ..Default::default()
                 },
-                "backend error abs need access_key_id, access_key_secret",
+                "backend error: abs need access_key_id, access_key_secret",
             ),
             (
                 ObjectStorageInfo {
@@ -1033,7 +1033,7 @@ mod tests {
                     access_key_secret: Some("access_key_secret".into()),
                     ..Default::default()
                 },
-                "backend error abs need endpoint",
+                "backend error: abs need endpoint",
             ),
             (
                 ObjectStorageInfo {
@@ -1041,7 +1041,7 @@ mod tests {
                     endpoint: Some("test-endpoint.local".into()),
                     ..Default::default()
                 },
-                "backend error abs need access_key_secret",
+                "backend error: abs need access_key_secret",
             ),
             (
                 ObjectStorageInfo {
@@ -1049,7 +1049,7 @@ mod tests {
                     endpoint: Some("test-endpoint.local".into()),
                     ..Default::default()
                 },
-                "backend error abs need access_key_id",
+                "backend error: abs need access_key_id",
             ),
         ];
 
@@ -1073,28 +1073,28 @@ mod tests {
         let test_cases = vec![
             (
                 ObjectStorageInfo::default(),
-                "backend error oss need access_key_id, access_key_secret, endpoint",
+                "backend error: oss need access_key_id, access_key_secret, endpoint",
             ),
             (
                 ObjectStorageInfo {
                     access_key_id: Some("access_key_id".into()),
                     ..Default::default()
                 },
-                "backend error oss need access_key_secret, endpoint",
+                "backend error: oss need access_key_secret, endpoint",
             ),
             (
                 ObjectStorageInfo {
                     access_key_secret: Some("access_key_secret".into()),
                     ..Default::default()
                 },
-                "backend error oss need access_key_id, endpoint",
+                "backend error: oss need access_key_id, endpoint",
             ),
             (
                 ObjectStorageInfo {
                     endpoint: Some("test-endpoint.local".into()),
                     ..Default::default()
                 },
-                "backend error oss need access_key_id, access_key_secret",
+                "backend error: oss need access_key_id, access_key_secret",
             ),
             (
                 ObjectStorageInfo {
@@ -1102,7 +1102,7 @@ mod tests {
                     access_key_secret: Some("access_key_secret".into()),
                     ..Default::default()
                 },
-                "backend error oss need endpoint",
+                "backend error: oss need endpoint",
             ),
             (
                 ObjectStorageInfo {
@@ -1110,7 +1110,7 @@ mod tests {
                     endpoint: Some("test-endpoint.local".into()),
                     ..Default::default()
                 },
-                "backend error oss need access_key_secret",
+                "backend error: oss need access_key_secret",
             ),
             (
                 ObjectStorageInfo {
@@ -1118,7 +1118,7 @@ mod tests {
                     endpoint: Some("test-endpoint.local".into()),
                     ..Default::default()
                 },
-                "backend error oss need access_key_id",
+                "backend error: oss need access_key_id",
             ),
         ];
 
@@ -1142,28 +1142,28 @@ mod tests {
         let test_cases = vec![
             (
                 ObjectStorageInfo::default(),
-                "backend error obs need access_key_id, access_key_secret, endpoint",
+                "backend error: obs need access_key_id, access_key_secret, endpoint",
             ),
             (
                 ObjectStorageInfo {
                     access_key_id: Some("access_key_id".into()),
                     ..Default::default()
                 },
-                "backend error obs need access_key_secret, endpoint",
+                "backend error: obs need access_key_secret, endpoint",
             ),
             (
                 ObjectStorageInfo {
                     access_key_secret: Some("access_key_secret".into()),
                     ..Default::default()
                 },
-                "backend error obs need access_key_id, endpoint",
+                "backend error: obs need access_key_id, endpoint",
             ),
             (
                 ObjectStorageInfo {
                     endpoint: Some("test-endpoint.local".into()),
                     ..Default::default()
                 },
-                "backend error obs need access_key_id, access_key_secret",
+                "backend error: obs need access_key_id, access_key_secret",
             ),
             (
                 ObjectStorageInfo {
@@ -1171,7 +1171,7 @@ mod tests {
                     access_key_secret: Some("access_key_secret".into()),
                     ..Default::default()
                 },
-                "backend error obs need endpoint",
+                "backend error: obs need endpoint",
             ),
             (
                 ObjectStorageInfo {
@@ -1179,7 +1179,7 @@ mod tests {
                     endpoint: Some("test-endpoint.local".into()),
                     ..Default::default()
                 },
-                "backend error obs need access_key_secret",
+                "backend error: obs need access_key_secret",
             ),
             (
                 ObjectStorageInfo {
@@ -1187,7 +1187,7 @@ mod tests {
                     endpoint: Some("test-endpoint.local".into()),
                     ..Default::default()
                 },
-                "backend error obs need access_key_id",
+                "backend error: obs need access_key_id",
             ),
         ];
 
@@ -1211,28 +1211,28 @@ mod tests {
         let test_cases = vec![
             (
                 ObjectStorageInfo::default(),
-                "backend error cos need access_key_id, access_key_secret, endpoint",
+                "backend error: cos need access_key_id, access_key_secret, endpoint",
             ),
             (
                 ObjectStorageInfo {
                     access_key_id: Some("access_key_id".into()),
                     ..Default::default()
                 },
-                "backend error cos need access_key_secret, endpoint",
+                "backend error: cos need access_key_secret, endpoint",
             ),
             (
                 ObjectStorageInfo {
                     access_key_secret: Some("access_key_secret".into()),
                     ..Default::default()
                 },
-                "backend error cos need access_key_id, endpoint",
+                "backend error: cos need access_key_id, endpoint",
             ),
             (
                 ObjectStorageInfo {
                     endpoint: Some("test-endpoint.local".into()),
                     ..Default::default()
                 },
-                "backend error cos need access_key_id, access_key_secret",
+                "backend error: cos need access_key_id, access_key_secret",
             ),
             (
                 ObjectStorageInfo {
@@ -1240,7 +1240,7 @@ mod tests {
                     access_key_secret: Some("access_key_secret".into()),
                     ..Default::default()
                 },
-                "backend error cos need endpoint",
+                "backend error: cos need endpoint",
             ),
             (
                 ObjectStorageInfo {
@@ -1248,7 +1248,7 @@ mod tests {
                     endpoint: Some("test-endpoint.local".into()),
                     ..Default::default()
                 },
-                "backend error cos need access_key_secret",
+                "backend error: cos need access_key_secret",
             ),
             (
                 ObjectStorageInfo {
@@ -1256,7 +1256,7 @@ mod tests {
                     endpoint: Some("test-endpoint.local".into()),
                     ..Default::default()
                 },
-                "backend error cos need access_key_id",
+                "backend error: cos need access_key_id",
             ),
         ];
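The test tables above pair an ObjectStorageInfo input with the exact message the validator is expected to produce. A minimal, self-contained sketch of that table-driven pattern follows; the validate_s3 helper here is a hypothetical stand-in, not the client's actual validator:

#[derive(Default)]
struct ObjectStorageInfo {
    access_key_id: Option<String>,
    access_key_secret: Option<String>,
    region: Option<String>,
}

fn validate_s3(info: &ObjectStorageInfo) -> Result<(), String> {
    // Collect every missing field so the message lists them all at once.
    let mut missing = Vec::new();
    if info.access_key_id.is_none() {
        missing.push("access_key_id");
    }
    if info.access_key_secret.is_none() {
        missing.push("access_key_secret");
    }
    if info.region.is_none() {
        missing.push("region");
    }
    if missing.is_empty() {
        Ok(())
    } else {
        Err(format!("backend error: s3 need {}", missing.join(", ")))
    }
}

fn main() {
    let test_cases = vec![
        (
            ObjectStorageInfo::default(),
            "backend error: s3 need access_key_id, access_key_secret, region",
        ),
        (
            ObjectStorageInfo {
                region: Some("test-region".into()),
                ..Default::default()
            },
            "backend error: s3 need access_key_id, access_key_secret",
        ),
    ];
    for (info, expected) in test_cases {
        assert_eq!(validate_s3(&info).unwrap_err(), expected);
    }
}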
@@ -159,7 +159,7 @@ impl<T, E> OrErr<T, E> for Result<T, E> {
 
 /// BackendError is the error for backend.
 #[derive(Debug, thiserror::Error)]
-#[error("backend error {message}")]
+#[error("backend error: {message}")]
 pub struct BackendError {
     /// message is the error message.
     pub message: String,
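This one-character change propagates to every test string above because thiserror derives Display directly from the #[error(...)] attribute. A minimal sketch, assuming thiserror 2.x:

use thiserror::Error;

#[derive(Debug, Error)]
#[error("backend error: {message}")]
pub struct BackendError {
    pub message: String,
}

fn main() {
    let err = BackendError {
        message: "s3 need region".to_string(),
    };
    // Display now renders with the colon after "backend error".
    assert_eq!(err.to_string(), "backend error: s3 need region");
}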
@@ -16,6 +16,8 @@
 
 use dragonfly_client_core::Result;
 use tokio::fs;
+
+#[cfg(target_os = "linux")]
 use tracing::warn;
 
 /// fallocate allocates the space for the file and fills it with zero, only on Linux.
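Gating the import keeps non-Linux builds from tripping the unused-import lint, since warn! is only reachable on the Linux code path. A sketch of the pattern; the preallocate body below is a hypothetical stand-in for the real fallocate helper:

#[cfg(target_os = "linux")]
use tracing::warn;

/// Hypothetical stand-in: hint at space allocation ahead of a download, Linux only.
pub async fn preallocate(len: u64) {
    #[cfg(target_os = "linux")]
    {
        if len == 0 {
            warn!("skip preallocation for an empty file");
        }
    }
    // On non-Linux targets the parameter is otherwise unused.
    let _ = len;
}

#[tokio::main]
async fn main() {
    preallocate(0).await;
}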
@@ -201,7 +201,7 @@ struct Args {
         short = 'I',
         long = "include-files",
         required = false,
-        help = "Filter files to download in a directory using glob patterns relative to the root URL's path. Examples: --include-files='file.txt' --include-files='subdir/file.txt' --include-files='subdir/dir/'"
+        help = "Filter files to download in a directory using glob patterns relative to the root URL's path. Examples: --include-files file.txt --include-files subdir/file.txt --include-files subdir/dir/"
     )]
     include_files: Option<Vec<String>>,
@@ -264,7 +264,7 @@ struct Args {
 
     #[arg(
         long,
-        default_value_t = 5,
+        default_value_t = 1,
         help = "Specify the max count of concurrent download files when downloading a directory"
    )]
    max_concurrent_requests: usize,
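Dropping the default from 5 to 1 makes directory downloads sequential unless the caller raises the limit explicitly. A minimal sketch of the flag, assuming clap 4 with the derive feature:

use clap::Parser;

#[derive(Parser)]
struct Args {
    /// Max count of concurrent download files when downloading a directory.
    #[arg(long, default_value_t = 1)]
    max_concurrent_requests: usize,
}

fn main() {
    let args = Args::parse();
    // Without an explicit --max-concurrent-requests, entries download one at a time.
    println!("concurrency limit: {}", args.max_concurrent_requests);
}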
@@ -709,23 +709,12 @@ async fn download_dir(args: Args, download_client: DfdaemonDownloadClient) -> Re
             entry_args.url = entry_url;
 
             let progress_bar = multi_progress_bar.add(ProgressBar::new(0));
-            async fn download_entry(
-                args: Args,
-                progress_bar: ProgressBar,
-                download_client: DfdaemonDownloadClient,
-                semaphore: Arc<Semaphore>,
-            ) -> Result<()> {
-                // Limit the concurrent download tasks.
-                let _permit = semaphore.acquire().await.unwrap();
-                download(args, progress_bar, download_client).await
-            }
-
-            join_set.spawn(download_entry(
-                entry_args,
-                progress_bar,
-                download_client.clone(),
-                semaphore.clone(),
-            ));
+            let download_client = download_client.clone();
+            let permit = semaphore.clone().acquire_owned().await.unwrap();
+            join_set.spawn(async move {
+                let _permit = permit;
+                download(entry_args, progress_bar, download_client).await
+            });
         }
     }
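This is the core of the refactor: instead of an inner wrapper fn that acquires the semaphore after the task has already been spawned, an owned permit is acquired before spawning and moved into the task, so the spawn loop itself blocks once the limit is reached. A self-contained sketch of the pattern, with a hypothetical worker in place of download:

use std::sync::Arc;
use tokio::sync::Semaphore;
use tokio::task::JoinSet;

#[tokio::main]
async fn main() {
    let semaphore = Arc::new(Semaphore::new(2)); // at most 2 downloads in flight
    let mut join_set = JoinSet::new();

    for i in 0..8 {
        // Acquiring before spawn throttles the loop itself: once two permits
        // are out, this await blocks until a running task drops its permit.
        let permit = semaphore.clone().acquire_owned().await.unwrap();
        join_set.spawn(async move {
            let _permit = permit; // held for the lifetime of the task
            println!("downloading entry {i}");
        });
    }

    while let Some(res) = join_set.join_next().await {
        res.unwrap();
    }
}

With acquire_owned the permit is 'static and can move into the spawned future, which is why the wrapper function is no longer needed.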
@@ -740,7 +729,7 @@ async fn download_dir(args: Args, download_client: DfdaemonDownloadClient) -> Re
                 Ok(_) => continue,
                 Err(err) => {
                     error!("download entry failed: {}", err);
-                    join_set.abort_all();
+                    join_set.shutdown().await;
                     return Err(err);
                 }
             }
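The same substitution recurs throughout the commit: abort_all() (and detach_all()) only signal cancellation or disown the tasks and return immediately, while JoinSet::shutdown() aborts every task and then awaits each one, so nothing in the set outlives the error path. A small sketch of the difference:

use std::time::Duration;
use tokio::task::JoinSet;

#[tokio::main]
async fn main() {
    let mut join_set = JoinSet::new();
    for _ in 0..4 {
        join_set.spawn(async {
            tokio::time::sleep(Duration::from_secs(3600)).await;
        });
    }

    // abort_all() would only request cancellation and return immediately;
    // shutdown() aborts every task and then awaits them all, so the set is
    // guaranteed empty once this call completes.
    join_set.shutdown().await;
    assert!(join_set.is_empty());
}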
@@ -272,7 +272,7 @@ impl PersistentCacheTask {
                     finished_pieces.push(metadata.clone());
                 }
                 Err(err) => {
-                    join_set.detach_all();
+                    join_set.shutdown().await;
 
                     // Delete the persistent cache task.
                     self.storage
@@ -1042,8 +1042,6 @@ impl PersistentCacheTask {
         let semaphore = Arc::new(Semaphore::new(
             self.config.download.concurrent_piece_count as usize,
         ));
-
-        // Download the pieces from the parents.
         while let Some(collect_piece) = piece_collector_rx.recv().await {
             if interrupt.load(Ordering::SeqCst) {
                 // If the interrupt is true, break the collector loop.
@@ -1061,15 +1059,11 @@ impl PersistentCacheTask {
             need_piece_content: bool,
             parent: piece_collector::CollectedParent,
             piece_manager: Arc<super::piece::Piece>,
-            semaphore: Arc<Semaphore>,
             download_progress_tx: Sender<Result<DownloadPersistentCacheTaskResponse, Status>>,
             in_stream_tx: Sender<AnnouncePersistentCachePeerRequest>,
             interrupt: Arc<AtomicBool>,
             finished_pieces: Arc<Mutex<Vec<metadata::Piece>>>,
         ) -> ClientResult<metadata::Piece> {
-            // Limit the concurrent download count.
-            let _permit = semaphore.acquire().await.unwrap();
-
             let piece_id = piece_manager.persistent_cache_id(task_id.as_str(), number);
             info!(
                 "start to download persistent cache piece {} from parent {:?}",
@@ -1203,24 +1197,34 @@ impl PersistentCacheTask {
             Ok(metadata)
         }
 
-            join_set.spawn(
-                download_from_parent(
-                    task.id.clone(),
-                    host_id.to_string(),
-                    peer_id.to_string(),
-                    collect_piece.number,
-                    collect_piece.length,
-                    need_piece_content,
-                    collect_piece.parent.clone(),
-                    self.piece.clone(),
-                    semaphore.clone(),
-                    download_progress_tx.clone(),
-                    in_stream_tx.clone(),
-                    interrupt.clone(),
-                    finished_pieces.clone(),
-                )
-                .in_current_span(),
-            );
+            let task_id = task_id.to_string();
+            let host_id = host_id.to_string();
+            let peer_id = peer_id.to_string();
+            let piece_manager = self.piece.clone();
+            let download_progress_tx = download_progress_tx.clone();
+            let in_stream_tx = in_stream_tx.clone();
+            let interrupt = interrupt.clone();
+            let finished_pieces = finished_pieces.clone();
+            let permit = semaphore.clone().acquire_owned().await.unwrap();
+            join_set.spawn(async move {
+                let _permit = permit;
+                download_from_parent(
+                    task_id,
+                    host_id,
+                    peer_id,
+                    collect_piece.number,
+                    collect_piece.length,
+                    need_piece_content,
+                    collect_piece.parent.clone(),
+                    piece_manager,
+                    download_progress_tx,
+                    in_stream_tx,
+                    interrupt,
+                    finished_pieces,
+                )
+                .in_current_span()
+                .await
+            });
         }
 
         // Wait for the pieces to be downloaded.
@@ -1233,7 +1237,7 @@ impl PersistentCacheTask {
             match message {
                 Ok(_) => {}
                 Err(Error::DownloadFromParentFailed(err)) => {
-                    join_set.detach_all();
+                    join_set.shutdown().await;
 
                     // Send the download piece failed request.
                     let (piece_number, parent_id) = (err.piece_number, err.parent_id.clone());
@@ -1263,14 +1267,14 @@ impl PersistentCacheTask {
                     return Err(Error::DownloadFromParentFailed(err));
                 }
                 Err(Error::SendTimeout) => {
-                    join_set.detach_all();
+                    join_set.shutdown().await;
 
                     // If the send timeout with scheduler or download progress, return the error
                     // and interrupt the collector.
                     return Err(Error::SendTimeout);
                 }
                 Err(err) => {
-                    join_set.detach_all();
+                    join_set.shutdown().await;
                     error!("download from parent error: {:?}", err);
                     return Err(err);
                 }
@@ -290,7 +290,7 @@ impl PieceCollector {
                     // If all pieces are collected, abort all tasks.
                     if collected_pieces.is_empty() {
                         info!("all pieces are collected, abort all tasks");
-                        join_set.abort_all();
+                        join_set.shutdown().await;
                     }
                 }
                 Ok(Err(err)) => {
@@ -550,7 +550,7 @@ impl PersistentCachePieceCollector {
                     // If all pieces are collected, abort all tasks.
                     if collected_pieces.is_empty() {
                         info!("all persistent cache pieces are collected, abort all tasks");
-                        join_set.abort_all();
+                        join_set.shutdown().await;
                     }
                 }
                 Ok(Err(err)) => {
@@ -1012,8 +1012,6 @@ impl Task {
         let semaphore = Arc::new(Semaphore::new(
             self.config.download.concurrent_piece_count as usize,
         ));
-
-        // Download the pieces from the parents.
         while let Some(collect_piece) = piece_collector_rx.recv().await {
             if interrupt.load(Ordering::SeqCst) {
                 // If the interrupt is true, break the collector loop.
@@ -1030,7 +1028,6 @@ impl Task {
             length: u64,
             parent: piece_collector::CollectedParent,
             piece_manager: Arc<piece::Piece>,
-            semaphore: Arc<Semaphore>,
             download_progress_tx: Sender<Result<DownloadTaskResponse, Status>>,
             in_stream_tx: Sender<AnnouncePeerRequest>,
             interrupt: Arc<AtomicBool>,
@@ -1038,9 +1035,6 @@ impl Task {
             is_prefetch: bool,
             need_piece_content: bool,
         ) -> ClientResult<metadata::Piece> {
-            // Limit the concurrent piece count.
-            let _permit = semaphore.acquire().await.unwrap();
-
             let piece_id = piece_manager.id(task_id.as_str(), number);
             info!(
                 "start to download piece {} from parent {:?}",
@@ -1174,25 +1168,35 @@ impl Task {
             Ok(metadata)
         }
 
-            join_set.spawn(
-                download_from_parent(
-                    task_id.to_string(),
-                    host_id.to_string(),
-                    peer_id.to_string(),
-                    collect_piece.number,
-                    collect_piece.length,
-                    collect_piece.parent.clone(),
-                    self.piece.clone(),
-                    semaphore.clone(),
-                    download_progress_tx.clone(),
-                    in_stream_tx.clone(),
-                    interrupt.clone(),
-                    finished_pieces.clone(),
-                    is_prefetch,
-                    need_piece_content,
-                )
-                .in_current_span(),
-            );
+            let task_id = task_id.to_string();
+            let host_id = host_id.to_string();
+            let peer_id = peer_id.to_string();
+            let piece_manager = self.piece.clone();
+            let download_progress_tx = download_progress_tx.clone();
+            let in_stream_tx = in_stream_tx.clone();
+            let interrupt = interrupt.clone();
+            let finished_pieces = finished_pieces.clone();
+            let permit = semaphore.clone().acquire_owned().await.unwrap();
+            join_set.spawn(async move {
+                let _permit = permit;
+                download_from_parent(
+                    task_id,
+                    host_id,
+                    peer_id,
+                    collect_piece.number,
+                    collect_piece.length,
+                    collect_piece.parent.clone(),
+                    piece_manager,
+                    download_progress_tx,
+                    in_stream_tx,
+                    interrupt,
+                    finished_pieces,
+                    is_prefetch,
+                    need_piece_content,
+                )
+                .in_current_span()
+                .await
+            });
         }
 
         // Wait for the pieces to be downloaded.
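The block of let bindings before each spawn exists because async move captures by value; rebinding clones under the same names keeps the originals usable on the next loop iteration. A minimal sketch of that convention, with hypothetical types:

use std::sync::Arc;

async fn handle_piece(task_id: String, piece_manager: Arc<String>) {
    println!("{} via {}", task_id, piece_manager);
}

#[tokio::main]
async fn main() {
    let task_id = "task-1".to_string();
    let piece_manager = Arc::new("piece-manager".to_string());

    for _ in 0..3 {
        // Shadow the outer bindings with clones; the clones move into the
        // task while the originals survive for the next iteration.
        let task_id = task_id.clone();
        let piece_manager = piece_manager.clone();
        tokio::spawn(async move {
            handle_piece(task_id, piece_manager).await;
        })
        .await
        .unwrap();
    }
}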
@@ -1240,7 +1244,7 @@ impl Task {
                     continue;
                 }
                 Err(Error::SendTimeout) => {
-                    join_set.detach_all();
+                    join_set.shutdown().await;
 
                     // If the send timeout with scheduler or download progress, return the finished pieces.
                     // It will stop the download from the parent with scheduler
@@ -1304,15 +1308,11 @@ impl Task {
             is_prefetch: bool,
             need_piece_content: bool,
             piece_manager: Arc<piece::Piece>,
-            semaphore: Arc<Semaphore>,
             download_progress_tx: Sender<Result<DownloadTaskResponse, Status>>,
             in_stream_tx: Sender<AnnouncePeerRequest>,
             object_storage: Option<ObjectStorage>,
             hdfs: Option<Hdfs>,
         ) -> ClientResult<metadata::Piece> {
-            // Limit the concurrent download count.
-            let _permit = semaphore.acquire().await.unwrap();
-
             let piece_id = piece_manager.id(task_id.as_str(), number);
             info!("start to download piece {} from source", piece_id);
 
@@ -1418,27 +1418,39 @@ impl Task {
             Ok(metadata)
         }
 
-            join_set.spawn(
-                download_from_source(
-                    task_id.to_string(),
-                    host_id.to_string(),
-                    peer_id.to_string(),
-                    interested_piece.number,
-                    request.url.clone(),
-                    interested_piece.offset,
-                    interested_piece.length,
-                    request_header.clone(),
-                    request.is_prefetch,
-                    request.need_piece_content,
-                    self.piece.clone(),
-                    semaphore.clone(),
-                    download_progress_tx.clone(),
-                    in_stream_tx.clone(),
-                    request.object_storage.clone(),
-                    request.hdfs.clone(),
-                )
-                .in_current_span(),
-            );
+            let task_id = task_id.to_string();
+            let host_id = host_id.to_string();
+            let peer_id = peer_id.to_string();
+            let url = request.url.clone();
+            let request_header = request_header.clone();
+            let piece_manager = self.piece.clone();
+            let download_progress_tx = download_progress_tx.clone();
+            let in_stream_tx = in_stream_tx.clone();
+            let object_storage = request.object_storage.clone();
+            let hdfs = request.hdfs.clone();
+            let permit = semaphore.clone().acquire_owned().await.unwrap();
+            join_set.spawn(async move {
+                let _permit = permit;
+                download_from_source(
+                    task_id,
+                    host_id,
+                    peer_id,
+                    interested_piece.number,
+                    url,
+                    interested_piece.offset,
+                    interested_piece.length,
+                    request_header,
+                    request.is_prefetch,
+                    request.need_piece_content,
+                    piece_manager,
+                    download_progress_tx,
+                    in_stream_tx,
+                    object_storage,
+                    hdfs,
+                )
+                .in_current_span()
+                .await
+            });
         }
 
         // Wait for the pieces to be downloaded.
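Note the moved .in_current_span(): it now wraps the call inside the spawned block and is awaited there, so the tracing span active at spawn time stays attached across the task boundary. A sketch assuming tracing's Instrument trait and the tracing-subscriber crate:

use tracing::{info, info_span, Instrument};

async fn fetch_piece(number: u32) {
    info!(number, "downloading piece");
}

#[tokio::main]
async fn main() {
    tracing_subscriber::fmt().init();

    let span = info_span!("download_task");
    async {
        // in_current_span() attaches the span that is current here, and the
        // instrumented future is awaited inside the spawned task.
        tokio::spawn(fetch_piece(1).in_current_span())
            .await
            .unwrap();
    }
    .instrument(span)
    .await;
}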
@@ -1454,7 +1466,7 @@ impl Task {
                     finished_pieces.push(metadata.clone());
                 }
                 Err(Error::BackendError(err)) => {
-                    join_set.detach_all();
+                    join_set.shutdown().await;
 
                     // Send the download piece http failed request.
                     in_stream_tx.send_timeout(AnnouncePeerRequest {
@@ -1483,7 +1495,7 @@ impl Task {
                     return Err(Error::BackendError(err));
                 }
                 Err(Error::SendTimeout) => {
-                    join_set.detach_all();
+                    join_set.shutdown().await;
 
                     // Send the download piece failed request.
                     in_stream_tx.send_timeout(AnnouncePeerRequest {
@@ -1510,7 +1522,7 @@ impl Task {
                     return Ok(finished_pieces);
                 }
                 Err(err) => {
-                    join_set.detach_all();
+                    join_set.shutdown().await;
 
                     // Send the download piece failed request.
                     in_stream_tx.send_timeout(AnnouncePeerRequest {
@@ -1684,8 +1696,7 @@ impl Task {
         let semaphore = Arc::new(Semaphore::new(
             self.config.download.concurrent_piece_count as usize,
         ));
-
-        for interested_piece in &interested_pieces {
+        for interested_piece in interested_pieces.clone() {
         async fn download_from_source(
             task_id: String,
             host_id: String,
@@ -1698,14 +1709,10 @@ impl Task {
             is_prefetch: bool,
             need_piece_content: bool,
             piece_manager: Arc<piece::Piece>,
-            semaphore: Arc<Semaphore>,
             download_progress_tx: Sender<Result<DownloadTaskResponse, Status>>,
             object_storage: Option<ObjectStorage>,
             hdfs: Option<Hdfs>,
         ) -> ClientResult<metadata::Piece> {
-            // Limit the concurrent download count.
-            let _permit = semaphore.acquire().await.unwrap();
-
             let piece_id = piece_manager.id(task_id.as_str(), number);
             info!("start to download piece {} from source", piece_id);
 
@@ -1790,26 +1797,37 @@ impl Task {
             Ok(metadata)
         }
 
-            join_set.spawn(
-                download_from_source(
-                    task_id.to_string(),
-                    host_id.to_string(),
-                    peer_id.to_string(),
-                    interested_piece.number,
-                    request.url.clone(),
-                    interested_piece.offset,
-                    interested_piece.length,
-                    request_header.clone(),
-                    request.is_prefetch,
-                    request.need_piece_content,
-                    self.piece.clone(),
-                    semaphore.clone(),
-                    download_progress_tx.clone(),
-                    request.object_storage.clone(),
-                    request.hdfs.clone(),
-                )
-                .in_current_span(),
-            );
+            let task_id = task_id.to_string();
+            let host_id = host_id.to_string();
+            let peer_id = peer_id.to_string();
+            let url = request.url.clone();
+            let request_header = request_header.clone();
+            let piece_manager = self.piece.clone();
+            let download_progress_tx = download_progress_tx.clone();
+            let object_storage = request.object_storage.clone();
+            let hdfs = request.hdfs.clone();
+            let permit = semaphore.clone().acquire_owned().await.unwrap();
+            join_set.spawn(async move {
+                let _permit = permit;
+                download_from_source(
+                    task_id,
+                    host_id,
+                    peer_id,
+                    interested_piece.number,
+                    url,
+                    interested_piece.offset,
+                    interested_piece.length,
+                    request_header,
+                    request.is_prefetch,
+                    request.need_piece_content,
+                    piece_manager,
+                    download_progress_tx,
+                    object_storage,
+                    hdfs,
+                )
+                .in_current_span()
+                .await
+            });
         }
 
         // Wait for the pieces to be downloaded.
@@ -1825,7 +1843,7 @@ impl Task {
                     finished_pieces.push(metadata.clone());
                 }
                 Err(err) => {
-                    join_set.detach_all();
+                    join_set.shutdown().await;
 
                     // If the download failed from the source, return the error.
                     // It will stop the download from the source.