refactor: change comments to rust style (#742)
Signed-off-by: Gaius <gaius.qi@gmail.com>
parent d9f15bdbfe
commit e2209a8e61
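Background for the change: Rust treats `///` as a doc comment that rustdoc attaches to the item that follows, while plain `//` comments are invisible to `cargo doc` and doc lints. A minimal illustration (not part of this commit):

// Ignored by rustdoc.
struct Plain;

/// Rendered by `cargo doc` as the documentation of `Documented`.
struct Documented;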
@@ -17,39 +17,39 @@
 use dragonfly_client_backend::{Backend, Body, GetRequest, GetResponse, HeadRequest, HeadResponse};
 use dragonfly_client_core::{Error, Result};

-// Hdfs is a struct that implements the Backend trait
+/// Hdfs is a struct that implements the Backend trait
 struct Hdfs;

-// Hdfs implements the Backend trait
+/// Hdfs implements the Backend trait
 impl Hdfs {
     pub fn new() -> Self {
         Self {}
     }
 }

-// Implement the Backend trait for Hdfs.
+/// Implement the Backend trait for Hdfs.
 #[tonic::async_trait]
 impl Backend for Hdfs {
-    // scheme returns the scheme of the backend.
+    /// scheme returns the scheme of the backend.
     fn scheme(&self) -> String {
         "hdfs".to_string()
     }

-    // head is an async function that takes a HeadRequest and returns a HeadResponse.
+    /// head is an async function that takes a HeadRequest and returns a HeadResponse.
     async fn head(&self, request: HeadRequest) -> Result<HeadResponse> {
         println!("HDFS head url: {}", request.url);
         Err(Error::Unimplemented)
     }

-    // get is an async function that takes a GetRequest and returns a GetResponse.
+    /// get is an async function that takes a GetRequest and returns a GetResponse.
     async fn get(&self, request: GetRequest) -> Result<GetResponse<Body>> {
         println!("HDFS get url: {}", request.url);
         Err(Error::Unimplemented)
     }
 }

-// register_plugin is a function that returns a Box<dyn Backend + Send + Sync>.
-// This function is used to register the HDFS plugin to the Backend.
+/// register_plugin is a function that returns a Box<dyn Backend + Send + Sync>.
+/// This function is used to register the HDFS plugin to the Backend.
 #[no_mangle]
 pub fn register_plugin() -> Box<dyn Backend + Send + Sync> {
     Box::new(Hdfs::new())
@@ -22,15 +22,15 @@ use std::io::{Error as IOError, ErrorKind};
 use tokio_util::io::StreamReader;
 use tracing::{error, info, instrument};

-// HTTP is the HTTP backend.
+/// HTTP is the HTTP backend.
 pub struct HTTP {
-    // scheme is the scheme of the HTTP backend.
+    /// scheme is the scheme of the HTTP backend.
     scheme: String,
 }

-// HTTP implements the http interface.
+/// HTTP implements the http interface.
 impl HTTP {
-    // new returns a new HTTP.
+    /// new returns a new HTTP.
     #[instrument(skip_all)]
     pub fn new(scheme: &str) -> HTTP {
         Self {
@@ -38,7 +38,7 @@ impl HTTP {
         }
     }

-    // client returns a new reqwest client.
+    /// client returns a new reqwest client.
     #[instrument(skip_all)]
     fn client(
         &self,
@@ -68,16 +68,16 @@ impl HTTP {
     }
 }

-// Backend implements the Backend trait.
+/// Backend implements the Backend trait.
 #[tonic::async_trait]
 impl super::Backend for HTTP {
-    // scheme returns the scheme of the HTTP backend.
+    /// scheme returns the scheme of the HTTP backend.
     #[instrument(skip_all)]
     fn scheme(&self) -> String {
         self.scheme.clone()
     }

-    // head gets the header of the request.
+    /// head gets the header of the request.
     #[instrument(skip_all)]
     async fn head(&self, request: super::HeadRequest) -> Result<super::HeadResponse> {
         info!(
@@ -124,7 +124,7 @@ impl super::Backend for HTTP {
         })
     }

-    // get gets the content of the request.
+    /// get gets the content of the request.
     #[instrument(skip_all)]
     async fn get(&self, request: super::GetRequest) -> Result<super::GetResponse<super::Body>> {
         info!(
@@ -171,9 +171,9 @@ impl super::Backend for HTTP {
     }
 }

-// Default implements the Default trait.
+/// Default implements the Default trait.
 impl Default for HTTP {
-    // default returns a new default HTTP.
+    /// default returns a new default HTTP.
     fn default() -> Self {
         Self::new("http")
     }
@@ -32,104 +32,104 @@ use url::Url;
 pub mod http;
 pub mod object_storage;

-// NAME is the name of the package.
+/// NAME is the name of the package.
 pub const NAME: &str = "backend";

-// Body is the body of the response.
+/// Body is the body of the response.
 pub type Body = Box<dyn AsyncRead + Send + Unpin>;

-// HeadRequest is the head request for backend.
+/// HeadRequest is the head request for backend.
 pub struct HeadRequest {
-    // task_id is the id of the task.
+    /// task_id is the id of the task.
     pub task_id: String,

-    // url is the url of the request.
+    /// url is the url of the request.
     pub url: String,

-    // http_header is the headers of the request.
+    /// http_header is the headers of the request.
     pub http_header: Option<HeaderMap>,

-    // timeout is the timeout of the request.
+    /// timeout is the timeout of the request.
     pub timeout: Duration,

-    // client_certs is the client certificates for the request.
+    /// client_certs is the client certificates for the request.
     pub client_certs: Option<Vec<CertificateDer<'static>>>,

-    // object_storage is the object storage related information.
+    /// object_storage is the object storage related information.
     pub object_storage: Option<ObjectStorage>,
 }

-// HeadResponse is the head response for backend.
+/// HeadResponse is the head response for backend.
 #[derive(Debug)]
 pub struct HeadResponse {
-    // success is the success of the response.
+    /// success is the success of the response.
     pub success: bool,

-    // content_length is the content length of the response.
+    /// content_length is the content length of the response.
     pub content_length: Option<u64>,

-    // http_header is the headers of the response.
+    /// http_header is the headers of the response.
     pub http_header: Option<HeaderMap>,

-    // http_status_code is the status code of the response.
+    /// http_status_code is the status code of the response.
     pub http_status_code: Option<reqwest::StatusCode>,

-    // Entries is the information of the entries in the directory.
+    /// Entries is the information of the entries in the directory.
     pub entries: Vec<DirEntry>,

-    // error_message is the error message of the response.
+    /// error_message is the error message of the response.
     pub error_message: Option<String>,
 }

-// GetRequest is the get request for backend.
+/// GetRequest is the get request for backend.
 pub struct GetRequest {
-    // task_id is the id of the task.
+    /// task_id is the id of the task.
     pub task_id: String,

-    // piece_id is the id of the piece.
+    /// piece_id is the id of the piece.
     pub piece_id: String,

-    // url is the url of the request.
+    /// url is the url of the request.
     pub url: String,

-    // range is the range of the request.
+    /// range is the range of the request.
     pub range: Option<Range>,

-    // http_header is the headers of the request.
+    /// http_header is the headers of the request.
     pub http_header: Option<HeaderMap>,

-    // timeout is the timeout of the request.
+    /// timeout is the timeout of the request.
     pub timeout: Duration,

-    // client_certs is the client certificates for the request.
+    /// client_certs is the client certificates for the request.
     pub client_certs: Option<Vec<CertificateDer<'static>>>,

-    // the object storage related information.
+    /// the object storage related information.
     pub object_storage: Option<ObjectStorage>,
 }

-// GetResponse is the get response for backend.
+/// GetResponse is the get response for backend.
 pub struct GetResponse<R>
 where
     R: AsyncRead + Unpin,
 {
-    // success is the success of the response.
+    /// success is the success of the response.
     pub success: bool,

-    // http_header is the headers of the response.
+    /// http_header is the headers of the response.
     pub http_header: Option<HeaderMap>,

-    // http_status_code is the status code of the response.
+    /// http_status_code is the status code of the response.
     pub http_status_code: Option<reqwest::StatusCode>,

-    // body is the content of the response.
+    /// body is the content of the response.
     pub reader: R,

-    // error_message is the error message of the response.
+    /// error_message is the error message of the response.
     pub error_message: Option<String>,
 }

-// GetResponse implements the response functions.
+/// GetResponse implements the response functions.
 impl<R> GetResponse<R>
 where
     R: AsyncRead + Unpin,
@@ -146,64 +146,64 @@ where
 /// The File Entry of a directory, including some relevant file metadata.
 #[derive(Debug, PartialEq, Eq)]
 pub struct DirEntry {
-    // url is the url of the entry.
+    /// url is the url of the entry.
     pub url: String,

-    // content_length is the content length of the entry.
+    /// content_length is the content length of the entry.
     pub content_length: usize,

-    // is_dir is the flag of the entry is a directory.
+    /// is_dir is the flag of the entry is a directory.
     pub is_dir: bool,
 }

-// Backend is the interface of the backend.
+/// Backend is the interface of the backend.
 #[tonic::async_trait]
 pub trait Backend {
-    // scheme returns the scheme of the backend.
+    /// scheme returns the scheme of the backend.
     fn scheme(&self) -> String;

-    // head gets the header of the request.
+    /// head gets the header of the request.
     async fn head(&self, request: HeadRequest) -> Result<HeadResponse>;

-    // get gets the content of the request.
+    /// get gets the content of the request.
     async fn get(&self, request: GetRequest) -> Result<GetResponse<Body>>;
 }

-// BackendFactory is the factory of the backend.
+/// BackendFactory is the factory of the backend.
 #[derive(Default)]
 pub struct BackendFactory {
-    // backends is the backends of the factory, including the plugin backends and
-    // the builtin backends.
+    /// backends is the backends of the factory, including the plugin backends and
+    /// the builtin backends.
     backends: HashMap<String, Box<dyn Backend + Send + Sync>>,
-    // libraries is used to store the plugin's dynamic library, because when not saving the `Library`,
-    // it will drop when out of scope, resulting in the null pointer error.
+    /// libraries is used to store the plugin's dynamic library, because when not saving the `Library`,
+    /// it will drop when out of scope, resulting in the null pointer error.
     libraries: Vec<Library>,
 }

-// BackendFactory implements the factory of the backend. It supports loading builtin
-// backends and plugin backends.
-//
-// The builtin backends are http, https, etc, which are implemented
-// by the HTTP struct.
-//
-// The plugin backends are shared libraries, which are loaded
-// by the `register_plugin` function. The file name of the shared
-// library is the scheme of the backend. The shared library
-// should implement the Backend trait. Default plugin directory
-// is `/var/lib/dragonfly/plugins/` in linux and `~/.dragonfly/plugins`
-// in macos. The plugin directory can be set by the dfdaemon configuration.
-//
-// For example:
-// If implement a plugin backend named `hdfs`, the shared library
-// should be named `libhdfs.so` or `libhdfs.dylib` and move the file to the backend plugin directory
-// `/var/lib/dragonfly/plugins/backend/` in linux or `~/.dragonfly/plugins/backend/`
-// in macos. When the dfdaemon starts, it will load the `hdfs` plugin backend in the
-// backend plugin directory. So the dfdaemon or dfget can use the `hdfs` plugin backend
-// to download the file by the url `hdfs://example.com/file`.
-// The backend plugin implementation can refer to
-// https://github.com/dragonflyoss/client/tree/main/dragonfly-client-backend/examples/plugin/.
+/// BackendFactory implements the factory of the backend. It supports loading builtin
+/// backends and plugin backends.
+///
+/// The builtin backends are http, https, etc, which are implemented
+/// by the HTTP struct.
+///
+/// The plugin backends are shared libraries, which are loaded
+/// by the `register_plugin` function. The file name of the shared
+/// library is the scheme of the backend. The shared library
+/// should implement the Backend trait. Default plugin directory
+/// is `/var/lib/dragonfly/plugins/` in linux and `~/.dragonfly/plugins`
+/// in macos. The plugin directory can be set by the dfdaemon configuration.
+///
+/// For example:
+/// If implement a plugin backend named `hdfs`, the shared library
+/// should be named `libhdfs.so` or `libhdfs.dylib` and move the file to the backend plugin directory
+/// `/var/lib/dragonfly/plugins/backend/` in linux or `~/.dragonfly/plugins/backend/`
+/// in macos. When the dfdaemon starts, it will load the `hdfs` plugin backend in the
+/// backend plugin directory. So the dfdaemon or dfget can use the `hdfs` plugin backend
+/// to download the file by the url `hdfs://example.com/file`.
+/// The backend plugin implementation can refer to
+/// https://github.com/dragonflyoss/client/tree/main/dragonfly-client-backend/examples/plugin/.
 impl BackendFactory {
-    // new returns a new BackendFactory.
+    /// new returns a new BackendFactory.
     #[instrument(skip_all)]
     pub fn new(plugin_dir: Option<&Path>) -> Result<Self> {
         let mut backend_factory = Self::default();
@@ -220,7 +220,7 @@ impl BackendFactory {
         Ok(backend_factory)
     }

-    // build returns the backend by the scheme of the url.
+    /// build returns the backend by the scheme of the url.
     #[instrument(skip_all)]
     pub fn build(&self, url: &str) -> Result<&(dyn Backend + Send + Sync)> {
         let url = Url::parse(url).or_err(ErrorType::ParseError)?;
@@ -231,7 +231,7 @@ impl BackendFactory {
             .ok_or(Error::InvalidParameter)
     }

-    // load_builtin_backends loads the builtin backends.
+    /// load_builtin_backends loads the builtin backends.
     #[instrument(skip_all)]
     fn load_builtin_backends(&mut self) {
         self.backends
@@ -291,7 +291,7 @@ impl BackendFactory {
         info!("load [cos] builtin backend");
     }

-    // load_plugin_backends loads the plugin backends.
+    /// load_plugin_backends loads the plugin backends.
     #[instrument(skip_all)]
     fn load_plugin_backends(&mut self, plugin_dir: &Path) -> Result<()> {
         let backend_plugin_dir = plugin_dir.join(NAME);
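For orientation, a hedged caller-side sketch of the factory API touched above; the request fields, directories, and signatures are taken from this diff, while the surrounding setup (task id, URL, timeout values) is assumed for illustration and is not code from this commit:

use std::{path::Path, time::Duration};

use dragonfly_client_backend::{BackendFactory, HeadRequest};

// Assumed usage sketch, not part of the commit.
async fn head_via_plugin() -> dragonfly_client_core::Result<()> {
    // Loads the builtin backends plus any plugins found under
    // /var/lib/dragonfly/plugins/backend/ (per the doc comment above).
    let factory = BackendFactory::new(Some(Path::new("/var/lib/dragonfly/plugins/")))?;

    // build() selects the backend by the URL scheme, e.g. the `hdfs` plugin.
    let backend = factory.build("hdfs://example.com/file")?;
    let response = backend
        .head(HeadRequest {
            task_id: "example-task".to_string(),
            url: "hdfs://example.com/file".to_string(),
            http_header: None,
            timeout: Duration::from_secs(30),
            client_certs: None,
            object_storage: None,
        })
        .await?;
    println!("content length: {:?}", response.content_length);
    Ok(())
}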
@@ -27,31 +27,31 @@ use tokio_util::io::StreamReader;
 use tracing::{error, info, instrument};
 use url::Url;

-// Scheme is the scheme of the object storage.
+/// Scheme is the scheme of the object storage.
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub enum Scheme {
-    // S3 is the Amazon Simple Storage Service.
+    /// S3 is the Amazon Simple Storage Service.
     S3,

-    // GCS is the Google Cloud Storage Service.
+    /// GCS is the Google Cloud Storage Service.
     GCS,

-    // ABS is the Azure Blob Storage Service.
+    /// ABS is the Azure Blob Storage Service.
     ABS,

-    // OSS is the Aliyun Object Storage Service.
+    /// OSS is the Aliyun Object Storage Service.
     OSS,

-    // OBS is the Huawei Cloud Object Storage Service.
+    /// OBS is the Huawei Cloud Object Storage Service.
     OBS,

-    // COS is the Tencent Cloud Object Storage Service.
+    /// COS is the Tencent Cloud Object Storage Service.
     COS,
 }

-// Scheme implements the Display.
+/// Scheme implements the Display.
 impl fmt::Display for Scheme {
-    // fmt formats the value using the given formatter.
+    /// fmt formats the value using the given formatter.
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
             Scheme::S3 => write!(f, "s3"),
@@ -64,11 +64,11 @@ impl fmt::Display for Scheme {
     }
 }

-// Scheme implements the FromStr.
+/// Scheme implements the FromStr.
 impl FromStr for Scheme {
     type Err = String;

-    // from_str parses an scheme string.
+    /// from_str parses an scheme string.
     fn from_str(s: &str) -> Result<Self, Self::Err> {
         match s {
             "s3" => Ok(Scheme::S3),
@@ -82,30 +82,30 @@ impl FromStr for Scheme {
     }
 }

-// ParsedURL is a struct that contains the parsed URL, bucket, and path.
+/// ParsedURL is a struct that contains the parsed URL, bucket, and path.
 #[derive(Debug)]
 pub struct ParsedURL {
-    // url is the requested URL of the object storage.
+    /// url is the requested URL of the object storage.
     pub url: Url,

-    // scheme is the scheme of the object storage.
+    /// scheme is the scheme of the object storage.
     pub scheme: Scheme,

-    // bucket is the bucket of the object storage.
+    /// bucket is the bucket of the object storage.
     pub bucket: String,

-    // key is the key of the object storage.
+    /// key is the key of the object storage.
     pub key: String,
 }

-// ParsedURL implements the ParsedURL trait.
+/// ParsedURL implements the ParsedURL trait.
 impl ParsedURL {
-    // is_dir returns true if the URL path ends with a slash.
+    /// is_dir returns true if the URL path ends with a slash.
     pub fn is_dir(&self) -> bool {
         self.url.path().ends_with('/')
     }

-    // make_url_by_entry_path makes a URL by the entry path when the URL is a directory.
+    /// make_url_by_entry_path makes a URL by the entry path when the URL is a directory.
     pub fn make_url_by_entry_path(&self, entry_path: &str) -> Url {
         let mut url = self.url.clone();
         url.set_path(entry_path);
@@ -113,13 +113,13 @@ impl ParsedURL {
     }
 }

-// ParsedURL implements the TryFrom trait for the URL.
-//
-// The object storage URL should be in the format of `scheme://<bucket>/<path>`.
+/// ParsedURL implements the TryFrom trait for the URL.
+///
+/// The object storage URL should be in the format of `scheme://<bucket>/<path>`.
 impl TryFrom<Url> for ParsedURL {
     type Error = ClientError;

-    // try_from parses the URL and returns a ParsedURL.
+    /// try_from parses the URL and returns a ParsedURL.
     fn try_from(url: Url) -> Result<Self, Self::Error> {
         // Get the bucket from the URL host.
         let bucket = url
@@ -150,7 +150,7 @@ impl TryFrom<Url> for ParsedURL {
     }
 }

-// make_need_fields_message makes a message for the need fields in the object storage.
+/// make_need_fields_message makes a message for the need fields in the object storage.
 macro_rules! make_need_fields_message {
     ($var:ident {$($field:ident),*}) => {{
         let mut need_fields: Vec<&'static str> = vec![];
@@ -165,21 +165,21 @@ macro_rules! make_need_fields_message {
     }};
 }

-// ObjectStorage is a struct that implements the backend trait.
+/// ObjectStorage is a struct that implements the backend trait.
 pub struct ObjectStorage {
-    // scheme is the scheme of the object storage.
+    /// scheme is the scheme of the object storage.
     scheme: Scheme,
 }

-// ObjectStorage implements the ObjectStorage trait.
+/// ObjectStorage implements the ObjectStorage trait.
 impl ObjectStorage {
-    // Returns ObjectStorage that implements the Backend trait.
+    /// Returns ObjectStorage that implements the Backend trait.
     #[instrument(skip_all)]
     pub fn new(scheme: Scheme) -> ObjectStorage {
         Self { scheme }
     }

-    // operator initializes the operator with the parsed URL and object storage.
+    /// operator initializes the operator with the parsed URL and object storage.
     #[instrument(skip_all)]
     pub fn operator(
         &self,
@@ -206,7 +206,7 @@ impl ObjectStorage {
         }
     }

-    // s3_operator initializes the S3 operator with the parsed URL and object storage.
+    /// s3_operator initializes the S3 operator with the parsed URL and object storage.
     #[instrument(skip_all)]
     pub fn s3_operator(
         &self,
@@ -260,7 +260,7 @@ impl ObjectStorage {
         Ok(Operator::new(builder)?.finish())
     }

-    // gcs_operator initializes the GCS operator with the parsed URL and object storage.
+    /// gcs_operator initializes the GCS operator with the parsed URL and object storage.
     #[instrument(skip_all)]
     pub fn gcs_operator(
         &self,
@@ -296,7 +296,7 @@ impl ObjectStorage {
         Ok(Operator::new(builder)?.finish())
     }

-    // abs_operator initializes the ABS operator with the parsed URL and object storage.
+    /// abs_operator initializes the ABS operator with the parsed URL and object storage.
     #[instrument(skip_all)]
     pub fn abs_operator(
         &self,
@@ -340,7 +340,7 @@ impl ObjectStorage {
         Ok(Operator::new(builder)?.finish())
     }

-    // oss_operator initializes the OSS operator with the parsed URL and object storage.
+    /// oss_operator initializes the OSS operator with the parsed URL and object storage.
     #[instrument(skip_all)]
     pub fn oss_operator(
         &self,
@@ -385,7 +385,7 @@ impl ObjectStorage {
         Ok(Operator::new(builder)?.finish())
     }

-    // obs_operator initializes the OBS operator with the parsed URL and object storage.
+    /// obs_operator initializes the OBS operator with the parsed URL and object storage.
     #[instrument(skip_all)]
     pub fn obs_operator(
         &self,
@@ -429,7 +429,7 @@ impl ObjectStorage {
         Ok(Operator::new(builder)?.finish())
     }

-    // cos_operator initializes the COS operator with the parsed URL and object storage.
+    /// cos_operator initializes the COS operator with the parsed URL and object storage.
     pub fn cos_operator(
         &self,
         parsed_url: &super::object_storage::ParsedURL,
@@ -473,16 +473,16 @@ impl ObjectStorage {
     }
 }

-// Backend implements the Backend trait.
+/// Backend implements the Backend trait.
 #[tonic::async_trait]
 impl crate::Backend for ObjectStorage {
-    // scheme returns the scheme of the object storage.
+    /// scheme returns the scheme of the object storage.
     #[instrument(skip_all)]
     fn scheme(&self) -> String {
         self.scheme.to_string()
     }

-    //head gets the header of the request.
+    /// head gets the header of the request.
     #[instrument(skip_all)]
     async fn head(&self, request: super::HeadRequest) -> ClientResult<super::HeadResponse> {
         info!(
@@ -568,7 +568,7 @@ impl crate::Backend for ObjectStorage {
         })
     }

-    // Returns content of requested file.
+    /// Returns content of requested file.
     #[instrument(skip_all)]
     async fn get(
         &self,
@@ -16,16 +16,16 @@

 use std::path::PathBuf;

-// NAME is the name of dfcache.
+/// NAME is the name of dfcache.
 pub const NAME: &str = "dfcache";

-// default_dfcache_log_dir is the default log directory for dfcache.
+/// default_dfcache_log_dir is the default log directory for dfcache.
 #[inline]
 pub fn default_dfcache_log_dir() -> PathBuf {
     crate::default_log_dir().join(NAME)
 }

-// default_dfcache_persistent_replica_count is the default replica count of the persistent cache task.
+/// default_dfcache_persistent_replica_count is the default replica count of the persistent cache task.
 #[inline]
 pub fn default_dfcache_persistent_replica_count() -> u64 {
     2
(One file's diff is suppressed here because it is too large.)
@@ -16,10 +16,10 @@

 use std::path::PathBuf;

-// NAME is the name of dfget.
+/// NAME is the name of dfget.
 pub const NAME: &str = "dfget";

-// default_dfget_log_dir is the default log directory for dfget.
+/// default_dfget_log_dir is the default log directory for dfget.
 pub fn default_dfget_log_dir() -> PathBuf {
     crate::default_log_dir().join(NAME)
 }
@@ -24,40 +24,40 @@ use std::path::PathBuf;
 use tracing::{info, instrument};
 use validator::Validate;

-// NAME is the name of dfinit.
+/// NAME is the name of dfinit.
 pub const NAME: &str = "dfinit";

-// default_dfinit_config_path is the default config path for dfinit.
+/// default_dfinit_config_path is the default config path for dfinit.
 #[inline]
 pub fn default_dfinit_config_path() -> PathBuf {
     crate::default_config_dir().join("dfinit.yaml")
 }

-// default_dfinit_log_dir is the default log directory for dfinit.
+/// default_dfinit_log_dir is the default log directory for dfinit.
 pub fn default_dfinit_log_dir() -> PathBuf {
     crate::default_log_dir().join(NAME)
 }

-// default_container_runtime_containerd_config_path is the default containerd configuration path.
+/// default_container_runtime_containerd_config_path is the default containerd configuration path.
 #[inline]
 fn default_container_runtime_containerd_config_path() -> PathBuf {
     PathBuf::from("/etc/containerd/config.toml")
 }

-// default_container_runtime_docker_config_path is the default docker configuration path.
+/// default_container_runtime_docker_config_path is the default docker configuration path.
 #[inline]
 fn default_container_runtime_docker_config_path() -> PathBuf {
     PathBuf::from("/etc/docker/daemon.json")
 }

-// default_container_runtime_crio_config_path is the default cri-o configuration path.
+/// default_container_runtime_crio_config_path is the default cri-o configuration path.
 #[inline]
 fn default_container_runtime_crio_config_path() -> PathBuf {
     PathBuf::from("/etc/containers/registries.conf")
 }

-// default_container_runtime_crio_unqualified_search_registries is the default unqualified search registries of cri-o,
-// refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#global-settings.
+/// default_container_runtime_crio_unqualified_search_registries is the default unqualified search registries of cri-o,
+/// refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#global-settings.
 #[inline]
 fn default_container_runtime_crio_unqualified_search_registries() -> Vec<String> {
     vec![
@@ -67,7 +67,7 @@ fn default_container_runtime_crio_unqualified_search_registries() -> Vec<String>
     ]
 }

-// default_proxy_addr is the default proxy address of dfdaemon.
+/// default_proxy_addr is the default proxy address of dfdaemon.
 #[inline]
 fn default_proxy_addr() -> String {
     format!(
@@ -77,95 +77,95 @@ fn default_proxy_addr() -> String {
     )
 }

-// default_container_runtime_containerd_registry_host_capabilities is the default
-// capabilities of the containerd registry.
+/// default_container_runtime_containerd_registry_host_capabilities is the default
+/// capabilities of the containerd registry.
 #[inline]
 fn default_container_runtime_containerd_registry_capabilities() -> Vec<String> {
     vec!["pull".to_string(), "resolve".to_string()]
 }

-// Registry is the registry configuration for containerd.
+/// Registry is the registry configuration for containerd.
 #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)]
 #[serde(default, rename_all = "camelCase")]
 pub struct ContainerdRegistry {
-    // host_namespace is the location where container images and artifacts are sourced,
-    // refer to https://github.com/containerd/containerd/blob/main/docs/hosts.md#registry-host-namespace.
-    // The registry host namespace portion is [registry_host_name|IP address][:port], such as
-    // docker.io, ghcr.io, gcr.io, etc.
+    /// host_namespace is the location where container images and artifacts are sourced,
+    /// refer to https://github.com/containerd/containerd/blob/main/docs/hosts.md#registry-host-namespace.
+    /// The registry host namespace portion is [registry_host_name|IP address][:port], such as
+    /// docker.io, ghcr.io, gcr.io, etc.
     pub host_namespace: String,

-    // server_addr specifies the default server for this registry host namespace, refer to
-    // https://github.com/containerd/containerd/blob/main/docs/hosts.md#server-field.
+    /// server_addr specifies the default server for this registry host namespace, refer to
+    /// https://github.com/containerd/containerd/blob/main/docs/hosts.md#server-field.
     pub server_addr: String,

-    // capabilities is the list of capabilities in containerd configuration, refer to
-    // https://github.com/containerd/containerd/blob/main/docs/hosts.md#capabilities-field.
+    /// capabilities is the list of capabilities in containerd configuration, refer to
+    /// https://github.com/containerd/containerd/blob/main/docs/hosts.md#capabilities-field.
     #[serde(default = "default_container_runtime_containerd_registry_capabilities")]
     pub capabilities: Vec<String>,

-    // skip_verify is the flag to skip verifying the server's certificate, refer to
-    // https://github.com/containerd/containerd/blob/main/docs/hosts.md#bypass-tls-verification-example.
+    /// skip_verify is the flag to skip verifying the server's certificate, refer to
+    /// https://github.com/containerd/containerd/blob/main/docs/hosts.md#bypass-tls-verification-example.
     pub skip_verify: Option<bool>,

-    // ca (Certificate Authority Certification) can be set to a path or an array of paths each pointing
-    // to a ca file for use in authenticating with the registry namespace, refer to
-    // https://github.com/containerd/containerd/blob/main/docs/hosts.md#ca-field.
+    /// ca (Certificate Authority Certification) can be set to a path or an array of paths each pointing
+    /// to a ca file for use in authenticating with the registry namespace, refer to
+    /// https://github.com/containerd/containerd/blob/main/docs/hosts.md#ca-field.
     pub ca: Option<Vec<String>>,
 }

-// Containerd is the containerd configuration for dfinit.
+/// Containerd is the containerd configuration for dfinit.
 #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)]
 #[serde(default, rename_all = "camelCase")]
 pub struct Containerd {
-    // config_path is the path of containerd configuration file.
+    /// config_path is the path of containerd configuration file.
     #[serde(default = "default_container_runtime_containerd_config_path")]
     pub config_path: PathBuf,

-    // registries is the list of containerd registries.
+    /// registries is the list of containerd registries.
     pub registries: Vec<ContainerdRegistry>,
 }

-// CRIORegistry is the registry configuration for cri-o.
+/// CRIORegistry is the registry configuration for cri-o.
 #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize, PartialEq, Eq)]
 #[serde(default, rename_all = "camelCase")]
 pub struct CRIORegistry {
-    // prefix is the prefix of the user-specified image name, refer to
-    // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table.
+    /// prefix is the prefix of the user-specified image name, refer to
+    /// https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table.
     pub prefix: String,

-    // location accepts the same format as the prefix field, and specifies the physical location of the prefix-rooted namespace,
-    // refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#remapping-and-mirroring-registries.
+    /// location accepts the same format as the prefix field, and specifies the physical location of the prefix-rooted namespace,
+    /// refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#remapping-and-mirroring-registries.
     pub location: String,
 }

-// CRIO is the cri-o configuration for dfinit.
+/// CRIO is the cri-o configuration for dfinit.
 #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)]
 #[serde(default, rename_all = "camelCase")]
 pub struct CRIO {
-    // config_path is the path of cri-o registries's configuration file.
+    /// config_path is the path of cri-o registries's configuration file.
     #[serde(default = "default_container_runtime_crio_config_path")]
     pub config_path: PathBuf,

-    // unqualified_search_registries is an array of host[:port] registries to try when pulling an unqualified image, in order.
-    // Refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#global-settings.
+    /// unqualified_search_registries is an array of host[:port] registries to try when pulling an unqualified image, in order.
+    /// Refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#global-settings.
     #[serde(default = "default_container_runtime_crio_unqualified_search_registries")]
     pub unqualified_search_registries: Vec<String>,

-    // registries is the list of cri-o registries, refer to
-    // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#namespaced-registry-settings.
+    /// registries is the list of cri-o registries, refer to
+    /// https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#namespaced-registry-settings.
     pub registries: Vec<CRIORegistry>,
 }

-// Docker is the docker configuration for dfinit.
+/// Docker is the docker configuration for dfinit.
 #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)]
 #[serde(default, rename_all = "camelCase")]
 pub struct Docker {
-    // config_path is the path of docker configuration file.
+    /// config_path is the path of docker configuration file.
     #[serde(default = "default_container_runtime_docker_config_path")]
     pub config_path: PathBuf,
 }

-// ContainerRuntime is the container runtime configuration for dfinit.
+/// ContainerRuntime is the container runtime configuration for dfinit.
 #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)]
 #[serde(default, rename_all = "camelCase")]
 pub struct ContainerRuntime {
@@ -173,7 +173,7 @@ pub struct ContainerRuntime {
     pub config: Option<ContainerRuntimeConfig>,
 }

-// ContainerRuntimeConfig is the container runtime configuration for dfinit.
+/// ContainerRuntimeConfig is the container runtime configuration for dfinit.
 #[derive(Debug, Clone)]
 pub enum ContainerRuntimeConfig {
     Containerd(Containerd),
@@ -181,7 +181,7 @@ pub enum ContainerRuntimeConfig {
     CRIO(CRIO),
 }

-// Serialize is the implementation of the Serialize trait for ContainerRuntimeConfig.
+/// Serialize is the implementation of the Serialize trait for ContainerRuntimeConfig.
 impl Serialize for ContainerRuntimeConfig {
     fn serialize<S>(&self, serializer: S) -> std::prelude::v1::Result<S::Ok, S::Error>
     where
@@ -207,7 +207,7 @@ impl Serialize for ContainerRuntimeConfig {
     }
 }

-// Deserialize is the implementation of the Deserialize trait for ContainerRuntimeConfig.
+/// Deserialize is the implementation of the Deserialize trait for ContainerRuntimeConfig.
 impl<'de> Deserialize<'de> for ContainerRuntimeConfig {
     fn deserialize<D>(deserializer: D) -> std::prelude::v1::Result<Self, D::Error>
     where
@@ -241,7 +241,7 @@ impl<'de> Deserialize<'de> for ContainerRuntimeConfig {
     }
 }

-// Proxy is the proxy server configuration for dfdaemon.
+/// Proxy is the proxy server configuration for dfdaemon.
 #[derive(Debug, Clone, Validate, Deserialize, Serialize)]
 #[serde(default, rename_all = "camelCase")]
 pub struct Proxy {
@@ -250,7 +250,7 @@ pub struct Proxy {
     pub addr: String,
 }

-// Proxy implements Default.
+/// Proxy implements Default.
 impl Default for Proxy {
     fn default() -> Self {
         Self {
@@ -259,22 +259,22 @@ impl Default for Proxy {
     }
 }

-// Config is the configuration for dfinit.
+/// Config is the configuration for dfinit.
 #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)]
 #[serde(default, rename_all = "camelCase")]
 pub struct Config {
-    // proxy is the configuration of the dfdaemon's HTTP/HTTPS proxy.
+    /// proxy is the configuration of the dfdaemon's HTTP/HTTPS proxy.
     #[validate]
     pub proxy: Proxy,

-    // container_runtime is the container runtime configuration.
+    /// container_runtime is the container runtime configuration.
     #[validate]
     pub container_runtime: ContainerRuntime,
 }

-// Config implements the config operation of dfinit.
+/// Config implements the config operation of dfinit.
 impl Config {
-    // load loads configuration from file.
+    /// load loads configuration from file.
     #[instrument(skip_all)]
     pub fn load(path: &PathBuf) -> Result<Config> {
         // Load configuration from file.
@@ -16,10 +16,10 @@

 use std::path::PathBuf;

-// NAME is the name of dfstore.
+/// NAME is the name of dfstore.
 pub const NAME: &str = "dfstore";

-// default_dfstore_log_dir is the default log directory for dfstore.
+/// default_dfstore_log_dir is the default log directory for dfstore.
 pub fn default_dfstore_log_dir() -> PathBuf {
     crate::default_log_dir().join(NAME)
 }
@@ -22,22 +22,22 @@ pub mod dfget;
 pub mod dfinit;
 pub mod dfstore;

-// SERVICE_NAME is the name of the service.
+/// SERVICE_NAME is the name of the service.
 pub const SERVICE_NAME: &str = "dragonfly";

-// NAME is the name of the package.
+/// NAME is the name of the package.
 pub const NAME: &str = "client";

-// CARGO_PKG_VERSION is the version of the cargo package.
+/// CARGO_PKG_VERSION is the version of the cargo package.
 pub const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION");

-// CARGO_PKG_RUSTC_VERSION is the minimum Rust version supported by the package, not the current Rust version.
+/// CARGO_PKG_RUSTC_VERSION is the minimum Rust version supported by the package, not the current Rust version.
 pub const CARGO_PKG_RUSTC_VERSION: &str = env!("CARGO_PKG_RUST_VERSION");

-// GIT_HASH is the git hash of the package.
+/// GIT_HASH is the git hash of the package.
 pub const GIT_HASH: Option<&str> = option_env!("GIT_HASH");

-// default_root_dir is the default root directory for client.
+/// default_root_dir is the default root directory for client.
 pub fn default_root_dir() -> PathBuf {
     #[cfg(target_os = "linux")]
     return PathBuf::from("/var/run/dragonfly/");
@@ -46,7 +46,7 @@ pub fn default_root_dir() -> PathBuf {
     return home::home_dir().unwrap().join(".dragonfly");
 }

-// default_config_dir is the default config directory for client.
+/// default_config_dir is the default config directory for client.
 pub fn default_config_dir() -> PathBuf {
     #[cfg(target_os = "linux")]
     return PathBuf::from("/etc/dragonfly/");
@@ -55,7 +55,7 @@ pub fn default_config_dir() -> PathBuf {
     return home::home_dir().unwrap().join(".dragonfly").join("config");
 }

-// default_log_dir is the default log directory for client.
+/// default_log_dir is the default log directory for client.
 pub fn default_log_dir() -> PathBuf {
     #[cfg(target_os = "linux")]
     return PathBuf::from("/var/log/dragonfly/");
@@ -64,7 +64,7 @@ pub fn default_log_dir() -> PathBuf {
     return home::home_dir().unwrap().join(".dragonfly").join("logs");
 }

-// default_storage_dir is the default storage directory for client.
+/// default_storage_dir is the default storage directory for client.
 pub fn default_storage_dir() -> PathBuf {
     #[cfg(target_os = "linux")]
     return PathBuf::from("/var/lib/dragonfly/");
@@ -73,7 +73,7 @@ pub fn default_storage_dir() -> PathBuf {
     return home::home_dir().unwrap().join(".dragonfly").join("storage");
 }

-// default_lock_dir is the default lock directory for client.
+/// default_lock_dir is the default lock directory for client.
 pub fn default_lock_dir() -> PathBuf {
     #[cfg(target_os = "linux")]
     return PathBuf::from("/var/lock/dragonfly/");
@@ -82,7 +82,7 @@ pub fn default_lock_dir() -> PathBuf {
     return home::home_dir().unwrap().join(".dragonfly");
 }

-// default_plugin_dir is the default plugin directory for client.
+/// default_plugin_dir is the default plugin directory for client.
 pub fn default_plugin_dir() -> PathBuf {
     #[cfg(target_os = "linux")]
     return PathBuf::from("/var/lib/dragonfly/plugins/");
@@ -91,7 +91,7 @@ pub fn default_plugin_dir() -> PathBuf {
     return home::home_dir().unwrap().join(".dragonfly").join("plugins");
 }

-// default_cache_dir is the default cache directory for client.
+/// default_cache_dir is the default cache directory for client.
 pub fn default_cache_dir() -> PathBuf {
     #[cfg(target_os = "linux")]
     return PathBuf::from("/var/cache/dragonfly/");
@@ -18,7 +18,7 @@ use std::{error::Error as ErrorTrait, fmt};

 use super::message::Message;

-// ErrorType is the type of the error.
+/// ErrorType is the type of the error.
 #[derive(Debug, PartialEq, Eq, Clone)]
 pub enum ErrorType {
     StorageError,
@@ -34,9 +34,9 @@ pub enum ErrorType {
     PluginError,
 }

-// ErrorType implements the display for the error type.
+/// ErrorType implements the display for the error type.
 impl ErrorType {
-    // as_str returns the string of the error type.
+    /// as_str returns the string of the error type.
     pub fn as_str(&self) -> &'static str {
         match self {
             ErrorType::StorageError => "StorageError",
@@ -54,7 +54,7 @@ impl ErrorType {
     }
 }

-// ExternalError is the external error.
+/// ExternalError is the external error.
 #[derive(Debug)]
 pub struct ExternalError {
     pub etype: ErrorType,
@@ -62,9 +62,9 @@ pub struct ExternalError {
     pub context: Option<Message>,
 }

-// ExternalError implements the error trait.
+/// ExternalError implements the error trait.
 impl ExternalError {
-    // new returns a new ExternalError.
+    /// new returns a new ExternalError.
     pub fn new(etype: ErrorType) -> Self {
         ExternalError {
             etype,
@@ -73,19 +73,19 @@ impl ExternalError {
         }
     }

-    // with_context returns a new ExternalError with the context.
+    /// with_context returns a new ExternalError with the context.
     pub fn with_context(mut self, message: impl Into<Message>) -> Self {
         self.context = Some(message.into());
         self
     }

-    // with_cause returns a new ExternalError with the cause.
+    /// with_cause returns a new ExternalError with the cause.
     pub fn with_cause(mut self, cause: Box<dyn ErrorTrait + Send + Sync>) -> Self {
         self.cause = Some(cause);
         self
     }

-    // chain_display returns the display of the error with the previous error.
+    /// chain_display returns the display of the error with the previous error.
     fn chain_display(
         &self,
         previous: Option<&ExternalError>,
@@ -112,17 +112,17 @@ impl ExternalError {
     }
 }

-// ExternalError implements the display for the error.
+/// ExternalError implements the display for the error.
 impl fmt::Display for ExternalError {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         self.chain_display(None, f)
     }
 }

-// ExternalError implements the error trait.
+/// ExternalError implements the error trait.
 impl ErrorTrait for ExternalError {}

-// OrErr is the trait to extend the result with error.
+/// OrErr is the trait to extend the result with error.
 pub trait OrErr<T, E> {
     /// Wrap the E in [Result] with new [ErrorType] and context, the existing E will be the cause.
     ///
@@ -136,7 +136,7 @@ pub trait OrErr<T, E> {
         E: Into<Box<dyn ErrorTrait + Send + Sync>>;
 }

-// OrErr implements the OrErr for Result.
+/// OrErr implements the OrErr for Result.
 impl<T, E> OrErr<T, E> for Result<T, E> {
     fn or_err(self, et: ErrorType) -> Result<T, ExternalError>
     where
@@ -157,28 +157,28 @@ impl<T, E> OrErr<T, E> for Result<T, E> {
     }
 }

-// BackendError is the error for backend.
+/// BackendError is the error for backend.
 #[derive(Debug, thiserror::Error)]
 #[error("backend error {message}")]
 pub struct BackendError {
-    // message is the error message.
+    /// message is the error message.
     pub message: String,

-    // status_code is the status code of the response.
+    /// status_code is the status code of the response.
     pub status_code: Option<reqwest::StatusCode>,

-    // header is the headers of the response.
+    /// header is the headers of the response.
     pub header: Option<reqwest::header::HeaderMap>,
 }

-// DownloadFromRemotePeerFailed is the error when the download from remote peer is failed.
+/// DownloadFromRemotePeerFailed is the error when the download from remote peer is failed.
 #[derive(Debug, thiserror::Error)]
 #[error("download piece {piece_number} from remote peer {parent_id} failed")]
 pub struct DownloadFromRemotePeerFailed {
-    // piece_number is the number of the piece.
+    /// piece_number is the number of the piece.
     pub piece_number: u32,

-    // parent_id is the parent id of the piece.
+    /// parent_id is the parent id of the piece.
     pub parent_id: String,
 }
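A hedged illustration of how the `OrErr` extension above is typically used at a call site; the wrapper function and its imports are assumed for illustration, while `or_err`, `with_context`, and `ErrorType::ParseError` appear in this diff:

// Assumed example, not code from this commit. Imports elided; `OrErr` and
// `ExternalError` are re-exported by dragonfly-client-core.
fn parse(input: &str) -> Result<url::Url, ExternalError> {
    url::Url::parse(input)
        .or_err(ErrorType::ParseError)
        .map_err(|err| err.with_context(format!("invalid url: {}", input)))
}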
@@ -16,29 +16,29 @@

 use std::borrow::Cow;

-// Message is the message for the error.
+/// Message is the message for the error.
 #[derive(Debug)]
 pub struct Message(Cow<'static, str>);

-// From<&'static str> for Message implements the conversion from &'static str to Message.
+/// From<&'static str> for Message implements the conversion from &'static str to Message.
 impl From<&'static str> for Message {
-    // from returns the message from the string.
+    /// from returns the message from the string.
     fn from(s: &'static str) -> Self {
         Message(Cow::Borrowed(s))
     }
 }

-// From<String> for Message implements the conversion from String to Message.
+/// From<String> for Message implements the conversion from String to Message.
 impl From<String> for Message {
-    // from returns the message from the string.
+    /// from returns the message from the string.
     fn from(s: String) -> Self {
         Message(Cow::Owned(s))
     }
 }

-// Message implements the message for the error.
+/// Message implements the message for the error.
 impl Message {
-    // as_str returns the string of the message.
+    /// as_str returns the string of the message.
     pub fn as_str(&self) -> &str {
         &self.0
     }
@ -23,181 +23,181 @@ pub use errors::ExternalError;
|
|||
pub use errors::OrErr;
|
||||
pub use errors::{BackendError, DownloadFromRemotePeerFailed};
|
||||
|
||||
// DFError is the error for dragonfly.
|
||||
/// DFError is the error for dragonfly.
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum DFError {
|
||||
// IO is the error for IO operation.
|
||||
/// IO is the error for IO operation.
|
||||
#[error(transparent)]
|
||||
IO(#[from] std::io::Error),
|
||||
|
||||
// MpscSend is the error for send.
|
||||
/// MpscSend is the error for send.
|
||||
#[error("mpsc send: {0}")]
|
||||
MpscSend(String),
|
||||
|
||||
// SendTimeout is the error for send timeout.
|
||||
/// SendTimeout is the error for send timeout.
|
||||
#[error("send timeout")]
|
||||
SendTimeout,
|
||||
|
||||
// HashRing is the error for hashring.
|
||||
/// HashRing is the error for hashring.
|
||||
#[error{"hashring {0} is failed"}]
|
||||
HashRing(String),
|
||||
|
||||
// HostNotFound is the error when the host is not found.
|
||||
/// HostNotFound is the error when the host is not found.
|
||||
#[error{"host {0} not found"}]
|
||||
HostNotFound(String),
|
||||
|
||||
// TaskNotFound is the error when the task is not found.
|
||||
/// TaskNotFound is the error when the task is not found.
|
||||
#[error{"task {0} not found"}]
|
||||
TaskNotFound(String),
|
||||
|
||||
// PieceNotFound is the error when the piece is not found.
|
||||
/// PieceNotFound is the error when the piece is not found.
|
||||
#[error{"piece {0} not found"}]
|
||||
PieceNotFound(String),
|
||||
|
||||
// PieceStateIsFailed is the error when the piece state is failed.
|
||||
/// PieceStateIsFailed is the error when the piece state is failed.
|
||||
#[error{"piece {0} state is failed"}]
|
||||
PieceStateIsFailed(String),
|
||||
|
||||
// WaitForPieceFinishedTimeout is the error when the wait for piece finished timeout.
|
||||
/// WaitForPieceFinishedTimeout is the error when the wait for piece finished timeout.
|
||||
#[error{"wait for piece {0} finished timeout"}]
|
||||
WaitForPieceFinishedTimeout(String),
|
||||
|
||||
// AvailableManagerNotFound is the error when the available manager is not found.
|
||||
/// AvailableManagerNotFound is the error when the available manager is not found.
|
||||
#[error{"available manager not found"}]
|
||||
AvailableManagerNotFound,
|
||||
|
||||
// AvailableSchedulersNotFound is the error when the available schedulers is not found.
|
||||
/// AvailableSchedulersNotFound is the error when the available schedulers is not found.
|
||||
#[error{"available schedulers not found"}]
|
||||
AvailableSchedulersNotFound,
|
||||
|
||||
// DownloadFromRemotePeerFailed is the error when the download from remote peer is failed.
|
||||
/// DownloadFromRemotePeerFailed is the error when the download from remote peer is failed.
|
||||
#[error(transparent)]
|
||||
DownloadFromRemotePeerFailed(DownloadFromRemotePeerFailed),
|
||||
|
||||
// ColumnFamilyNotFound is the error when the column family is not found.
|
||||
/// ColumnFamilyNotFound is the error when the column family is not found.
|
||||
#[error{"column family {0} not found"}]
|
||||
ColumnFamilyNotFound(String),
|
||||
|
||||
// InvalidStateTransition is the error when the state transition is invalid.
|
||||
/// InvalidStateTransition is the error when the state transition is invalid.
|
||||
#[error{"can not transit from {0} to {1}"}]
|
||||
InvalidStateTransition(String, String),
|
||||
|
||||
// InvalidState is the error when the state is invalid.
|
||||
/// InvalidState is the error when the state is invalid.
|
||||
#[error{"invalid state {0}"}]
|
||||
InvalidState(String),
|
||||
|
||||
// InvalidURI is the error when the uri is invalid.
|
||||
/// InvalidURI is the error when the uri is invalid.
|
||||
#[error("invalid uri {0}")]
|
||||
InvalidURI(String),
|
||||
|
||||
// InvalidPeer is the error when the peer is invalid.
|
||||
/// InvalidPeer is the error when the peer is invalid.
|
||||
#[error("invalid peer {0}")]
|
||||
InvalidPeer(String),
|
||||
|
||||
// SchedulerClientNotFound is the error when the scheduler client is not found.
|
||||
/// SchedulerClientNotFound is the error when the scheduler client is not found.
|
||||
#[error{"scheduler client not found"}]
|
||||
SchedulerClientNotFound,
|
||||
|
||||
// UnexpectedResponse is the error when the response is unexpected.
|
||||
/// UnexpectedResponse is the error when the response is unexpected.
|
||||
#[error{"unexpected response"}]
|
||||
UnexpectedResponse,
|
||||
|
||||
// DigestMismatch is the error when the digest is mismatch.
|
||||
/// DigestMismatch is the error when the digest is mismatch.
|
||||
#[error{"digest mismatch expected: {0}, actual: {1}"}]
|
||||
DigestMismatch(String, String),
|
||||
|
||||
// ContentLengthMismatch is the error when the content length is mismatch.
|
||||
/// ContentLengthMismatch is the error when the content length is mismatch.
|
||||
#[error("content length mismatch expected: {0}, actual: {1}")]
|
||||
ContentLengthMismatch(u64, u64),
|
||||
|
||||
// MaxScheduleCountExceeded is the error when the max schedule count is exceeded.
|
||||
/// MaxScheduleCountExceeded is the error when the max schedule count is exceeded.
|
||||
#[error("max schedule count {0} exceeded")]
|
||||
MaxScheduleCountExceeded(u32),
|
||||
|
||||
// InvalidContentLength is the error when the content length is invalid.
|
||||
/// InvalidContentLength is the error when the content length is invalid.
|
||||
#[error("invalid content length")]
|
||||
InvalidContentLength,
|
||||
|
||||
// InvalidPieceLength is the error when the piece length is invalid.
|
||||
/// InvalidPieceLength is the error when the piece length is invalid.
|
||||
#[error("invalid piece length")]
|
||||
InvalidPieceLength,
|
||||
|
||||
// InvalidParameter is the error when the parameter is invalid.
|
||||
/// InvalidParameter is the error when the parameter is invalid.
|
||||
#[error("invalid parameter")]
|
||||
InvalidParameter,
|
||||
|
||||
#[error(transparent)]
Utf8(#[from] std::str::Utf8Error),

// Unknown is the error when the error is unknown.
/// Unknown is the error when the error is unknown.
#[error("unknown {0}")]
Unknown(String),

// Unimplemented is the error when the feature is not implemented.
/// Unimplemented is the error when the feature is not implemented.
#[error("unimplemented")]
Unimplemented,

// EmptyHTTPRangeError is the error when the range fallback error is empty.
/// EmptyHTTPRangeError is the error when the range fallback error is empty.
#[error("RangeUnsatisfiable: Failed to parse range fallback error, please file an issue")]
EmptyHTTPRangeError,

// TonicStatus is the error for tonic status.
/// TonicStatus is the error for tonic status.
#[error(transparent)]
TonicStatus(#[from] tonic::Status),

// TonicStreamElapsed is the error for tonic stream elapsed.
/// TonicStreamElapsed is the error for tonic stream elapsed.
#[error(transparent)]
TokioStreamElapsed(#[from] tokio_stream::Elapsed),

// ReqwestError is the error for reqwest.
/// ReqwestError is the error for reqwest.
#[error(transparent)]
ReqwesError(#[from] reqwest::Error),

// OpenDALError is the error for opendal.
/// OpenDALError is the error for opendal.
#[error(transparent)]
OpenDALError(#[from] opendal::Error),

// HyperError is the error for hyper.
/// HyperError is the error for hyper.
#[error(transparent)]
HyperError(#[from] hyper::Error),
// BackendError is the error for backend.
/// BackendError is the error for backend.
#[error(transparent)]
BackendError(BackendError),

// HyperUtilClientLegacyError is the error for hyper util client legacy.
/// HyperUtilClientLegacyError is the error for hyper util client legacy.
#[error(transparent)]
HyperUtilClientLegacyError(#[from] hyper_util::client::legacy::Error),

// ExternalError is the error for external error.
/// ExternalError is the error for external error.
#[error(transparent)]
ExternalError(#[from] ExternalError),

// MaxDownloadFilesExceeded is the error for max download files exceeded.
/// MaxDownloadFilesExceeded is the error for max download files exceeded.
#[error("max number of files to download exceeded: {0}")]
MaxDownloadFilesExceeded(usize),

// Unsupported is the error for unsupported.
/// Unsupported is the error for unsupported.
#[error("unsupported {0}")]
Unsupported(String),

// TokioJoinError is the error for tokio join.
/// TokioJoinError is the error for tokio join.
#[error(transparent)]
TokioJoinError(tokio::task::JoinError),

// ValidationError is the error for validate.
/// ValidationError is the error for validate.
#[error("validate failed: {0}")]
ValidationError(String),
}
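As an aside for readers of this diff (not part of the commit itself): the `#[error(...)]` attributes above come from thiserror, which derives `Display` from them. A minimal, self-contained sketch of the same pattern, using example-only names:

use thiserror::Error;

/// Example-only enum showing how thiserror turns the attribute messages into Display output.
#[derive(Debug, Error)]
enum ExampleError {
    /// Positional fields are interpolated into the message.
    #[error("column family {0} not found")]
    ColumnFamilyNotFound(String),

    /// transparent forwards Display and source() to the wrapped error.
    #[error(transparent)]
    Io(#[from] std::io::Error),
}

fn main() {
    let err = ExampleError::ColumnFamilyNotFound("piece".to_string());
    assert_eq!(err.to_string(), "column family piece not found");
}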
// SendError is the error for send.
/// SendError is the error for send.
impl<T> From<tokio::sync::mpsc::error::SendError<T>> for DFError {
fn from(e: tokio::sync::mpsc::error::SendError<T>) -> Self {
Self::MpscSend(e.to_string())
}
}
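A side note, not from this diff: the value of such a From impl is that `?` can then convert an mpsc send failure into the crate's error type. A minimal sketch with a stand-in error enum:

use tokio::sync::mpsc;

#[derive(Debug)]
enum MyError {
    MpscSend(String),
}

impl<T> From<mpsc::error::SendError<T>> for MyError {
    fn from(e: mpsc::error::SendError<T>) -> Self {
        MyError::MpscSend(e.to_string())
    }
}

async fn send_value(tx: mpsc::Sender<u32>) -> Result<(), MyError> {
    // `?` applies the From impl if the receiver has been dropped.
    tx.send(42).await?;
    Ok(())
}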
// SendTimeoutError is the error for send timeout.
/// SendTimeoutError is the error for send timeout.
impl<T> From<tokio::sync::mpsc::error::SendTimeoutError<T>> for DFError {
fn from(err: tokio::sync::mpsc::error::SendTimeoutError<T>) -> Self {
match err {
@ -92,6 +92,7 @@ async fn main() -> Result<(), anyhow::Error> {
error!("failed to load config: {}", err);
err
})?;

// Handle features of the container runtime.
let container_runtime = container_runtime::ContainerRuntime::new(&config);
container_runtime.run().await.map_err(|err| {
@ -25,20 +25,20 @@ use tokio::{self, fs};
use toml_edit::{value, Array, DocumentMut, Item, Table, Value};
use tracing::{info, instrument};

// Containerd represents the containerd runtime manager.
/// Containerd represents the containerd runtime manager.
#[derive(Debug, Clone)]
pub struct Containerd {
// config is the configuration for initializing
// runtime environment for the dfdaemon.
/// config is the configuration for initializing
/// runtime environment for the dfdaemon.
config: dfinit::Containerd,

// proxy_config is the configuration for the dfdaemon's proxy server.
/// proxy_config is the configuration for the dfdaemon's proxy server.
proxy_config: dfinit::Proxy,
}

// Containerd implements the containerd runtime manager.
/// Containerd implements the containerd runtime manager.
impl Containerd {
// new creates a new containerd runtime manager.
/// new creates a new containerd runtime manager.
#[instrument(skip_all)]
pub fn new(config: dfinit::Containerd, proxy_config: dfinit::Proxy) -> Self {
Self {
@ -47,8 +47,8 @@ impl Containerd {
}
}

// run runs the containerd runtime to initialize
// runtime environment for the dfdaemon.
/// run runs the containerd runtime to initialize
/// runtime environment for the dfdaemon.
#[instrument(skip_all)]
pub async fn run(&self) -> Result<()> {
let content = fs::read_to_string(&self.config.config_path).await?;
@ -114,8 +114,8 @@ impl Containerd {
Ok(())
}

// add_registries adds registries to the containerd configuration, when containerd supports
// config_path mode and config_path is not empty.
/// add_registries adds registries to the containerd configuration, when containerd supports
/// config_path mode and config_path is not empty.
#[instrument(skip_all)]
pub async fn add_registries(
&self,
@ -24,20 +24,20 @@ use toml_edit::{value, Array, ArrayOfTables, Item, Table, Value};
use tracing::{info, instrument};
use url::Url;

// CRIO represents the cri-o runtime manager.
/// CRIO represents the cri-o runtime manager.
#[derive(Debug, Clone)]
pub struct CRIO {
// config is the configuration for initializing
// runtime environment for the dfdaemon.
/// config is the configuration for initializing
/// runtime environment for the dfdaemon.
config: dfinit::CRIO,

// proxy_config is the configuration for the dfdaemon's proxy server.
/// proxy_config is the configuration for the dfdaemon's proxy server.
proxy_config: dfinit::Proxy,
}

// CRIO implements the cri-o runtime manager.
/// CRIO implements the cri-o runtime manager.
impl CRIO {
// new creates a new cri-o runtime manager.
/// new creates a new cri-o runtime manager.
#[instrument(skip_all)]
pub fn new(config: dfinit::CRIO, proxy_config: dfinit::Proxy) -> Self {
Self {
@ -46,8 +46,8 @@ impl CRIO {
}
}

// run runs the cri-o runtime to initialize
// runtime environment for the dfdaemon.
/// run runs the cri-o runtime to initialize
/// runtime environment for the dfdaemon.
#[instrument(skip_all)]
pub async fn run(&self) -> Result<()> {
let mut registries_config_table = toml_edit::DocumentMut::new();
@ -18,20 +18,20 @@ use dragonfly_client_config::dfinit;
use dragonfly_client_core::{Error, Result};
use tracing::{info, instrument};

// Docker represents the docker runtime manager.
/// Docker represents the docker runtime manager.
#[derive(Debug, Clone)]
pub struct Docker {
// config is the configuration for initializing
// runtime environment for the dfdaemon.
/// config is the configuration for initializing
/// runtime environment for the dfdaemon.
config: dfinit::Docker,

// proxy_config is the configuration for the dfdaemon's proxy server.
/// proxy_config is the configuration for the dfdaemon's proxy server.
proxy_config: dfinit::Proxy,
}

// Docker implements the docker runtime manager.
/// Docker implements the docker runtime manager.
impl Docker {
// new creates a new docker runtime manager.
/// new creates a new docker runtime manager.
#[instrument(skip_all)]
pub fn new(config: dfinit::Docker, proxy_config: dfinit::Proxy) -> Self {
Self {
@ -40,10 +40,10 @@ impl Docker {
}
}

// TODO: Implement the run method for Docker.
//
// run runs the docker runtime to initialize
// runtime environment for the dfdaemon.
/// TODO: Implement the run method for Docker.
///
/// run runs the docker runtime to initialize
/// runtime environment for the dfdaemon.
#[instrument(skip_all)]
pub async fn run(&self) -> Result<()> {
info!(
@ -22,7 +22,7 @@ pub mod containerd;
pub mod crio;
pub mod docker;

// Engine represents config of the container runtime engine.
/// Engine represents config of the container runtime engine.
#[derive(Debug, Clone)]
enum Engine {
Containerd(containerd::Containerd),
@ -30,14 +30,14 @@ enum Engine {
Crio(crio::CRIO),
}

// ContainerRuntime represents the container runtime manager.
/// ContainerRuntime represents the container runtime manager.
pub struct ContainerRuntime {
engine: Option<Engine>,
}
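One point worth spelling out about the `//` to `///` change shown throughout this diff: only `///` lines are doc comments that rustdoc and IDEs attach to the following item, while `//` lines are ignored by tooling. A minimal illustration (not from this repository):

/// Doc comment: picked up by `cargo doc` and shown on hover in IDEs.
pub struct Documented;

// Plain comment: ignored by rustdoc, so this item renders without documentation.
pub struct Undocumented;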
// ContainerRuntime implements the container runtime manager.
/// ContainerRuntime implements the container runtime manager.
impl ContainerRuntime {
// new creates a new container runtime manager.
/// new creates a new container runtime manager.
#[instrument(skip_all)]
pub fn new(config: &Config) -> Self {
Self {
@ -45,7 +45,7 @@ impl ContainerRuntime {
}
}

// run runs the container runtime to initialize runtime environment for the dfdaemon.
/// run runs the container runtime to initialize runtime environment for the dfdaemon.
#[instrument(skip_all)]
pub async fn run(&self) -> Result<()> {
// If containerd is enabled, override the default containerd
@ -58,7 +58,7 @@ impl ContainerRuntime {
}
}

// get_engine returns the runtime engine from the config.
/// get_engine returns the runtime engine from the config.
#[instrument(skip_all)]
fn get_engine(config: &Config) -> Option<Engine> {
if let Some(ref container_runtime_config) = config.container_runtime.config {
@ -25,39 +25,39 @@ use tokio::io::{self, AsyncRead, AsyncReadExt, AsyncSeekExt, BufReader, SeekFrom
use tokio_util::io::InspectReader;
use tracing::{error, info, instrument, warn};

// DEFAULT_DIR_NAME is the default directory name to store content.
/// DEFAULT_DIR_NAME is the default directory name to store content.
const DEFAULT_DIR_NAME: &str = "content";

// Content is the content of a piece.
/// Content is the content of a piece.
pub struct Content {
// config is the configuration of the dfdaemon.
/// config is the configuration of the dfdaemon.
config: Arc<Config>,

// dir is the directory to store content.
/// dir is the directory to store content.
dir: PathBuf,
}

// WritePieceResponse is the response of writing a piece.
/// WritePieceResponse is the response of writing a piece.
pub struct WritePieceResponse {
// length is the length of the piece.
/// length is the length of the piece.
pub length: u64,

// hash is the hash of the piece.
/// hash is the hash of the piece.
pub hash: String,
}

// WriteCacheTaskResponse is the response of writing a cache task.
/// WriteCacheTaskResponse is the response of writing a cache task.
pub struct WriteCacheTaskResponse {
// length is the length of the cache task.
/// length is the length of the cache task.
pub length: u64,

// hash is the hash of the cache task.
/// hash is the hash of the cache task.
pub hash: String,
}
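For orientation (an illustrative sketch, not code from this repository): the length and hash carried by these response structs are typically produced in one pass while writing, for example by wrapping the incoming reader in InspectReader and feeding a hasher; sha2 is assumed here purely for the example.

use sha2::{Digest, Sha256};
use tokio::io::{self, AsyncRead, AsyncWrite};
use tokio_util::io::InspectReader;

/// Hypothetical result mirroring the shape of WritePieceResponse.
struct Written {
    length: u64,
    hash: String,
}

async fn copy_and_hash<R, W>(reader: R, mut writer: W) -> io::Result<Written>
where
    R: AsyncRead + Unpin,
    W: AsyncWrite + Unpin,
{
    let mut hasher = Sha256::new();
    // Observe every chunk as it streams through to the writer.
    let mut tee = InspectReader::new(reader, |chunk| hasher.update(chunk));
    let length = io::copy(&mut tee, &mut writer).await?;
    drop(tee); // release the borrow on the hasher before finalizing
    Ok(Written {
        length,
        hash: format!("{:x}", hasher.finalize()),
    })
}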
// Content implements the content storage.
/// Content implements the content storage.
impl Content {
// new returns a new content.
/// new returns a new content.
#[instrument(skip_all)]
pub async fn new(config: Arc<Config>, dir: &Path) -> Result<Content> {
let dir = dir.join(DEFAULT_DIR_NAME);
@ -75,7 +75,7 @@ impl Content {
Ok(Content { config, dir })
}

// hard_link_or_copy_task hard links or copies the task content to the destination.
/// hard_link_or_copy_task hard links or copies the task content to the destination.
#[instrument(skip_all)]
pub async fn hard_link_or_copy_task(
&self,
@ -144,14 +144,14 @@ impl Content {
Ok(())
}

// hard_link_task hard links the task content.
/// hard_link_task hard links the task content.
#[instrument(skip_all)]
async fn hard_link_task(&self, task_id: &str, link: &Path) -> Result<()> {
fs::hard_link(self.dir.join(task_id), link).await?;
Ok(())
}

// copy_task copies the task content to the destination.
/// copy_task copies the task content to the destination.
#[instrument(skip_all)]
async fn copy_task(&self, task_id: &str, to: &Path) -> Result<()> {
// Ensure the parent directory of the destination exists.
@ -168,7 +168,7 @@ impl Content {
Ok(())
}

// copy_task_by_range copies the task content to the destination by range.
/// copy_task_by_range copies the task content to the destination by range.
#[instrument(skip_all)]
async fn copy_task_by_range(&self, task_id: &str, to: &Path, range: Range) -> Result<()> {
// Ensure the parent directory of the destination exists.
@ -200,7 +200,7 @@ impl Content {
Ok(())
}

// read_task reads the task content by range.
/// read_task reads the task content by range.
#[instrument(skip_all)]
pub async fn read_task_by_range(&self, task_id: &str, range: Range) -> Result<impl AsyncRead> {
let task_path = self.dir.join(task_id);
@ -221,7 +221,7 @@ impl Content {
Ok(range_reader)
}

// delete_task deletes the task content.
/// delete_task deletes the task content.
#[instrument(skip_all)]
pub async fn delete_task(&self, task_id: &str) -> Result<()> {
info!("delete task content: {}", task_id);
@ -233,7 +233,7 @@ impl Content {
Ok(())
}

// read_piece reads the piece from the content.
/// read_piece reads the piece from the content.
#[instrument(skip_all)]
pub async fn read_piece(
&self,
@ -274,7 +274,7 @@ impl Content {
Ok(f.take(length))
}

// write_piece writes the piece to the content.
/// write_piece writes the piece to the content.
#[instrument(skip_all)]
pub async fn write_piece<R: AsyncRead + Unpin + ?Sized>(
&self,
@ -326,7 +326,7 @@ impl Content {
})
}

// hard_link_or_copy_cache_task hard links or copies the task content to the destination.
/// hard_link_or_copy_cache_task hard links or copies the task content to the destination.
#[instrument(skip_all)]
pub async fn hard_link_or_copy_cache_task(
&self,
@ -379,7 +379,7 @@ impl Content {
Ok(())
}

// copy_cache_task copies the cache task content to the destination.
/// copy_cache_task copies the cache task content to the destination.
#[instrument(skip_all)]
pub async fn write_cache_task(
&self,
@ -426,7 +426,7 @@ impl Content {
})
}

// delete_task deletes the cache task content.
/// delete_task deletes the cache task content.
#[instrument(skip_all)]
pub async fn delete_cache_task(&self, cache_task_id: &str) -> Result<()> {
info!("delete cache task content: {}", cache_task_id);
@ -30,24 +30,24 @@ pub mod content;
|
|||
pub mod metadata;
|
||||
pub mod storage_engine;
|
||||
|
||||
// DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL is the default interval for waiting for the piece to be finished.
|
||||
/// DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL is the default interval for waiting for the piece to be finished.
|
||||
pub const DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL: Duration = Duration::from_millis(500);
|
||||
|
||||
// Storage is the storage of the task.
|
||||
/// Storage is the storage of the task.
|
||||
pub struct Storage {
|
||||
// config is the configuration of the dfdaemon.
|
||||
/// config is the configuration of the dfdaemon.
|
||||
config: Arc<Config>,
|
||||
|
||||
// metadata implements the metadata storage.
|
||||
/// metadata implements the metadata storage.
|
||||
metadata: metadata::Metadata,
|
||||
|
||||
// content implements the content storage.
|
||||
/// content implements the content storage.
|
||||
content: content::Content,
|
||||
}
|
||||
|
||||
// Storage implements the storage.
|
||||
/// Storage implements the storage.
|
||||
impl Storage {
|
||||
// new returns a new storage.
|
||||
/// new returns a new storage.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn new(config: Arc<Config>, dir: &Path, log_dir: PathBuf) -> Result<Self> {
|
||||
let metadata = metadata::Metadata::new(config.clone(), dir, &log_dir)?;
|
||||
|
|
@ -59,7 +59,7 @@ impl Storage {
|
|||
})
|
||||
}
|
||||
|
||||
// hard_link_or_copy_task hard links or copies the task content to the destination.
|
||||
/// hard_link_or_copy_task hard links or copies the task content to the destination.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn hard_link_or_copy_task(
|
||||
&self,
|
||||
|
|
@ -70,7 +70,7 @@ impl Storage {
|
|||
self.content.hard_link_or_copy_task(task, to, range).await
|
||||
}
|
||||
|
||||
// read_task_by_range returns the reader of the task by range.
|
||||
/// read_task_by_range returns the reader of the task by range.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn read_task_by_range(
|
||||
&self,
|
||||
|
|
@ -80,7 +80,7 @@ impl Storage {
|
|||
self.content.read_task_by_range(task_id, range).await
|
||||
}
|
||||
|
||||
// download_task_started updates the metadata of the task when the task downloads started.
|
||||
/// download_task_started updates the metadata of the task when the task downloads started.
|
||||
#[instrument(skip_all)]
|
||||
pub fn download_task_started(
|
||||
&self,
|
||||
|
|
@ -93,49 +93,49 @@ impl Storage {
|
|||
.download_task_started(id, piece_length, content_length, response_header)
|
||||
}
|
||||
|
||||
// download_task_finished updates the metadata of the task when the task downloads finished.
|
||||
/// download_task_finished updates the metadata of the task when the task downloads finished.
|
||||
#[instrument(skip_all)]
|
||||
pub fn download_task_finished(&self, id: &str) -> Result<metadata::Task> {
|
||||
self.metadata.download_task_finished(id)
|
||||
}
|
||||
|
||||
// download_task_failed updates the metadata of the task when the task downloads failed.
|
||||
/// download_task_failed updates the metadata of the task when the task downloads failed.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn download_task_failed(&self, id: &str) -> Result<metadata::Task> {
|
||||
self.metadata.download_task_failed(id)
|
||||
}
|
||||
|
||||
// prefetch_task_started updates the metadata of the task when the task prefetches started.
|
||||
/// prefetch_task_started updates the metadata of the task when the task prefetches started.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn prefetch_task_started(&self, id: &str) -> Result<metadata::Task> {
|
||||
self.metadata.prefetch_task_started(id)
|
||||
}
|
||||
|
||||
// prefetch_task_failed updates the metadata of the task when the task prefetches failed.
|
||||
/// prefetch_task_failed updates the metadata of the task when the task prefetches failed.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn prefetch_task_failed(&self, id: &str) -> Result<metadata::Task> {
|
||||
self.metadata.prefetch_task_failed(id)
|
||||
}
|
||||
|
||||
// upload_task_finished updates the metadata of the task when task uploads finished.
|
||||
/// upload_task_finished updates the metadata of the task when task uploads finished.
|
||||
#[instrument(skip_all)]
|
||||
pub fn upload_task_finished(&self, id: &str) -> Result<metadata::Task> {
|
||||
self.metadata.upload_task_finished(id)
|
||||
}
|
||||
|
||||
// get_task returns the task metadata.
|
||||
/// get_task returns the task metadata.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_task(&self, id: &str) -> Result<Option<metadata::Task>> {
|
||||
self.metadata.get_task(id)
|
||||
}
|
||||
|
||||
// get_tasks returns the task metadatas.
|
||||
/// get_tasks returns the task metadatas.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_tasks(&self) -> Result<Vec<metadata::Task>> {
|
||||
self.metadata.get_tasks()
|
||||
}
|
||||
|
||||
// delete_task deletes the task metadatas, task content and piece metadatas.
|
||||
/// delete_task deletes the task metadatas, task content and piece metadatas.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn delete_task(&self, id: &str) {
|
||||
self.metadata
|
||||
|
|
@ -151,7 +151,7 @@ impl Storage {
|
|||
});
|
||||
}
|
||||
|
||||
// hard_link_or_copy_cache_task hard links or copies the cache task content to the destination.
|
||||
/// hard_link_or_copy_cache_task hard links or copies the cache task content to the destination.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn hard_link_or_copy_cache_task(
|
||||
&self,
|
||||
|
|
@ -161,7 +161,7 @@ impl Storage {
|
|||
self.content.hard_link_or_copy_cache_task(task, to).await
|
||||
}
|
||||
|
||||
// create_persistent_cache_task creates a new persistent cache task.
|
||||
/// create_persistent_cache_task creates a new persistent cache task.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn create_persistent_cache_task(
|
||||
&self,
|
||||
|
|
@ -190,7 +190,7 @@ impl Storage {
|
|||
)
|
||||
}
|
||||
|
||||
// download_cache_task_started updates the metadata of the cache task when the cache task downloads started.
|
||||
/// download_cache_task_started updates the metadata of the cache task when the cache task downloads started.
|
||||
#[instrument(skip_all)]
|
||||
pub fn download_cache_task_started(
|
||||
&self,
|
||||
|
|
@ -204,37 +204,37 @@ impl Storage {
|
|||
.download_cache_task_started(id, ttl, persistent, piece_length, content_length)
|
||||
}
|
||||
|
||||
// download_cache_task_finished updates the metadata of the cache task when the cache task downloads finished.
|
||||
/// download_cache_task_finished updates the metadata of the cache task when the cache task downloads finished.
|
||||
#[instrument(skip_all)]
|
||||
pub fn download_cache_task_finished(&self, id: &str) -> Result<metadata::CacheTask> {
|
||||
self.metadata.download_cache_task_finished(id)
|
||||
}
|
||||
|
||||
// download_cache_task_failed updates the metadata of the cache task when the cache task downloads failed.
|
||||
/// download_cache_task_failed updates the metadata of the cache task when the cache task downloads failed.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn download_cache_task_failed(&self, id: &str) -> Result<metadata::CacheTask> {
|
||||
self.metadata.download_cache_task_failed(id)
|
||||
}
|
||||
|
||||
// upload_cache_task_finished updates the metadata of the cache task when cache task uploads finished.
/// upload_cache_task_finished updates the metadata of the cache task when cache task uploads finished.
|
||||
#[instrument(skip_all)]
|
||||
pub fn upload_cache_task_finished(&self, id: &str) -> Result<metadata::CacheTask> {
|
||||
self.metadata.upload_cache_task_finished(id)
|
||||
}
|
||||
|
||||
// get_cache_task returns the cache task metadata.
|
||||
/// get_cache_task returns the cache task metadata.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_cache_task(&self, id: &str) -> Result<Option<metadata::CacheTask>> {
|
||||
self.metadata.get_cache_task(id)
|
||||
}
|
||||
|
||||
// get_tasks returns the task metadatas.
|
||||
/// get_tasks returns the task metadatas.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_cache_tasks(&self) -> Result<Vec<metadata::CacheTask>> {
|
||||
self.metadata.get_cache_tasks()
|
||||
}
|
||||
|
||||
// delete_cache_task deletes the cache task metadatas, cache task content and piece metadatas.
|
||||
/// delete_cache_task deletes the cache task metadatas, cache task content and piece metadatas.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn delete_cache_task(&self, id: &str) {
|
||||
self.metadata.delete_cache_task(id).unwrap_or_else(|err| {
|
||||
|
|
@ -249,8 +249,8 @@ impl Storage {
|
|||
});
|
||||
}
|
||||
|
||||
// download_piece_started updates the metadata of the piece and writes
|
||||
// the data of piece to file when the piece downloads started.
|
||||
/// download_piece_started updates the metadata of the piece and writes
|
||||
/// the data of piece to file when the piece downloads started.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn download_piece_started(
|
||||
&self,
|
||||
|
|
@ -265,7 +265,7 @@ impl Storage {
|
|||
}
|
||||
}
|
||||
|
||||
// download_piece_from_source_finished is used for downloading piece from source.
|
||||
/// download_piece_from_source_finished is used for downloading piece from source.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn download_piece_from_source_finished<R: AsyncRead + Unpin + ?Sized>(
|
||||
&self,
|
||||
|
|
@ -288,7 +288,7 @@ impl Storage {
|
|||
)
|
||||
}
|
||||
|
||||
// download_piece_from_remote_peer_finished is used for downloading piece from remote peer.
|
||||
/// download_piece_from_remote_peer_finished is used for downloading piece from remote peer.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn download_piece_from_remote_peer_finished<R: AsyncRead + Unpin + ?Sized>(
|
||||
&self,
|
||||
|
|
@ -321,14 +321,14 @@ impl Storage {
|
|||
)
|
||||
}
|
||||
|
||||
// download_piece_failed updates the metadata of the piece when the piece downloads failed.
|
||||
/// download_piece_failed updates the metadata of the piece when the piece downloads failed.
|
||||
#[instrument(skip_all)]
|
||||
pub fn download_piece_failed(&self, task_id: &str, number: u32) -> Result<()> {
|
||||
self.metadata.download_piece_failed(task_id, number)
|
||||
}
|
||||
|
||||
// upload_piece updates the metadata of the piece and
|
||||
// returns the data of the piece.
|
||||
/// upload_piece updates the metadata of the piece and
|
||||
/// returns the data of the piece.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn upload_piece(
|
||||
&self,
|
||||
|
|
@ -394,24 +394,24 @@ impl Storage {
|
|||
}
|
||||
}
|
||||
|
||||
// get_piece returns the piece metadata.
|
||||
/// get_piece returns the piece metadata.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_piece(&self, task_id: &str, number: u32) -> Result<Option<metadata::Piece>> {
|
||||
self.metadata.get_piece(task_id, number)
|
||||
}
|
||||
|
||||
// get_pieces returns the piece metadatas.
|
||||
/// get_pieces returns the piece metadatas.
|
||||
pub fn get_pieces(&self, task_id: &str) -> Result<Vec<metadata::Piece>> {
|
||||
self.metadata.get_pieces(task_id)
|
||||
}
|
||||
|
||||
// piece_id returns the piece id.
|
||||
/// piece_id returns the piece id.
|
||||
#[instrument(skip_all)]
|
||||
pub fn piece_id(&self, task_id: &str, number: u32) -> String {
|
||||
self.metadata.piece_id(task_id, number)
|
||||
}
|
||||
|
||||
// wait_for_piece_finished waits for the piece to be finished.
|
||||
/// wait_for_piece_finished waits for the piece to be finished.
|
||||
#[instrument(skip_all)]
|
||||
async fn wait_for_piece_finished(&self, task_id: &str, number: u32) -> Result<metadata::Piece> {
|
||||
// Initialize the timeout of piece.
|
||||
|
|
|
|||
|
|
@ -30,83 +30,83 @@ use tracing::{error, info, instrument};
|
|||
|
||||
use crate::storage_engine::{rocksdb::RocksdbStorageEngine, DatabaseObject, StorageEngineOwned};
|
||||
|
||||
// Task is the metadata of the task.
|
||||
/// Task is the metadata of the task.
|
||||
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct Task {
|
||||
// id is the task id.
|
||||
/// id is the task id.
|
||||
pub id: String,
|
||||
|
||||
// piece_length is the length of the piece.
|
||||
/// piece_length is the length of the piece.
|
||||
pub piece_length: Option<u64>,
|
||||
|
||||
// content_length is the length of the content.
|
||||
/// content_length is the length of the content.
|
||||
pub content_length: Option<u64>,
|
||||
|
||||
// header is the header of the response.
|
||||
/// header is the header of the response.
|
||||
pub response_header: HashMap<String, String>,
|
||||
|
||||
// uploading_count is the count of the task being uploaded by other peers.
|
||||
/// uploading_count is the count of the task being uploaded by other peers.
|
||||
pub uploading_count: u64,
|
||||
|
||||
// uploaded_count is the count of the task has been uploaded by other peers.
|
||||
/// uploaded_count is the count of the task has been uploaded by other peers.
|
||||
pub uploaded_count: u64,
|
||||
|
||||
// updated_at is the time when the task metadata is updated. If the task is downloaded
|
||||
// by other peers, it will also update updated_at.
|
||||
/// updated_at is the time when the task metadata is updated. If the task is downloaded
|
||||
/// by other peers, it will also update updated_at.
|
||||
pub updated_at: NaiveDateTime,
|
||||
|
||||
// created_at is the time when the task metadata is created.
|
||||
/// created_at is the time when the task metadata is created.
|
||||
pub created_at: NaiveDateTime,
|
||||
|
||||
// prefetched_at is the time when the task prefetched.
|
||||
/// prefetched_at is the time when the task prefetched.
|
||||
pub prefetched_at: Option<NaiveDateTime>,
|
||||
|
||||
// failed_at is the time when the task downloads failed.
|
||||
/// failed_at is the time when the task downloads failed.
|
||||
pub failed_at: Option<NaiveDateTime>,
|
||||
|
||||
// finished_at is the time when the task downloads finished.
|
||||
/// finished_at is the time when the task downloads finished.
|
||||
pub finished_at: Option<NaiveDateTime>,
|
||||
}
|
||||
|
||||
// Task implements the task database object.
|
||||
/// Task implements the task database object.
|
||||
impl DatabaseObject for Task {
|
||||
// NAMESPACE is the namespace of [Task] objects.
|
||||
/// NAMESPACE is the namespace of [Task] objects.
|
||||
const NAMESPACE: &'static str = "task";
|
||||
}
|
||||
|
||||
// Task implements the task metadata.
|
||||
/// Task implements the task metadata.
|
||||
impl Task {
|
||||
// is_started returns whether the task downloads started.
|
||||
/// is_started returns whether the task downloads started.
|
||||
pub fn is_started(&self) -> bool {
|
||||
self.finished_at.is_none()
|
||||
}
|
||||
|
||||
// is_downloading returns whether the task is downloading.
|
||||
/// is_downloading returns whether the task is downloading.
|
||||
pub fn is_uploading(&self) -> bool {
|
||||
self.uploading_count > 0
|
||||
}
|
||||
|
||||
// is_expired returns whether the task is expired.
|
||||
/// is_expired returns whether the task is expired.
|
||||
pub fn is_expired(&self, ttl: Duration) -> bool {
|
||||
self.updated_at + ttl < Utc::now().naive_utc()
|
||||
}
|
||||
|
||||
// is_prefetched returns whether the task is prefetched.
|
||||
/// is_prefetched returns whether the task is prefetched.
|
||||
pub fn is_prefetched(&self) -> bool {
|
||||
self.prefetched_at.is_some()
|
||||
}
|
||||
|
||||
// is_failed returns whether the task downloads failed.
|
||||
/// is_failed returns whether the task downloads failed.
|
||||
pub fn is_failed(&self) -> bool {
|
||||
self.failed_at.is_some()
|
||||
}
|
||||
|
||||
// is_finished returns whether the task downloads finished.
|
||||
/// is_finished returns whether the task downloads finished.
|
||||
pub fn is_finished(&self) -> bool {
|
||||
self.finished_at.is_some()
|
||||
}
|
||||
|
||||
// is_empty returns whether the task is empty.
|
||||
/// is_empty returns whether the task is empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
if let Some(content_length) = self.content_length() {
|
||||
if content_length == 0 {
|
||||
|
|
@ -117,79 +117,79 @@ impl Task {
|
|||
false
|
||||
}
|
||||
|
||||
// piece_length returns the piece length of the task.
|
||||
/// piece_length returns the piece length of the task.
|
||||
pub fn piece_length(&self) -> Option<u64> {
|
||||
self.piece_length
|
||||
}
|
||||
|
||||
// content_length returns the content length of the task.
|
||||
/// content_length returns the content length of the task.
|
||||
pub fn content_length(&self) -> Option<u64> {
|
||||
self.content_length
|
||||
}
|
||||
}
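A brief aside on the expiry checks above (illustrative only, not this crate's code): `is_expired` compares `updated_at + ttl` against the current naive UTC time, which with chrono looks like this:

use chrono::{Duration, NaiveDateTime, Utc};

/// Sketch of the TTL comparison used by the is_expired methods above.
fn is_expired(updated_at: NaiveDateTime, ttl: Duration) -> bool {
    updated_at + ttl < Utc::now().naive_utc()
}

fn main() {
    let two_hours_ago = Utc::now().naive_utc() - Duration::hours(2);
    assert!(is_expired(two_hours_ago, Duration::hours(1)));
    assert!(!is_expired(two_hours_ago, Duration::hours(3)));
}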
|
||||
|
||||
// CacheTask is the metadata of the cache task.
|
||||
/// CacheTask is the metadata of the cache task.
|
||||
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct CacheTask {
|
||||
// id is the task id.
|
||||
/// id is the task id.
|
||||
pub id: String,
|
||||
|
||||
// persistent represents whether the cache task is persistent.
|
||||
// If the cache task is persistent, the cache peer will
|
||||
// not be deleted when dfdaemon runs garbage collection.
|
||||
/// persistent represents whether the cache task is persistent.
|
||||
/// If the cache task is persistent, the cache peer will
|
||||
/// not be deleted when dfdaemon runs garbage collection.
|
||||
pub persistent: bool,
|
||||
|
||||
// ttl is the time to live of the cache task.
|
||||
/// ttl is the time to live of the cache task.
|
||||
pub ttl: Duration,
|
||||
|
||||
// digests is the digests of the cache task.
|
||||
/// digests is the digests of the cache task.
|
||||
pub digest: String,
|
||||
|
||||
// piece_length is the length of the piece.
|
||||
/// piece_length is the length of the piece.
|
||||
pub piece_length: u64,
|
||||
|
||||
// content_length is the length of the content.
|
||||
/// content_length is the length of the content.
|
||||
pub content_length: u64,
|
||||
|
||||
// uploading_count is the count of the task being uploaded by other peers.
|
||||
/// uploading_count is the count of the task being uploaded by other peers.
|
||||
pub uploading_count: u64,
|
||||
|
||||
// uploaded_count is the count of the task has been uploaded by other peers.
|
||||
/// uploaded_count is the count of the task has been uploaded by other peers.
|
||||
pub uploaded_count: u64,
|
||||
|
||||
// updated_at is the time when the task metadata is updated. If the task is downloaded
|
||||
// by other peers, it will also update updated_at.
|
||||
/// updated_at is the time when the task metadata is updated. If the task is downloaded
|
||||
/// by other peers, it will also update updated_at.
|
||||
pub updated_at: NaiveDateTime,
|
||||
|
||||
// created_at is the time when the task metadata is created.
|
||||
/// created_at is the time when the task metadata is created.
|
||||
pub created_at: NaiveDateTime,
|
||||
|
||||
// failed_at is the time when the task downloads failed.
|
||||
/// failed_at is the time when the task downloads failed.
|
||||
pub failed_at: Option<NaiveDateTime>,
|
||||
|
||||
// finished_at is the time when the task downloads finished.
|
||||
/// finished_at is the time when the task downloads finished.
|
||||
pub finished_at: Option<NaiveDateTime>,
|
||||
}
|
||||
|
||||
// CacheTask implements the cache task database object.
|
||||
/// CacheTask implements the cache task database object.
|
||||
impl DatabaseObject for CacheTask {
|
||||
// NAMESPACE is the namespace of [CacheTask] objects.
|
||||
/// NAMESPACE is the namespace of [CacheTask] objects.
|
||||
const NAMESPACE: &'static str = "cache_task";
|
||||
}
|
||||
|
||||
// CacheTask implements the cache task metadata.
|
||||
/// CacheTask implements the cache task metadata.
|
||||
impl CacheTask {
|
||||
// is_started returns whether the cache task downloads started.
|
||||
/// is_started returns whether the cache task downloads started.
|
||||
pub fn is_started(&self) -> bool {
|
||||
self.finished_at.is_none()
|
||||
}
|
||||
|
||||
// is_downloading returns whether the cache task is downloading.
|
||||
/// is_downloading returns whether the cache task is downloading.
|
||||
pub fn is_uploading(&self) -> bool {
|
||||
self.uploading_count > 0
|
||||
}
|
||||
|
||||
// is_expired returns whether the cache task is expired.
|
||||
/// is_expired returns whether the cache task is expired.
|
||||
pub fn is_expired(&self) -> bool {
|
||||
// When scheduler runs garbage collection, it will trigger dfdaemon to evict the cache task.
|
||||
// But sometimes the dfdaemon may not evict the cache task in time, so we select the ttl * 1.2
|
||||
|
|
@ -197,17 +197,17 @@ impl CacheTask {
|
|||
self.created_at + self.ttl * 2 < Utc::now().naive_utc()
|
||||
}
|
||||
|
||||
// is_failed returns whether the cache task downloads failed.
|
||||
/// is_failed returns whether the cache task downloads failed.
|
||||
pub fn is_failed(&self) -> bool {
|
||||
self.failed_at.is_some()
|
||||
}
|
||||
|
||||
// is_finished returns whether the cache task downloads finished.
|
||||
/// is_finished returns whether the cache task downloads finished.
|
||||
pub fn is_finished(&self) -> bool {
|
||||
self.finished_at.is_some()
|
||||
}
|
||||
|
||||
// is_empty returns whether the cache task is empty.
|
||||
/// is_empty returns whether the cache task is empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
if self.content_length == 0 {
|
||||
return true;
|
||||
|
|
@ -216,76 +216,76 @@ impl CacheTask {
|
|||
false
|
||||
}
|
||||
|
||||
// is_persistent returns whether the cache task is persistent.
|
||||
/// is_persistent returns whether the cache task is persistent.
|
||||
pub fn is_persistent(&self) -> bool {
|
||||
self.persistent
|
||||
}
|
||||
|
||||
// piece_length returns the piece length of the cache task.
|
||||
/// piece_length returns the piece length of the cache task.
|
||||
pub fn piece_length(&self) -> u64 {
|
||||
self.piece_length
|
||||
}
|
||||
|
||||
// content_length returns the content length of the cache task.
|
||||
/// content_length returns the content length of the cache task.
|
||||
pub fn content_length(&self) -> u64 {
|
||||
self.content_length
|
||||
}
|
||||
}
|
||||
|
||||
// Piece is the metadata of the piece.
|
||||
/// Piece is the metadata of the piece.
|
||||
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct Piece {
|
||||
// number is the piece number.
|
||||
/// number is the piece number.
|
||||
pub number: u32,
|
||||
|
||||
// offset is the offset of the piece in the task.
|
||||
/// offset is the offset of the piece in the task.
|
||||
pub offset: u64,
|
||||
|
||||
// length is the length of the piece.
|
||||
/// length is the length of the piece.
|
||||
pub length: u64,
|
||||
|
||||
// digest is the digest of the piece.
|
||||
/// digest is the digest of the piece.
|
||||
pub digest: String,
|
||||
|
||||
// parent_id is the parent id of the piece.
|
||||
/// parent_id is the parent id of the piece.
|
||||
pub parent_id: Option<String>,
|
||||
|
||||
// uploading_count is the count of the piece being uploaded by other peers.
|
||||
/// uploading_count is the count of the piece being uploaded by other peers.
|
||||
pub uploading_count: u64,
|
||||
|
||||
// uploaded_count is the count of the piece has been uploaded by other peers.
|
||||
/// uploaded_count is the count of the piece has been uploaded by other peers.
|
||||
pub uploaded_count: u64,
|
||||
|
||||
// updated_at is the time when the piece metadata is updated. If the piece is downloaded
|
||||
// by other peers, it will also update updated_at.
|
||||
/// updated_at is the time when the piece metadata is updated. If the piece is downloaded
|
||||
/// by other peers, it will also update updated_at.
|
||||
pub updated_at: NaiveDateTime,
|
||||
|
||||
// created_at is the time when the piece metadata is created.
|
||||
/// created_at is the time when the piece metadata is created.
|
||||
pub created_at: NaiveDateTime,
|
||||
|
||||
// finished_at is the time when the piece downloads finished.
|
||||
/// finished_at is the time when the piece downloads finished.
|
||||
pub finished_at: Option<NaiveDateTime>,
|
||||
}
|
||||
|
||||
// Piece implements the piece database object.
|
||||
/// Piece implements the piece database object.
|
||||
impl DatabaseObject for Piece {
|
||||
// NAMESPACE is the namespace of [Piece] objects.
|
||||
/// NAMESPACE is the namespace of [Piece] objects.
|
||||
const NAMESPACE: &'static str = "piece";
|
||||
}
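As an aside (a sketch under the assumption that DatabaseObject simply carries a namespace constant, which is how it is used in this file): the NAMESPACE associated constant lets generic storage code pick the right column family per stored type.

trait DatabaseObject {
    const NAMESPACE: &'static str;
}

struct Piece;

impl DatabaseObject for Piece {
    const NAMESPACE: &'static str = "piece";
}

/// Generic helper resolving the column family name for any stored type.
fn column_family_name<O: DatabaseObject>() -> &'static str {
    O::NAMESPACE
}

fn main() {
    assert_eq!(column_family_name::<Piece>(), "piece");
}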
|
||||
|
||||
// Piece implements the piece metadata.
|
||||
/// Piece implements the piece metadata.
|
||||
impl Piece {
|
||||
// is_started returns whether the piece downloads started.
|
||||
/// is_started returns whether the piece downloads started.
|
||||
pub fn is_started(&self) -> bool {
|
||||
self.finished_at.is_none()
|
||||
}
|
||||
|
||||
// is_finished returns whether the piece downloads finished.
|
||||
/// is_finished returns whether the piece downloads finished.
|
||||
pub fn is_finished(&self) -> bool {
|
||||
self.finished_at.is_some()
|
||||
}
|
||||
|
||||
// cost returns the cost of the piece downloaded.
|
||||
/// cost returns the cost of the piece downloaded.
|
||||
pub fn cost(&self) -> Option<Duration> {
|
||||
match self
|
||||
.finished_at
|
||||
|
|
@ -302,7 +302,7 @@ impl Piece {
|
|||
}
|
||||
}
|
||||
|
||||
// prost_cost returns the prost cost of the piece downloaded.
|
||||
/// prost_cost returns the prost cost of the piece downloaded.
|
||||
pub fn prost_cost(&self) -> Option<prost_wkt_types::Duration> {
|
||||
match self.cost() {
|
||||
Some(cost) => match prost_wkt_types::Duration::try_from(cost) {
|
||||
|
|
@ -317,17 +317,17 @@ impl Piece {
|
|||
}
|
||||
}
|
||||
|
||||
// Metadata manages the metadata of [Task], [Piece] and [CacheTask].
|
||||
/// Metadata manages the metadata of [Task], [Piece] and [CacheTask].
|
||||
pub struct Metadata<E = RocksdbStorageEngine>
|
||||
where
|
||||
E: StorageEngineOwned,
|
||||
{
|
||||
// db is the underlying storage engine instance.
|
||||
/// db is the underlying storage engine instance.
|
||||
db: E,
|
||||
}
|
||||
|
||||
impl<E: StorageEngineOwned> Metadata<E> {
|
||||
// download_task_started updates the metadata of the task when the task downloads started.
|
||||
/// download_task_started updates the metadata of the task when the task downloads started.
|
||||
#[instrument(skip_all)]
|
||||
pub fn download_task_started(
|
||||
&self,
|
||||
|
|
@ -381,7 +381,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// download_task_finished updates the metadata of the task when the task downloads finished.
|
||||
/// download_task_finished updates the metadata of the task when the task downloads finished.
|
||||
#[instrument(skip_all)]
|
||||
pub fn download_task_finished(&self, id: &str) -> Result<Task> {
|
||||
let task = match self.db.get::<Task>(id.as_bytes())? {
|
||||
|
|
@ -398,7 +398,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// download_task_failed updates the metadata of the task when the task downloads failed.
|
||||
/// download_task_failed updates the metadata of the task when the task downloads failed.
|
||||
#[instrument(skip_all)]
|
||||
pub fn download_task_failed(&self, id: &str) -> Result<Task> {
|
||||
let task = match self.db.get::<Task>(id.as_bytes())? {
|
||||
|
|
@ -414,7 +414,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// prefetch_task_started updates the metadata of the task when the task prefetch started.
|
||||
/// prefetch_task_started updates the metadata of the task when the task prefetch started.
|
||||
#[instrument(skip_all)]
|
||||
pub fn prefetch_task_started(&self, id: &str) -> Result<Task> {
|
||||
let task = match self.db.get::<Task>(id.as_bytes())? {
|
||||
|
|
@ -436,7 +436,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// prefetch_task_failed updates the metadata of the task when the task prefetch failed.
|
||||
/// prefetch_task_failed updates the metadata of the task when the task prefetch failed.
|
||||
#[instrument(skip_all)]
|
||||
pub fn prefetch_task_failed(&self, id: &str) -> Result<Task> {
|
||||
let task = match self.db.get::<Task>(id.as_bytes())? {
|
||||
|
|
@ -453,7 +453,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// upload_task_started updates the metadata of the task when task uploads started.
|
||||
/// upload_task_started updates the metadata of the task when task uploads started.
|
||||
#[instrument(skip_all)]
|
||||
pub fn upload_task_started(&self, id: &str) -> Result<Task> {
|
||||
let task = match self.db.get::<Task>(id.as_bytes())? {
|
||||
|
|
@ -469,7 +469,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// upload_task_finished updates the metadata of the task when task uploads finished.
|
||||
/// upload_task_finished updates the metadata of the task when task uploads finished.
|
||||
#[instrument(skip_all)]
|
||||
pub fn upload_task_finished(&self, id: &str) -> Result<Task> {
|
||||
let task = match self.db.get::<Task>(id.as_bytes())? {
|
||||
|
|
@ -486,7 +486,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// upload_task_failed updates the metadata of the task when the task uploads failed.
|
||||
/// upload_task_failed updates the metadata of the task when the task uploads failed.
|
||||
#[instrument(skip_all)]
|
||||
pub fn upload_task_failed(&self, id: &str) -> Result<Task> {
|
||||
let task = match self.db.get::<Task>(id.as_bytes())? {
|
||||
|
|
@ -502,13 +502,13 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// get_task gets the task metadata.
|
||||
/// get_task gets the task metadata.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_task(&self, id: &str) -> Result<Option<Task>> {
|
||||
self.db.get(id.as_bytes())
|
||||
}
|
||||
|
||||
// get_tasks gets the task metadatas.
|
||||
/// get_tasks gets the task metadatas.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_tasks(&self) -> Result<Vec<Task>> {
|
||||
let tasks = self
|
||||
|
|
@ -526,16 +526,16 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
.collect()
|
||||
}
|
||||
|
||||
// delete_task deletes the task metadata.
|
||||
/// delete_task deletes the task metadata.
|
||||
#[instrument(skip_all)]
|
||||
pub fn delete_task(&self, id: &str) -> Result<()> {
|
||||
info!("delete task metadata {}", id);
|
||||
self.db.delete::<Task>(id.as_bytes())
|
||||
}
|
||||
|
||||
// create_persistent_cache_task creates a new persistent cache task.
|
||||
// If the cache task imports the content to the dfdaemon finished,
|
||||
// the dfdaemon will create a persistent cache task metadata.
|
||||
/// create_persistent_cache_task creates a new persistent cache task.
|
||||
/// If the cache task imports the content to the dfdaemon finished,
|
||||
/// the dfdaemon will create a persistent cache task metadata.
|
||||
#[instrument(skip_all)]
|
||||
pub fn create_persistent_cache_task(
|
||||
&self,
|
||||
|
|
@ -562,9 +562,9 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// download_cache_task_started updates the metadata of the cache task when
|
||||
// the cache task downloads started. If the cache task downloaded by scheduler
|
||||
// to create persistent cache task, the persistent should be set to true.
|
||||
/// download_cache_task_started updates the metadata of the cache task when
|
||||
/// the cache task downloads started. If the cache task downloaded by scheduler
|
||||
/// to create persistent cache task, the persistent should be set to true.
|
||||
#[instrument(skip_all)]
|
||||
pub fn download_cache_task_started(
|
||||
&self,
|
||||
|
|
@ -597,7 +597,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// download_cache_task_finished updates the metadata of the cache task when the cache task downloads finished.
|
||||
/// download_cache_task_finished updates the metadata of the cache task when the cache task downloads finished.
|
||||
#[instrument(skip_all)]
|
||||
pub fn download_cache_task_finished(&self, id: &str) -> Result<CacheTask> {
|
||||
let task = match self.db.get::<CacheTask>(id.as_bytes())? {
|
||||
|
|
@ -619,7 +619,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// download_cache_task_failed updates the metadata of the cache task when the cache task downloads failed.
|
||||
/// download_cache_task_failed updates the metadata of the cache task when the cache task downloads failed.
|
||||
#[instrument(skip_all)]
|
||||
pub fn download_cache_task_failed(&self, id: &str) -> Result<CacheTask> {
|
||||
let task = match self.db.get::<CacheTask>(id.as_bytes())? {
|
||||
|
|
@ -635,7 +635,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// upload_cache_task_started updates the metadata of the cache task when cache task uploads started.
|
||||
/// upload_cache_task_started updates the metadata of the cache task when cache task uploads started.
|
||||
#[instrument(skip_all)]
|
||||
pub fn upload_cache_task_started(&self, id: &str) -> Result<CacheTask> {
|
||||
let task = match self.db.get::<CacheTask>(id.as_bytes())? {
|
||||
|
|
@ -651,7 +651,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// upload_cache_task_finished updates the metadata of the cache task when cache task uploads finished.
|
||||
/// upload_cache_task_finished updates the metadata of the cache task when cache task uploads finished.
|
||||
#[instrument(skip_all)]
|
||||
pub fn upload_cache_task_finished(&self, id: &str) -> Result<CacheTask> {
|
||||
let task = match self.db.get::<CacheTask>(id.as_bytes())? {
|
||||
|
|
@ -668,7 +668,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// upload_cache_task_failed updates the metadata of the cache task when the cache task uploads failed.
|
||||
/// upload_cache_task_failed updates the metadata of the cache task when the cache task uploads failed.
|
||||
#[instrument(skip_all)]
|
||||
pub fn upload_cache_task_failed(&self, id: &str) -> Result<CacheTask> {
|
||||
let task = match self.db.get::<CacheTask>(id.as_bytes())? {
|
||||
|
|
@ -684,27 +684,27 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// get_cache_task gets the cache task metadata.
|
||||
/// get_cache_task gets the cache task metadata.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_cache_task(&self, id: &str) -> Result<Option<CacheTask>> {
|
||||
self.db.get(id.as_bytes())
|
||||
}
|
||||
|
||||
// get_cache_tasks gets the cache task metadatas.
|
||||
/// get_cache_tasks gets the cache task metadatas.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_cache_tasks(&self) -> Result<Vec<CacheTask>> {
|
||||
let iter = self.db.iter::<CacheTask>()?;
|
||||
iter.map(|ele| ele.map(|(_, task)| task)).collect()
|
||||
}
|
||||
|
||||
// delete_cache_task deletes the cache task metadata.
|
||||
/// delete_cache_task deletes the cache task metadata.
|
||||
#[instrument(skip_all)]
|
||||
pub fn delete_cache_task(&self, id: &str) -> Result<()> {
|
||||
info!("delete cache task metadata {}", id);
|
||||
self.db.delete::<CacheTask>(id.as_bytes())
|
||||
}
|
||||
|
||||
// download_piece_started updates the metadata of the piece when the piece downloads started.
|
||||
/// download_piece_started updates the metadata of the piece when the piece downloads started.
|
||||
#[instrument(skip_all)]
|
||||
pub fn download_piece_started(&self, task_id: &str, number: u32) -> Result<Piece> {
|
||||
// Construct the piece metadata.
|
||||
|
|
@ -721,7 +721,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
|
|||
Ok(piece)
|
||||
}
|
||||
|
||||
// download_piece_finished updates the metadata of the piece when the piece downloads finished.
|
||||
/// download_piece_finished updates the metadata of the piece when the piece downloads finished.
#[instrument(skip_all)]
pub fn download_piece_finished(
&self,

@@ -751,19 +751,19 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(piece)
}

// download_piece_failed updates the metadata of the piece when the piece downloads failed.
/// download_piece_failed updates the metadata of the piece when the piece downloads failed.
#[instrument(skip_all)]
pub fn download_piece_failed(&self, task_id: &str, number: u32) -> Result<()> {
self.delete_piece(task_id, number)
}

// wait_for_piece_finished_failed waits for the piece to be finished or failed.
/// wait_for_piece_finished_failed waits for the piece to be finished or failed.
#[instrument(skip_all)]
pub fn wait_for_piece_finished_failed(&self, task_id: &str, number: u32) -> Result<()> {
self.delete_piece(task_id, number)
}

// upload_piece_started updates the metadata of the piece when piece uploads started.
/// upload_piece_started updates the metadata of the piece when piece uploads started.
#[instrument(skip_all)]
pub fn upload_piece_started(&self, task_id: &str, number: u32) -> Result<Piece> {
// Get the piece id.

@@ -781,7 +781,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(piece)
}

// upload_piece_finished updates the metadata of the piece when piece uploads finished.
/// upload_piece_finished updates the metadata of the piece when piece uploads finished.
#[instrument(skip_all)]
pub fn upload_piece_finished(&self, task_id: &str, number: u32) -> Result<Piece> {
// Get the piece id.

@@ -800,7 +800,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(piece)
}

// upload_piece_failed updates the metadata of the piece when the piece uploads failed.
/// upload_piece_failed updates the metadata of the piece when the piece uploads failed.
#[instrument(skip_all)]
pub fn upload_piece_failed(&self, task_id: &str, number: u32) -> Result<Piece> {
// Get the piece id.

@@ -818,13 +818,13 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(piece)
}

// get_piece gets the piece metadata.
/// get_piece gets the piece metadata.
#[instrument(skip_all)]
pub fn get_piece(&self, task_id: &str, number: u32) -> Result<Option<Piece>> {
self.db.get(self.piece_id(task_id, number).as_bytes())
}

// get_pieces gets the piece metadatas.
/// get_pieces gets the piece metadatas.
pub fn get_pieces(&self, task_id: &str) -> Result<Vec<Piece>> {
let pieces = self
.db

@@ -841,7 +841,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
.collect()
}

// delete_piece deletes the piece metadata.
/// delete_piece deletes the piece metadata.
#[instrument(skip_all)]
pub fn delete_piece(&self, task_id: &str, number: u32) -> Result<()> {
info!("delete piece metadata {}", self.piece_id(task_id, number));

@@ -849,7 +849,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
.delete::<Piece>(self.piece_id(task_id, number).as_bytes())
}

// delete_pieces deletes the piece metadatas.
/// delete_pieces deletes the piece metadatas.
#[instrument(skip_all)]
pub fn delete_pieces(&self, task_id: &str) -> Result<()> {
let piece_ids = self

@@ -878,16 +878,16 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(())
}

// piece_id returns the piece id.
/// piece_id returns the piece id.
#[instrument(skip_all)]
pub fn piece_id(&self, task_id: &str, number: u32) -> String {
format!("{}-{}", task_id, number)
}
}

// Metadata implements the metadata of the storage engine.
/// Metadata implements the metadata of the storage engine.
impl Metadata<RocksdbStorageEngine> {
// new creates a new metadata instance.
/// new creates a new metadata instance.
#[instrument(skip_all)]
pub fn new(
config: Arc<Config>,
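Since the piece functions above all key metadata off the same `piece_id` format, a minimal sketch of that composition (using a `HashMap` as a stand-in for the real storage engine) looks like this:

```rust
// Sketch (assumed types): how piece metadata keys are composed and looked up.
// The real Metadata<E> wraps a storage engine; a HashMap stands in for it here.
use std::collections::HashMap;

fn piece_id(task_id: &str, number: u32) -> String {
    // Matches the format shown in the diff: "{task_id}-{number}".
    format!("{}-{}", task_id, number)
}

fn main() {
    let mut db: HashMap<String, Vec<u8>> = HashMap::new();
    db.insert(piece_id("task-abc", 3), b"piece metadata".to_vec());

    // get_piece / delete_piece both key off the same composed id.
    assert!(db.get(&piece_id("task-abc", 3)).is_some());
    db.remove(&piece_id("task-abc", 3));
    assert!(db.get(&piece_id("task-abc", 3)).is_none());
}
```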
@@ -32,18 +32,18 @@ pub struct RocksdbStorageEngine {
inner: rocksdb::DB,
}

// RocksdbStorageEngine implements deref of the storage engine.
/// RocksdbStorageEngine implements deref of the storage engine.
impl Deref for RocksdbStorageEngine {
// Target is the inner rocksdb DB.
/// Target is the inner rocksdb DB.
type Target = rocksdb::DB;

// deref returns the inner rocksdb DB.
/// deref returns the inner rocksdb DB.
fn deref(&self) -> &Self::Target {
&self.inner
}
}

// RocksdbStorageEngine implements the storage engine of the rocksdb.
/// RocksdbStorageEngine implements the storage engine of the rocksdb.
impl RocksdbStorageEngine {
/// DEFAULT_DIR_NAME is the default directory name to store metadata.
const DEFAULT_DIR_NAME: &'static str = "metadata";

@@ -60,10 +60,10 @@ impl RocksdbStorageEngine {
/// DEFAULT_CACHE_SIZE is the default cache size for rocksdb, default is 512MB.
const DEFAULT_CACHE_SIZE: usize = 512 * 1024 * 1024;

// DEFAULT_LOG_MAX_SIZE is the default max log size for rocksdb, default is 64MB.
/// DEFAULT_LOG_MAX_SIZE is the default max log size for rocksdb, default is 64MB.
const DEFAULT_LOG_MAX_SIZE: usize = 64 * 1024 * 1024;

// DEFAULT_LOG_MAX_FILES is the default max log files for rocksdb.
/// DEFAULT_LOG_MAX_FILES is the default max log files for rocksdb.
const DEFAULT_LOG_MAX_FILES: usize = 10;

/// open opens a rocksdb storage engine with the given directory and column families.

@@ -124,9 +124,9 @@ impl RocksdbStorageEngine {
}
}

// RocksdbStorageEngine implements the storage engine operations.
/// RocksdbStorageEngine implements the storage engine operations.
impl Operations for RocksdbStorageEngine {
// get gets the object by key.
/// get gets the object by key.
#[instrument(skip_all)]
fn get<O: DatabaseObject>(&self, key: &[u8]) -> Result<Option<O>> {
let cf = cf_handle::<O>(self)?;

@@ -142,7 +142,7 @@ impl Operations for RocksdbStorageEngine {
}
}

// put puts the object by key.
/// put puts the object by key.
#[instrument(skip_all)]
fn put<O: DatabaseObject>(&self, key: &[u8], value: &O) -> Result<()> {
let cf = cf_handle::<O>(self)?;

@@ -155,7 +155,7 @@ impl Operations for RocksdbStorageEngine {
Ok(())
}

// delete deletes the object by key.
/// delete deletes the object by key.
#[instrument(skip_all)]
fn delete<O: DatabaseObject>(&self, key: &[u8]) -> Result<()> {
let cf = cf_handle::<O>(self)?;

@@ -167,7 +167,7 @@ impl Operations for RocksdbStorageEngine {
Ok(())
}

// iter iterates all objects.
/// iter iterates all objects.
#[instrument(skip_all)]
fn iter<O: DatabaseObject>(&self) -> Result<impl Iterator<Item = Result<(Box<[u8]>, O)>>> {
let cf = cf_handle::<O>(self)?;

@@ -178,7 +178,7 @@ impl Operations for RocksdbStorageEngine {
}))
}

// iter_raw iterates all objects without serialization.
/// iter_raw iterates all objects without serialization.
#[instrument(skip_all)]
fn iter_raw<O: DatabaseObject>(
&self,

@@ -192,7 +192,7 @@ impl Operations for RocksdbStorageEngine {
}))
}

// prefix_iter iterates all objects with prefix.
/// prefix_iter iterates all objects with prefix.
#[instrument(skip_all)]
fn prefix_iter<O: DatabaseObject>(
&self,

@@ -206,7 +206,7 @@ impl Operations for RocksdbStorageEngine {
}))
}

// prefix_iter_raw iterates all objects with prefix without serialization.
/// prefix_iter_raw iterates all objects with prefix without serialization.
#[instrument(skip_all)]
fn prefix_iter_raw<O: DatabaseObject>(
&self,

@@ -219,7 +219,7 @@ impl Operations for RocksdbStorageEngine {
}))
}

// batch_delete deletes objects by keys.
/// batch_delete deletes objects by keys.
#[instrument(skip_all)]
fn batch_delete<O: DatabaseObject>(&self, keys: Vec<&[u8]>) -> Result<()> {
let cf = cf_handle::<O>(self)?;

@@ -236,7 +236,7 @@ impl Operations for RocksdbStorageEngine {
}
}

// RocksdbStorageEngine implements the rocksdb of the storage engine.
/// RocksdbStorageEngine implements the rocksdb of the storage engine.
impl<'db> StorageEngine<'db> for RocksdbStorageEngine {}

/// cf_handle returns the column family handle for the given object.
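The `Deref` impl above is what lets the engine forward calls to the wrapped `rocksdb::DB`. A self-contained sketch of the same pattern, with a stand-in inner type instead of rocksdb so it compiles without that dependency:

```rust
// Sketch of the Deref-wrapper pattern used by RocksdbStorageEngine.
use std::collections::BTreeMap;
use std::ops::Deref;

struct Inner {
    map: BTreeMap<Vec<u8>, Vec<u8>>,
}

impl Inner {
    fn get(&self, key: &[u8]) -> Option<&Vec<u8>> {
        self.map.get(key)
    }
}

struct Engine {
    inner: Inner,
}

impl Deref for Engine {
    type Target = Inner;

    // Calls on Engine fall through to the inner database handle.
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

fn main() {
    let engine = Engine {
        inner: Inner {
            map: BTreeMap::from([(b"k".to_vec(), b"v".to_vec())]),
        },
    };
    // Thanks to Deref, Inner::get is callable directly on Engine.
    assert_eq!(engine.get(b"k").map(|v| v.as_slice()), Some(&b"v"[..]));
}
```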
@@ -22,28 +22,28 @@ use std::path::Path;
use std::str::FromStr;
use tracing::instrument;

// SEPARATOR is the separator of digest.
/// SEPARATOR is the separator of digest.
pub const SEPARATOR: &str = ":";

// Algorithm is an enum of the algorithm that is used to generate digest.
/// Algorithm is an enum of the algorithm that is used to generate digest.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Algorithm {
// Crc32 is crc32 algorithm for generate digest.
/// Crc32 is crc32 algorithm for generate digest.
Crc32,

// Blake3 is blake3 algorithm for generate digest.
/// Blake3 is blake3 algorithm for generate digest.
Blake3,

// Sha256 is sha256 algorithm for generate digest.
/// Sha256 is sha256 algorithm for generate digest.
Sha256,

// Sha512 is sha512 algorithm for generate digest.
/// Sha512 is sha512 algorithm for generate digest.
Sha512,
}

// Algorithm implements the Display.
/// Algorithm implements the Display.
impl fmt::Display for Algorithm {
// fmt formats the value using the given formatter.
/// fmt formats the value using the given formatter.
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Algorithm::Crc32 => write!(f, "crc32"),

@@ -54,11 +54,11 @@ impl fmt::Display for Algorithm {
}
}

// Algorithm implements the FromStr.
/// Algorithm implements the FromStr.
impl FromStr for Algorithm {
type Err = String;

// from_str parses an algorithm string.
/// from_str parses an algorithm string.
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"crc32" => Ok(Algorithm::Crc32),

@@ -70,23 +70,23 @@ impl FromStr for Algorithm {
}
}

// Digest is a struct that is used to generate digest.
/// Digest is a struct that is used to generate digest.
pub struct Digest {
// algorithm is the algorithm that is used to generate digest.
/// algorithm is the algorithm that is used to generate digest.
algorithm: Algorithm,

// encoded is the encoded digest.
/// encoded is the encoded digest.
encoded: String,
}

// Digest implements the Digest.
/// Digest implements the Digest.
impl Digest {
// new returns a new Digest.
/// new returns a new Digest.
pub fn new(algorithm: Algorithm, encoded: String) -> Self {
Self { algorithm, encoded }
}

// algorithm returns the algorithm of the digest.
/// algorithm returns the algorithm of the digest.
pub fn algorithm(&self) -> Algorithm {
self.algorithm
}

@@ -97,19 +97,19 @@ impl Digest {
}
}

// Digest implements the Display.
/// Digest implements the Display.
impl fmt::Display for Digest {
// fmt formats the value using the given formatter.
/// fmt formats the value using the given formatter.
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}{}{}", self.algorithm, SEPARATOR, self.encoded)
}
}

// Digest implements the FromStr.
/// Digest implements the FromStr.
impl FromStr for Digest {
type Err = String;

// from_str parses a digest string.
/// from_str parses a digest string.
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = s.splitn(2, SEPARATOR).collect();
if parts.len() != 2 {

@@ -128,7 +128,7 @@ impl FromStr for Digest {
}
}

// calculate_file_hash calculates the hash of a file.
/// calculate_file_hash calculates the hash of a file.
#[instrument(skip_all)]
pub fn calculate_file_hash(algorithm: Algorithm, path: &Path) -> ClientResult<Digest> {
let f = std::fs::File::open(path)?;
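The `Display`/`FromStr` pair above round-trips digests through the `algorithm:encoded` form. A small sketch of that parsing, kept independent of the crate's own `Digest` type:

```rust
// Sketch of the "algorithm:encoded" digest format described above.
const SEPARATOR: &str = ":";

fn parse(s: &str) -> Result<(String, String), String> {
    // Split only on the first separator, mirroring splitn(2, SEPARATOR).
    let parts: Vec<&str> = s.splitn(2, SEPARATOR).collect();
    if parts.len() != 2 {
        return Err(format!("invalid digest: {}", s));
    }
    Ok((parts[0].to_string(), parts[1].to_string()))
}

fn main() {
    let (algorithm, encoded) = parse("sha256:e3b0c44298fc1c149afbf4c8996fb924").unwrap();
    assert_eq!(algorithm, "sha256");
    // Display joins the two halves back with the separator.
    assert_eq!(
        format!("{}{}{}", algorithm, SEPARATOR, encoded),
        "sha256:e3b0c44298fc1c149afbf4c8996fb924"
    );
}
```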
@@ -23,7 +23,7 @@ use reqwest::header::{HeaderMap, HeaderValue};
use std::collections::HashMap;
use tracing::{error, instrument};

// reqwest_headermap_to_hashmap converts a reqwest headermap to a hashmap.
/// reqwest_headermap_to_hashmap converts a reqwest headermap to a hashmap.
#[instrument(skip_all)]
pub fn reqwest_headermap_to_hashmap(header: &HeaderMap<HeaderValue>) -> HashMap<String, String> {
let mut hashmap: HashMap<String, String> = HashMap::new();

@@ -38,7 +38,7 @@ pub fn reqwest_headermap_to_hashmap(header: &HeaderMap<HeaderValue>) -> HashMap<
hashmap
}

// hashmap_to_reqwest_headermap converts a hashmap to a reqwest headermap.
/// hashmap_to_reqwest_headermap converts a hashmap to a reqwest headermap.
#[instrument(skip_all)]
pub fn hashmap_to_reqwest_headermap(
header: &HashMap<String, String>,

@@ -47,7 +47,7 @@ pub fn hashmap_to_reqwest_headermap(
Ok(header)
}

// hashmap_to_hyper_header_map converts a hashmap to a hyper header map.
/// hashmap_to_hyper_header_map converts a hashmap to a hyper header map.
#[instrument(skip_all)]
pub fn hashmap_to_hyper_header_map(
header: &HashMap<String, String>,

@@ -56,10 +56,10 @@ pub fn hashmap_to_hyper_header_map(
Ok(header)
}

// TODO: Remove the conversion after the http crate version is the same.
// Convert the Reqwest header to the Hyper header, because of the http crate
// version is different. Reqwest header depends on the http crate
// version 0.2, but the Hyper header depends on the http crate version 0.1.
/// TODO: Remove the conversion after the http crate version is the same.
/// Convert the Reqwest header to the Hyper header, because of the http crate
/// version is different. Reqwest header depends on the http crate
/// version 0.2, but the Hyper header depends on the http crate version 0.1.
#[instrument(skip_all)]
pub fn hyper_headermap_to_reqwest_headermap(
hyper_header: &hyper::header::HeaderMap,

@@ -95,7 +95,7 @@ pub fn hyper_headermap_to_reqwest_headermap(
reqwest_header
}

// header_vec_to_hashmap converts a vector of header string to a hashmap.
/// header_vec_to_hashmap converts a vector of header string to a hashmap.
#[instrument(skip_all)]
pub fn header_vec_to_hashmap(raw_header: Vec<String>) -> Result<HashMap<String, String>> {
let mut header = HashMap::new();

@@ -109,7 +109,7 @@ pub fn header_vec_to_hashmap(raw_header: Vec<String>) -> Result<HashMap<String,
Ok(header)
}

// header_vec_to_reqwest_headermap converts a vector of header string to a reqwest headermap.
/// header_vec_to_reqwest_headermap converts a vector of header string to a reqwest headermap.
#[instrument(skip_all)]
pub fn header_vec_to_reqwest_headermap(
raw_header: Vec<String>,

@@ -117,7 +117,7 @@ pub fn header_vec_to_reqwest_headermap(
hashmap_to_reqwest_headermap(&header_vec_to_hashmap(raw_header)?)
}

// get_range gets the range from http header.
/// get_range gets the range from http header.
#[instrument(skip_all)]
pub fn get_range(header: &HeaderMap, content_length: u64) -> Result<Option<Range>> {
match header.get(reqwest::header::RANGE) {

@@ -129,9 +129,9 @@ pub fn get_range(header: &HeaderMap, content_length: u64) -> Result<Option<Range
}
}

// parse_range_header parses a Range header string as per RFC 7233,
// supported Range Header: "Range": "bytes=100-200", "Range": "bytes=-50",
// "Range": "bytes=150-", "Range": "bytes=0-0,-1".
/// parse_range_header parses a Range header string as per RFC 7233,
/// supported Range Header: "Range": "bytes=100-200", "Range": "bytes=-50",
/// "Range": "bytes=150-", "Range": "bytes=0-0,-1".
#[instrument(skip_all)]
pub fn parse_range_header(range_header_value: &str, content_length: u64) -> Result<Range> {
let parsed_ranges =
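A sketch of the "Key: Value" parsing that `header_vec_to_hashmap` performs, with error handling simplified to skipping malformed entries instead of returning a `Result`:

```rust
// Sketch of converting a vector of "Key: Value" header strings into a map.
use std::collections::HashMap;

fn header_vec_to_hashmap(raw_header: Vec<String>) -> HashMap<String, String> {
    let mut header = HashMap::new();
    for entry in raw_header {
        // Split on the first ':' only, so values may themselves contain colons.
        if let Some((key, value)) = entry.split_once(':') {
            header.insert(key.trim().to_string(), value.trim().to_string());
        }
    }
    header
}

fn main() {
    let headers = header_vec_to_hashmap(vec![
        "Content-Type: application/octet-stream".to_string(),
        "Range: bytes=100-200".to_string(),
    ]);
    assert_eq!(headers.get("Range").map(String::as_str), Some("bytes=100-200"));
}
```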
@@ -25,31 +25,31 @@ use tracing::instrument;
use url::Url;
use uuid::Uuid;

// SEED_PEER_KEY is the key of the seed peer.
/// SEED_PEER_KEY is the key of the seed peer.
const SEED_PEER_KEY: &str = "seed";

// CACHE_KEY is the key of the cache.
/// CACHE_KEY is the key of the cache.
const CACHE_KEY: &str = "cache";

// PERSISTENT_CACHE_KEY is the key of the persistent cache.
/// PERSISTENT_CACHE_KEY is the key of the persistent cache.
const PERSISTENT_CACHE_KEY: &str = "persistent";

// IDGenerator is used to generate the id for the resources.
/// IDGenerator is used to generate the id for the resources.
#[derive(Debug)]
pub struct IDGenerator {
// ip is the ip of the host.
/// ip is the ip of the host.
ip: String,

// hostname is the hostname of the host.
/// hostname is the hostname of the host.
hostname: String,

// is_seed_peer indicates whether the host is a seed peer.
/// is_seed_peer indicates whether the host is a seed peer.
is_seed_peer: bool,
}

// IDGenerator implements the IDGenerator.
/// IDGenerator implements the IDGenerator.
impl IDGenerator {
// new creates a new IDGenerator.
/// new creates a new IDGenerator.
#[instrument(skip_all)]
pub fn new(ip: String, hostname: String, is_seed_peer: bool) -> Self {
IDGenerator {

@@ -59,7 +59,7 @@ impl IDGenerator {
}
}

// host_id generates the host id.
/// host_id generates the host id.
#[instrument(skip_all)]
pub fn host_id(&self) -> String {
if self.is_seed_peer {

@@ -69,7 +69,7 @@ impl IDGenerator {
format!("{}-{}", self.ip, self.hostname)
}

// task_id generates the task id.
/// task_id generates the task id.
#[instrument(skip_all)]
pub fn task_id(
&self,

@@ -113,7 +113,7 @@ impl IDGenerator {
Ok(hex::encode(hasher.finalize()))
}

// cache_task_id generates the cache task id.
/// cache_task_id generates the cache task id.
#[instrument(skip_all)]
pub fn cache_task_id(
&self,

@@ -142,7 +142,7 @@ impl IDGenerator {
Ok(hasher.finalize().to_hex().to_string())
}

// peer_id generates the peer id.
/// peer_id generates the peer id.
#[instrument(skip_all)]
pub fn peer_id(&self) -> String {
if self.is_seed_peer {

@@ -158,7 +158,7 @@ impl IDGenerator {
format!("{}-{}-{}", self.ip, self.hostname, Uuid::new_v4())
}

// cache_peer_id generates the cache peer id.
/// cache_peer_id generates the cache peer id.
#[instrument(skip_all)]
pub fn cache_peer_id(&self, persistent: bool) -> String {
if persistent {

@@ -181,7 +181,7 @@ impl IDGenerator {
)
}

// task_type generates the task type by the task id.
/// task_type generates the task type by the task id.
#[instrument(skip_all)]
pub fn task_type(&self, id: &str) -> TaskType {
if id.contains(CACHE_KEY) {
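A sketch of the id composition suggested by this diff: host ids join ip and hostname, and task ids hash the source URL. The seed suffix and the std hasher below are stand-ins, not the crate's exact scheme:

```rust
// Sketch of IDGenerator-style id composition with illustrative details.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

struct IdGenerator {
    ip: String,
    hostname: String,
    is_seed_peer: bool,
}

impl IdGenerator {
    fn host_id(&self) -> String {
        if self.is_seed_peer {
            // Assumed suffix; the diff only shows the non-seed branch and SEED_PEER_KEY.
            return format!("{}-{}-{}", self.ip, self.hostname, "seed");
        }
        format!("{}-{}", self.ip, self.hostname)
    }

    fn task_id(&self, url: &str) -> String {
        // Stand-in for the real content hash of the download URL and related fields.
        let mut hasher = DefaultHasher::new();
        url.hash(&mut hasher);
        format!("{:x}", hasher.finish())
    }
}

fn main() {
    let generator = IdGenerator {
        ip: "127.0.0.1".to_string(),
        hostname: "localhost".to_string(),
        is_seed_peer: false,
    };
    assert_eq!(generator.host_id(), "127.0.0.1-localhost");
    println!("task id: {}", generator.task_id("https://example.com/file.tar.gz"));
}
```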
@@ -24,22 +24,22 @@ use std::vec::Vec;
use std::{fs, io};
use tracing::instrument;

// NoVerifier is a verifier that does not verify the server certificate.
// It is used for testing and should not be used in production.
/// NoVerifier is a verifier that does not verify the server certificate.
/// It is used for testing and should not be used in production.
#[derive(Debug)]
pub struct NoVerifier(Arc<rustls::crypto::CryptoProvider>);

// Implement the NoVerifier.
/// Implement the NoVerifier.
impl NoVerifier {
// new creates a new NoVerifier.
/// new creates a new NoVerifier.
pub fn new() -> Arc<Self> {
Arc::new(Self(Arc::new(rustls::crypto::ring::default_provider())))
}
}

// Implement the ServerCertVerifier trait for NoVerifier.
/// Implement the ServerCertVerifier trait for NoVerifier.
impl rustls::client::danger::ServerCertVerifier for NoVerifier {
// verify_server_cert verifies the server certificate.
/// verify_server_cert verifies the server certificate.
fn verify_server_cert(
&self,
_end_entity: &CertificateDer<'_>,

@@ -51,7 +51,7 @@ impl rustls::client::danger::ServerCertVerifier for NoVerifier {
Ok(rustls::client::danger::ServerCertVerified::assertion())
}

// verify_tls12_signature verifies the TLS 1.2 signature.
/// verify_tls12_signature verifies the TLS 1.2 signature.
fn verify_tls12_signature(
&self,
message: &[u8],

@@ -66,7 +66,7 @@ impl rustls::client::danger::ServerCertVerifier for NoVerifier {
)
}

// verify_tls13_signature verifies the TLS 1.3 signature.
/// verify_tls13_signature verifies the TLS 1.3 signature.
fn verify_tls13_signature(
&self,
message: &[u8],

@@ -81,15 +81,15 @@ impl rustls::client::danger::ServerCertVerifier for NoVerifier {
)
}

// supported_verify_schemes returns the supported signature schemes.
/// supported_verify_schemes returns the supported signature schemes.
fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
self.0.signature_verification_algorithms.supported_schemes()
}
}

// Generate a CA certificate from PEM format files.
// Generate CA by openssl with PEM format files:
// openssl req -x509 -sha256 -days 36500 -nodes -newkey rsa:4096 -keyout ca.key -out ca.crt
/// Generate a CA certificate from PEM format files.
/// Generate CA by openssl with PEM format files:
/// openssl req -x509 -sha256 -days 36500 -nodes -newkey rsa:4096 -keyout ca.key -out ca.crt
#[instrument(skip_all)]
pub fn generate_ca_cert_from_pem(
ca_cert_path: &PathBuf,

@@ -110,7 +110,7 @@ pub fn generate_ca_cert_from_pem(
Ok(ca_cert)
}

// Generate certificates from PEM format files.
/// Generate certificates from PEM format files.
#[instrument(skip_all)]
pub fn generate_certs_from_pem(cert_path: &PathBuf) -> ClientResult<Vec<CertificateDer<'static>>> {
let f = fs::File::open(cert_path)?;

@@ -119,8 +119,8 @@ pub fn generate_certs_from_pem(cert_path: &PathBuf) -> ClientResult<Vec<Certific
Ok(certs)
}

// generate_self_signed_certs_by_ca_cert generates a self-signed certificates
// by given subject alternative names with CA certificate.
/// generate_self_signed_certs_by_ca_cert generates a self-signed certificates
/// by given subject alternative names with CA certificate.
#[instrument(skip_all)]
pub fn generate_self_signed_certs_by_ca_cert(
ca_cert: &Certificate,

@@ -146,7 +146,7 @@ pub fn generate_self_signed_certs_by_ca_cert(
Ok((certs, key))
}

// generate_simple_self_signed_certs generates a simple self-signed certificates
/// generate_simple_self_signed_certs generates a simple self-signed certificates
#[instrument(skip_all)]
pub fn generate_simple_self_signed_certs(
subject_alt_names: impl Into<Vec<String>>,

@@ -162,7 +162,7 @@ pub fn generate_simple_self_signed_certs(
Ok((certs, key))
}

// certs_to_raw_certs converts DER format of the certificates to raw certificates.
/// certs_to_raw_certs converts DER format of the certificates to raw certificates.
#[instrument(skip_all)]
pub fn certs_to_raw_certs(certs: Vec<CertificateDer<'static>>) -> Vec<Vec<u8>> {
certs

@@ -171,7 +171,7 @@ pub fn certs_to_raw_certs(certs: Vec<CertificateDer<'static>>) -> Vec<Vec<u8>> {
.collect()
}

// raw_certs_to_certs converts raw certificates to DER format of certificates.
/// raw_certs_to_certs converts raw certificates to DER format of certificates.
#[instrument(skip_all)]
pub fn raw_certs_to_certs(raw_certs: Vec<Vec<u8>>) -> Vec<CertificateDer<'static>> {
raw_certs.into_iter().map(|cert| cert.into()).collect()
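A sketch of PEM certificate loading along the lines of `generate_certs_from_pem`, assuming the rustls-pemfile 2.x API; the crate's actual dependencies and error types may differ:

```rust
// Sketch: read a PEM file and collect each PEM block as a DER certificate.
use rustls_pki_types::CertificateDer;
use std::fs;
use std::io::BufReader;
use std::path::Path;

fn load_certs(path: &Path) -> std::io::Result<Vec<CertificateDer<'static>>> {
    let file = fs::File::open(path)?;
    let mut reader = BufReader::new(file);
    // rustls_pemfile::certs yields one DER certificate per PEM block.
    rustls_pemfile::certs(&mut reader).collect()
}

fn main() -> std::io::Result<()> {
    // "ca.crt" matches the openssl command in the comment above; path is illustrative.
    let certs = load_certs(Path::new("ca.crt"))?;
    println!("loaded {} certificate(s)", certs.len());
    Ok(())
}
```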
@@ -18,7 +18,7 @@ use std::env;
use std::process::Command;
use std::time::{SystemTime, UNIX_EPOCH};

// git_commit_hash returns the short hash of the current git commit.
/// git_commit_hash returns the short hash of the current git commit.
fn git_commit_hash() -> String {
if let Ok(output) = Command::new("git")
.args(["rev-parse", "--short", "HEAD"])
@@ -31,24 +31,24 @@ use sysinfo::System;
use tokio::sync::mpsc;
use tracing::{error, info, instrument};

// ManagerAnnouncer is used to announce the dfdaemon information to the manager.
/// ManagerAnnouncer is used to announce the dfdaemon information to the manager.
pub struct ManagerAnnouncer {
// config is the configuration of the dfdaemon.
/// config is the configuration of the dfdaemon.
config: Arc<Config>,

// manager_client is the grpc client of the manager.
/// manager_client is the grpc client of the manager.
manager_client: Arc<ManagerClient>,

// shutdown is used to shutdown the announcer.
/// shutdown is used to shutdown the announcer.
shutdown: shutdown::Shutdown,

// _shutdown_complete is used to notify the announcer is shutdown.
/// _shutdown_complete is used to notify the announcer is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>,
}

// ManagerAnnouncer implements the manager announcer of the dfdaemon.
/// ManagerAnnouncer implements the manager announcer of the dfdaemon.
impl ManagerAnnouncer {
// new creates a new manager announcer.
/// new creates a new manager announcer.
#[instrument(skip_all)]
pub fn new(
config: Arc<Config>,

@@ -64,7 +64,7 @@ impl ManagerAnnouncer {
}
}

// run announces the dfdaemon information to the manager.
/// run announces the dfdaemon information to the manager.
#[instrument(skip_all)]
pub async fn run(&self) -> Result<()> {
// Clone the shutdown channel.

@@ -110,27 +110,27 @@ impl ManagerAnnouncer {
}
}

// Announcer is used to announce the dfdaemon information to the manager and scheduler.
/// Announcer is used to announce the dfdaemon information to the manager and scheduler.
pub struct SchedulerAnnouncer {
// config is the configuration of the dfdaemon.
/// config is the configuration of the dfdaemon.
config: Arc<Config>,

// host_id is the id of the host.
/// host_id is the id of the host.
host_id: String,

// scheduler_client is the grpc client of the scheduler.
/// scheduler_client is the grpc client of the scheduler.
scheduler_client: Arc<SchedulerClient>,

// shutdown is used to shutdown the announcer.
/// shutdown is used to shutdown the announcer.
shutdown: shutdown::Shutdown,

// _shutdown_complete is used to notify the announcer is shutdown.
/// _shutdown_complete is used to notify the announcer is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>,
}

// SchedulerAnnouncer implements the scheduler announcer of the dfdaemon.
/// SchedulerAnnouncer implements the scheduler announcer of the dfdaemon.
impl SchedulerAnnouncer {
// new creates a new scheduler announcer.
/// new creates a new scheduler announcer.
#[instrument(skip_all)]
pub async fn new(
config: Arc<Config>,

@@ -155,7 +155,7 @@ impl SchedulerAnnouncer {
Ok(announcer)
}

// run announces the dfdaemon information to the scheduler.
/// run announces the dfdaemon information to the scheduler.
#[instrument(skip_all)]
pub async fn run(&self) {
// Clone the shutdown channel.

@@ -193,7 +193,7 @@ impl SchedulerAnnouncer {
}
}

// make_announce_host_request makes the announce host request.
/// make_announce_host_request makes the announce host request.
#[instrument(skip_all)]
fn make_announce_host_request(&self) -> Result<AnnounceHostRequest> {
// If the seed peer is enabled, we should announce the seed peer to the scheduler.
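The announcers' `run` methods follow the usual interval-plus-shutdown loop: announce on a timer and exit cleanly when the shutdown signal fires. A sketch with illustrative names, where a broadcast channel stands in for the crate's shutdown type:

```rust
// Sketch of the announce loop: periodic work racing against a shutdown signal.
use std::time::Duration;
use tokio::sync::broadcast;

async fn run(mut shutdown: broadcast::Receiver<()>) {
    let mut interval = tokio::time::interval(Duration::from_secs(30));
    loop {
        tokio::select! {
            _ = interval.tick() => {
                // Here the real announcer would build and send its announce request.
                println!("announce tick");
            }
            _ = shutdown.recv() => {
                println!("announcer shutting down");
                break;
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = broadcast::channel(1);
    let handle = tokio::spawn(run(rx));
    tokio::time::sleep(Duration::from_millis(10)).await;
    let _ = tx.send(());
    let _ = handle.await;
}
```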
@@ -31,7 +31,7 @@ use tracing::{error, info};

use super::*;

// ExportCommand is the subcommand of export.
/// ExportCommand is the subcommand of export.
#[derive(Debug, Clone, Parser)]
pub struct ExportCommand {
#[arg(help = "Specify the cache task ID to export")]

@@ -67,9 +67,9 @@ pub struct ExportCommand {
timeout: Duration,
}

// Implement the execute for ExportCommand.
/// Implement the execute for ExportCommand.
impl ExportCommand {
// execute executes the export command.
/// execute executes the export command.
pub async fn execute(&self, endpoint: &Path) -> Result<()> {
// Validate the command line arguments.
if let Err(err) = self.validate_args() {

@@ -358,7 +358,7 @@ impl ExportCommand {
Ok(())
}

// run runs the export command.
/// run runs the export command.
async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
// Get the absolute path of the output file.
let absolute_path = Path::new(&self.output).absolutize()?;

@@ -428,7 +428,7 @@ impl ExportCommand {
Ok(())
}

// validate_args validates the command line arguments.
/// validate_args validates the command line arguments.
fn validate_args(&self) -> Result<()> {
let absolute_path = Path::new(&self.output).absolutize()?;
match absolute_path.parent() {
@@ -28,10 +28,10 @@ use termion::{color, style};

use super::*;

// DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL is the default steady tick interval of progress bar.
/// DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL is the default steady tick interval of progress bar.
const DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL: Duration = Duration::from_millis(80);

// ImportCommand is the subcommand of import.
/// ImportCommand is the subcommand of import.
#[derive(Debug, Clone, Parser)]
pub struct ImportCommand {
#[arg(help = "Specify the path of the file to import")]

@@ -75,9 +75,9 @@ pub struct ImportCommand {
timeout: Duration,
}

// Implement the execute for ImportCommand.
/// Implement the execute for ImportCommand.
impl ImportCommand {
// execute executes the import sub command.
/// execute executes the import sub command.
pub async fn execute(&self, endpoint: &Path) -> Result<()> {
// Validate the command line arguments.
if let Err(err) = self.validate_args() {

@@ -257,7 +257,7 @@ impl ImportCommand {
Ok(())
}

// run runs the import sub command.
/// run runs the import sub command.
async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
let pb = ProgressBar::new_spinner();
pb.enable_steady_tick(DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL);

@@ -288,7 +288,7 @@ impl ImportCommand {
Ok(())
}

// validate_args validates the command line arguments.
/// validate_args validates the command line arguments.
fn validate_args(&self) -> Result<()> {
if self.path.is_dir() {
return Err(Error::ValidationError(format!(
@@ -119,7 +119,7 @@ pub enum Command {
Remove(remove::RemoveCommand),
}

// Implement the execute for Command.
/// Implement the execute for Command.
impl Command {
#[allow(unused)]
pub async fn execute(self, endpoint: &Path) -> Result<()> {

@@ -154,7 +154,7 @@ async fn main() -> anyhow::Result<()> {
Ok(())
}

// get_and_check_dfdaemon_download_client gets a dfdaemon download client and checks its health.
/// get_and_check_dfdaemon_download_client gets a dfdaemon download client and checks its health.
pub async fn get_dfdaemon_download_client(endpoint: PathBuf) -> Result<DfdaemonDownloadClient> {
// Check dfdaemon's health.
let health_client = HealthClient::new_unix(endpoint.clone()).await?;
@@ -24,19 +24,19 @@ use termion::{color, style};

use super::*;

// DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL is the default steady tick interval of progress bar.
/// DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL is the default steady tick interval of progress bar.
const DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL: Duration = Duration::from_millis(80);

// RemoveCommand is the subcommand of remove.
/// RemoveCommand is the subcommand of remove.
#[derive(Debug, Clone, Parser)]
pub struct RemoveCommand {
#[arg(help = "Specify the cache task ID to remove")]
id: String,
}

// Implement the execute for RemoveCommand.
/// Implement the execute for RemoveCommand.
impl RemoveCommand {
// execute executes the delete command.
/// execute executes the delete command.
pub async fn execute(&self, endpoint: &Path) -> Result<()> {
// Get dfdaemon download client.
let dfdaemon_download_client =

@@ -178,7 +178,7 @@ impl RemoveCommand {
Ok(())
}

// run runs the delete command.
/// run runs the delete command.
async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
let pb = ProgressBar::new_spinner();
pb.enable_steady_tick(DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL);
@@ -32,16 +32,16 @@ use termion::{color, style};

use super::*;

// StatCommand is the subcommand of stat.
/// StatCommand is the subcommand of stat.
#[derive(Debug, Clone, Parser)]
pub struct StatCommand {
#[arg(help = "Specify the cache task ID to stat")]
id: String,
}

// Implement the execute for StatCommand.
/// Implement the execute for StatCommand.
impl StatCommand {
// execute executes the stat command.
/// execute executes the stat command.
pub async fn execute(&self, endpoint: &Path) -> Result<()> {
// Get dfdaemon download client.
let dfdaemon_download_client =

@@ -183,7 +183,7 @@ impl StatCommand {
Ok(())
}

// run runs the stat command.
/// run runs the stat command.
async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
let task = dfdaemon_download_client
.stat_cache_task(StatCacheTaskRequest {
@@ -546,7 +546,7 @@ async fn main() -> anyhow::Result<()> {
Ok(())
}

// run runs the dfget command.
/// run runs the dfget command.
async fn run(mut args: Args, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
// Get the absolute path of the output file.
args.output = Path::new(&args.output).absolutize()?.into();

@@ -567,7 +567,7 @@ async fn run(mut args: Args, dfdaemon_download_client: DfdaemonDownloadClient) -
download(args, ProgressBar::new(0), dfdaemon_download_client).await
}

// download_dir downloads all files in the directory.
/// download_dir downloads all files in the directory.
async fn download_dir(args: Args, download_client: DfdaemonDownloadClient) -> Result<()> {
// Initalize the object storage.
let object_storage = Some(ObjectStorage {

@@ -657,7 +657,7 @@ async fn download_dir(args: Args, download_client: DfdaemonDownloadClient) -> Re
Ok(())
}

// download downloads the single file.
/// download downloads the single file.
async fn download(
args: Args,
progress_bar: ProgressBar,

@@ -759,7 +759,7 @@ async fn download(
Ok(())
}

// get_entries gets all entries in the directory.
/// get_entries gets all entries in the directory.
async fn get_entries(args: Args, object_storage: Option<ObjectStorage>) -> Result<Vec<DirEntry>> {
// Initialize backend factory and build backend.
let backend_factory = BackendFactory::new(None)?;

@@ -818,7 +818,7 @@ async fn get_entries(args: Args, object_storage: Option<ObjectStorage>) -> Resul
Ok(response.entries)
}

// make_output_by_entry makes the output path by the entry information.
/// make_output_by_entry makes the output path by the entry information.
fn make_output_by_entry(url: Url, output: &Path, entry: DirEntry) -> Result<PathBuf> {
// Get the root directory of the download directory and the output root directory.
let root_dir = url.path().to_string();

@@ -836,7 +836,7 @@ fn make_output_by_entry(url: Url, output: &Path, entry: DirEntry) -> Result<Path
.into())
}

// get_and_check_dfdaemon_download_client gets a dfdaemon download client and checks its health.
/// get_and_check_dfdaemon_download_client gets a dfdaemon download client and checks its health.
async fn get_dfdaemon_download_client(endpoint: PathBuf) -> Result<DfdaemonDownloadClient> {
// Check dfdaemon's health.
let health_client = HealthClient::new_unix(endpoint.clone()).await?;

@@ -847,7 +847,7 @@ async fn get_dfdaemon_download_client(endpoint: PathBuf) -> Result<DfdaemonDownl
Ok(dfdaemon_download_client)
}

// validate_args validates the command line arguments.
/// validate_args validates the command line arguments.
fn validate_args(args: &Args) -> Result<()> {
// If the URL is a directory, the output path should be a directory.
if args.url.path().ends_with('/') && !args.output.is_dir() {
@@ -95,11 +95,11 @@ pub enum Command {
Remove(RemoveCommand),
}

// Download or upload files using object storage in Dragonfly.
/// Download or upload files using object storage in Dragonfly.
#[derive(Debug, Clone, Parser)]
pub struct CopyCommand {}

// Remove a file from Dragonfly object storage.
/// Remove a file from Dragonfly object storage.
#[derive(Debug, Clone, Parser)]
pub struct RemoveCommand {}
@@ -27,43 +27,43 @@ use tokio::sync::{mpsc, Mutex, RwLock};
use tonic_health::pb::health_check_response::ServingStatus;
use tracing::{error, info, instrument};

// Data is the dynamic configuration of the dfdaemon.
/// Data is the dynamic configuration of the dfdaemon.
#[derive(Default)]
pub struct Data {
// schedulers is the schedulers of the dfdaemon.
/// schedulers is the schedulers of the dfdaemon.
pub schedulers: ListSchedulersResponse,

// available_schedulers is the available schedulers of the dfdaemon.
/// available_schedulers is the available schedulers of the dfdaemon.
pub available_schedulers: Vec<Scheduler>,

// available_scheduler_cluster_id is the id of the available scheduler cluster of the dfdaemon.
/// available_scheduler_cluster_id is the id of the available scheduler cluster of the dfdaemon.
pub available_scheduler_cluster_id: Option<u64>,
}

// Dynconfig supports dynamic configuration of the client.
/// Dynconfig supports dynamic configuration of the client.
pub struct Dynconfig {
// data is the dynamic configuration of the dfdaemon.
/// data is the dynamic configuration of the dfdaemon.
pub data: RwLock<Data>,

// config is the configuration of the dfdaemon.
/// config is the configuration of the dfdaemon.
config: Arc<Config>,

// manager_client is the grpc client of the manager.
/// manager_client is the grpc client of the manager.
manager_client: Arc<ManagerClient>,

// mutex is used to protect refresh.
/// mutex is used to protect refresh.
mutex: Mutex<()>,

// shutdown is used to shutdown the dynconfig.
/// shutdown is used to shutdown the dynconfig.
shutdown: shutdown::Shutdown,

// _shutdown_complete is used to notify the dynconfig is shutdown.
/// _shutdown_complete is used to notify the dynconfig is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>,
}

// Dynconfig is the implementation of Dynconfig.
/// Dynconfig is the implementation of Dynconfig.
impl Dynconfig {
// new creates a new Dynconfig.
/// new creates a new Dynconfig.
#[instrument(skip_all)]
pub async fn new(
config: Arc<Config>,

@@ -86,7 +86,7 @@ impl Dynconfig {
Ok(dc)
}

// run starts the dynconfig server.
/// run starts the dynconfig server.
#[instrument(skip_all)]
pub async fn run(&self) {
// Clone the shutdown channel.

@@ -110,7 +110,7 @@ impl Dynconfig {
}
}

// refresh refreshes the dynamic configuration of the dfdaemon.
/// refresh refreshes the dynamic configuration of the dfdaemon.
#[instrument(skip_all)]
pub async fn refresh(&self) -> Result<()> {
// Only one refresh can be running at a time.

@@ -142,7 +142,7 @@ impl Dynconfig {
Ok(())
}

// list_schedulers lists the schedulers from the manager.
/// list_schedulers lists the schedulers from the manager.
#[instrument(skip_all)]
async fn list_schedulers(&self) -> Result<ListSchedulersResponse> {
// Get the source type.

@@ -166,7 +166,7 @@ impl Dynconfig {
.await
}

// get_available_schedulers gets the available schedulers.
/// get_available_schedulers gets the available schedulers.
#[instrument(skip_all)]
async fn get_available_schedulers(&self, schedulers: &[Scheduler]) -> Result<Vec<Scheduler>> {
let mut available_schedulers: Vec<Scheduler> = Vec::new();
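A sketch of the "only one refresh at a time" pattern implied by Dynconfig's `mutex` field, with the async `Mutex` serializing refreshes while the `RwLock` guards reads:

```rust
// Sketch of serialized refresh over shared configuration data.
use std::sync::Arc;
use tokio::sync::{Mutex, RwLock};

#[derive(Default)]
struct Data {
    version: u64,
}

struct Dynconfig {
    data: RwLock<Data>,
    mutex: Mutex<()>,
}

impl Dynconfig {
    async fn refresh(&self) {
        // Hold the guard for the whole refresh so concurrent refreshes queue up.
        let _guard = self.mutex.lock().await;
        let mut data = self.data.write().await;
        data.version += 1;
    }
}

#[tokio::main]
async fn main() {
    let dynconfig = Arc::new(Dynconfig {
        data: RwLock::new(Data::default()),
        mutex: Mutex::new(()),
    });
    dynconfig.refresh().await;
    assert_eq!(dynconfig.data.read().await.version, 1);
}
```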
@@ -24,29 +24,29 @@ use std::sync::Arc;
use tokio::sync::mpsc;
use tracing::{error, info, instrument};

// GC is the garbage collector of dfdaemon.
/// GC is the garbage collector of dfdaemon.
pub struct GC {
// config is the configuration of the dfdaemon.
/// config is the configuration of the dfdaemon.
config: Arc<Config>,

// host_id is the id of the host.
/// host_id is the id of the host.
host_id: String,

// storage is the local storage.
/// storage is the local storage.
storage: Arc<Storage>,

// scheduler_client is the grpc client of the scheduler.
/// scheduler_client is the grpc client of the scheduler.
scheduler_client: Arc<SchedulerClient>,

// shutdown is used to shutdown the garbage collector.
/// shutdown is used to shutdown the garbage collector.
shutdown: shutdown::Shutdown,

// _shutdown_complete is used to notify the garbage collector is shutdown.
/// _shutdown_complete is used to notify the garbage collector is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>,
}

impl GC {
// new creates a new GC.
/// new creates a new GC.
#[instrument(skip_all)]
pub fn new(
config: Arc<Config>,

@@ -66,7 +66,7 @@ impl GC {
}
}

// run runs the garbage collector.
/// run runs the garbage collector.
#[instrument(skip_all)]
pub async fn run(&self) {
// Clone the shutdown channel.

@@ -106,7 +106,7 @@ impl GC {
}
}

// evict_task_by_ttl evicts the task by ttl.
/// evict_task_by_ttl evicts the task by ttl.
#[instrument(skip_all)]
async fn evict_task_by_ttl(&self) -> Result<()> {
info!("start to evict by task ttl");

@@ -124,7 +124,7 @@ impl GC {
Ok(())
}

// evict_task_by_disk_usage evicts the task by disk usage.
/// evict_task_by_disk_usage evicts the task by disk usage.
#[instrument(skip_all)]
async fn evict_task_by_disk_usage(&self) -> Result<()> {
let stats = fs2::statvfs(self.config.storage.dir.as_path())?;

@@ -153,7 +153,7 @@ impl GC {
Ok(())
}

// evict_task_space evicts the task by the given space.
/// evict_task_space evicts the task by the given space.
#[instrument(skip_all)]
async fn evict_task_space(&self, need_evict_space: u64) -> Result<()> {
let mut tasks = self.storage.get_tasks()?;

@@ -190,7 +190,7 @@ impl GC {
Ok(())
}

// delete_task_from_scheduler deletes the task from the scheduler.
/// delete_task_from_scheduler deletes the task from the scheduler.
#[instrument(skip_all)]
async fn delete_task_from_scheduler(&self, task: metadata::Task) {
self.scheduler_client

@@ -204,7 +204,7 @@ impl GC {
});
}

// evict_cache_task_by_ttl evicts the cache task by ttl.
/// evict_cache_task_by_ttl evicts the cache task by ttl.
#[instrument(skip_all)]
async fn evict_cache_task_by_ttl(&self) -> Result<()> {
info!("start to evict by cache task ttl * 2");

@@ -222,7 +222,7 @@ impl GC {
Ok(())
}

// evict_cache_task_by_disk_usage evicts the cache task by disk usage.
/// evict_cache_task_by_disk_usage evicts the cache task by disk usage.
#[instrument(skip_all)]
async fn evict_cache_task_by_disk_usage(&self) -> Result<()> {
let stats = fs2::statvfs(self.config.storage.dir.as_path())?;

@@ -251,7 +251,7 @@ impl GC {
Ok(())
}

// evict_cache_task_space evicts the cache task by the given space.
/// evict_cache_task_space evicts the cache task by the given space.
#[instrument(skip_all)]
async fn evict_cache_task_space(&self, need_evict_space: u64) -> Result<()> {
let mut tasks = self.storage.get_cache_tasks()?;

@@ -286,7 +286,7 @@ impl GC {
Ok(())
}

// delete_cache_task_from_scheduler deletes the cache task from the scheduler.
/// delete_cache_task_from_scheduler deletes the cache task from the scheduler.
#[instrument(skip_all)]
async fn delete_cache_task_from_scheduler(&self, task: metadata::CacheTask) {
self.scheduler_client
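A sketch of the disk-usage eviction decision behind `evict_task_by_disk_usage`: when usage exceeds a high watermark, compute how many bytes must be evicted to fall back under it. The threshold name and values below are assumptions, not the crate's configured defaults:

```rust
// Sketch of the eviction arithmetic driven by filesystem statistics.
fn need_evict_space(total: u64, available: u64, high_threshold_percent: u64) -> u64 {
    let used = total - available;
    let used_percent = used * 100 / total;
    if used_percent <= high_threshold_percent {
        return 0;
    }
    // Evict the bytes above the allowed usage so we land back at the threshold.
    used - total * high_threshold_percent / 100
}

fn main() {
    // 100 GiB disk, 15 GiB free, evict above 80% usage -> 5 GiB must go.
    let gib = 1024 * 1024 * 1024u64;
    let to_evict = need_evict_space(100 * gib, 15 * gib, 80);
    assert_eq!(to_evict, 5 * gib);
}
```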
@@ -60,24 +60,24 @@ use tonic::{
use tower::service_fn;
use tracing::{error, info, instrument, Instrument, Span};

// DfdaemonDownloadServer is the grpc unix server of the download.
/// DfdaemonDownloadServer is the grpc unix server of the download.
pub struct DfdaemonDownloadServer {
// socket_path is the path of the unix domain socket.
/// socket_path is the path of the unix domain socket.
socket_path: PathBuf,

// service is the grpc service of the dfdaemon.
/// service is the grpc service of the dfdaemon.
service: DfdaemonDownloadGRPCServer<DfdaemonDownloadServerHandler>,

// shutdown is used to shutdown the grpc server.
/// shutdown is used to shutdown the grpc server.
shutdown: shutdown::Shutdown,

// _shutdown_complete is used to notify the grpc server is shutdown.
/// _shutdown_complete is used to notify the grpc server is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>,
}

// DfdaemonDownloadServer implements the grpc server of the download.
/// DfdaemonDownloadServer implements the grpc server of the download.
impl DfdaemonDownloadServer {
// new creates a new DfdaemonServer.
/// new creates a new DfdaemonServer.
#[instrument(skip_all)]
pub fn new(
socket_path: PathBuf,

@@ -105,7 +105,7 @@ impl DfdaemonDownloadServer {
}
}

// run starts the download server with unix domain socket.
/// run starts the download server with unix domain socket.
#[instrument(skip_all)]
pub async fn run(&mut self) {
// Register the reflection service.

@@ -156,25 +156,25 @@ impl DfdaemonDownloadServer {
}
}

// DfdaemonDownloadServerHandler is the handler of the dfdaemon download grpc service.
/// DfdaemonDownloadServerHandler is the handler of the dfdaemon download grpc service.
pub struct DfdaemonDownloadServerHandler {
// socket_path is the path of the unix domain socket.
/// socket_path is the path of the unix domain socket.
socket_path: PathBuf,

// task is the task manager.
/// task is the task manager.
task: Arc<task::Task>,

// cache_task is the cache task manager.
/// cache_task is the cache task manager.
cache_task: Arc<cache_task::CacheTask>,
}

// DfdaemonDownloadServerHandler implements the dfdaemon download grpc service.
/// DfdaemonDownloadServerHandler implements the dfdaemon download grpc service.
#[tonic::async_trait]
impl DfdaemonDownload for DfdaemonDownloadServerHandler {
// DownloadTaskStream is the stream of the download task response.
/// DownloadTaskStream is the stream of the download task response.
type DownloadTaskStream = ReceiverStream<Result<DownloadTaskResponse, Status>>;

// download_task tells the dfdaemon to download the task.
/// download_task tells the dfdaemon to download the task.
#[instrument(skip_all, fields(host_id, task_id, peer_id))]
async fn download_task(
&self,

@@ -544,7 +544,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
Ok(Response::new(ReceiverStream::new(out_stream_rx)))
}

// stat_task gets the status of the task.
/// stat_task gets the status of the task.
#[instrument(skip_all, fields(host_id, task_id))]
async fn stat_task(
&self,

@@ -582,7 +582,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
Ok(Response::new(task))
}

// delete_task calls the dfdaemon to delete the task.
/// delete_task calls the dfdaemon to delete the task.
#[instrument(skip_all, fields(host_id, task_id))]
async fn delete_task(
&self,

@@ -619,7 +619,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
Ok(Response::new(()))
}

// delete_host calls the scheduler to delete the host.
/// delete_host calls the scheduler to delete the host.
#[instrument(skip_all, fields(host_id))]
async fn delete_host(&self, _: Request<()>) -> Result<Response<()>, Status> {
// Generate the host id.

@@ -646,10 +646,10 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
Ok(Response::new(()))
}

// DownloadCacheTaskStream is the stream of the download cache task response.
/// DownloadCacheTaskStream is the stream of the download cache task response.
type DownloadCacheTaskStream = ReceiverStream<Result<DownloadCacheTaskResponse, Status>>;

// download_cache_task downloads the cache task.
/// download_cache_task downloads the cache task.
#[instrument(skip_all, fields(host_id, task_id, peer_id))]
async fn download_cache_task(
&self,

@@ -818,7 +818,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
Ok(Response::new(ReceiverStream::new(out_stream_rx)))
}

// upload_cache_task uploads the cache task.
/// upload_cache_task uploads the cache task.
#[instrument(skip_all, fields(host_id, task_id, peer_id))]
async fn upload_cache_task(
&self,

@@ -912,7 +912,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
Ok(Response::new(task))
}

// stat_cache_task stats the cache task.
/// stat_cache_task stats the cache task.
#[instrument(skip_all, fields(host_id, task_id))]
async fn stat_cache_task(
&self,

@@ -949,7 +949,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
Ok(Response::new(task))
}

// delete_cache_task deletes the cache task.
/// delete_cache_task deletes the cache task.
#[instrument(skip_all, fields(host_id, task_id))]
async fn delete_cache_task(
&self,

@@ -986,16 +986,16 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
}
}

// DfdaemonDownloadClient is a wrapper of DfdaemonDownloadGRPCClient.
/// DfdaemonDownloadClient is a wrapper of DfdaemonDownloadGRPCClient.
#[derive(Clone)]
pub struct DfdaemonDownloadClient {
// client is the grpc client of the dfdaemon.
/// client is the grpc client of the dfdaemon.
pub client: DfdaemonDownloadGRPCClient<Channel>,
}

// DfdaemonDownloadClient implements the grpc client of the dfdaemon download.
/// DfdaemonDownloadClient implements the grpc client of the dfdaemon download.
impl DfdaemonDownloadClient {
// new_unix creates a new DfdaemonDownloadClient with unix domain socket.
/// new_unix creates a new DfdaemonDownloadClient with unix domain socket.
#[instrument(skip_all)]
pub async fn new_unix(socket_path: PathBuf) -> ClientResult<Self> {
// Ignore the uri because it is not used.

@@ -1024,7 +1024,7 @@ impl DfdaemonDownloadClient {
Ok(Self { client })
}

// download_task tells the dfdaemon to download the task.
/// download_task tells the dfdaemon to download the task.
#[instrument(skip_all)]
pub async fn download_task(
&self,

@@ -1050,7 +1050,7 @@ impl DfdaemonDownloadClient {
Ok(response)
}

// stat_task gets the status of the task.
/// stat_task gets the status of the task.
#[instrument(skip_all)]
pub async fn stat_task(&self, request: DfdaemonStatTaskRequest) -> ClientResult<Task> {
let request = Self::make_request(request);

@@ -1058,7 +1058,7 @@ impl DfdaemonDownloadClient {
Ok(response.into_inner())
}

// delete_task tells the dfdaemon to delete the task.
/// delete_task tells the dfdaemon to delete the task.
#[instrument(skip_all)]
pub async fn delete_task(&self, request: DeleteTaskRequest) -> ClientResult<()> {
let request = Self::make_request(request);

@@ -1066,7 +1066,7 @@ impl DfdaemonDownloadClient {
Ok(())
}

// download_cache_task downloads the cache task.
/// download_cache_task downloads the cache task.
#[instrument(skip_all)]
pub async fn download_cache_task(
&self,

@@ -1090,7 +1090,7 @@ impl DfdaemonDownloadClient {
Ok(response)
}

// upload_cache_task uploads the cache task.
/// upload_cache_task uploads the cache task.
#[instrument(skip_all)]
pub async fn upload_cache_task(
&self,

@@ -1114,7 +1114,7 @@ impl DfdaemonDownloadClient {
Ok(response.into_inner())
}

// stat_cache_task stats the cache task.
/// stat_cache_task stats the cache task.
#[instrument(skip_all)]
pub async fn stat_cache_task(&self, request: StatCacheTaskRequest) -> ClientResult<CacheTask> {
let mut request = tonic::Request::new(request);

@@ -1124,7 +1124,7 @@ impl DfdaemonDownloadClient {
Ok(response.into_inner())
}

// delete_cache_task deletes the cache task.
/// delete_cache_task deletes the cache task.
#[instrument(skip_all)]
pub async fn delete_cache_task(&self, request: DeleteCacheTaskRequest) -> ClientResult<()> {
let request = Self::make_request(request);

@@ -1132,7 +1132,7 @@ impl DfdaemonDownloadClient {
Ok(())
}

// make_request creates a new request with timeout.
/// make_request creates a new request with timeout.
#[instrument(skip_all)]
fn make_request<T>(request: T) -> tonic::Request<T> {
let mut request = tonic::Request::new(request);
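A sketch of the `make_request` helper pattern: wrap the message in a `tonic::Request` and attach a deadline so slow dfdaemon calls fail instead of hanging. The 30-second value is an assumption, not the crate's configured default:

```rust
// Sketch of building a gRPC request with a per-call timeout.
use std::time::Duration;
use tonic::Request;

fn make_request<T>(request: T) -> Request<T> {
    let mut request = Request::new(request);
    // set_timeout records the deadline; tonic sends it as the grpc-timeout header.
    request.set_timeout(Duration::from_secs(30));
    request
}

fn main() {
    let request = make_request("stat task");
    println!("metadata: {:?}", request.metadata());
}
```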
@ -54,24 +54,24 @@ use tonic::{
};
use tracing::{error, info, instrument, Instrument, Span};
// DfdaemonUploadServer is the grpc server of the upload.
/// DfdaemonUploadServer is the grpc server of the upload.
pub struct DfdaemonUploadServer {
// addr is the address of the grpc server.
/// addr is the address of the grpc server.
addr: SocketAddr,
// service is the grpc service of the dfdaemon upload.
/// service is the grpc service of the dfdaemon upload.
service: DfdaemonUploadGRPCServer<DfdaemonUploadServerHandler>,
// shutdown is used to shutdown the grpc server.
/// shutdown is used to shutdown the grpc server.
shutdown: shutdown::Shutdown,
// _shutdown_complete is used to notify the grpc server is shutdown.
/// _shutdown_complete is used to notify the grpc server is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>,
}
// DfdaemonUploadServer implements the grpc server of the upload.
/// DfdaemonUploadServer implements the grpc server of the upload.
impl DfdaemonUploadServer {
// new creates a new DfdaemonUploadServer.
/// new creates a new DfdaemonUploadServer.
#[instrument(skip_all)]
pub fn new(
config: Arc<Config>,

@ -100,7 +100,7 @@ impl DfdaemonUploadServer {
}
}
// run starts the upload server.
/// run starts the upload server.
#[instrument(skip_all)]
pub async fn run(&mut self) {
// Register the reflection service.

@ -139,25 +139,25 @@ impl DfdaemonUploadServer {
}
}
// DfdaemonUploadServerHandler is the handler of the dfdaemon upload grpc service.
/// DfdaemonUploadServerHandler is the handler of the dfdaemon upload grpc service.
pub struct DfdaemonUploadServerHandler {
// socket_path is the path of the unix domain socket.
/// socket_path is the path of the unix domain socket.
socket_path: PathBuf,
// task is the task manager.
/// task is the task manager.
task: Arc<task::Task>,
// cache_task is the cache task manager.
/// cache_task is the cache task manager.
cache_task: Arc<cache_task::CacheTask>,
}
// DfdaemonUploadServerHandler implements the dfdaemon upload grpc service.
/// DfdaemonUploadServerHandler implements the dfdaemon upload grpc service.
#[tonic::async_trait]
impl DfdaemonUpload for DfdaemonUploadServerHandler {
// DownloadTaskStream is the stream of the download task response.
/// DownloadTaskStream is the stream of the download task response.
type DownloadTaskStream = ReceiverStream<Result<DownloadTaskResponse, Status>>;
// download_task downloads the task.
/// download_task downloads the task.
#[instrument(skip_all, fields(host_id, task_id, peer_id))]
async fn download_task(
&self,

@ -530,7 +530,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
Ok(Response::new(ReceiverStream::new(out_stream_rx)))
}
// stat_task stats the task.
/// stat_task stats the task.
#[instrument(skip_all, fields(host_id, task_id))]
async fn stat_task(&self, request: Request<StatTaskRequest>) -> Result<Response<Task>, Status> {
// Clone the request.

@ -565,7 +565,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
Ok(Response::new(task))
}
// delete_task deletes the task.
/// delete_task deletes the task.
#[instrument(skip_all, fields(host_id, task_id))]
async fn delete_task(
&self,

@ -602,10 +602,10 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
Ok(Response::new(()))
}
// SyncPiecesStream is the stream of the sync pieces response.
/// SyncPiecesStream is the stream of the sync pieces response.
type SyncPiecesStream = ReceiverStream<Result<SyncPiecesResponse, Status>>;
// sync_pieces provides the piece metadata for remote peer.
/// sync_pieces provides the piece metadata for remote peer.
#[instrument(skip_all, fields(host_id, remote_host_id, task_id))]
async fn sync_pieces(
&self,

@ -734,7 +734,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
Ok(Response::new(ReceiverStream::new(out_stream_rx)))
}
// download_piece provides the piece content for remote peer.
/// download_piece provides the piece content for remote peer.
#[instrument(skip_all, fields(host_id, remote_host_id, task_id, piece_id))]
async fn download_piece(
&self,

@ -829,10 +829,10 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
}))
}
// DownloadCacheTaskStream is the stream of the download cache task response.
/// DownloadCacheTaskStream is the stream of the download cache task response.
type DownloadCacheTaskStream = ReceiverStream<Result<DownloadCacheTaskResponse, Status>>;
// download_cache_task downloads the cache task.
/// download_cache_task downloads the cache task.
#[instrument(skip_all, fields(host_id, task_id, peer_id))]
async fn download_cache_task(
&self,

@ -1001,7 +1001,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
Ok(Response::new(ReceiverStream::new(out_stream_rx)))
}
// stat_cache_task stats the cache task.
/// stat_cache_task stats the cache task.
#[instrument(skip_all, fields(host_id, task_id))]
async fn stat_cache_task(
&self,

@ -1038,7 +1038,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
Ok(Response::new(task))
}
// delete_cache_task deletes the cache task.
/// delete_cache_task deletes the cache task.
#[instrument(skip_all, fields(host_id, task_id))]
async fn delete_cache_task(
&self,

@ -1075,16 +1075,16 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
}
}
// DfdaemonUploadClient is a wrapper of DfdaemonUploadGRPCClient.
/// DfdaemonUploadClient is a wrapper of DfdaemonUploadGRPCClient.
#[derive(Clone)]
pub struct DfdaemonUploadClient {
// client is the grpc client of the dfdaemon upload.
/// client is the grpc client of the dfdaemon upload.
pub client: DfdaemonUploadGRPCClient<Channel>,
}
// DfdaemonUploadClient implements the dfdaemon upload grpc client.
/// DfdaemonUploadClient implements the dfdaemon upload grpc client.
impl DfdaemonUploadClient {
// new creates a new DfdaemonUploadClient.
/// new creates a new DfdaemonUploadClient.
#[instrument(skip_all)]
pub async fn new(addr: String) -> ClientResult<Self> {
let channel = Channel::from_static(Box::leak(addr.clone().into_boxed_str()))

@ -1106,7 +1106,7 @@ impl DfdaemonUploadClient {
Ok(Self { client })
}
// download_task downloads the task.
/// download_task downloads the task.
#[instrument(skip_all)]
pub async fn download_task(
&self,

@ -1132,7 +1132,7 @@ impl DfdaemonUploadClient {
Ok(response)
}
// sync_pieces provides the piece metadata for remote peer.
/// sync_pieces provides the piece metadata for remote peer.
#[instrument(skip_all)]
pub async fn sync_pieces(
&self,

@ -1143,7 +1143,7 @@ impl DfdaemonUploadClient {
Ok(response)
}
// download_piece provides the piece content for remote peer.
/// download_piece provides the piece content for remote peer.
#[instrument(skip_all)]
pub async fn download_piece(
&self,

@ -1157,7 +1157,7 @@ impl DfdaemonUploadClient {
Ok(response.into_inner())
}
// download_cache_task downloads the cache task.
/// download_cache_task downloads the cache task.
#[instrument(skip_all)]
pub async fn download_cache_task(
&self,

@ -1181,7 +1181,7 @@ impl DfdaemonUploadClient {
Ok(response)
}
// stat_cache_task stats the cache task.
/// stat_cache_task stats the cache task.
#[instrument(skip_all)]
pub async fn stat_cache_task(&self, request: StatCacheTaskRequest) -> ClientResult<CacheTask> {
let request = Self::make_request(request);

@ -1189,7 +1189,7 @@ impl DfdaemonUploadClient {
Ok(response.into_inner())
}
// delete_cache_task deletes the cache task.
/// delete_cache_task deletes the cache task.
#[instrument(skip_all)]
pub async fn delete_cache_task(&self, request: DeleteCacheTaskRequest) -> ClientResult<()> {
let request = Self::make_request(request);

@ -1197,7 +1197,7 @@ impl DfdaemonUploadClient {
Ok(())
}
// make_request creates a new request with timeout.
/// make_request creates a new request with timeout.
#[instrument(skip_all)]
fn make_request<T>(request: T) -> tonic::Request<T> {
let mut request = tonic::Request::new(request);
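Based only on the signatures visible above (new(addr: String) and stat_cache_task(StatCacheTaskRequest)), a hedged sketch of how a caller might use the upload client. The address is a placeholder and the import path is an assumption, not something taken from the commit.

use dragonfly_api::dfdaemon::v2::StatCacheTaskRequest;

// Hypothetical caller: connect to a peer's upload server and stat a cache task.
async fn stat_remote_cache_task() -> Result<(), Box<dyn std::error::Error>> {
    let client = DfdaemonUploadClient::new("http://127.0.0.1:4000".to_string()).await?;
    // Fill in host_id / task_id and other fields in real code.
    let request = StatCacheTaskRequest::default();
    let cache_task = client.stat_cache_task(request).await?;
    println!("cache task: {:?}", cache_task);
    Ok(())
}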
@ -28,16 +28,16 @@ use tonic_health::pb::{
use tower::service_fn;
use tracing::{error, instrument};
// HealthClient is a wrapper of HealthGRPCClient.
/// HealthClient is a wrapper of HealthGRPCClient.
#[derive(Clone)]
pub struct HealthClient {
// client is the grpc client of the certificate.
/// client is the grpc client of the certificate.
client: HealthGRPCClient<Channel>,
}
// HealthClient implements the grpc client of the health.
/// HealthClient implements the grpc client of the health.
impl HealthClient {
// new creates a new HealthClient.
/// new creates a new HealthClient.
#[instrument(skip_all)]
pub async fn new(addr: &str) -> Result<Self> {
let channel = Channel::from_shared(addr.to_string())

@ -60,7 +60,7 @@ impl HealthClient {
Ok(Self { client })
}
// new_unix creates a new HealthClient with unix domain socket.
/// new_unix creates a new HealthClient with unix domain socket.
#[instrument(skip_all)]
pub async fn new_unix(socket_path: PathBuf) -> Result<Self> {
// Ignore the uri because it is not used.

@ -86,7 +86,7 @@ impl HealthClient {
Ok(Self { client })
}
// check checks the health of the grpc service without service name.
/// check checks the health of the grpc service without service name.
#[instrument(skip_all)]
pub async fn check(&self) -> Result<HealthCheckResponse> {
let request = Self::make_request(HealthCheckRequest {

@ -96,7 +96,7 @@ impl HealthClient {
Ok(response.into_inner())
}
// check_service checks the health of the grpc service with service name.
/// check_service checks the health of the grpc service with service name.
#[instrument(skip_all)]
pub async fn check_service(&self, service: String) -> Result<HealthCheckResponse> {
let request = Self::make_request(HealthCheckRequest { service });

@ -104,21 +104,21 @@ impl HealthClient {
Ok(response.into_inner())
}
// check_dfdaemon_download checks the health of the dfdaemon download service.
/// check_dfdaemon_download checks the health of the dfdaemon download service.
#[instrument(skip_all)]
pub async fn check_dfdaemon_download(&self) -> Result<HealthCheckResponse> {
self.check_service("dfdaemon.v2.DfdaemonDownload".to_string())
.await
}
// check_dfdaemon_upload checks the health of the dfdaemon upload service.
/// check_dfdaemon_upload checks the health of the dfdaemon upload service.
#[instrument(skip_all)]
pub async fn check_dfdaemon_upload(&self) -> Result<HealthCheckResponse> {
self.check_service("dfdaemon.v2.DfdaemonUpload".to_string())
.await
}
// make_request creates a new request with timeout.
/// make_request creates a new request with timeout.
#[instrument(skip_all)]
fn make_request<T>(request: T) -> tonic::Request<T> {
let mut request = tonic::Request::new(request);
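A small hedged usage sketch of the health client above, probing the dfdaemon upload service; the address is a placeholder, and the ServingStatus import mirrors the one used elsewhere in this commit.

use tonic_health::pb::health_check_response::ServingStatus;

// Hypothetical probe: ask a dfdaemon instance whether its upload service is serving.
async fn probe_upload_service() -> Result<(), Box<dyn std::error::Error>> {
    let client = HealthClient::new("http://127.0.0.1:4000").await?;
    let response = client.check_dfdaemon_upload().await?;
    if response.status() == ServingStatus::Serving {
        println!("dfdaemon upload service is healthy");
    }
    Ok(())
}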
@ -27,16 +27,16 @@ use tonic::transport::Channel;
use tonic_health::pb::health_check_response::ServingStatus;
use tracing::{error, info, instrument, warn};
// ManagerClient is a wrapper of ManagerGRPCClient.
/// ManagerClient is a wrapper of ManagerGRPCClient.
#[derive(Clone)]
pub struct ManagerClient {
// client is the grpc client of the manager.
/// client is the grpc client of the manager.
pub client: ManagerGRPCClient<Channel>,
}
// ManagerClient implements the grpc client of the manager.
/// ManagerClient implements the grpc client of the manager.
impl ManagerClient {
// new creates a new ManagerClient.
/// new creates a new ManagerClient.
#[instrument(skip_all)]
pub async fn new(addrs: Vec<String>) -> Result<Self> {
// Find the available manager address.

@ -91,7 +91,7 @@ impl ManagerClient {
Ok(Self { client })
}
// list_schedulers lists all schedulers that best match the client.
/// list_schedulers lists all schedulers that best match the client.
#[instrument(skip_all)]
pub async fn list_schedulers(
&self,

@ -102,7 +102,7 @@ impl ManagerClient {
Ok(response.into_inner())
}
// update_seed_peer updates the seed peer information.
/// update_seed_peer updates the seed peer information.
#[instrument(skip_all)]
pub async fn update_seed_peer(&self, request: UpdateSeedPeerRequest) -> Result<SeedPeer> {
let request = Self::make_request(request);

@ -110,7 +110,7 @@ impl ManagerClient {
Ok(response.into_inner())
}
// delete_seed_peer deletes the seed peer information.
/// delete_seed_peer deletes the seed peer information.
#[instrument(skip_all)]
pub async fn delete_seed_peer(&self, request: DeleteSeedPeerRequest) -> Result<()> {
let request = Self::make_request(request);

@ -118,7 +118,7 @@ impl ManagerClient {
Ok(())
}
// make_request creates a new request with timeout.
/// make_request creates a new request with timeout.
#[instrument(skip_all)]
fn make_request<T>(request: T) -> tonic::Request<T> {
let mut request = tonic::Request::new(request);
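A hedged sketch of using the manager client above to report a seed peer. The manager address, the import path, and the request fields are placeholders and assumptions for illustration, not values from the commit.

use dragonfly_api::manager::v2::UpdateSeedPeerRequest;

// Hypothetical registration: report this seed peer to the manager cluster.
async fn register_seed_peer() -> Result<(), Box<dyn std::error::Error>> {
    let client = ManagerClient::new(vec!["http://manager:65003".to_string()]).await?;
    // Fill in hostname, ip, port, and the other fields in real code.
    let request = UpdateSeedPeerRequest::default();
    let seed_peer = client.update_seed_peer(request).await?;
    println!("registered seed peer: {:?}", seed_peer);
    Ok(())
}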
@ -31,31 +31,31 @@ pub mod manager;
pub mod scheduler;
pub mod security;
// CONNECT_TIMEOUT is the timeout for GRPC connection.
/// CONNECT_TIMEOUT is the timeout for GRPC connection.
pub const CONNECT_TIMEOUT: Duration = Duration::from_secs(2);
// REQUEST_TIMEOUT is the timeout for GRPC requests.
/// REQUEST_TIMEOUT is the timeout for GRPC requests.
pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
// TCP_KEEPALIVE is the keepalive duration for TCP connection.
/// TCP_KEEPALIVE is the keepalive duration for TCP connection.
pub const TCP_KEEPALIVE: Duration = Duration::from_secs(3600);
// HTTP2_KEEP_ALIVE_INTERVAL is the interval for HTTP2 keep alive.
/// HTTP2_KEEP_ALIVE_INTERVAL is the interval for HTTP2 keep alive.
pub const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(300);
// HTTP2_KEEP_ALIVE_TIMEOUT is the timeout for HTTP2 keep alive.
/// HTTP2_KEEP_ALIVE_TIMEOUT is the timeout for HTTP2 keep alive.
pub const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(20);
// MAX_FRAME_SIZE is the max frame size for GRPC, default is 12MB.
/// MAX_FRAME_SIZE is the max frame size for GRPC, default is 12MB.
pub const MAX_FRAME_SIZE: u32 = 12 * 1024 * 1024;
// INITIAL_WINDOW_SIZE is the initial window size for GRPC, default is 12MB.
/// INITIAL_WINDOW_SIZE is the initial window size for GRPC, default is 12MB.
pub const INITIAL_WINDOW_SIZE: u32 = 12 * 1024 * 1024;
// BUFFER_SIZE is the buffer size for GRPC, default is 64KB.
/// BUFFER_SIZE is the buffer size for GRPC, default is 64KB.
pub const BUFFER_SIZE: usize = 64 * 1024;
// prefetch_task prefetches the task if prefetch flag is true.
/// prefetch_task prefetches the task if prefetch flag is true.
#[instrument(skip_all)]
pub async fn prefetch_task(
socket_path: PathBuf,
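The constants above tune every channel the client opens. As a hedged illustration (the wiring is an assumption; only the tonic Endpoint methods themselves are real), they typically map onto a channel builder like this:

use tonic::transport::{Channel, Endpoint};

// Illustrative only: build a channel using the constants declared above.
async fn connect(addr: &'static str) -> Result<Channel, tonic::transport::Error> {
    let channel = Endpoint::from_static(addr)
        .connect_timeout(CONNECT_TIMEOUT)
        .timeout(REQUEST_TIMEOUT)
        .tcp_keepalive(Some(TCP_KEEPALIVE))
        .http2_keep_alive_interval(HTTP2_KEEP_ALIVE_INTERVAL)
        .keep_alive_timeout(HTTP2_KEEP_ALIVE_TIMEOUT)
        .initial_connection_window_size(INITIAL_WINDOW_SIZE)
        .buffer_size(BUFFER_SIZE)
        .connect()
        .await?;
    Ok(channel)
}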
@ -37,40 +37,40 @@ use tokio::task::JoinSet;
use tonic::transport::Channel;
use tracing::{error, info, instrument, Instrument};
// VNode is the virtual node of the hashring.
/// VNode is the virtual node of the hashring.
#[derive(Debug, Copy, Clone, Hash, PartialEq)]
struct VNode {
// addr is the address of the virtual node.
/// addr is the address of the virtual node.
addr: SocketAddr,
}
// VNode implements the Display trait.
/// VNode implements the Display trait.
impl std::fmt::Display for VNode {
// fmt formats the virtual node.
/// fmt formats the virtual node.
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.addr)
}
}
// SchedulerClient is a wrapper of SchedulerGRPCClient.
/// SchedulerClient is a wrapper of SchedulerGRPCClient.
#[derive(Clone)]
pub struct SchedulerClient {
// dynconfig is the dynamic configuration of the dfdaemon.
/// dynconfig is the dynamic configuration of the dfdaemon.
dynconfig: Arc<Dynconfig>,
// available_schedulers is the available schedulers.
/// available_schedulers is the available schedulers.
available_schedulers: Arc<RwLock<Vec<Scheduler>>>,
// available_scheduler_addrs is the addresses of available schedulers.
/// available_scheduler_addrs is the addresses of available schedulers.
available_scheduler_addrs: Arc<RwLock<Vec<SocketAddr>>>,
// hashring is the hashring of the scheduler.
/// hashring is the hashring of the scheduler.
hashring: Arc<RwLock<HashRing<VNode>>>,
}
// SchedulerClient implements the grpc client of the scheduler.
/// SchedulerClient implements the grpc client of the scheduler.
impl SchedulerClient {
// new creates a new SchedulerClient.
/// new creates a new SchedulerClient.
#[instrument(skip_all)]
pub async fn new(dynconfig: Arc<Dynconfig>) -> Result<Self> {
let client = Self {

@ -84,7 +84,7 @@ impl SchedulerClient {
Ok(client)
}
// announce_peer announces the peer to the scheduler.
/// announce_peer announces the peer to the scheduler.
#[instrument(skip_all)]
pub async fn announce_peer(
&self,

@ -100,7 +100,7 @@ impl SchedulerClient {
Ok(response)
}
// stat_peer gets the status of the peer.
/// stat_peer gets the status of the peer.
#[instrument(skip(self))]
pub async fn stat_peer(&self, request: StatPeerRequest) -> Result<Peer> {
let task_id = request.task_id.clone();

@ -113,7 +113,7 @@ impl SchedulerClient {
Ok(response.into_inner())
}
// delete_peer tells the scheduler that the peer is deleting.
/// delete_peer tells the scheduler that the peer is deleting.
#[instrument(skip(self))]
pub async fn delete_peer(&self, request: DeletePeerRequest) -> Result<()> {
let task_id = request.task_id.clone();

@ -125,7 +125,7 @@ impl SchedulerClient {
Ok(())
}
// stat_task gets the status of the task.
/// stat_task gets the status of the task.
#[instrument(skip(self))]
pub async fn stat_task(&self, request: StatTaskRequest) -> Result<Task> {
let task_id = request.task_id.clone();

@ -138,7 +138,7 @@ impl SchedulerClient {
Ok(response.into_inner())
}
// delete_task tells the scheduler that the task is deleting.
/// delete_task tells the scheduler that the task is deleting.
#[instrument(skip(self))]
pub async fn delete_task(&self, request: DeleteTaskRequest) -> Result<()> {
let task_id = request.task_id.clone();

@ -150,7 +150,7 @@ impl SchedulerClient {
Ok(())
}
// announce_host announces the host to the scheduler.
/// announce_host announces the host to the scheduler.
#[instrument(skip(self))]
pub async fn announce_host(&self, request: AnnounceHostRequest) -> Result<()> {
// Update scheduler addresses of the client.

@ -208,7 +208,7 @@ impl SchedulerClient {
Ok(())
}
// init_announce_host announces the host to the scheduler.
/// init_announce_host announces the host to the scheduler.
#[instrument(skip(self))]
pub async fn init_announce_host(&self, request: AnnounceHostRequest) -> Result<()> {
let mut join_set = JoinSet::new();

@ -263,7 +263,7 @@ impl SchedulerClient {
Ok(())
}
// delete_host tells the scheduler that the host is deleting.
/// delete_host tells the scheduler that the host is deleting.
#[instrument(skip(self))]
pub async fn delete_host(&self, request: DeleteHostRequest) -> Result<()> {
// Update scheduler addresses of the client.

@ -321,7 +321,7 @@ impl SchedulerClient {
Ok(())
}
// announce_cache_peer announces the cache peer to the scheduler.
/// announce_cache_peer announces the cache peer to the scheduler.
#[instrument(skip_all)]
pub async fn announce_cache_peer(
&self,

@ -337,7 +337,7 @@ impl SchedulerClient {
Ok(response)
}
// stat_cache_peer gets the status of the cache peer.
/// stat_cache_peer gets the status of the cache peer.
#[instrument(skip(self))]
pub async fn stat_cache_peer(&self, request: StatCachePeerRequest) -> Result<CachePeer> {
let task_id = request.task_id.clone();

@ -350,7 +350,7 @@ impl SchedulerClient {
Ok(response.into_inner())
}
// delete_cache_peer tells the scheduler that the cache peer is deleting.
/// delete_cache_peer tells the scheduler that the cache peer is deleting.
#[instrument(skip(self))]
pub async fn delete_cache_peer(&self, request: DeleteCachePeerRequest) -> Result<()> {
let task_id = request.task_id.clone();

@ -362,7 +362,7 @@ impl SchedulerClient {
Ok(())
}
// upload_cache_task_started uploads the metadata of the cache task started.
/// upload_cache_task_started uploads the metadata of the cache task started.
#[instrument(skip(self))]
pub async fn upload_cache_task_started(
&self,

@ -377,7 +377,7 @@ impl SchedulerClient {
Ok(())
}
// upload_cache_task_finished uploads the metadata of the cache task finished.
/// upload_cache_task_finished uploads the metadata of the cache task finished.
#[instrument(skip_all)]
pub async fn upload_cache_task_finished(
&self,

@ -393,7 +393,7 @@ impl SchedulerClient {
Ok(response.into_inner())
}
// upload_cache_task_failed uploads the metadata of the cache task failed.
/// upload_cache_task_failed uploads the metadata of the cache task failed.
#[instrument(skip_all)]
pub async fn upload_cache_task_failed(
&self,

@ -408,7 +408,7 @@ impl SchedulerClient {
Ok(())
}
// stat_cache_task gets the status of the cache task.
/// stat_cache_task gets the status of the cache task.
#[instrument(skip(self))]
pub async fn stat_cache_task(&self, request: StatCacheTaskRequest) -> Result<CacheTask> {
let task_id = request.task_id.clone();

@ -421,7 +421,7 @@ impl SchedulerClient {
Ok(response.into_inner())
}
// delete_cache_task tells the scheduler that the cache task is deleting.
/// delete_cache_task tells the scheduler that the cache task is deleting.
#[instrument(skip(self))]
pub async fn delete_cache_task(&self, request: DeleteCacheTaskRequest) -> Result<()> {
let task_id = request.task_id.clone();

@ -433,7 +433,7 @@ impl SchedulerClient {
Ok(())
}
// client gets the grpc client of the scheduler.
/// client gets the grpc client of the scheduler.
#[instrument(skip(self))]
async fn client(
&self,

@ -480,7 +480,7 @@ impl SchedulerClient {
.max_encoding_message_size(usize::MAX))
}
// update_available_scheduler_addrs updates the addresses of available schedulers.
/// update_available_scheduler_addrs updates the addresses of available schedulers.
#[instrument(skip(self))]
async fn update_available_scheduler_addrs(&self) -> Result<()> {
// Get the endpoints of available schedulers.

@ -566,7 +566,7 @@ impl SchedulerClient {
Ok(())
}
// refresh_available_scheduler_addrs refreshes addresses of available schedulers.
/// refresh_available_scheduler_addrs refreshes addresses of available schedulers.
#[instrument(skip(self))]
async fn refresh_available_scheduler_addrs(&self) -> Result<()> {
// Refresh the dynamic configuration.

@ -576,7 +576,7 @@ impl SchedulerClient {
self.update_available_scheduler_addrs().await
}
// make_request creates a new request with timeout.
/// make_request creates a new request with timeout.
#[instrument(skip_all)]
fn make_request<T>(request: T) -> tonic::Request<T> {
let mut request = tonic::Request::new(request);
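The scheduler client keeps a HashRing of VNode entries so that calls for the same task consistently land on the same scheduler. A hedged sketch of that selection step using the hashring crate; the function and its inputs are illustrative, not the commit's actual code.

use std::net::SocketAddr;
use hashring::HashRing;

// Illustrative only: pick a scheduler address for a task id via consistent hashing.
fn pick_scheduler(task_id: &str, addrs: &[SocketAddr]) -> Option<SocketAddr> {
    let mut ring: HashRing<VNode> = HashRing::new();
    for addr in addrs {
        ring.add(VNode { addr: *addr });
    }
    // Keys that hash to the same point on the ring resolve to the same node,
    // so retries and follow-up calls for one task stay on one scheduler.
    ring.get(&task_id).map(|vnode| vnode.addr)
}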
@ -25,16 +25,16 @@ use dragonfly_client_core::{
use tonic::transport::Channel;
use tracing::instrument;
// CertificateClient is a wrapper of CertificateGRPCClient.
/// CertificateClient is a wrapper of CertificateGRPCClient.
#[derive(Clone)]
pub struct CertificateClient {
// client is the grpc client of the certificate.
/// client is the grpc client of the certificate.
pub client: CertificateGRPCClient<Channel>,
}
// CertificateClient implements the grpc client of the certificate.
/// CertificateClient implements the grpc client of the certificate.
impl CertificateClient {
// new creates a new CertificateClient.
/// new creates a new CertificateClient.
#[instrument(skip_all)]
pub async fn new(addr: String) -> Result<Self> {
let channel = Channel::from_static(Box::leak(addr.into_boxed_str()))

@ -49,7 +49,7 @@ impl CertificateClient {
Ok(Self { client })
}
// issue_certificate issues a certificate for the peer.
/// issue_certificate issues a certificate for the peer.
#[instrument(skip_all)]
pub async fn issue_certificate(
&self,

@ -60,7 +60,7 @@ impl CertificateClient {
Ok(response.into_inner())
}
// make_request creates a new request with timeout.
/// make_request creates a new request with timeout.
#[instrument(skip_all)]
fn make_request<T>(request: T) -> tonic::Request<T> {
let mut request = tonic::Request::new(request);
@ -20,22 +20,22 @@ use tokio::sync::mpsc;
use tracing::{info, instrument};
use warp::{Filter, Rejection, Reply};
// Health is the health server.
/// Health is the health server.
#[derive(Debug)]
pub struct Health {
// addr is the address of the health server.
/// addr is the address of the health server.
addr: SocketAddr,
// shutdown is used to shutdown the health server.
/// shutdown is used to shutdown the health server.
shutdown: shutdown::Shutdown,
// _shutdown_complete is used to notify the health server is shutdown.
/// _shutdown_complete is used to notify the health server is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>,
}
// Health implements the health server.
/// Health implements the health server.
impl Health {
// new creates a new Health.
/// new creates a new Health.
#[instrument(skip_all)]
pub fn new(
addr: SocketAddr,

@ -49,7 +49,7 @@ impl Health {
}
}
// run starts the health server.
/// run starts the health server.
#[instrument(skip_all)]
pub async fn run(&self) {
// Clone the shutdown channel.

@ -75,7 +75,7 @@ impl Health {
}
}
// health_handler handles the health check request.
/// health_handler handles the health check request.
#[instrument(skip_all)]
async fn health_handler() -> Result<impl Reply, Rejection> {
Ok(warp::reply())
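health_handler simply returns an empty 200 reply. A hedged sketch of how such a handler is typically mounted as a warp filter; the route name and address are assumptions for illustration, not taken from the commit.

use std::net::SocketAddr;
use warp::Filter;

// Illustrative only: expose the health_handler shown above at GET /healthy.
async fn serve_health(addr: SocketAddr) {
    let route = warp::path("healthy")
        .and(warp::get())
        .and_then(health_handler);
    warp::serve(route).run(addr).await;
}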
@ -31,201 +31,201 @@ use tokio::sync::mpsc;
|
|||
use tracing::{error, info, instrument, warn};
|
||||
use warp::{Filter, Rejection, Reply};
|
||||
|
||||
// DOWNLOAD_TASK_LEVEL1_DURATION_THRESHOLD is the threshold of download task level1 duration for
|
||||
// recording slow download task.
|
||||
/// DOWNLOAD_TASK_LEVEL1_DURATION_THRESHOLD is the threshold of download task level1 duration for
|
||||
/// recording slow download task.
|
||||
const DOWNLOAD_TASK_LEVEL1_DURATION_THRESHOLD: Duration = Duration::from_millis(500);
|
||||
|
||||
// UPLOAD_TASK_LEVEL1_DURATION_THRESHOLD is the threshold of upload task level1 duration for
|
||||
// recording slow upload task.
|
||||
/// UPLOAD_TASK_LEVEL1_DURATION_THRESHOLD is the threshold of upload task level1 duration for
|
||||
/// recording slow upload task.
|
||||
const UPLOAD_TASK_LEVEL1_DURATION_THRESHOLD: Duration = Duration::from_millis(500);
|
||||
|
||||
lazy_static! {
|
||||
// REGISTRY is used to register all metrics.
|
||||
/// REGISTRY is used to register all metrics.
|
||||
pub static ref REGISTRY: Registry = Registry::new();
|
||||
|
||||
// VERSION_GAUGE is used to record the version info of the service.
|
||||
/// VERSION_GAUGE is used to record the version info of the service.
|
||||
pub static ref VERSION_GAUGE: IntGaugeVec =
|
||||
IntGaugeVec::new(
|
||||
Opts::new("version", "Version info of the service.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["git_version", "git_commit", "platform", "build_time"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// UPLOAD_TASK_COUNT is used to count the number of upload tasks.
|
||||
/// UPLOAD_TASK_COUNT is used to count the number of upload tasks.
|
||||
pub static ref UPLOAD_TASK_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("upload_task_total", "Counter of the number of the upload task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["type", "tag", "app"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// UPLOAD_TASK_FAILURE_COUNT is used to count the failed number of upload tasks.
|
||||
/// UPLOAD_TASK_FAILURE_COUNT is used to count the failed number of upload tasks.
|
||||
pub static ref UPLOAD_TASK_FAILURE_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("upload_task_failure_total", "Counter of the number of failed of the upload task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["type", "tag", "app"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// CONCURRENT_UPLOAD_TASK_GAUGE is used to gauge the number of concurrent upload tasks.
|
||||
/// CONCURRENT_UPLOAD_TASK_GAUGE is used to gauge the number of concurrent upload tasks.
|
||||
pub static ref CONCURRENT_UPLOAD_TASK_GAUGE: IntGaugeVec =
|
||||
IntGaugeVec::new(
|
||||
Opts::new("concurrent_upload_task_total", "Gauge of the number of concurrent of the upload task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["type", "tag", "app"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// UPLOAD_TASK_DURATION is used to record the upload task duration.
|
||||
/// UPLOAD_TASK_DURATION is used to record the upload task duration.
|
||||
pub static ref UPLOAD_TASK_DURATION: HistogramVec =
|
||||
HistogramVec::new(
|
||||
HistogramOpts::new("upload_task_duration_milliseconds", "Histogram of the upload task duration.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME).buckets(exponential_buckets(1.0, 2.0, 24).unwrap()),
|
||||
&["task_type", "task_size_level"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// DOWNLOAD_TASK_COUNT is used to count the number of download tasks.
|
||||
/// DOWNLOAD_TASK_COUNT is used to count the number of download tasks.
|
||||
pub static ref DOWNLOAD_TASK_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("download_task_total", "Counter of the number of the download task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["type", "tag", "app", "priority"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// DOWNLOAD_TASK_FAILURE_COUNT is used to count the failed number of download tasks.
|
||||
/// DOWNLOAD_TASK_FAILURE_COUNT is used to count the failed number of download tasks.
|
||||
pub static ref DOWNLOAD_TASK_FAILURE_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("download_task_failure_total", "Counter of the number of failed of the download task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["type", "tag", "app", "priority"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// PREFETCH_TASK_COUNT is used to count the number of prefetch tasks.
|
||||
/// PREFETCH_TASK_COUNT is used to count the number of prefetch tasks.
|
||||
pub static ref PREFETCH_TASK_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("prefetch_task_total", "Counter of the number of the prefetch task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["type", "tag", "app", "priority"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// PREFETCH_TASK_FAILURE_COUNT is used to count the failed number of prefetch tasks.
|
||||
/// PREFETCH_TASK_FAILURE_COUNT is used to count the failed number of prefetch tasks.
|
||||
pub static ref PREFETCH_TASK_FAILURE_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("prefetch_task_failure_total", "Counter of the number of failed of the prefetch task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["type", "tag", "app", "priority"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// CONCURRENT_DOWNLOAD_TASK_GAUGE is used to gauge the number of concurrent download tasks.
|
||||
/// CONCURRENT_DOWNLOAD_TASK_GAUGE is used to gauge the number of concurrent download tasks.
|
||||
pub static ref CONCURRENT_DOWNLOAD_TASK_GAUGE: IntGaugeVec =
|
||||
IntGaugeVec::new(
|
||||
Opts::new("concurrent_download_task_total", "Gauge of the number of concurrent of the download task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["type", "tag", "app", "priority"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// CONCURRENT_UPLOAD_PIECE_GAUGE is used to gauge the number of concurrent upload pieces.
|
||||
/// CONCURRENT_UPLOAD_PIECE_GAUGE is used to gauge the number of concurrent upload pieces.
|
||||
pub static ref CONCURRENT_UPLOAD_PIECE_GAUGE: IntGaugeVec =
|
||||
IntGaugeVec::new(
|
||||
Opts::new("concurrent_upload_piece_total", "Gauge of the number of concurrent of the upload piece.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&[]
|
||||
).expect("metric can be created");
|
||||
|
||||
// DOWNLOAD_TRAFFIC is used to count the download traffic.
|
||||
/// DOWNLOAD_TRAFFIC is used to count the download traffic.
|
||||
pub static ref DOWNLOAD_TRAFFIC: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("download_traffic", "Counter of the number of the download traffic.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["type", "task_type"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// UPLOAD_TRAFFIC is used to count the upload traffic.
|
||||
/// UPLOAD_TRAFFIC is used to count the upload traffic.
|
||||
pub static ref UPLOAD_TRAFFIC: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("upload_traffic", "Counter of the number of the upload traffic.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["task_type"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// DOWNLOAD_TASK_DURATION is used to record the download task duration.
|
||||
/// DOWNLOAD_TASK_DURATION is used to record the download task duration.
|
||||
pub static ref DOWNLOAD_TASK_DURATION: HistogramVec =
|
||||
HistogramVec::new(
|
||||
HistogramOpts::new("download_task_duration_milliseconds", "Histogram of the download task duration.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME).buckets(exponential_buckets(1.0, 2.0, 24).unwrap()),
|
||||
&["task_type", "task_size_level"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// BACKEND_REQUEST_COUNT is used to count the number of backend requests.
|
||||
/// BACKEND_REQUEST_COUNT is used to count the number of backend requests.
|
||||
pub static ref BACKEND_REQUEST_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("backend_request_total", "Counter of the number of the backend request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["scheme", "method"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// BACKEND_REQUEST_FAILURE_COUNT is used to count the failed number of backend request.
|
||||
/// BACKEND_REQUEST_FAILURE_COUNT is used to count the failed number of backend request.
|
||||
pub static ref BACKEND_REQUEST_FAILURE_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("backend_request_failure_total", "Counter of the number of failed of the backend request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["scheme", "method"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// BACKEND_REQUEST_DURATION is used to record the backend request duration.
|
||||
/// BACKEND_REQUEST_DURATION is used to record the backend request duration.
|
||||
pub static ref BACKEND_REQUEST_DURATION: HistogramVec =
|
||||
HistogramVec::new(
|
||||
HistogramOpts::new("backend_request_duration_milliseconds", "Histogram of the backend request duration.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME).buckets(exponential_buckets(1.0, 2.0, 24).unwrap()),
|
||||
&["scheme", "method"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// PROXY_REQUEST_COUNT is used to count the number of proxy requests.
|
||||
/// PROXY_REQUEST_COUNT is used to count the number of proxy requests.
|
||||
pub static ref PROXY_REQUEST_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("proxy_request_total", "Counter of the number of the proxy request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&[]
|
||||
).expect("metric can be created");
|
||||
|
||||
// PROXY_REQUEST_FAILURE_COUNT is used to count the failed number of proxy request.
|
||||
/// PROXY_REQUEST_FAILURE_COUNT is used to count the failed number of proxy request.
|
||||
pub static ref PROXY_REQUEST_FAILURE_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("proxy_request_failure_total", "Counter of the number of failed of the proxy request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&[]
|
||||
).expect("metric can be created");
|
||||
|
||||
// STAT_TASK_COUNT is used to count the number of stat tasks.
|
||||
/// STAT_TASK_COUNT is used to count the number of stat tasks.
|
||||
pub static ref STAT_TASK_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("stat_task_total", "Counter of the number of the stat task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["type"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// STAT_TASK_FAILURE_COUNT is used to count the failed number of stat tasks.
|
||||
/// STAT_TASK_FAILURE_COUNT is used to count the failed number of stat tasks.
|
||||
pub static ref STAT_TASK_FAILURE_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("stat_task_failure_total", "Counter of the number of failed of the stat task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["type"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// DELETE_TASK_COUNT is used to count the number of delete tasks.
|
||||
/// DELETE_TASK_COUNT is used to count the number of delete tasks.
|
||||
pub static ref DELETE_TASK_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("delete_task_total", "Counter of the number of the delete task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["type"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// DELETE_TASK_FAILURE_COUNT is used to count the failed number of delete tasks.
|
||||
/// DELETE_TASK_FAILURE_COUNT is used to count the failed number of delete tasks.
|
||||
pub static ref DELETE_TASK_FAILURE_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("delete_task_failure_total", "Counter of the number of failed of the delete task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&["type"]
|
||||
).expect("metric can be created");
|
||||
|
||||
// DELETE_HOST_COUNT is used to count the number of delete host.
|
||||
/// DELETE_HOST_COUNT is used to count the number of delete host.
|
||||
pub static ref DELETE_HOST_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("delete_host_total", "Counter of the number of the delete host.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&[]
|
||||
).expect("metric can be created");
|
||||
|
||||
// DELETE_HOST_FAILURE_COUNT is used to count the failed number of delete host.
|
||||
/// DELETE_HOST_FAILURE_COUNT is used to count the failed number of delete host.
|
||||
pub static ref DELETE_HOST_FAILURE_COUNT: IntCounterVec =
|
||||
IntCounterVec::new(
|
||||
Opts::new("delete_host_failure_total", "Counter of the number of failed of the delete host.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&[]
|
||||
).expect("metric can be created");
|
||||
|
||||
// DISK_SPACE is used to count of the disk space.
|
||||
/// DISK_SPACE is used to count of the disk space.
|
||||
pub static ref DISK_SPACE: IntGaugeVec =
|
||||
IntGaugeVec::new(
|
||||
Opts::new("disk_space_total", "Gauge of the disk space in bytes").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
&[]
|
||||
).expect("metric can be created");
|
||||
|
||||
// DISK_USAGE_SPACE is used to count of the disk usage space.
|
||||
/// DISK_USAGE_SPACE is used to count of the disk usage space.
|
||||
pub static ref DISK_USAGE_SPACE: IntGaugeVec =
|
||||
IntGaugeVec::new(
|
||||
Opts::new("disk_usage_space_total", "Gauge of the disk usage space in bytes").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
|
||||
|
|
@ -233,76 +233,76 @@ lazy_static! {
|
|||
).expect("metric can be created");
|
||||
}
|
||||
|
||||
// TaskSize represents the size of the task.
|
||||
/// TaskSize represents the size of the task.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum TaskSize {
|
||||
// Level0 represents unknown size.
|
||||
/// Level0 represents unknown size.
|
||||
Level0,
|
||||
|
||||
// Level0 represents size range is from 0 to 1M.
|
||||
/// Level0 represents size range is from 0 to 1M.
|
||||
Level1,
|
||||
|
||||
// Level1 represents size range is from 1M to 4M.
|
||||
/// Level1 represents size range is from 1M to 4M.
|
||||
Level2,
|
||||
|
||||
// Level2 represents size range is from 4M to 8M.
|
||||
/// Level2 represents size range is from 4M to 8M.
|
||||
Level3,
|
||||
|
||||
// Level3 represents size range is from 8M to 16M.
|
||||
/// Level3 represents size range is from 8M to 16M.
|
||||
Level4,
|
||||
|
||||
// Level4 represents size range is from 16M to 32M.
|
||||
/// Level4 represents size range is from 16M to 32M.
|
||||
Level5,
|
||||
|
||||
// Level5 represents size range is from 32M to 64M.
|
||||
/// Level5 represents size range is from 32M to 64M.
|
||||
Level6,
|
||||
|
||||
// Level6 represents size range is from 64M to 128M.
|
||||
/// Level6 represents size range is from 64M to 128M.
|
||||
Level7,
|
||||
|
||||
// Level7 represents size range is from 128M to 256M.
|
||||
/// Level7 represents size range is from 128M to 256M.
|
||||
Level8,
|
||||
|
||||
// Level8 represents size range is from 256M to 512M.
|
||||
/// Level8 represents size range is from 256M to 512M.
|
||||
Level9,
|
||||
|
||||
// Level9 represents size range is from 512M to 1G.
|
||||
/// Level9 represents size range is from 512M to 1G.
|
||||
Level10,
|
||||
|
||||
// Level10 represents size range is from 1G to 4G.
|
||||
/// Level10 represents size range is from 1G to 4G.
|
||||
Level11,
|
||||
|
||||
// Level11 represents size range is from 4G to 8G.
|
||||
/// Level11 represents size range is from 4G to 8G.
|
||||
Level12,
|
||||
|
||||
// Level12 represents size range is from 8G to 16G.
|
||||
/// Level12 represents size range is from 8G to 16G.
|
||||
Level13,
|
||||
|
||||
// Level13 represents size range is from 16G to 32G.
|
||||
/// Level13 represents size range is from 16G to 32G.
|
||||
Level14,
|
||||
|
||||
// Level14 represents size range is from 32G to 64G.
|
||||
/// Level14 represents size range is from 32G to 64G.
|
||||
Level15,
|
||||
|
||||
// Level15 represents size range is from 64G to 128G.
|
||||
/// Level15 represents size range is from 64G to 128G.
|
||||
Level16,
|
||||
|
||||
// Level16 represents size range is from 128G to 256G.
|
||||
/// Level16 represents size range is from 128G to 256G.
|
||||
Level17,
|
||||
|
||||
// Level17 represents size range is from 256G to 512G.
|
||||
/// Level17 represents size range is from 256G to 512G.
|
||||
Level18,
|
||||
|
||||
// Level18 represents size range is from 512G to 1T.
|
||||
/// Level18 represents size range is from 512G to 1T.
|
||||
Level19,
|
||||
|
||||
// Level20 represents size is greater than 1T.
|
||||
/// Level20 represents size is greater than 1T.
|
||||
Level20,
|
||||
}
|
||||
|
||||
// TaskSize implements the Display trait.
|
||||
/// TaskSize implements the Display trait.
|
||||
impl std::fmt::Display for TaskSize {
|
||||
// fmt formats the TaskSize.
|
||||
/// fmt formats the TaskSize.
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
match self {
|
||||
TaskSize::Level0 => write!(f, "0"),
|
||||
|
|
@ -330,9 +330,9 @@ impl std::fmt::Display for TaskSize {
|
|||
}
|
||||
}
|
||||
|
||||
// TaskSize implements the TaskSize.
|
||||
/// TaskSize implements the TaskSize.
|
||||
impl TaskSize {
|
||||
// calculate_size_level calculates the size level according to the size.
|
||||
/// calculate_size_level calculates the size level according to the size.
|
||||
pub fn calculate_size_level(size: u64) -> Self {
|
||||
match size {
|
||||
0 => TaskSize::Level0,
|
||||
|
|
@ -360,7 +360,7 @@ impl TaskSize {
|
|||
}
|
||||
}
|
||||
|
||||
// collect_upload_task_started_metrics collects the upload task started metrics.
|
||||
/// collect_upload_task_started_metrics collects the upload task started metrics.
|
||||
pub fn collect_upload_task_started_metrics(typ: i32, tag: &str, app: &str) {
|
||||
UPLOAD_TASK_COUNT
|
||||
.with_label_values(&[typ.to_string().as_str(), tag, app])
|
||||
|
|
@ -371,7 +371,7 @@ pub fn collect_upload_task_started_metrics(typ: i32, tag: &str, app: &str) {
|
|||
.inc();
|
||||
}
|
||||
|
||||
// collect_upload_task_finished_metrics collects the upload task finished metrics.
|
||||
/// collect_upload_task_finished_metrics collects the upload task finished metrics.
|
||||
pub fn collect_upload_task_finished_metrics(
|
||||
typ: i32,
|
||||
tag: &str,
|
||||
|
|
@ -399,7 +399,7 @@ pub fn collect_upload_task_finished_metrics(
|
|||
.dec();
|
||||
}
|
||||
|
||||
// collect_upload_task_failure_metrics collects the upload task failure metrics.
|
||||
/// collect_upload_task_failure_metrics collects the upload task failure metrics.
|
||||
pub fn collect_upload_task_failure_metrics(typ: i32, tag: &str, app: &str) {
|
||||
UPLOAD_TASK_FAILURE_COUNT
|
||||
.with_label_values(&[typ.to_string().as_str(), tag, app])
|
||||
|
|
@ -410,7 +410,7 @@ pub fn collect_upload_task_failure_metrics(typ: i32, tag: &str, app: &str) {
|
|||
.dec();
|
||||
}
|
||||
|
||||
// collect_download_task_started_metrics collects the download task started metrics.
|
||||
/// collect_download_task_started_metrics collects the download task started metrics.
|
||||
pub fn collect_download_task_started_metrics(typ: i32, tag: &str, app: &str, priority: &str) {
|
||||
DOWNLOAD_TASK_COUNT
|
||||
.with_label_values(&[typ.to_string().as_str(), tag, app, priority])
|
||||
|
|
@ -421,7 +421,7 @@ pub fn collect_download_task_started_metrics(typ: i32, tag: &str, app: &str, pri
|
|||
.inc();
|
||||
}
|
||||
|
||||
// collect_download_task_finished_metrics collects the download task finished metrics.
|
||||
/// collect_download_task_finished_metrics collects the download task finished metrics.
|
||||
pub fn collect_download_task_finished_metrics(
|
||||
typ: i32,
|
||||
tag: &str,
|
||||
|
|
@ -457,7 +457,7 @@ pub fn collect_download_task_finished_metrics(
|
|||
.dec();
|
||||
}
|
||||
|
||||
// collect_download_task_failure_metrics collects the download task failure metrics.
|
||||
/// collect_download_task_failure_metrics collects the download task failure metrics.
|
||||
pub fn collect_download_task_failure_metrics(typ: i32, tag: &str, app: &str, priority: &str) {
|
||||
DOWNLOAD_TASK_FAILURE_COUNT
|
||||
.with_label_values(&[typ.to_string().as_str(), tag, app, priority])
|
||||
|
|
@ -468,119 +468,119 @@ pub fn collect_download_task_failure_metrics(typ: i32, tag: &str, app: &str, pri
|
|||
.dec();
|
||||
}
|
||||
|
||||
// collect_prefetch_task_started_metrics collects the prefetch task started metrics.
|
||||
/// collect_prefetch_task_started_metrics collects the prefetch task started metrics.
|
||||
pub fn collect_prefetch_task_started_metrics(typ: i32, tag: &str, app: &str, priority: &str) {
|
||||
PREFETCH_TASK_COUNT
|
||||
.with_label_values(&[typ.to_string().as_str(), tag, app, priority])
|
||||
.inc();
|
||||
}
|
||||
|
||||
// collect_prefetch_task_failure_metrics collects the prefetch task failure metrics.
|
||||
/// collect_prefetch_task_failure_metrics collects the prefetch task failure metrics.
|
||||
pub fn collect_prefetch_task_failure_metrics(typ: i32, tag: &str, app: &str, priority: &str) {
|
||||
PREFETCH_TASK_FAILURE_COUNT
|
||||
.with_label_values(&[typ.to_string().as_str(), tag, app, priority])
|
||||
.inc();
|
||||
}
|
||||
|
||||
// collect_download_piece_traffic_metrics collects the download piece traffic metrics.
|
||||
/// collect_download_piece_traffic_metrics collects the download piece traffic metrics.
|
||||
pub fn collect_download_piece_traffic_metrics(typ: &TrafficType, task_type: i32, length: u64) {
|
||||
DOWNLOAD_TRAFFIC
|
||||
.with_label_values(&[typ.as_str_name(), task_type.to_string().as_str()])
|
||||
.inc_by(length);
|
||||
}
|
||||
|
||||
// collect_upload_piece_started_metrics collects the upload piece started metrics.
|
||||
/// collect_upload_piece_started_metrics collects the upload piece started metrics.
|
||||
pub fn collect_upload_piece_started_metrics() {
|
||||
CONCURRENT_UPLOAD_PIECE_GAUGE.with_label_values(&[]).inc();
|
||||
}
|
||||
|
||||
// collect_upload_piece_finished_metrics collects the upload piece finished metrics.
|
||||
/// collect_upload_piece_finished_metrics collects the upload piece finished metrics.
|
||||
pub fn collect_upload_piece_finished_metrics() {
|
||||
CONCURRENT_UPLOAD_PIECE_GAUGE.with_label_values(&[]).dec();
|
||||
}
|
||||
|
||||
// collect_upload_piece_traffic_metrics collects the upload piece traffic metrics.
|
||||
/// collect_upload_piece_traffic_metrics collects the upload piece traffic metrics.
|
||||
pub fn collect_upload_piece_traffic_metrics(task_type: i32, length: u64) {
|
||||
UPLOAD_TRAFFIC
|
||||
.with_label_values(&[task_type.to_string().as_str()])
|
||||
.inc_by(length);
|
||||
}
|
||||
|
||||
// collect_upload_piece_failure_metrics collects the upload piece failure metrics.
|
||||
/// collect_upload_piece_failure_metrics collects the upload piece failure metrics.
|
||||
pub fn collect_upload_piece_failure_metrics() {
|
||||
CONCURRENT_UPLOAD_PIECE_GAUGE.with_label_values(&[]).dec();
|
||||
}
|
||||
|
||||
// collect_backend_request_started_metrics collects the backend request started metrics.
|
||||
/// collect_backend_request_started_metrics collects the backend request started metrics.
|
||||
pub fn collect_backend_request_started_metrics(scheme: &str, method: &str) {
|
||||
BACKEND_REQUEST_COUNT
|
||||
.with_label_values(&[scheme, method])
|
||||
.inc();
|
||||
}
|
||||
|
||||
// collect_backend_request_failure_metrics collects the backend request failure metrics.
|
||||
/// collect_backend_request_failure_metrics collects the backend request failure metrics.
|
||||
pub fn collect_backend_request_failure_metrics(scheme: &str, method: &str) {
|
||||
BACKEND_REQUEST_FAILURE_COUNT
|
||||
.with_label_values(&[scheme, method])
|
||||
.inc();
|
||||
}
|
||||
|
||||
// collect_backend_request_finished_metrics collects the backend request finished metrics.
|
||||
/// collect_backend_request_finished_metrics collects the backend request finished metrics.
|
||||
pub fn collect_backend_request_finished_metrics(scheme: &str, method: &str, cost: Duration) {
|
||||
BACKEND_REQUEST_DURATION
|
||||
.with_label_values(&[scheme, method])
|
||||
.observe(cost.as_millis() as f64);
|
||||
}

// collect_proxy_request_started_metrics collects the proxy request started metrics.
/// collect_proxy_request_started_metrics collects the proxy request started metrics.
pub fn collect_proxy_request_started_metrics() {
PROXY_REQUEST_COUNT.with_label_values(&[]).inc();
}

// collect_proxy_request_failure_metrics collects the proxy request failure metrics.
/// collect_proxy_request_failure_metrics collects the proxy request failure metrics.
pub fn collect_proxy_request_failure_metrics() {
PROXY_REQUEST_FAILURE_COUNT.with_label_values(&[]).inc();
}

// collect_stat_task_started_metrics collects the stat task started metrics.
/// collect_stat_task_started_metrics collects the stat task started metrics.
pub fn collect_stat_task_started_metrics(typ: i32) {
STAT_TASK_COUNT
.with_label_values(&[typ.to_string().as_str()])
.inc();
}

// collect_stat_task_failure_metrics collects the stat task failure metrics.
/// collect_stat_task_failure_metrics collects the stat task failure metrics.
pub fn collect_stat_task_failure_metrics(typ: i32) {
STAT_TASK_FAILURE_COUNT
.with_label_values(&[typ.to_string().as_str()])
.inc();
}

// collect_delete_task_started_metrics collects the delete task started metrics.
/// collect_delete_task_started_metrics collects the delete task started metrics.
pub fn collect_delete_task_started_metrics(typ: i32) {
DELETE_TASK_COUNT
.with_label_values(&[typ.to_string().as_str()])
.inc();
}

// collect_delete_task_failure_metrics collects the delete task failure metrics.
/// collect_delete_task_failure_metrics collects the delete task failure metrics.
pub fn collect_delete_task_failure_metrics(typ: i32) {
DELETE_TASK_FAILURE_COUNT
.with_label_values(&[typ.to_string().as_str()])
.inc();
}

// collect_delete_host_started_metrics collects the delete host started metrics.
/// collect_delete_host_started_metrics collects the delete host started metrics.
pub fn collect_delete_host_started_metrics() {
DELETE_HOST_COUNT.with_label_values(&[]).inc();
}

// collect_delete_host_failure_metrics collects the delete host failure metrics.
/// collect_delete_host_failure_metrics collects the delete host failure metrics.
pub fn collect_delete_host_failure_metrics() {
DELETE_HOST_FAILURE_COUNT.with_label_values(&[]).inc();
}

// collect_disk_space_metrics collects the disk space metrics.
/// collect_disk_space_metrics collects the disk space metrics.
pub fn collect_disk_space_metrics(path: &Path) {
let stats = match fs2::statvfs(path) {
Ok(stats) => stats,
@ -599,22 +599,22 @@ pub fn collect_disk_space_metrics(path: &Path) {
|
|||
.set(usage_space as i64);
|
||||
}
|
||||
|
||||
// Metrics is the metrics server.
|
||||
/// Metrics is the metrics server.
|
||||
#[derive(Debug)]
|
||||
pub struct Metrics {
|
||||
// config is the configuration of the dfdaemon.
|
||||
/// config is the configuration of the dfdaemon.
|
||||
config: Arc<Config>,
|
||||
|
||||
// shutdown is used to shutdown the metrics server.
|
||||
/// shutdown is used to shutdown the metrics server.
|
||||
shutdown: shutdown::Shutdown,
|
||||
|
||||
// _shutdown_complete is used to notify the metrics server is shutdown.
|
||||
/// _shutdown_complete is used to notify the metrics server is shutdown.
|
||||
_shutdown_complete: mpsc::UnboundedSender<()>,
|
||||
}
|
||||
|
||||
// Metrics implements the metrics server.
|
||||
/// Metrics implements the metrics server.
|
||||
impl Metrics {
|
||||
// new creates a new Metrics.
|
||||
/// new creates a new Metrics.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -628,7 +628,7 @@ impl Metrics {
|
|||
}
|
||||
}
|
||||
|
||||
// run starts the metrics server.
|
||||
/// run starts the metrics server.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn run(&self) {
|
||||
// Clone the shutdown channel.
|
||||
|
|
@ -680,7 +680,7 @@ impl Metrics {
|
|||
}
|
||||
}
|
||||
|
||||
// register_custom_metrics registers all custom metrics.
|
||||
/// register_custom_metrics registers all custom metrics.
|
||||
#[instrument(skip_all)]
|
||||
fn register_custom_metrics(&self) {
|
||||
REGISTRY
|
||||
|
|
@ -776,7 +776,7 @@ impl Metrics {
|
|||
.expect("metric can be registered");
|
||||
}
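For illustration, a hedged sketch of the export step that normally follows this registration, assuming REGISTRY is a prometheus Registry as the register calls above suggest; the actual metrics_handler below is elided in this diff and may differ:

use prometheus::{Encoder, TextEncoder};

// Sketch: gather everything registered on REGISTRY and render it in the
// Prometheus text exposition format.
fn encode_registry() -> Result<String, prometheus::Error> {
    let mut buffer = Vec::new();
    let encoder = TextEncoder::new();
    encoder.encode(&REGISTRY.gather(), &mut buffer)?;
    Ok(String::from_utf8_lossy(&buffer).into_owned())
}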
|
||||
|
||||
// metrics_handler handles the metrics request.
|
||||
/// metrics_handler handles the metrics request.
|
||||
#[instrument(skip_all)]
|
||||
async fn metrics_handler(config: Arc<Config>) -> Result<impl Reply, Rejection> {
|
||||
// Collect the disk space metrics.
|
||||
|
|
|
|||
|
|
@ -18,34 +18,34 @@ use dragonfly_api::common::v2::Priority;
|
|||
use reqwest::header::HeaderMap;
|
||||
use tracing::{error, instrument};
|
||||
|
||||
// DRAGONFLY_TAG_HEADER is the header key of tag in http request.
|
||||
/// DRAGONFLY_TAG_HEADER is the header key of tag in http request.
|
||||
pub const DRAGONFLY_TAG_HEADER: &str = "X-Dragonfly-Tag";
|
||||
|
||||
// DRAGONFLY_APPLICATION_HEADER is the header key of application in http request.
|
||||
/// DRAGONFLY_APPLICATION_HEADER is the header key of application in http request.
|
||||
pub const DRAGONFLY_APPLICATION_HEADER: &str = "X-Dragonfly-Application";
|
||||
|
||||
// DRAGONFLY_PRIORITY_HEADER is the header key of priority in http request,
|
||||
// refer to https://github.com/dragonflyoss/api/blob/main/proto/common.proto#L67.
|
||||
/// DRAGONFLY_PRIORITY_HEADER is the header key of priority in http request,
|
||||
/// refer to https://github.com/dragonflyoss/api/blob/main/proto/common.proto#L67.
|
||||
pub const DRAGONFLY_PRIORITY_HEADER: &str = "X-Dragonfly-Priority";
|
||||
|
||||
// DRAGONFLY_REGISTRY_HEADER is the header key of custom address of container registry.
|
||||
/// DRAGONFLY_REGISTRY_HEADER is the header key of custom address of container registry.
|
||||
pub const DRAGONFLY_REGISTRY_HEADER: &str = "X-Dragonfly-Registry";
|
||||
|
||||
// DRAGONFLY_FILTERS_HEADER is the header key of filters in http request,
|
||||
// it is the filtered query params to generate the task id.
|
||||
// When filter is "X-Dragonfly-Filtered-Query-Params: Signature,Expires,ns" for example:
|
||||
// http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
|
||||
// will generate the same task id.
|
||||
// Default value includes the filtered query params of s3, gcs, oss, obs, cos.
|
||||
/// DRAGONFLY_FILTERS_HEADER is the header key of filters in http request,
|
||||
/// it is the filtered query params to generate the task id.
|
||||
/// When filter is "X-Dragonfly-Filtered-Query-Params: Signature,Expires,ns" for example:
|
||||
/// http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
|
||||
/// will generate the same task id.
|
||||
/// Default value includes the filtered query params of s3, gcs, oss, obs, cos.
|
||||
pub const DRAGONFLY_FILTERED_QUERY_PARAMS_HEADER: &str = "X-Dragonfly-Filtered-Query-Params";
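For illustration, a hedged sketch of the normalization this header implies, written with the url crate; the helper name is hypothetical and this is not the crate's actual task-id code:

use url::Url;

// Sketch: drop the filtered query params so presigned variants of the same
// object (different Signature/Expires values) normalize to one download URL.
fn strip_filtered_query_params(raw_url: &str, filtered: &[String]) -> Result<String, url::ParseError> {
    let url = Url::parse(raw_url)?;
    let kept: Vec<(String, String)> = url
        .query_pairs()
        .filter(|(k, _)| !filtered.iter().any(|f| f.eq_ignore_ascii_case(k)))
        .map(|(k, v)| (k.into_owned(), v.into_owned()))
        .collect();

    let mut normalized = url.clone();
    normalized.set_query(None);
    if !kept.is_empty() {
        normalized.query_pairs_mut().extend_pairs(kept);
    }
    Ok(normalized.to_string())
}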
|
||||
|
||||
// DRAGONFLY_USE_P2P_HEADER is the header key of use p2p in http request.
// If the value is "true", the request will use P2P technology to distribute
// the content. If the value is "false" but the url matches the regular expression
// in the proxy config, the request will also use P2P technology to distribute the content.
/// DRAGONFLY_USE_P2P_HEADER is the header key of use p2p in http request.
/// If the value is "true", the request will use P2P technology to distribute
/// the content. If the value is "false" but the url matches the regular expression
/// in the proxy config, the request will also use P2P technology to distribute the content.
pub const DRAGONFLY_USE_P2P_HEADER: &str = "X-Dragonfly-Use-P2P";
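For illustration, a hedged sketch of reading this header with a safe default; the real get_use_p2p below is elided in this diff and may differ:

use reqwest::header::HeaderMap;

// Sketch: treat a missing or malformed header as "false" rather than failing.
fn use_p2p_requested(header: &HeaderMap) -> bool {
    header
        .get(DRAGONFLY_USE_P2P_HEADER)
        .and_then(|value| value.to_str().ok())
        .map(|value| value.eq_ignore_ascii_case("true"))
        .unwrap_or(false)
}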
|
||||
|
||||
// get_tag gets the tag from http header.
|
||||
/// get_tag gets the tag from http header.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_tag(header: &HeaderMap) -> Option<String> {
|
||||
match header.get(DRAGONFLY_TAG_HEADER) {
|
||||
|
|
@ -60,7 +60,7 @@ pub fn get_tag(header: &HeaderMap) -> Option<String> {
|
|||
}
|
||||
}
|
||||
|
||||
// get_application gets the application from http header.
|
||||
/// get_application gets the application from http header.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_application(header: &HeaderMap) -> Option<String> {
|
||||
match header.get(DRAGONFLY_APPLICATION_HEADER) {
|
||||
|
|
@ -75,7 +75,7 @@ pub fn get_application(header: &HeaderMap) -> Option<String> {
|
|||
}
|
||||
}
|
||||
|
||||
// get_priority gets the priority from http header.
|
||||
/// get_priority gets the priority from http header.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_priority(header: &HeaderMap) -> i32 {
|
||||
let default_priority = Priority::Level6 as i32;
|
||||
|
|
@ -97,7 +97,7 @@ pub fn get_priority(header: &HeaderMap) -> i32 {
|
|||
}
|
||||
}
|
||||
|
||||
// get_registry gets the custom address of container registry from http header.
|
||||
/// get_registry gets the custom address of container registry from http header.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_registry(header: &HeaderMap) -> Option<String> {
|
||||
match header.get(DRAGONFLY_REGISTRY_HEADER) {
|
||||
|
|
@ -112,7 +112,7 @@ pub fn get_registry(header: &HeaderMap) -> Option<String> {
|
|||
}
|
||||
}
|
||||
|
||||
// get_filtered_query_params gets the filtered query params from the http header.
/// get_filtered_query_params gets the filtered query params from the http header.
#[instrument(skip_all)]
|
||||
pub fn get_filtered_query_params(
|
||||
header: &HeaderMap,
|
||||
|
|
@ -130,7 +130,7 @@ pub fn get_filtered_query_params(
|
|||
}
|
||||
}
|
||||
|
||||
// get_use_p2p gets the use p2p from http header.
|
||||
/// get_use_p2p gets the use p2p from http header.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get_use_p2p(header: &HeaderMap) -> bool {
|
||||
match header.get(DRAGONFLY_USE_P2P_HEADER) {
|
||||
|
|
|
|||
|
|
@ -67,37 +67,37 @@ use tracing::{error, info, instrument, Span};
|
|||
|
||||
pub mod header;
|
||||
|
||||
// Response is the response of the proxy server.
|
||||
/// Response is the response of the proxy server.
|
||||
pub type Response = hyper::Response<BoxBody<Bytes, ClientError>>;
|
||||
|
||||
// Proxy is the proxy server.
|
||||
/// Proxy is the proxy server.
|
||||
pub struct Proxy {
|
||||
// config is the configuration of the dfdaemon.
|
||||
/// config is the configuration of the dfdaemon.
|
||||
config: Arc<Config>,
|
||||
|
||||
// task is the task manager.
|
||||
/// task is the task manager.
|
||||
task: Arc<Task>,
|
||||
|
||||
// addr is the address of the proxy server.
|
||||
/// addr is the address of the proxy server.
|
||||
addr: SocketAddr,
|
||||
|
||||
// registry_certs is the certificate of the client for the registry.
|
||||
/// registry_certs is the certificate of the client for the registry.
|
||||
registry_certs: Arc<Option<Vec<CertificateDer<'static>>>>,
|
||||
|
||||
// server_ca_cert is the CA certificate of the proxy server to
|
||||
// sign the self-signed certificate.
|
||||
/// server_ca_cert is the CA certificate of the proxy server to
|
||||
/// sign the self-signed certificate.
|
||||
server_ca_cert: Arc<Option<Certificate>>,
|
||||
|
||||
// shutdown is used to shutdown the proxy server.
|
||||
/// shutdown is used to shutdown the proxy server.
|
||||
shutdown: shutdown::Shutdown,
|
||||
|
||||
// _shutdown_complete is used to notify the proxy server is shutdown.
|
||||
/// _shutdown_complete is used to notify the proxy server is shutdown.
|
||||
_shutdown_complete: mpsc::UnboundedSender<()>,
|
||||
}
|
||||
|
||||
// Proxy implements the proxy server.
|
||||
/// Proxy implements the proxy server.
|
||||
impl Proxy {
|
||||
// new creates a new Proxy.
|
||||
/// new creates a new Proxy.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -168,7 +168,7 @@ impl Proxy {
|
|||
proxy
|
||||
}
|
||||
|
||||
// run starts the proxy server.
|
||||
/// run starts the proxy server.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn run(&self) -> ClientResult<()> {
|
||||
let listener = TcpListener::bind(self.addr).await?;
|
||||
|
|
@ -223,7 +223,7 @@ impl Proxy {
|
|||
}
|
||||
}
|
||||
|
||||
// handler handles the request from the client.
|
||||
/// handler handles the request from the client.
|
||||
#[instrument(skip_all, fields(uri, method))]
|
||||
pub async fn handler(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -289,7 +289,7 @@ pub async fn handler(
|
|||
.await
|
||||
}
|
||||
|
||||
// registry_mirror_http_handler handles the http request for the registry mirror by client.
|
||||
/// registry_mirror_http_handler handles the http request for the registry mirror by client.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn registry_mirror_http_handler(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -309,7 +309,7 @@ pub async fn registry_mirror_http_handler(
|
|||
.await;
|
||||
}
|
||||
|
||||
// registry_mirror_https_handler handles the https request for the registry mirror by client.
|
||||
/// registry_mirror_https_handler handles the https request for the registry mirror by client.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn registry_mirror_https_handler(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -331,7 +331,7 @@ pub async fn registry_mirror_https_handler(
|
|||
.await;
|
||||
}
|
||||
|
||||
// http_handler handles the http request by client.
|
||||
/// http_handler handles the http request by client.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn http_handler(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -397,7 +397,7 @@ pub async fn http_handler(
|
|||
return proxy_http(request).await;
|
||||
}
|
||||
|
||||
// https_handler handles the https request by client.
|
||||
/// https_handler handles the https request by client.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn https_handler(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -439,9 +439,9 @@ pub async fn https_handler(
|
|||
}
|
||||
}
|
||||
|
||||
// upgraded_tunnel handles the upgraded connection. If the ca_cert is not set, use the
|
||||
// self-signed certificate. Otherwise, use the CA certificate to sign the
|
||||
// self-signed certificate.
|
||||
/// upgraded_tunnel handles the upgraded connection. If the ca_cert is not set, use the
|
||||
/// self-signed certificate. Otherwise, use the CA certificate to sign the
|
||||
/// self-signed certificate.
|
||||
#[instrument(skip_all)]
|
||||
async fn upgraded_tunnel(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -503,7 +503,7 @@ async fn upgraded_tunnel(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
// upgraded_handler handles the upgraded https request from the client.
|
||||
/// upgraded_handler handles the upgraded https request from the client.
|
||||
#[instrument(skip_all, fields(uri, method))]
|
||||
pub async fn upgraded_handler(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -579,7 +579,7 @@ pub async fn upgraded_handler(
|
|||
return proxy_http(request).await;
|
||||
}
|
||||
|
||||
// proxy_by_dfdaemon proxies the request via the dfdaemon.
|
||||
/// proxy_by_dfdaemon proxies the request via the dfdaemon.
|
||||
#[instrument(skip_all)]
|
||||
async fn proxy_by_dfdaemon(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -839,7 +839,7 @@ async fn proxy_by_dfdaemon(
|
|||
}
|
||||
}
|
||||
|
||||
// proxy_http proxies the HTTP request directly to the remote server.
|
||||
/// proxy_http proxies the HTTP request directly to the remote server.
|
||||
#[instrument(skip_all)]
|
||||
async fn proxy_http(request: Request<hyper::body::Incoming>) -> ClientResult<Response> {
|
||||
let Some(host) = request.uri().host() else {
|
||||
|
|
@ -866,7 +866,7 @@ async fn proxy_http(request: Request<hyper::body::Incoming>) -> ClientResult<Res
|
|||
Ok(response.map(|b| b.map_err(ClientError::from).boxed()))
|
||||
}
|
||||
|
||||
// proxy_https proxies the HTTPS request directly to the remote server.
|
||||
/// proxy_https proxies the HTTPS request directly to the remote server.
|
||||
#[instrument(skip_all)]
|
||||
async fn proxy_https(
|
||||
request: Request<hyper::body::Incoming>,
|
||||
|
|
@ -904,7 +904,7 @@ async fn proxy_https(
|
|||
Ok(response.map(|b| b.map_err(ClientError::from).boxed()))
|
||||
}
|
||||
|
||||
// make_registry_mirror_request makes a registry mirror request by the request.
|
||||
/// make_registry_mirror_request makes a registry mirror request by the request.
|
||||
#[instrument(skip_all)]
|
||||
fn make_registry_mirror_request(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -940,7 +940,7 @@ fn make_registry_mirror_request(
|
|||
Ok(request)
|
||||
}
|
||||
|
||||
// make_download_task_request makes a download task request by the request.
|
||||
/// make_download_task_request makes a download task request by the request.
|
||||
#[instrument(skip_all)]
|
||||
fn make_download_task_request(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -983,7 +983,7 @@ fn make_download_task_request(
|
|||
})
|
||||
}
|
||||
|
||||
// make_download_url makes a download url by the given uri.
|
||||
/// make_download_url makes a download url by the given uri.
|
||||
#[instrument(skip_all)]
|
||||
fn make_download_url(
|
||||
uri: &hyper::Uri,
|
||||
|
|
@ -1009,7 +1009,7 @@ fn make_download_url(
|
|||
.to_string())
|
||||
}
|
||||
|
||||
// make_response_headers makes the response headers.
|
||||
/// make_response_headers makes the response headers.
|
||||
#[instrument(skip_all)]
|
||||
fn make_response_headers(
|
||||
mut download_task_started_response: DownloadTaskStartedResponse,
|
||||
|
|
@ -1035,14 +1035,14 @@ fn make_response_headers(
|
|||
hashmap_to_hyper_header_map(&download_task_started_response.response_header)
|
||||
}
|
||||
|
||||
// find_matching_rule returns whether the dfdaemon should be used to download the task.
|
||||
// If the dfdaemon should be used, return the matched rule.
|
||||
/// find_matching_rule returns whether the dfdaemon should be used to download the task.
|
||||
/// If the dfdaemon should be used, return the matched rule.
|
||||
#[instrument(skip_all)]
|
||||
fn find_matching_rule(rules: Option<Vec<Rule>>, url: &str) -> Option<Rule> {
|
||||
rules?.iter().find(|rule| rule.regex.is_match(url)).cloned()
|
||||
}
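A short, hedged usage sketch of the helper above; the wrapper name is hypothetical and where the rules come from (typically the proxy section of the config) is left to the caller:

// Sketch: decide whether a request should go through the dfdaemon by checking
// the proxy rules against the request URL.
fn should_use_dfdaemon(rules: Option<Vec<Rule>>, url: &str) -> bool {
    find_matching_rule(rules, url).is_some()
}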
|
||||
|
||||
// make_error_response makes an error response with the given status and message.
|
||||
/// make_error_response makes an error response with the given status and message.
|
||||
#[instrument(skip_all)]
|
||||
fn make_error_response(status: http::StatusCode, header: Option<http::HeaderMap>) -> Response {
|
||||
let mut response = Response::new(empty());
|
||||
|
|
@ -1056,7 +1056,7 @@ fn make_error_response(status: http::StatusCode, header: Option<http::HeaderMap>
|
|||
response
|
||||
}
|
||||
|
||||
// empty returns an empty body.
|
||||
/// empty returns an empty body.
|
||||
#[instrument(skip_all)]
|
||||
fn empty() -> BoxBody<Bytes, ClientError> {
|
||||
Empty::<Bytes>::new()
|
||||
|
|
|
|||
|
|
@ -54,27 +54,27 @@ use tracing::{error, info, instrument, Instrument};
|
|||
|
||||
use super::*;
|
||||
|
||||
// CacheTask represents a cache task manager.
|
||||
/// CacheTask represents a cache task manager.
|
||||
pub struct CacheTask {
|
||||
// config is the configuration of the dfdaemon.
|
||||
/// config is the configuration of the dfdaemon.
|
||||
config: Arc<Config>,
|
||||
|
||||
// id_generator is the id generator.
|
||||
/// id_generator is the id generator.
|
||||
pub id_generator: Arc<IDGenerator>,
|
||||
|
||||
// storage is the local storage.
|
||||
/// storage is the local storage.
|
||||
storage: Arc<Storage>,
|
||||
|
||||
// scheduler_client is the grpc client of the scheduler.
|
||||
/// scheduler_client is the grpc client of the scheduler.
|
||||
pub scheduler_client: Arc<SchedulerClient>,
|
||||
|
||||
// piece is the piece manager.
|
||||
/// piece is the piece manager.
|
||||
pub piece: Arc<piece::Piece>,
|
||||
}
|
||||
|
||||
// CacheTask is the implementation of CacheTask.
|
||||
/// CacheTask is the implementation of CacheTask.
|
||||
impl CacheTask {
|
||||
// new creates a new CacheTask.
|
||||
/// new creates a new CacheTask.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -100,7 +100,7 @@ impl CacheTask {
|
|||
}
|
||||
}
|
||||
|
||||
// create_persistent creates a persistent cache task from local.
|
||||
/// create_persistent creates a persistent cache task from local.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn create_persistent(
|
||||
&self,
|
||||
|
|
@ -224,7 +224,7 @@ impl CacheTask {
|
|||
}
|
||||
}
|
||||
|
||||
// download_started updates the metadata of the cache task when the cache task downloads started.
|
||||
/// download_started updates the metadata of the cache task when the cache task downloads started.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn download_started(
|
||||
&self,
|
||||
|
|
@ -253,20 +253,20 @@ impl CacheTask {
|
|||
)
|
||||
}
|
||||
|
||||
// download_finished updates the metadata of the cache task when the task downloads finished.
|
||||
/// download_finished updates the metadata of the cache task when the task downloads finished.
|
||||
#[instrument(skip_all)]
|
||||
pub fn download_finished(&self, id: &str) -> ClientResult<metadata::CacheTask> {
|
||||
self.storage.download_cache_task_finished(id)
|
||||
}
|
||||
|
||||
// download_failed updates the metadata of the cache task when the task downloads failed.
|
||||
/// download_failed updates the metadata of the cache task when the task downloads failed.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn download_failed(&self, id: &str) -> ClientResult<()> {
|
||||
let _ = self.storage.download_cache_task_failed(id).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// hard_link_or_copy hard links or copies the cache task content to the destination.
|
||||
/// hard_link_or_copy hard links or copies the cache task content to the destination.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn hard_link_or_copy(
|
||||
&self,
|
||||
|
|
@ -276,7 +276,7 @@ impl CacheTask {
|
|||
self.storage.hard_link_or_copy_cache_task(task, to).await
|
||||
}
|
||||
|
||||
// download downloads a cache task.
|
||||
/// download downloads a cache task.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all)]
|
||||
pub async fn download(
|
||||
|
|
@ -455,7 +455,7 @@ impl CacheTask {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
// download_partial_with_scheduler downloads a partial cache task with scheduler.
|
||||
/// download_partial_with_scheduler downloads a partial cache task with scheduler.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all)]
|
||||
async fn download_partial_with_scheduler(
|
||||
|
|
@ -758,7 +758,7 @@ impl CacheTask {
|
|||
Ok(finished_pieces)
|
||||
}
|
||||
|
||||
// download_partial_with_scheduler_from_remote_peer downloads a partial cache task with scheduler from a remote peer.
|
||||
/// download_partial_with_scheduler_from_remote_peer downloads a partial cache task with scheduler from a remote peer.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all)]
|
||||
async fn download_partial_with_scheduler_from_remote_peer(
|
||||
|
|
@ -984,7 +984,7 @@ impl CacheTask {
|
|||
Ok(finished_pieces)
|
||||
}
|
||||
|
||||
// download_partial_from_local_peer downloads a partial cache task from a local peer.
|
||||
/// download_partial_from_local_peer downloads a partial cache task from a local peer.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all)]
|
||||
async fn download_partial_from_local_peer(
|
||||
|
|
@ -1073,7 +1073,7 @@ impl CacheTask {
|
|||
Ok(finished_pieces)
|
||||
}
|
||||
|
||||
// stat stats the cache task from the scheduler.
|
||||
/// stat stats the cache task from the scheduler.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn stat(&self, task_id: &str, host_id: &str) -> ClientResult<CommonCacheTask> {
|
||||
self.scheduler_client
|
||||
|
|
@ -1084,7 +1084,7 @@ impl CacheTask {
|
|||
.await
|
||||
}
|
||||
|
||||
// delete deletes a cache task.
/// delete deletes a cache task.
#[instrument(skip_all)]
|
||||
pub async fn delete(&self, task_id: &str, host_id: &str) -> ClientResult<()> {
|
||||
self.scheduler_client
|
||||
|
|
|
|||
|
|
@ -38,48 +38,48 @@ use tracing::{error, info, instrument, Span};
|
|||
|
||||
use super::*;
|
||||
|
||||
// MAX_PIECE_COUNT is the maximum piece count. If the piece count exceeds
// MAX_PIECE_COUNT, the piece length will be optimized by the file length.
// When the piece length reaches MAX_PIECE_LENGTH, the piece count will
// probably still exceed MAX_PIECE_COUNT.
/// MAX_PIECE_COUNT is the maximum piece count. If the piece count exceeds
/// MAX_PIECE_COUNT, the piece length will be optimized by the file length.
/// When the piece length reaches MAX_PIECE_LENGTH, the piece count will
/// probably still exceed MAX_PIECE_COUNT.
const MAX_PIECE_COUNT: u64 = 500;
|
||||
|
||||
// MIN_PIECE_LENGTH is the minimum piece length.
|
||||
/// MIN_PIECE_LENGTH is the minimum piece length.
|
||||
const MIN_PIECE_LENGTH: u64 = 4 * 1024 * 1024;
|
||||
|
||||
// MAX_PIECE_LENGTH is the maximum piece length.
|
||||
/// MAX_PIECE_LENGTH is the maximum piece length.
|
||||
const MAX_PIECE_LENGTH: u64 = 16 * 1024 * 1024;
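For illustration, one plausible reading of the optimization described by these constants, as a hedged sketch; the real calculate_piece_length below may round or bucket differently:

// Sketch: pick a piece length so that content_length / piece_length stays at
// or under MAX_PIECE_COUNT, clamped to the [MIN_PIECE_LENGTH, MAX_PIECE_LENGTH]
// range defined above.
fn sketch_piece_length_by_file_length(content_length: u64) -> u64 {
    let by_count = content_length / MAX_PIECE_COUNT;
    by_count.clamp(MIN_PIECE_LENGTH, MAX_PIECE_LENGTH)
}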
|
||||
|
||||
// PieceLengthStrategy sets the optimization strategy of piece length.
|
||||
/// PieceLengthStrategy sets the optimization strategy of piece length.
|
||||
pub enum PieceLengthStrategy {
|
||||
// OptimizeByFileLength optimizes the piece length by the file length.
|
||||
/// OptimizeByFileLength optimizes the piece length by the file length.
|
||||
OptimizeByFileLength,
|
||||
}
|
||||
|
||||
// Piece represents a piece manager.
|
||||
/// Piece represents a piece manager.
|
||||
pub struct Piece {
|
||||
// config is the configuration of the dfdaemon.
|
||||
/// config is the configuration of the dfdaemon.
|
||||
config: Arc<Config>,
|
||||
|
||||
// id_generator is the id generator.
|
||||
/// id_generator is the id generator.
|
||||
id_generator: Arc<IDGenerator>,
|
||||
|
||||
// storage is the local storage.
|
||||
/// storage is the local storage.
|
||||
storage: Arc<Storage>,
|
||||
|
||||
// backend_factory is the backend factory.
|
||||
/// backend_factory is the backend factory.
|
||||
backend_factory: Arc<BackendFactory>,
|
||||
|
||||
// download_rate_limiter is the rate limiter of the download speed in bps(bytes per second).
|
||||
/// download_rate_limiter is the rate limiter of the download speed in bps(bytes per second).
|
||||
download_rate_limiter: Arc<RateLimiter>,
|
||||
|
||||
// upload_rate_limiter is the rate limiter of the upload speed in bps(bytes per second).
|
||||
/// upload_rate_limiter is the rate limiter of the upload speed in bps(bytes per second).
|
||||
upload_rate_limiter: Arc<RateLimiter>,
|
||||
}
|
||||
|
||||
// Piece implements the piece manager.
|
||||
/// Piece implements the piece manager.
|
||||
impl Piece {
|
||||
// new returns a new Piece.
|
||||
/// new returns a new Piece.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -110,13 +110,13 @@ impl Piece {
|
|||
}
|
||||
}
|
||||
|
||||
// get gets a piece from the local storage.
|
||||
/// get gets a piece from the local storage.
|
||||
#[instrument(skip_all)]
|
||||
pub fn get(&self, task_id: &str, number: u32) -> Result<Option<metadata::Piece>> {
|
||||
self.storage.get_piece(task_id, number)
|
||||
}
|
||||
|
||||
// calculate_interested calculates the interested pieces by content_length and range.
|
||||
/// calculate_interested calculates the interested pieces by content_length and range.
|
||||
#[instrument(skip_all)]
|
||||
pub fn calculate_interested(
|
||||
&self,
|
||||
|
|
@ -230,7 +230,7 @@ impl Piece {
|
|||
Ok(pieces)
|
||||
}
|
||||
|
||||
// remove_finished_from_interested removes the finished pieces from interested pieces.
|
||||
/// remove_finished_from_interested removes the finished pieces from interested pieces.
|
||||
#[instrument(skip_all)]
|
||||
pub fn remove_finished_from_interested(
|
||||
&self,
|
||||
|
|
@ -248,7 +248,7 @@ impl Piece {
|
|||
.collect::<Vec<metadata::Piece>>()
|
||||
}
|
||||
|
||||
// merge_finished_pieces merges the finished pieces and has finished pieces.
|
||||
/// merge_finished_pieces merges the finished pieces and has finished pieces.
|
||||
#[instrument(skip_all)]
|
||||
pub fn merge_finished_pieces(
|
||||
&self,
|
||||
|
|
@ -269,7 +269,7 @@ impl Piece {
|
|||
pieces.into_values().collect()
|
||||
}
|
||||
|
||||
// calculate_piece_length calculates the piece length by content_length.
/// calculate_piece_length calculates the piece length by content_length.
pub fn calculate_piece_length(
|
||||
&self,
|
||||
strategy: PieceLengthStrategy,
|
||||
|
|
@ -292,7 +292,7 @@ impl Piece {
|
|||
}
|
||||
}
|
||||
|
||||
// upload_from_local_peer_into_async_read uploads a single piece from a local peer.
|
||||
/// upload_from_local_peer_into_async_read uploads a single piece from a local peer.
|
||||
#[instrument(skip_all, fields(piece_id))]
|
||||
pub async fn upload_from_local_peer_into_async_read(
|
||||
&self,
|
||||
|
|
@ -323,7 +323,7 @@ impl Piece {
|
|||
})
|
||||
}
|
||||
|
||||
// download_from_local_peer_into_async_read downloads a single piece from a local peer.
|
||||
/// download_from_local_peer_into_async_read downloads a single piece from a local peer.
|
||||
#[instrument(skip_all, fields(piece_id))]
|
||||
pub async fn download_from_local_peer_into_async_read(
|
||||
&self,
|
||||
|
|
@ -345,8 +345,8 @@ impl Piece {
|
|||
self.storage.upload_piece(task_id, number, range).await
|
||||
}
|
||||
|
||||
// download_from_local_peer downloads a single piece from a local peer. It fakes the
// piece download from the local peer and only collects the metrics.
/// download_from_local_peer downloads a single piece from a local peer. It fakes the
/// piece download from the local peer and only collects the metrics.
#[instrument(skip_all)]
|
||||
pub fn download_from_local_peer(&self, task_id: &str, length: u64) {
|
||||
collect_download_piece_traffic_metrics(
|
||||
|
|
@ -356,7 +356,7 @@ impl Piece {
|
|||
);
|
||||
}
|
||||
|
||||
// download_from_remote_peer downloads a single piece from a remote peer.
|
||||
/// download_from_remote_peer downloads a single piece from a remote peer.
|
||||
#[instrument(skip_all, fields(piece_id))]
|
||||
pub async fn download_from_remote_peer(
|
||||
&self,
|
||||
|
|
@ -482,7 +482,7 @@ impl Piece {
|
|||
})
|
||||
}
|
||||
|
||||
// download_from_source downloads a single piece from the source.
|
||||
/// download_from_source downloads a single piece from the source.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all, fields(piece_id))]
|
||||
pub async fn download_from_source(
|
||||
|
|
|
|||
|
|
@ -29,51 +29,51 @@ use tokio::task::JoinSet;
|
|||
use tokio_stream::StreamExt;
|
||||
use tracing::{error, info, instrument, Instrument};
|
||||
|
||||
// CollectedParent is the parent peer collected from the remote peer.
|
||||
/// CollectedParent is the parent peer collected from the remote peer.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct CollectedParent {
|
||||
// id is the id of the parent.
|
||||
/// id is the id of the parent.
|
||||
pub id: String,
|
||||
|
||||
// host is the host of the parent.
|
||||
/// host is the host of the parent.
|
||||
pub host: Option<Host>,
|
||||
}
|
||||
|
||||
// CollectedPiece is the piece collected from a peer.
|
||||
/// CollectedPiece is the piece collected from a peer.
|
||||
pub struct CollectedPiece {
|
||||
// number is the piece number.
|
||||
/// number is the piece number.
|
||||
pub number: u32,
|
||||
|
||||
// length is the piece length.
|
||||
/// length is the piece length.
|
||||
pub length: u64,
|
||||
|
||||
// parent is the parent peer.
|
||||
/// parent is the parent peer.
|
||||
pub parent: CollectedParent,
|
||||
}
|
||||
|
||||
// PieceCollector is used to collect pieces from peers.
|
||||
/// PieceCollector is used to collect pieces from peers.
|
||||
pub struct PieceCollector {
|
||||
// config is the configuration of the dfdaemon.
|
||||
/// config is the configuration of the dfdaemon.
|
||||
config: Arc<Config>,
|
||||
|
||||
// host_id is the id of the host.
|
||||
/// host_id is the id of the host.
|
||||
host_id: String,
|
||||
|
||||
// task_id is the id of the task.
|
||||
/// task_id is the id of the task.
|
||||
task_id: String,
|
||||
|
||||
// parents is the parent peers.
|
||||
/// parents is the parent peers.
|
||||
parents: Vec<CollectedParent>,
|
||||
|
||||
// interested_pieces is the pieces interested by the collector.
|
||||
/// interested_pieces is the pieces interested by the collector.
|
||||
interested_pieces: Vec<metadata::Piece>,
|
||||
|
||||
// collected_pieces is the pieces collected from peers.
|
||||
/// collected_pieces is the pieces collected from peers.
|
||||
collected_pieces: Arc<DashMap<u32, String>>,
|
||||
}
|
||||
|
||||
impl PieceCollector {
|
||||
// new creates a new PieceCollector.
|
||||
/// new creates a new PieceCollector.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -100,7 +100,7 @@ impl PieceCollector {
|
|||
}
|
||||
}
|
||||
|
||||
// run runs the piece collector.
|
||||
/// run runs the piece collector.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn run(&self) -> Receiver<CollectedPiece> {
|
||||
let host_id = self.host_id.clone();
|
||||
|
|
@ -132,7 +132,7 @@ impl PieceCollector {
|
|||
collected_piece_rx
|
||||
}
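A hedged usage sketch of the collector above, assuming run() hands back a tokio mpsc receiver as the recv loop suggests; the consumer name is hypothetical:

// Sketch: drain the receiver returned by run() and hand each collected piece
// to the downloader; CollectedPiece carries the number, length and parent peer.
async fn consume_collected_pieces(collector: &PieceCollector) {
    let mut collected_piece_rx = collector.run().await;
    while let Some(collected_piece) = collected_piece_rx.recv().await {
        // A real caller would now download piece `collected_piece.number`
        // from `collected_piece.parent`.
        let _ = (collected_piece.number, collected_piece.length, collected_piece.parent);
    }
}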
|
||||
|
||||
// collect_from_remote_peers collects pieces from remote peers.
|
||||
/// collect_from_remote_peers collects pieces from remote peers.
|
||||
#[instrument(skip_all)]
|
||||
async fn collect_from_remote_peers(
|
||||
host_id: String,
|
||||
|
|
|
|||
|
|
@ -64,30 +64,30 @@ use tracing::{error, info, instrument, Instrument};
|
|||
|
||||
use super::*;
|
||||
|
||||
// Task represents a task manager.
|
||||
/// Task represents a task manager.
|
||||
pub struct Task {
|
||||
// config is the configuration of the dfdaemon.
|
||||
/// config is the configuration of the dfdaemon.
|
||||
config: Arc<Config>,
|
||||
|
||||
// id_generator is the id generator.
|
||||
/// id_generator is the id generator.
|
||||
pub id_generator: Arc<IDGenerator>,
|
||||
|
||||
// storage is the local storage.
|
||||
/// storage is the local storage.
|
||||
storage: Arc<Storage>,
|
||||
|
||||
// scheduler_client is the grpc client of the scheduler.
|
||||
/// scheduler_client is the grpc client of the scheduler.
|
||||
pub scheduler_client: Arc<SchedulerClient>,
|
||||
|
||||
// backend_factory is the backend factory.
|
||||
/// backend_factory is the backend factory.
|
||||
pub backend_factory: Arc<BackendFactory>,
|
||||
|
||||
// piece is the piece manager.
|
||||
/// piece is the piece manager.
|
||||
pub piece: Arc<piece::Piece>,
|
||||
}
|
||||
|
||||
// Task implements the task manager.
|
||||
/// Task implements the task manager.
|
||||
impl Task {
|
||||
// new returns a new Task.
|
||||
/// new returns a new Task.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
config: Arc<Config>,
|
||||
|
|
@ -114,7 +114,7 @@ impl Task {
|
|||
}
|
||||
}
|
||||
|
||||
// download_started updates the metadata of the task when the task downloads started.
|
||||
/// download_started updates the metadata of the task when the task downloads started.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn download_started(
|
||||
&self,
|
||||
|
|
@ -208,31 +208,31 @@ impl Task {
|
|||
)
|
||||
}
|
||||
|
||||
// download_finished updates the metadata of the task when the task downloads finished.
|
||||
/// download_finished updates the metadata of the task when the task downloads finished.
|
||||
#[instrument(skip_all)]
|
||||
pub fn download_finished(&self, id: &str) -> ClientResult<metadata::Task> {
|
||||
self.storage.download_task_finished(id)
|
||||
}
|
||||
|
||||
// download_failed updates the metadata of the task when the task downloads failed.
|
||||
/// download_failed updates the metadata of the task when the task downloads failed.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn download_failed(&self, id: &str) -> ClientResult<()> {
|
||||
self.storage.download_task_failed(id).await.map(|_| ())
|
||||
}
|
||||
|
||||
// prefetch_task_started updates the metadata of the task when the task prefetch started.
|
||||
/// prefetch_task_started updates the metadata of the task when the task prefetch started.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn prefetch_task_started(&self, id: &str) -> ClientResult<metadata::Task> {
|
||||
self.storage.prefetch_task_started(id).await
|
||||
}
|
||||
|
||||
// prefetch_task_failed updates the metadata of the task when the task prefetch failed.
|
||||
/// prefetch_task_failed updates the metadata of the task when the task prefetch failed.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn prefetch_task_failed(&self, id: &str) -> ClientResult<metadata::Task> {
|
||||
self.storage.prefetch_task_failed(id).await
|
||||
}
|
||||
|
||||
// hard_link_or_copy hard links or copies the task content to the destination.
|
||||
/// hard_link_or_copy hard links or copies the task content to the destination.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn hard_link_or_copy(
|
||||
&self,
|
||||
|
|
@ -243,7 +243,7 @@ impl Task {
|
|||
self.storage.hard_link_or_copy_task(task, to, range).await
|
||||
}
|
||||
|
||||
// download downloads a task.
|
||||
/// download downloads a task.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all)]
|
||||
pub async fn download(
|
||||
|
|
@ -462,7 +462,7 @@ impl Task {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
// download_partial_with_scheduler downloads a partial task with scheduler.
|
||||
/// download_partial_with_scheduler downloads a partial task with scheduler.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all)]
|
||||
async fn download_partial_with_scheduler(
|
||||
|
|
@ -894,7 +894,7 @@ impl Task {
|
|||
Ok(finished_pieces)
|
||||
}
|
||||
|
||||
// download_partial_with_scheduler_from_remote_peer downloads a partial task with scheduler from a remote peer.
|
||||
/// download_partial_with_scheduler_from_remote_peer downloads a partial task with scheduler from a remote peer.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all)]
|
||||
async fn download_partial_with_scheduler_from_remote_peer(
|
||||
|
|
@ -1160,7 +1160,7 @@ impl Task {
|
|||
Ok(finished_pieces)
|
||||
}
|
||||
|
||||
// download_partial_with_scheduler_from_source downloads a partial task with scheduler from the source.
|
||||
/// download_partial_with_scheduler_from_source downloads a partial task with scheduler from the source.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all)]
|
||||
async fn download_partial_with_scheduler_from_source(
|
||||
|
|
@ -1407,7 +1407,7 @@ impl Task {
|
|||
Ok(finished_pieces)
|
||||
}
|
||||
|
||||
// download_partial_from_local_peer downloads a partial task from a local peer.
|
||||
/// download_partial_from_local_peer downloads a partial task from a local peer.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all)]
|
||||
async fn download_partial_from_local_peer(
|
||||
|
|
@ -1500,7 +1500,7 @@ impl Task {
|
|||
Ok(finished_pieces)
|
||||
}
|
||||
|
||||
// download_partial_from_source downloads a partial task from the source.
|
||||
/// download_partial_from_source downloads a partial task from the source.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[instrument(skip_all)]
|
||||
async fn download_partial_from_source(
|
||||
|
|
@ -1663,7 +1663,7 @@ impl Task {
|
|||
))
|
||||
}
|
||||
|
||||
// stat returns the task metadata.
/// stat returns the task metadata.
#[instrument(skip_all)]
|
||||
pub async fn stat(&self, task_id: &str, host_id: &str) -> ClientResult<CommonTask> {
|
||||
let task = self
|
||||
|
|
@ -1681,7 +1681,7 @@ impl Task {
|
|||
Ok(task)
|
||||
}
|
||||
|
||||
// Delete a task and reclaim local storage.
|
||||
/// Delete a task and reclaim local storage.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn delete(&self, task_id: &str, host_id: &str) -> ClientResult<()> {
|
||||
let task = self.storage.get_task(task_id).map_err(|err| {
|
||||
|
|
|
|||
|
|
@ -18,22 +18,22 @@ use tokio::signal::unix::{signal, SignalKind};
|
|||
use tokio::sync::broadcast;
|
||||
use tracing::info;
|
||||
|
||||
// Shutdown is a signal to shutdown.
|
||||
/// Shutdown is a signal to shutdown.
|
||||
#[derive(Debug)]
|
||||
pub struct Shutdown {
|
||||
// is_shutdown is true if the shutdown signal has been received.
|
||||
/// is_shutdown is true if the shutdown signal has been received.
|
||||
is_shutdown: bool,
|
||||
|
||||
// sender is used to send the shutdown signal.
|
||||
/// sender is used to send the shutdown signal.
|
||||
sender: broadcast::Sender<()>,
|
||||
|
||||
// receiver is used to receive the shutdown signal.
|
||||
/// receiver is used to receive the shutdown signal.
|
||||
receiver: broadcast::Receiver<()>,
|
||||
}
|
||||
|
||||
// Shutdown implements the shutdown signal.
|
||||
/// Shutdown implements the shutdown signal.
|
||||
impl Shutdown {
|
||||
// new creates a new Shutdown.
|
||||
/// new creates a new Shutdown.
|
||||
pub fn new() -> Shutdown {
|
||||
let (sender, receiver) = broadcast::channel(1);
|
||||
Self {
|
||||
|
|
@ -43,17 +43,17 @@ impl Shutdown {
|
|||
}
|
||||
}
|
||||
|
||||
// is_shutdown returns true if the shutdown signal has been received.
|
||||
/// is_shutdown returns true if the shutdown signal has been received.
|
||||
pub fn is_shutdown(&self) -> bool {
|
||||
self.is_shutdown
|
||||
}
|
||||
|
||||
// trigger triggers the shutdown signal.
|
||||
/// trigger triggers the shutdown signal.
|
||||
pub fn trigger(&self) {
|
||||
let _ = self.sender.send(());
|
||||
}
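A hedged sketch of the intended usage: a worker holds a clone of Shutdown and exits once recv (documented just below) completes after some handle calls trigger. The worker name and its periodic work are assumptions for illustration:

// Sketch: a worker clones the shutdown handle and leaves its loop when the
// signal arrives; trigger() on any handle broadcasts to every clone.
async fn run_worker(mut shutdown: Shutdown) {
    loop {
        tokio::select! {
            _ = shutdown.recv() => {
                // The shutdown signal has been received; stop the worker.
                break;
            }
            _ = tokio::time::sleep(std::time::Duration::from_secs(1)) => {
                // Periodic work would go here.
            }
        }
    }
}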
|
||||
|
||||
// recv waits for the shutdown signal.
|
||||
/// recv waits for the shutdown signal.
|
||||
pub async fn recv(&mut self) {
|
||||
// Return immediately if the shutdown signal has already been received.
|
||||
if self.is_shutdown {
|
||||
|
|
@ -76,9 +76,9 @@ impl Default for Shutdown {
|
|||
}
|
||||
}
|
||||
|
||||
// Clone implements the Clone trait.
|
||||
/// Clone implements the Clone trait.
|
||||
impl Clone for Shutdown {
|
||||
// clone returns a new Shutdown.
|
||||
/// clone returns a new Shutdown.
|
||||
fn clone(&self) -> Self {
|
||||
let sender = self.sender.clone();
|
||||
let receiver = self.sender.subscribe();
|
||||
|
|
@ -90,8 +90,8 @@ impl Clone for Shutdown {
|
|||
}
|
||||
}
|
||||
|
||||
// shutdown_signal returns a future that will resolve when a SIGINT, SIGTERM or SIGQUIT signal is
|
||||
// received by the process.
|
||||
/// shutdown_signal returns a future that will resolve when a SIGINT, SIGTERM or SIGQUIT signal is
|
||||
/// received by the process.
|
||||
pub async fn shutdown_signal() {
|
||||
let mut sigint = signal(SignalKind::interrupt()).unwrap();
|
||||
let mut sigterm = signal(SignalKind::terminate()).unwrap();
|
||||
|
|
|
|||
|
|
@ -24,24 +24,24 @@ use tokio::sync::mpsc;
|
|||
use tracing::{error, info, instrument};
|
||||
use warp::{Filter, Rejection, Reply};
|
||||
|
||||
// DEFAULT_PROFILER_SECONDS is the default seconds to start profiling.
|
||||
/// DEFAULT_PROFILER_SECONDS is the default seconds to start profiling.
|
||||
const DEFAULT_PROFILER_SECONDS: u64 = 10;
|
||||
|
||||
// DEFAULT_PROFILER_FREQUENCY is the default frequency to start profiling.
|
||||
/// DEFAULT_PROFILER_FREQUENCY is the default frequency to start profiling.
|
||||
const DEFAULT_PROFILER_FREQUENCY: i32 = 1000;
|
||||
|
||||
// PProfProfileQueryParams is the query params to start profiling.
|
||||
/// PProfProfileQueryParams is the query params to start profiling.
|
||||
#[derive(Deserialize, Serialize)]
|
||||
#[serde(default)]
|
||||
pub struct PProfProfileQueryParams {
|
||||
// seconds is the seconds to start profiling.
|
||||
/// seconds is the seconds to start profiling.
|
||||
pub seconds: u64,
|
||||
|
||||
// frequency is the frequency to start profiling.
|
||||
/// frequency is the frequency to start profiling.
|
||||
pub frequency: i32,
|
||||
}
|
||||
|
||||
// PProfProfileQueryParams implements the default.
|
||||
/// PProfProfileQueryParams implements the default.
|
||||
impl Default for PProfProfileQueryParams {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
|
|
@ -51,22 +51,22 @@ impl Default for PProfProfileQueryParams {
|
|||
}
|
||||
}
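For illustration, a hedged sketch of what a CPU-profile handler driven by these query params typically does, assuming the pprof crate with its flamegraph feature; the actual pprof_profile_handler below is elided in this diff and may differ:

// Sketch: profile at `frequency` Hz for `seconds`, then render a flamegraph.
async fn sketch_cpu_profile(params: PProfProfileQueryParams) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
    let guard = pprof::ProfilerGuard::new(params.frequency)?;
    tokio::time::sleep(std::time::Duration::from_secs(params.seconds)).await;

    let report = guard.report().build()?;
    let mut body = Vec::new();
    report.flamegraph(&mut body)?;
    Ok(body)
}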
|
||||
|
||||
// Stats is the stats server.
|
||||
/// Stats is the stats server.
|
||||
#[derive(Debug)]
|
||||
pub struct Stats {
|
||||
// addr is the address of the stats server.
|
||||
/// addr is the address of the stats server.
|
||||
addr: SocketAddr,
|
||||
|
||||
// shutdown is used to shutdown the stats server.
|
||||
/// shutdown is used to shutdown the stats server.
|
||||
shutdown: shutdown::Shutdown,
|
||||
|
||||
// _shutdown_complete is used to notify the stats server is shutdown.
|
||||
/// _shutdown_complete is used to notify the stats server is shutdown.
|
||||
_shutdown_complete: mpsc::UnboundedSender<()>,
|
||||
}
|
||||
|
||||
// Stats implements the stats server.
|
||||
/// Stats implements the stats server.
|
||||
impl Stats {
|
||||
// new creates a new Stats.
|
||||
/// new creates a new Stats.
|
||||
#[instrument(skip_all)]
|
||||
pub fn new(
|
||||
addr: SocketAddr,
|
||||
|
|
@ -80,7 +80,7 @@ impl Stats {
|
|||
}
|
||||
}
|
||||
|
||||
// run starts the stats server.
|
||||
/// run starts the stats server.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn run(&self) {
|
||||
// Clone the shutdown channel.
|
||||
|
|
@ -114,7 +114,7 @@ impl Stats {
|
|||
}
|
||||
}
|
||||
|
||||
// pprof_profile_handler handles the pprof profile request.
/// pprof_profile_handler handles the pprof profile request.
#[instrument(skip_all)]
|
||||
async fn pprof_profile_handler(
|
||||
query_params: PProfProfileQueryParams,
|
||||
|
|
@ -149,7 +149,7 @@ impl Stats {
|
|||
Ok(body)
|
||||
}
|
||||
|
||||
// pprof_heap_handler handles the pprof heap request.
|
||||
/// pprof_heap_handler handles the pprof heap request.
|
||||
#[instrument(skip_all)]
|
||||
async fn pprof_heap_handler() -> Result<impl Reply, Rejection> {
|
||||
info!("start heap profiling");
|
||||
|
|
|
|||
|
|
@ -31,7 +31,7 @@ use tracing_subscriber::{
|
|||
EnvFilter, Registry,
|
||||
};
|
||||
|
||||
// init_tracing initializes the tracing system.
|
||||
/// init_tracing initializes the tracing system.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn init_tracing(
|
||||
name: &str,
|
||||
|
|
@ -141,7 +141,7 @@ pub fn init_tracing(
|
|||
guards
|
||||
}
|
||||
|
||||
// redirect_stderr_to_file redirects stderr to a file.
|
||||
/// redirect_stderr_to_file redirects stderr to a file.
|
||||
fn redirect_stderr_to_file(log_dir: PathBuf) {
|
||||
let log_path = log_dir.join("stderr.log");
|
||||
let file = OpenOptions::new()