diff --git a/dragonfly-client-backend/examples/plugin/src/lib.rs b/dragonfly-client-backend/examples/plugin/src/lib.rs index efdac01a..3045767e 100644 --- a/dragonfly-client-backend/examples/plugin/src/lib.rs +++ b/dragonfly-client-backend/examples/plugin/src/lib.rs @@ -17,39 +17,39 @@ use dragonfly_client_backend::{Backend, Body, GetRequest, GetResponse, HeadRequest, HeadResponse}; use dragonfly_client_core::{Error, Result}; -// Hdfs is a struct that implements the Backend trait +/// Hdfs is a struct that implements the Backend trait struct Hdfs; -// Hdfs implements the Backend trait +/// Hdfs implements the Backend trait impl Hdfs { pub fn new() -> Self { Self {} } } -// Implement the Backend trait for Hdfs. +/// Implement the Backend trait for Hdfs. #[tonic::async_trait] impl Backend for Hdfs { - // scheme returns the scheme of the backend. + /// scheme returns the scheme of the backend. fn scheme(&self) -> String { "hdfs".to_string() } - // head is an async function that takes a HeadRequest and returns a HeadResponse. + /// head is an async function that takes a HeadRequest and returns a HeadResponse. async fn head(&self, request: HeadRequest) -> Result { println!("HDFS head url: {}", request.url); Err(Error::Unimplemented) } - // get is an async function that takes a GetRequest and returns a GetResponse. + /// get is an async function that takes a GetRequest and returns a GetResponse. async fn get(&self, request: GetRequest) -> Result> { println!("HDFS get url: {}", request.url); Err(Error::Unimplemented) } } -// register_plugin is a function that returns a Box. -// This function is used to register the HDFS plugin to the Backend. +/// register_plugin is a function that returns a Box. +/// This function is used to register the HDFS plugin to the Backend. #[no_mangle] pub fn register_plugin() -> Box { Box::new(Hdfs::new()) diff --git a/dragonfly-client-backend/src/http.rs b/dragonfly-client-backend/src/http.rs index 165bafc6..6e998ba5 100644 --- a/dragonfly-client-backend/src/http.rs +++ b/dragonfly-client-backend/src/http.rs @@ -22,15 +22,15 @@ use std::io::{Error as IOError, ErrorKind}; use tokio_util::io::StreamReader; use tracing::{error, info, instrument}; -// HTTP is the HTTP backend. +/// HTTP is the HTTP backend. pub struct HTTP { - // scheme is the scheme of the HTTP backend. + /// scheme is the scheme of the HTTP backend. scheme: String, } -// HTTP implements the http interface. +/// HTTP implements the http interface. impl HTTP { - // new returns a new HTTP. + /// new returns a new HTTP. #[instrument(skip_all)] pub fn new(scheme: &str) -> HTTP { Self { @@ -38,7 +38,7 @@ impl HTTP { } } - // client returns a new reqwest client. + /// client returns a new reqwest client. #[instrument(skip_all)] fn client( &self, @@ -68,16 +68,16 @@ impl HTTP { } } -// Backend implements the Backend trait. +/// Backend implements the Backend trait. #[tonic::async_trait] impl super::Backend for HTTP { - // scheme returns the scheme of the HTTP backend. + /// scheme returns the scheme of the HTTP backend. #[instrument(skip_all)] fn scheme(&self) -> String { self.scheme.clone() } - // head gets the header of the request. + /// head gets the header of the request. #[instrument(skip_all)] async fn head(&self, request: super::HeadRequest) -> Result { info!( @@ -124,7 +124,7 @@ impl super::Backend for HTTP { }) } - // get gets the content of the request. + /// get gets the content of the request. 
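The extracted diff above drops the generic parameters from the plugin entry point (`Box`, `Result`, and so on). For reference, a minimal standalone sketch of the shape of such a plugin crate; the `Backend` trait below is a simplified, synchronous stand-in for `dragonfly_client_backend::Backend`, and the exact upstream return type (likely `Box<dyn Backend + Send + Sync>`) is an assumption:

```rust
/// Simplified stand-in for dragonfly_client_backend::Backend; the real trait
/// is async (via #[tonic::async_trait]) and also defines head() and get().
pub trait Backend: Send + Sync {
    fn scheme(&self) -> String;
}

/// Hdfs is the example plugin backend.
struct Hdfs;

impl Backend for Hdfs {
    fn scheme(&self) -> String {
        "hdfs".to_string()
    }
}

/// Exported with an unmangled symbol name so dfdaemon can resolve
/// "register_plugin" after loading the shared library (for example
/// libhdfs.so) from the backend plugin directory.
#[no_mangle]
pub fn register_plugin() -> Box<dyn Backend> {
    Box::new(Hdfs)
}
```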
#[instrument(skip_all)] async fn get(&self, request: super::GetRequest) -> Result> { info!( @@ -171,9 +171,9 @@ impl super::Backend for HTTP { } } -// Default implements the Default trait. +/// Default implements the Default trait. impl Default for HTTP { - // default returns a new default HTTP. + /// default returns a new default HTTP. fn default() -> Self { Self::new("http") } diff --git a/dragonfly-client-backend/src/lib.rs b/dragonfly-client-backend/src/lib.rs index 0d301dab..5f962d54 100644 --- a/dragonfly-client-backend/src/lib.rs +++ b/dragonfly-client-backend/src/lib.rs @@ -32,104 +32,104 @@ use url::Url; pub mod http; pub mod object_storage; -// NAME is the name of the package. +/// NAME is the name of the package. pub const NAME: &str = "backend"; -// Body is the body of the response. +/// Body is the body of the response. pub type Body = Box; -// HeadRequest is the head request for backend. +/// HeadRequest is the head request for backend. pub struct HeadRequest { - // task_id is the id of the task. + /// task_id is the id of the task. pub task_id: String, - // url is the url of the request. + /// url is the url of the request. pub url: String, - // http_header is the headers of the request. + /// http_header is the headers of the request. pub http_header: Option, - // timeout is the timeout of the request. + /// timeout is the timeout of the request. pub timeout: Duration, - // client_certs is the client certificates for the request. + /// client_certs is the client certificates for the request. pub client_certs: Option>>, - // object_storage is the object storage related information. + /// object_storage is the object storage related information. pub object_storage: Option, } -// HeadResponse is the head response for backend. +/// HeadResponse is the head response for backend. #[derive(Debug)] pub struct HeadResponse { - // success is the success of the response. + /// success is the success of the response. pub success: bool, - // content_length is the content length of the response. + /// content_length is the content length of the response. pub content_length: Option, - // http_header is the headers of the response. + /// http_header is the headers of the response. pub http_header: Option, - // http_status_code is the status code of the response. + /// http_status_code is the status code of the response. pub http_status_code: Option, - // Entries is the information of the entries in the directory. + /// Entries is the information of the entries in the directory. pub entries: Vec, - // error_message is the error message of the response. + /// error_message is the error message of the response. pub error_message: Option, } -// GetRequest is the get request for backend. +/// GetRequest is the get request for backend. pub struct GetRequest { - // task_id is the id of the task. + /// task_id is the id of the task. pub task_id: String, - // piece_id is the id of the piece. + /// piece_id is the id of the piece. pub piece_id: String, - // url is the url of the request. + /// url is the url of the request. pub url: String, - // range is the range of the request. + /// range is the range of the request. pub range: Option, - // http_header is the headers of the request. + /// http_header is the headers of the request. pub http_header: Option, - // timeout is the timeout of the request. + /// timeout is the timeout of the request. pub timeout: Duration, - // client_certs is the client certificates for the request. + /// client_certs is the client certificates for the request. 
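The `get` implementation above hands back the streaming HTTP body as the `Body` reader type. A self-contained sketch of that conversion with reqwest and tokio-util, assuming reqwest is built with the `stream` feature; the URL is illustrative and error handling is reduced to `Box<dyn Error>`:

```rust
use futures::TryStreamExt;
use std::io::{Error as IOError, ErrorKind};
use tokio::io::AsyncReadExt;
use tokio_util::io::StreamReader;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let response = reqwest::get("https://example.com/").await?;

    // Map reqwest errors into io::Error so StreamReader can surface them
    // from AsyncRead calls, then wrap the byte stream as an AsyncRead.
    let stream = response
        .bytes_stream()
        .map_err(|err| IOError::new(ErrorKind::Other, err));
    let mut reader = StreamReader::new(stream);

    let mut body = Vec::new();
    reader.read_to_end(&mut body).await?;
    println!("read {} bytes", body.len());
    Ok(())
}
```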
pub client_certs: Option>>, - // the object storage related information. + /// the object storage related information. pub object_storage: Option, } -// GetResponse is the get response for backend. +/// GetResponse is the get response for backend. pub struct GetResponse where R: AsyncRead + Unpin, { - // success is the success of the response. + /// success is the success of the response. pub success: bool, - // http_header is the headers of the response. + /// http_header is the headers of the response. pub http_header: Option, - // http_status_code is the status code of the response. + /// http_status_code is the status code of the response. pub http_status_code: Option, - // body is the content of the response. + /// body is the content of the response. pub reader: R, - // error_message is the error message of the response. + /// error_message is the error message of the response. pub error_message: Option, } -// GetResponse implements the response functions. +/// GetResponse implements the response functions. impl GetResponse where R: AsyncRead + Unpin, @@ -146,64 +146,64 @@ where /// The File Entry of a directory, including some relevant file metadata. #[derive(Debug, PartialEq, Eq)] pub struct DirEntry { - // url is the url of the entry. + /// url is the url of the entry. pub url: String, - // content_length is the content length of the entry. + /// content_length is the content length of the entry. pub content_length: usize, - // is_dir is the flag of the entry is a directory. + /// is_dir is the flag of the entry is a directory. pub is_dir: bool, } -// Backend is the interface of the backend. +/// Backend is the interface of the backend. #[tonic::async_trait] pub trait Backend { - // scheme returns the scheme of the backend. + /// scheme returns the scheme of the backend. fn scheme(&self) -> String; - // head gets the header of the request. + /// head gets the header of the request. async fn head(&self, request: HeadRequest) -> Result; - // get gets the content of the request. + /// get gets the content of the request. async fn get(&self, request: GetRequest) -> Result>; } -// BackendFactory is the factory of the backend. +/// BackendFactory is the factory of the backend. #[derive(Default)] pub struct BackendFactory { - // backends is the backends of the factory, including the plugin backends and - // the builtin backends. + /// backends is the backends of the factory, including the plugin backends and + /// the builtin backends. backends: HashMap>, - // libraries is used to store the plugin's dynamic library, because when not saving the `Library`, - // it will drop when out of scope, resulting in the null pointer error. + /// libraries is used to store the plugin's dynamic library, because when not saving the `Library`, + /// it will drop when out of scope, resulting in the null pointer error. libraries: Vec, } -// BackendFactory implements the factory of the backend. It supports loading builtin -// backends and plugin backends. -// -// The builtin backends are http, https, etc, which are implemented -// by the HTTP struct. -// -// The plugin backends are shared libraries, which are loaded -// by the `register_plugin` function. The file name of the shared -// library is the scheme of the backend. The shared library -// should implement the Backend trait. Default plugin directory -// is `/var/lib/dragonfly/plugins/` in linux and `~/.dragonfly/plugins` -// in macos. The plugin directory can be set by the dfdaemon configuration. 
-// -// For example: -// If implement a plugin backend named `hdfs`, the shared library -// should be named `libhdfs.so` or `libhdfs.dylib` and move the file to the backend plugin directory -// `/var/lib/dragonfly/plugins/backend/` in linux or `~/.dragonfly/plugins/backend/` -// in macos. When the dfdaemon starts, it will load the `hdfs` plugin backend in the -// backend plugin directory. So the dfdaemon or dfget can use the `hdfs` plugin backend -// to download the file by the url `hdfs://example.com/file`. -// The backend plugin implementation can refer to -// https://github.com/dragonflyoss/client/tree/main/dragonfly-client-backend/examples/plugin/. +/// BackendFactory implements the factory of the backend. It supports loading builtin +/// backends and plugin backends. +/// +/// The builtin backends are http, https, etc, which are implemented +/// by the HTTP struct. +/// +/// The plugin backends are shared libraries, which are loaded +/// by the `register_plugin` function. The file name of the shared +/// library is the scheme of the backend. The shared library +/// should implement the Backend trait. Default plugin directory +/// is `/var/lib/dragonfly/plugins/` in linux and `~/.dragonfly/plugins` +/// in macos. The plugin directory can be set by the dfdaemon configuration. +/// +/// For example: +/// If implement a plugin backend named `hdfs`, the shared library +/// should be named `libhdfs.so` or `libhdfs.dylib` and move the file to the backend plugin directory +/// `/var/lib/dragonfly/plugins/backend/` in linux or `~/.dragonfly/plugins/backend/` +/// in macos. When the dfdaemon starts, it will load the `hdfs` plugin backend in the +/// backend plugin directory. So the dfdaemon or dfget can use the `hdfs` plugin backend +/// to download the file by the url `hdfs://example.com/file`. +/// The backend plugin implementation can refer to +/// https://github.com/dragonflyoss/client/tree/main/dragonfly-client-backend/examples/plugin/. impl BackendFactory { - // new returns a new BackendFactory. + /// new returns a new BackendFactory. #[instrument(skip_all)] pub fn new(plugin_dir: Option<&Path>) -> Result { let mut backend_factory = Self::default(); @@ -220,7 +220,7 @@ impl BackendFactory { Ok(backend_factory) } - // build returns the backend by the scheme of the url. + /// build returns the backend by the scheme of the url. #[instrument(skip_all)] pub fn build(&self, url: &str) -> Result<&(dyn Backend + Send + Sync)> { let url = Url::parse(url).or_err(ErrorType::ParseError)?; @@ -231,7 +231,7 @@ impl BackendFactory { .ok_or(Error::InvalidParameter) } - // load_builtin_backends loads the builtin backends. + /// load_builtin_backends loads the builtin backends. #[instrument(skip_all)] fn load_builtin_backends(&mut self) { self.backends @@ -291,7 +291,7 @@ impl BackendFactory { info!("load [cos] builtin backend"); } - // load_plugin_backends loads the plugin backends. + /// load_plugin_backends loads the plugin backends. #[instrument(skip_all)] fn load_plugin_backends(&mut self, plugin_dir: &Path) -> Result<()> { let backend_plugin_dir = plugin_dir.join(NAME); diff --git a/dragonfly-client-backend/src/object_storage.rs b/dragonfly-client-backend/src/object_storage.rs index 232374c3..1654e82b 100644 --- a/dragonfly-client-backend/src/object_storage.rs +++ b/dragonfly-client-backend/src/object_storage.rs @@ -27,31 +27,31 @@ use tokio_util::io::StreamReader; use tracing::{error, info, instrument}; use url::Url; -// Scheme is the scheme of the object storage. 
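The doc comment above explains why `BackendFactory` keeps every loaded `Library`: the boxed backend's vtable lives inside the shared object, so dropping the library would leave a dangling pointer. A hedged sketch of that loading pattern with the `libloading` crate; the `Backend` trait here is again a simplified stand-in, and the `register_plugin` symbol name follows the docs above:

```rust
use std::collections::HashMap;
use std::path::Path;

use libloading::{Library, Symbol};

/// Simplified stand-in for the real Backend trait.
pub trait Backend: Send + Sync {
    fn scheme(&self) -> String;
}

#[derive(Default)]
struct Factory {
    /// backends maps a URL scheme to its backend implementation.
    backends: HashMap<String, Box<dyn Backend>>,
    /// Loaded libraries are kept alive here; dropping a Library unloads the
    /// shared object and invalidates the trait objects created from it.
    libraries: Vec<Library>,
}

impl Factory {
    fn load_plugin(&mut self, path: &Path) -> Result<(), Box<dyn std::error::Error>> {
        unsafe {
            let library = Library::new(path)?;
            let register: Symbol<fn() -> Box<dyn Backend>> = library.get(b"register_plugin")?;
            let backend = register();
            self.backends.insert(backend.scheme(), backend);
            self.libraries.push(library);
        }
        Ok(())
    }
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut factory = Factory::default();
    // Path is illustrative; on Linux the file name doubles as the scheme.
    factory.load_plugin(Path::new("/var/lib/dragonfly/plugins/backend/libhdfs.so"))?;
    Ok(())
}
```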
+/// Scheme is the scheme of the object storage. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Scheme { - // S3 is the Amazon Simple Storage Service. + /// S3 is the Amazon Simple Storage Service. S3, - // GCS is the Google Cloud Storage Service. + /// GCS is the Google Cloud Storage Service. GCS, - // ABS is the Azure Blob Storage Service. + /// ABS is the Azure Blob Storage Service. ABS, - // OSS is the Aliyun Object Storage Service. + /// OSS is the Aliyun Object Storage Service. OSS, - // OBS is the Huawei Cloud Object Storage Service. + /// OBS is the Huawei Cloud Object Storage Service. OBS, - // COS is the Tencent Cloud Object Storage Service. + /// COS is the Tencent Cloud Object Storage Service. COS, } -// Scheme implements the Display. +/// Scheme implements the Display. impl fmt::Display for Scheme { - // fmt formats the value using the given formatter. + /// fmt formats the value using the given formatter. fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Scheme::S3 => write!(f, "s3"), @@ -64,11 +64,11 @@ impl fmt::Display for Scheme { } } -// Scheme implements the FromStr. +/// Scheme implements the FromStr. impl FromStr for Scheme { type Err = String; - // from_str parses an scheme string. + /// from_str parses an scheme string. fn from_str(s: &str) -> Result { match s { "s3" => Ok(Scheme::S3), @@ -82,30 +82,30 @@ impl FromStr for Scheme { } } -// ParsedURL is a struct that contains the parsed URL, bucket, and path. +/// ParsedURL is a struct that contains the parsed URL, bucket, and path. #[derive(Debug)] pub struct ParsedURL { - // url is the requested URL of the object storage. + /// url is the requested URL of the object storage. pub url: Url, - // scheme is the scheme of the object storage. + /// scheme is the scheme of the object storage. pub scheme: Scheme, - // bucket is the bucket of the object storage. + /// bucket is the bucket of the object storage. pub bucket: String, - // key is the key of the object storage. + /// key is the key of the object storage. pub key: String, } -// ParsedURL implements the ParsedURL trait. +/// ParsedURL implements the ParsedURL trait. impl ParsedURL { - // is_dir returns true if the URL path ends with a slash. + /// is_dir returns true if the URL path ends with a slash. pub fn is_dir(&self) -> bool { self.url.path().ends_with('/') } - // make_url_by_entry_path makes a URL by the entry path when the URL is a directory. + /// make_url_by_entry_path makes a URL by the entry path when the URL is a directory. pub fn make_url_by_entry_path(&self, entry_path: &str) -> Url { let mut url = self.url.clone(); url.set_path(entry_path); @@ -113,13 +113,13 @@ impl ParsedURL { } } -// ParsedURL implements the TryFrom trait for the URL. -// -// The object storage URL should be in the format of `scheme:///`. +/// ParsedURL implements the TryFrom trait for the URL. +/// +/// The object storage URL should be in the format of `scheme:///`. impl TryFrom for ParsedURL { type Error = ClientError; - // try_from parses the URL and returns a ParsedURL. + /// try_from parses the URL and returns a ParsedURL. fn try_from(url: Url) -> Result { // Get the bucket from the URL host. let bucket = url @@ -150,7 +150,7 @@ impl TryFrom for ParsedURL { } } -// make_need_fields_message makes a message for the need fields in the object storage. +/// make_need_fields_message makes a message for the need fields in the object storage. macro_rules! 
make_need_fields_message { ($var:ident {$($field:ident),*}) => {{ let mut need_fields: Vec<&'static str> = vec![]; @@ -165,21 +165,21 @@ macro_rules! make_need_fields_message { }}; } -// ObjectStorage is a struct that implements the backend trait. +/// ObjectStorage is a struct that implements the backend trait. pub struct ObjectStorage { - // scheme is the scheme of the object storage. + /// scheme is the scheme of the object storage. scheme: Scheme, } -// ObjectStorage implements the ObjectStorage trait. +/// ObjectStorage implements the ObjectStorage trait. impl ObjectStorage { - // Returns ObjectStorage that implements the Backend trait. + /// Returns ObjectStorage that implements the Backend trait. #[instrument(skip_all)] pub fn new(scheme: Scheme) -> ObjectStorage { Self { scheme } } - // operator initializes the operator with the parsed URL and object storage. + /// operator initializes the operator with the parsed URL and object storage. #[instrument(skip_all)] pub fn operator( &self, @@ -206,7 +206,7 @@ impl ObjectStorage { } } - // s3_operator initializes the S3 operator with the parsed URL and object storage. + /// s3_operator initializes the S3 operator with the parsed URL and object storage. #[instrument(skip_all)] pub fn s3_operator( &self, @@ -260,7 +260,7 @@ impl ObjectStorage { Ok(Operator::new(builder)?.finish()) } - // gcs_operator initializes the GCS operator with the parsed URL and object storage. + /// gcs_operator initializes the GCS operator with the parsed URL and object storage. #[instrument(skip_all)] pub fn gcs_operator( &self, @@ -296,7 +296,7 @@ impl ObjectStorage { Ok(Operator::new(builder)?.finish()) } - // abs_operator initializes the ABS operator with the parsed URL and object storage. + /// abs_operator initializes the ABS operator with the parsed URL and object storage. #[instrument(skip_all)] pub fn abs_operator( &self, @@ -340,7 +340,7 @@ impl ObjectStorage { Ok(Operator::new(builder)?.finish()) } - // oss_operator initializes the OSS operator with the parsed URL and object storage. + /// oss_operator initializes the OSS operator with the parsed URL and object storage. #[instrument(skip_all)] pub fn oss_operator( &self, @@ -385,7 +385,7 @@ impl ObjectStorage { Ok(Operator::new(builder)?.finish()) } - // obs_operator initializes the OBS operator with the parsed URL and object storage. + /// obs_operator initializes the OBS operator with the parsed URL and object storage. #[instrument(skip_all)] pub fn obs_operator( &self, @@ -429,7 +429,7 @@ impl ObjectStorage { Ok(Operator::new(builder)?.finish()) } - // cos_operator initializes the COS operator with the parsed URL and object storage. + /// cos_operator initializes the COS operator with the parsed URL and object storage. pub fn cos_operator( &self, parsed_url: &super::object_storage::ParsedURL, @@ -473,16 +473,16 @@ impl ObjectStorage { } } -// Backend implements the Backend trait. +/// Backend implements the Backend trait. #[tonic::async_trait] impl crate::Backend for ObjectStorage { - // scheme returns the scheme of the object storage. + /// scheme returns the scheme of the object storage. #[instrument(skip_all)] fn scheme(&self) -> String { self.scheme.to_string() } - //head gets the header of the request. + /// head gets the header of the request. #[instrument(skip_all)] async fn head(&self, request: super::HeadRequest) -> ClientResult { info!( @@ -568,7 +568,7 @@ impl crate::Backend for ObjectStorage { }) } - // Returns content of requested file. + /// Returns content of requested file. 
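Per the `ParsedURL` docs above, object storage URLs follow `scheme://<bucket>/<key>`: the bucket is the URL host, the key is the path without its leading slash, and a trailing slash marks a directory. A standalone approximation with the `url` crate (not the upstream implementation):

```rust
use url::Url;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Object storage URLs take the form scheme://<bucket>/<key>.
    let url = Url::parse("s3://my-bucket/dir/object.txt")?;

    // The bucket comes from the URL host and the key from the path,
    // minus the leading slash.
    let bucket = url.host_str().ok_or("missing bucket in url")?.to_string();
    let key = url.path().trim_start_matches('/').to_string();

    assert_eq!(bucket, "my-bucket");
    assert_eq!(key, "dir/object.txt");

    // A trailing slash marks a directory, which is what is_dir() checks.
    let dir_url = Url::parse("s3://my-bucket/dir/")?;
    assert!(dir_url.path().ends_with('/'));

    Ok(())
}
```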
#[instrument(skip_all)] async fn get( &self, diff --git a/dragonfly-client-config/src/dfcache.rs b/dragonfly-client-config/src/dfcache.rs index 756d12d5..021e03e0 100644 --- a/dragonfly-client-config/src/dfcache.rs +++ b/dragonfly-client-config/src/dfcache.rs @@ -16,16 +16,16 @@ use std::path::PathBuf; -// NAME is the name of dfcache. +/// NAME is the name of dfcache. pub const NAME: &str = "dfcache"; -// default_dfcache_log_dir is the default log directory for dfcache. +/// default_dfcache_log_dir is the default log directory for dfcache. #[inline] pub fn default_dfcache_log_dir() -> PathBuf { crate::default_log_dir().join(NAME) } -// default_dfcache_persistent_replica_count is the default replica count of the persistent cache task. +/// default_dfcache_persistent_replica_count is the default replica count of the persistent cache task. #[inline] pub fn default_dfcache_persistent_replica_count() -> u64 { 2 diff --git a/dragonfly-client-config/src/dfdaemon.rs b/dragonfly-client-config/src/dfdaemon.rs index f30f3036..a11e2daf 100644 --- a/dragonfly-client-config/src/dfdaemon.rs +++ b/dragonfly-client-config/src/dfdaemon.rs @@ -29,185 +29,185 @@ use tokio::fs; use tracing::{info, instrument}; use validator::Validate; -// NAME is the name of dfdaemon. +/// NAME is the name of dfdaemon. pub const NAME: &str = "dfdaemon"; -// default_dfdaemon_config_path is the default config path for dfdaemon. +/// default_dfdaemon_config_path is the default config path for dfdaemon. #[inline] pub fn default_dfdaemon_config_path() -> PathBuf { crate::default_config_dir().join("dfdaemon.yaml") } -// default_dfdaemon_log_dir is the default log directory for dfdaemon. +/// default_dfdaemon_log_dir is the default log directory for dfdaemon. #[inline] pub fn default_dfdaemon_log_dir() -> PathBuf { crate::default_log_dir().join(NAME) } -// default_download_unix_socket_path is the default unix socket path for download GRPC service. +/// default_download_unix_socket_path is the default unix socket path for download GRPC service. pub fn default_download_unix_socket_path() -> PathBuf { crate::default_root_dir().join("dfdaemon.sock") } -// default_host_hostname is the default hostname of the host. +/// default_host_hostname is the default hostname of the host. #[inline] fn default_host_hostname() -> String { hostname::get().unwrap().to_string_lossy().to_string() } -// default_dfdaemon_plugin_dir is the default plugin directory for dfdaemon. +/// default_dfdaemon_plugin_dir is the default plugin directory for dfdaemon. #[inline] fn default_dfdaemon_plugin_dir() -> PathBuf { crate::default_plugin_dir().join(NAME) } -// default_dfdaemon_cache_dir is the default cache directory for dfdaemon. +/// default_dfdaemon_cache_dir is the default cache directory for dfdaemon. #[inline] fn default_dfdaemon_cache_dir() -> PathBuf { crate::default_cache_dir().join(NAME) } -// default_upload_grpc_server_port is the default port of the upload grpc server. +/// default_upload_grpc_server_port is the default port of the upload grpc server. #[inline] fn default_upload_grpc_server_port() -> u16 { 4000 } -// default_upload_rate_limit is the default rate limit of the upload speed in GiB/Mib/Kib per second. +/// default_upload_rate_limit is the default rate limit of the upload speed in GiB/Mib/Kib per second. #[inline] fn default_upload_rate_limit() -> ByteSize { // Default rate limit is 10GiB/s. ByteSize::gib(10) } -// default_health_server_port is the default port of the health server. 
+/// default_health_server_port is the default port of the health server. #[inline] fn default_health_server_port() -> u16 { 4003 } -// default_metrics_server_port is the default port of the metrics server. +/// default_metrics_server_port is the default port of the metrics server. #[inline] fn default_metrics_server_port() -> u16 { 4002 } -// default_stats_server_port is the default port of the stats server. +/// default_stats_server_port is the default port of the stats server. #[inline] fn default_stats_server_port() -> u16 { 4004 } -// default_download_rate_limit is the default rate limit of the download speed in GiB/Mib/Kib per second. +/// default_download_rate_limit is the default rate limit of the download speed in GiB/Mib/Kib per second. #[inline] fn default_download_rate_limit() -> ByteSize { // Default rate limit is 10GiB/s. ByteSize::gib(10) } -// default_download_piece_timeout is the default timeout for downloading a piece from source. +/// default_download_piece_timeout is the default timeout for downloading a piece from source. #[inline] fn default_download_piece_timeout() -> Duration { Duration::from_secs(60) } -// default_download_concurrent_piece_count is the default number of concurrent pieces to download. +/// default_download_concurrent_piece_count is the default number of concurrent pieces to download. #[inline] fn default_download_concurrent_piece_count() -> u32 { 16 } -// default_download_max_schedule_count is the default max count of schedule. +/// default_download_max_schedule_count is the default max count of schedule. #[inline] fn default_download_max_schedule_count() -> u32 { 5 } -// default_scheduler_announce_interval is the default interval to announce peer to the scheduler. +/// default_scheduler_announce_interval is the default interval to announce peer to the scheduler. #[inline] fn default_scheduler_announce_interval() -> Duration { Duration::from_secs(300) } -// default_scheduler_schedule_timeout is the default timeout for scheduling. +/// default_scheduler_schedule_timeout is the default timeout for scheduling. #[inline] fn default_scheduler_schedule_timeout() -> Duration { Duration::from_secs(10) } -// default_dynconfig_refresh_interval is the default interval to refresh dynamic configuration from manager. +/// default_dynconfig_refresh_interval is the default interval to refresh dynamic configuration from manager. #[inline] fn default_dynconfig_refresh_interval() -> Duration { Duration::from_secs(300) } -// default_storage_keep is the default keep of the task's metadata and content when the dfdaemon restarts. +/// default_storage_keep is the default keep of the task's metadata and content when the dfdaemon restarts. #[inline] fn default_storage_keep() -> bool { false } -// default_storage_write_buffer_size is the default buffer size for writing piece to disk, default is 128KB. +/// default_storage_write_buffer_size is the default buffer size for writing piece to disk, default is 128KB. #[inline] fn default_storage_write_buffer_size() -> usize { 128 * 1024 } -// default_storage_read_buffer_size is the default buffer size for reading piece from disk, default is 128KB. +/// default_storage_read_buffer_size is the default buffer size for reading piece from disk, default is 128KB. #[inline] fn default_storage_read_buffer_size() -> usize { 128 * 1024 } -// default_seed_peer_cluster_id is the default cluster id of seed peer. +/// default_seed_peer_cluster_id is the default cluster id of seed peer. 
#[inline] fn default_seed_peer_cluster_id() -> u64 { 1 } -// default_seed_peer_keepalive_interval is the default interval to keepalive with manager. +/// default_seed_peer_keepalive_interval is the default interval to keepalive with manager. #[inline] fn default_seed_peer_keepalive_interval() -> Duration { Duration::from_secs(15) } -// default_gc_interval is the default interval to do gc. +/// default_gc_interval is the default interval to do gc. #[inline] fn default_gc_interval() -> Duration { Duration::from_secs(900) } -// default_gc_policy_task_ttl is the default ttl of the task. +/// default_gc_policy_task_ttl is the default ttl of the task. #[inline] fn default_gc_policy_task_ttl() -> Duration { Duration::from_secs(21_600) } -// default_gc_policy_dist_high_threshold_percent is the default high threshold percent of the disk usage. +/// default_gc_policy_dist_high_threshold_percent is the default high threshold percent of the disk usage. #[inline] fn default_gc_policy_dist_high_threshold_percent() -> u8 { 80 } -// default_gc_policy_dist_low_threshold_percent is the default low threshold percent of the disk usage. +/// default_gc_policy_dist_low_threshold_percent is the default low threshold percent of the disk usage. #[inline] fn default_gc_policy_dist_low_threshold_percent() -> u8 { 60 } -// default_proxy_server_port is the default port of the proxy server. +/// default_proxy_server_port is the default port of the proxy server. #[inline] pub fn default_proxy_server_port() -> u16 { 4001 } -// default_proxy_read_buffer_size is the default buffer size for reading piece, default is 32KB. +/// default_proxy_read_buffer_size is the default buffer size for reading piece, default is 32KB. #[inline] pub fn default_proxy_read_buffer_size() -> usize { 32 * 1024 } -// default_s3_filtered_query_params is the default filtered query params with s3 protocol to generate the task id. +/// default_s3_filtered_query_params is the default filtered query params with s3 protocol to generate the task id. #[inline] fn s3_filtered_query_params() -> Vec { vec![ @@ -222,7 +222,7 @@ fn s3_filtered_query_params() -> Vec { ] } -// gcs_filtered_query_params is the filtered query params with gcs protocol to generate the task id. +/// gcs_filtered_query_params is the filtered query params with gcs protocol to generate the task id. #[inline] fn gcs_filtered_query_params() -> Vec { vec![ @@ -235,7 +235,7 @@ fn gcs_filtered_query_params() -> Vec { ] } -// oss_filtered_query_params is the filtered query params with oss protocol to generate the task id. +/// oss_filtered_query_params is the filtered query params with oss protocol to generate the task id. #[inline] fn oss_filtered_query_params() -> Vec { vec![ @@ -246,7 +246,7 @@ fn oss_filtered_query_params() -> Vec { ] } -// obs_filtered_query_params is the filtered query params with obs protocol to generate the task id. +/// obs_filtered_query_params is the filtered query params with obs protocol to generate the task id. #[inline] fn obs_filtered_query_params() -> Vec { vec![ @@ -258,7 +258,7 @@ fn obs_filtered_query_params() -> Vec { ] } -// cos_filtered_query_params is the filtered query params with cos protocol to generate the task id. +/// cos_filtered_query_params is the filtered query params with cos protocol to generate the task id. #[inline] fn cos_filtered_query_params() -> Vec { vec![ @@ -273,7 +273,7 @@ fn cos_filtered_query_params() -> Vec { ] } -// default_proxy_rule_filtered_query_params is the default filtered query params to generate the task id. 
+/// default_proxy_rule_filtered_query_params is the default filtered query params to generate the task id. #[inline] pub fn default_proxy_rule_filtered_query_params() -> Vec { let mut visited = HashSet::new(); @@ -300,31 +300,31 @@ pub fn default_proxy_rule_filtered_query_params() -> Vec { visited.into_iter().collect() } -// default_proxy_registry_mirror_addr is the default registry mirror address. +/// default_proxy_registry_mirror_addr is the default registry mirror address. #[inline] fn default_proxy_registry_mirror_addr() -> String { "https://index.docker.io".to_string() } -// Host is the host configuration for dfdaemon. +/// Host is the host configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Host { - // idc is the idc of the host. + /// idc is the idc of the host. pub idc: Option, - // location is the location of the host. + /// location is the location of the host. pub location: Option, - // hostname is the hostname of the host. + /// hostname is the hostname of the host. #[serde(default = "default_host_hostname")] pub hostname: String, - // ip is the advertise ip of the host. + /// ip is the advertise ip of the host. pub ip: Option, } -// Host implements Default. +/// Host implements Default. impl Default for Host { fn default() -> Self { Host { @@ -336,20 +336,20 @@ impl Default for Host { } } -// Server is the server configuration for dfdaemon. +/// Server is the server configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Server { - // plugin_dir is the directory to store plugins. + /// plugin_dir is the directory to store plugins. #[serde(default = "default_dfdaemon_plugin_dir")] pub plugin_dir: PathBuf, - // cache_dir is the directory to store cache files. + /// cache_dir is the directory to store cache files. #[serde(default = "default_dfdaemon_cache_dir")] pub cache_dir: PathBuf, } -// Server implements Default. +/// Server implements Default. impl Default for Server { fn default() -> Self { Server { @@ -359,16 +359,16 @@ impl Default for Server { } } -// DownloadServer is the download server configuration for dfdaemon. +/// DownloadServer is the download server configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct DownloadServer { - // socket_path is the unix socket path for dfdaemon GRPC service. + /// socket_path is the unix socket path for dfdaemon GRPC service. #[serde(default = "default_download_unix_socket_path")] pub socket_path: PathBuf, } -// DownloadServer implements Default. +/// DownloadServer implements Default. impl Default for DownloadServer { fn default() -> Self { DownloadServer { @@ -377,28 +377,28 @@ impl Default for DownloadServer { } } -// Download is the download configuration for dfdaemon. +/// Download is the download configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Download { - // server is the download server configuration for dfdaemon. + /// server is the download server configuration for dfdaemon. pub server: DownloadServer, - // rate_limit is the rate limit of the download speed in GiB/Mib/Kib per second. + /// rate_limit is the rate limit of the download speed in GiB/Mib/Kib per second. 
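`default_proxy_rule_filtered_query_params` above concatenates the per-protocol lists (s3, gcs, oss, obs, cos) and de-duplicates them through a `HashSet`. A minimal sketch of that merge with illustrative parameter names, not the full upstream lists:

```rust
use std::collections::HashSet;

fn merged_filtered_query_params() -> Vec<String> {
    // Illustrative subsets of the per-protocol lists above.
    let s3 = vec!["X-Amz-Signature", "X-Amz-Credential", "Expires"];
    let gcs = vec!["X-Goog-Signature", "Expires"];

    // Deduplicate across protocols, as default_proxy_rule_filtered_query_params does.
    let mut visited = HashSet::new();
    for param in s3.into_iter().chain(gcs) {
        visited.insert(param.to_string());
    }
    visited.into_iter().collect()
}

fn main() {
    let params = merged_filtered_query_params();
    // "Expires" appears once even though both source lists contain it.
    assert_eq!(params.iter().filter(|p| p.as_str() == "Expires").count(), 1);
    println!("{:?}", params);
}
```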
#[serde(with = "bytesize_serde", default = "default_download_rate_limit")] pub rate_limit: ByteSize, - // piece_timeout is the timeout for downloading a piece from source. + /// piece_timeout is the timeout for downloading a piece from source. #[serde(default = "default_download_piece_timeout", with = "humantime_serde")] pub piece_timeout: Duration, - // concurrent_piece_count is the number of concurrent pieces to download. + /// concurrent_piece_count is the number of concurrent pieces to download. #[serde(default = "default_download_concurrent_piece_count")] #[validate(range(min = 1))] pub concurrent_piece_count: u32, } -// Download implements Default. +/// Download implements Default. impl Default for Download { fn default() -> Self { Download { @@ -410,19 +410,19 @@ impl Default for Download { } } -// UploadServer is the upload server configuration for dfdaemon. +/// UploadServer is the upload server configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct UploadServer { - // ip is the listen ip of the grpc server. + /// ip is the listen ip of the grpc server. pub ip: Option, - // port is the port to the grpc server. + /// port is the port to the grpc server. #[serde(default = "default_upload_grpc_server_port")] pub port: u16, } -// UploadServer implements Default. +/// UploadServer implements Default. impl Default for UploadServer { fn default() -> Self { UploadServer { @@ -432,22 +432,22 @@ impl Default for UploadServer { } } -// Upload is the upload configuration for dfdaemon. +/// Upload is the upload configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Upload { - // server is the upload server configuration for dfdaemon. + /// server is the upload server configuration for dfdaemon. pub server: UploadServer, - // disable_shared indicates whether disable to share data for other peers. + /// disable_shared indicates whether disable to share data for other peers. pub disable_shared: bool, - // rate_limit is the rate limit of the upload speed in GiB/Mib/Kib per second. + /// rate_limit is the rate limit of the upload speed in GiB/Mib/Kib per second. #[serde(with = "bytesize_serde", default = "default_upload_rate_limit")] pub rate_limit: ByteSize, } -// Upload implements Default. +/// Upload implements Default. impl Default for Upload { fn default() -> Self { Upload { @@ -458,43 +458,43 @@ impl Default for Upload { } } -// Manager is the manager configuration for dfdaemon. +/// Manager is the manager configuration for dfdaemon. #[derive(Debug, Clone, Default, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Manager { - // addrs is manager addresses. + /// addrs is manager addresses. #[validate(length(min = 1))] pub addrs: Vec, } -// Scheduler is the scheduler configuration for dfdaemon. +/// Scheduler is the scheduler configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Scheduler { - // announce_interval is the interval to announce peer to the scheduler. - // Announcer will provide the scheduler with peer information for scheduling, - // peer information includes cpu, memory, etc. + /// announce_interval is the interval to announce peer to the scheduler. + /// Announcer will provide the scheduler with peer information for scheduling, + /// peer information includes cpu, memory, etc. 
#[serde( default = "default_scheduler_announce_interval", with = "humantime_serde" )] pub announce_interval: Duration, - // schedule_timeout is the timeout for scheduling. If the scheduling timeout, dfdaemon will back-to-source - // download if enable_back_to_source is true, otherwise dfdaemon will return download failed. + /// schedule_timeout is the timeout for scheduling. If the scheduling timeout, dfdaemon will back-to-source + /// download if enable_back_to_source is true, otherwise dfdaemon will return download failed. #[serde( default = "default_scheduler_schedule_timeout", with = "humantime_serde" )] pub schedule_timeout: Duration, - // max_schedule_count is the max count of schedule. + /// max_schedule_count is the max count of schedule. #[serde(default = "default_download_max_schedule_count")] #[validate(range(min = 1))] pub max_schedule_count: u32, } -// Scheduler implements Default. +/// Scheduler implements Default. impl Default for Scheduler { fn default() -> Self { Scheduler { @@ -505,28 +505,28 @@ impl Default for Scheduler { } } -// HostType is the type of the host. +/// HostType is the type of the host. #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Deserialize)] pub enum HostType { - // Normal indicates the peer is normal peer. + /// Normal indicates the peer is normal peer. #[serde(rename = "normal")] Normal, - // Super indicates the peer is super seed peer. + /// Super indicates the peer is super seed peer. #[default] #[serde(rename = "super")] Super, - // Strong indicates the peer is strong seed peer. + /// Strong indicates the peer is strong seed peer. #[serde(rename = "strong")] Strong, - // Weak indicates the peer is weak seed peer. + /// Weak indicates the peer is weak seed peer. #[serde(rename = "weak")] Weak, } -// HostType implements Display. +/// HostType implements Display. impl fmt::Display for HostType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { @@ -538,23 +538,23 @@ impl fmt::Display for HostType { } } -// SeedPeer is the seed peer configuration for dfdaemon. +/// SeedPeer is the seed peer configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct SeedPeer { - // enable indicates whether enable seed peer. + /// enable indicates whether enable seed peer. pub enable: bool, - // kind is the type of seed peer. + /// kind is the type of seed peer. #[serde(default, rename = "type")] pub kind: HostType, - // cluster_id is the cluster id of the seed peer cluster. + /// cluster_id is the cluster id of the seed peer cluster. #[serde(default = "default_seed_peer_cluster_id", rename = "clusterID")] #[validate(range(min = 1))] pub cluster_id: u64, - // keepalive_interval is the interval to keep alive with manager. + /// keepalive_interval is the interval to keep alive with manager. #[serde( default = "default_seed_peer_keepalive_interval", with = "humantime_serde" @@ -562,7 +562,7 @@ pub struct SeedPeer { pub keepalive_interval: Duration, } -// SeedPeer implements Default. +/// SeedPeer implements Default. impl Default for SeedPeer { fn default() -> Self { SeedPeer { @@ -574,11 +574,11 @@ impl Default for SeedPeer { } } -// Dynconfig is the dynconfig configuration for dfdaemon. +/// Dynconfig is the dynconfig configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Dynconfig { - // refresh_interval is the interval to refresh dynamic configuration from manager. 
+ /// refresh_interval is the interval to refresh dynamic configuration from manager. #[serde( default = "default_dynconfig_refresh_interval", with = "humantime_serde" @@ -586,7 +586,7 @@ pub struct Dynconfig { pub refresh_interval: Duration, } -// Dynconfig implements Default. +/// Dynconfig implements Default. impl Default for Dynconfig { fn default() -> Self { Dynconfig { @@ -595,28 +595,28 @@ impl Default for Dynconfig { } } -// Storage is the storage configuration for dfdaemon. +/// Storage is the storage configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Storage { - // dir is the directory to store task's metadata and content. + /// dir is the directory to store task's metadata and content. #[serde(default = "crate::default_storage_dir")] pub dir: PathBuf, - // keep indicates whether keep the task's metadata and content when the dfdaemon restarts. + /// keep indicates whether keep the task's metadata and content when the dfdaemon restarts. #[serde(default = "default_storage_keep")] pub keep: bool, - // write_buffer_size is the buffer size for writing piece to disk, default is 4KB. + /// write_buffer_size is the buffer size for writing piece to disk, default is 4KB. #[serde(default = "default_storage_write_buffer_size")] pub write_buffer_size: usize, - // read_buffer_size is the buffer size for reading piece from disk, default is 4KB. + /// read_buffer_size is the buffer size for reading piece from disk, default is 4KB. #[serde(default = "default_storage_read_buffer_size")] pub read_buffer_size: usize, } -// Storage implements Default. +/// Storage implements Default. impl Default for Storage { fn default() -> Self { Storage { @@ -628,11 +628,11 @@ impl Default for Storage { } } -// Policy is the policy configuration for gc. +/// Policy is the policy configuration for gc. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Policy { - // task_ttl is the ttl of the task. + /// task_ttl is the ttl of the task. #[serde( default = "default_gc_policy_task_ttl", rename = "taskTTL", @@ -640,20 +640,20 @@ pub struct Policy { )] pub task_ttl: Duration, - // dist_high_threshold_percent is the high threshold percent of the disk usage. - // If the disk usage is greater than the threshold, dfdaemon will do gc. + /// dist_high_threshold_percent is the high threshold percent of the disk usage. + /// If the disk usage is greater than the threshold, dfdaemon will do gc. #[serde(default = "default_gc_policy_dist_high_threshold_percent")] #[validate(range(min = 1, max = 99))] pub dist_high_threshold_percent: u8, - // dist_low_threshold_percent is the low threshold percent of the disk usage. - // If the disk usage is less than the threshold, dfdaemon will stop gc. + /// dist_low_threshold_percent is the low threshold percent of the disk usage. + /// If the disk usage is less than the threshold, dfdaemon will stop gc. #[serde(default = "default_gc_policy_dist_low_threshold_percent")] #[validate(range(min = 1, max = 99))] pub dist_low_threshold_percent: u8, } -// Policy implements Default. +/// Policy implements Default. impl Default for Policy { fn default() -> Self { Policy { @@ -664,19 +664,19 @@ impl Default for Policy { } } -// GC is the gc configuration for dfdaemon. +/// GC is the gc configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct GC { - // interval is the interval to do gc. 
+ /// interval is the interval to do gc. #[serde(default = "default_gc_interval", with = "humantime_serde")] pub interval: Duration, - // policy is the gc policy. + /// policy is the gc policy. pub policy: Policy, } -// GC implements Default. +/// GC implements Default. impl Default for GC { fn default() -> Self { GC { @@ -686,45 +686,45 @@ impl Default for GC { } } -// ProxyServer is the proxy server configuration for dfdaemon. +/// ProxyServer is the proxy server configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct ProxyServer { - // ip is the listen ip of the proxy server. + /// ip is the listen ip of the proxy server. pub ip: Option, - // port is the port to the proxy server. + /// port is the port to the proxy server. #[serde(default = "default_proxy_server_port")] pub port: u16, - // ca_cert is the root CA cert path with PEM format for the proxy server to generate the server cert. - // - // If ca_cert is empty, proxy will generate a smaple CA cert by rcgen::generate_simple_self_signed. - // When client requests via the proxy, the client should not verify the server cert and set - // insecure to true. - // - // If ca_cert is not empty, proxy will sign the server cert with the CA cert. If openssl is installed, - // you can use openssl to generate the root CA cert and make the system trust the root CA cert. - // Then set the ca_cert and ca_key to the root CA cert and key path. Dfdaemon generates the server cert - // and key, and signs the server cert with the root CA cert. When client requests via the proxy, - // the proxy can intercept the request by the server cert. + /// ca_cert is the root CA cert path with PEM format for the proxy server to generate the server cert. + /// + /// If ca_cert is empty, proxy will generate a smaple CA cert by rcgen::generate_simple_self_signed. + /// When client requests via the proxy, the client should not verify the server cert and set + /// insecure to true. + /// + /// If ca_cert is not empty, proxy will sign the server cert with the CA cert. If openssl is installed, + /// you can use openssl to generate the root CA cert and make the system trust the root CA cert. + /// Then set the ca_cert and ca_key to the root CA cert and key path. Dfdaemon generates the server cert + /// and key, and signs the server cert with the root CA cert. When client requests via the proxy, + /// the proxy can intercept the request by the server cert. pub ca_cert: Option, - // ca_key is the root CA key path with PEM format for the proxy server to generate the server cert. - // - // If ca_key is empty, proxy will generate a smaple CA key by rcgen::generate_simple_self_signed. - // When client requests via the proxy, the client should not verify the server cert and set - // insecure to true. - // - // If ca_key is not empty, proxy will sign the server cert with the CA cert. If openssl is installed, - // you can use openssl to generate the root CA cert and make the system trust the root CA cert. - // Then set the ca_cert and ca_key to the root CA cert and key path. Dfdaemon generates the server cert - // and key, and signs the server cert with the root CA cert. When client requests via the proxy, - // the proxy can intercept the request by the server cert. + /// ca_key is the root CA key path with PEM format for the proxy server to generate the server cert. + /// + /// If ca_key is empty, proxy will generate a smaple CA key by rcgen::generate_simple_self_signed. 
+ /// When client requests via the proxy, the client should not verify the server cert and set + /// insecure to true. + /// + /// If ca_key is not empty, proxy will sign the server cert with the CA cert. If openssl is installed, + /// you can use openssl to generate the root CA cert and make the system trust the root CA cert. + /// Then set the ca_cert and ca_key to the root CA cert and key path. Dfdaemon generates the server cert + /// and key, and signs the server cert with the root CA cert. When client requests via the proxy, + /// the proxy can intercept the request by the server cert. pub ca_key: Option, } -// ProxyServer implements Default. +/// ProxyServer implements Default. impl Default for ProxyServer { fn default() -> Self { Self { @@ -736,31 +736,31 @@ impl Default for ProxyServer { } } -// Rule is the proxy rule configuration. +/// Rule is the proxy rule configuration. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Rule { - // regex is the regex of the request url. + /// regex is the regex of the request url. #[serde(with = "serde_regex")] pub regex: Regex, - // use_tls indicates whether use tls for the proxy backend. + /// use_tls indicates whether use tls for the proxy backend. #[serde(rename = "useTLS")] pub use_tls: bool, - // redirect is the redirect url. + /// redirect is the redirect url. pub redirect: Option, - // filtered_query_params is the filtered query params to generate the task id. - // When filter is ["Signature", "Expires", "ns"], for example: - // http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io - // will generate the same task id. - // Default value includes the filtered query params of s3, gcs, oss, obs, cos. + /// filtered_query_params is the filtered query params to generate the task id. + /// When filter is ["Signature", "Expires", "ns"], for example: + /// http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io + /// will generate the same task id. + /// Default value includes the filtered query params of s3, gcs, oss, obs, cos. #[serde(default = "default_proxy_rule_filtered_query_params")] pub filtered_query_params: Vec, } -// Rule implements Default. +/// Rule implements Default. impl Default for Rule { fn default() -> Self { Self { @@ -772,24 +772,24 @@ impl Default for Rule { } } -// RegistryMirror is the registry mirror configuration. +/// RegistryMirror is the registry mirror configuration. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct RegistryMirror { - // addr is the default address of the registry mirror. Proxy will start a registry mirror service for the - // client to pull the image. The client can use the default address of the registry mirror in - // configuration to pull the image. The `X-Dragonfly-Registry` header can instead of the default address - // of registry mirror. + /// addr is the default address of the registry mirror. Proxy will start a registry mirror service for the + /// client to pull the image. The client can use the default address of the registry mirror in + /// configuration to pull the image. The `X-Dragonfly-Registry` header can instead of the default address + /// of registry mirror. #[serde(default = "default_proxy_registry_mirror_addr")] pub addr: String, - // certs is the client certs path with PEM format for the registry. 
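The `filtered_query_params` doc in `Rule` above says URLs that differ only in the filtered parameters must yield the same task id. A standalone approximation of that normalization step with the `url` crate; the actual task-id hashing lives elsewhere in the client and is not reproduced here:

```rust
use url::Url;

fn strip_filtered_params(raw: &str, filtered: &[&str]) -> Result<String, url::ParseError> {
    let mut url = Url::parse(raw)?;

    // Keep only the query pairs that are not in the filtered list.
    let kept: Vec<(String, String)> = url
        .query_pairs()
        .filter(|(k, _)| !filtered.contains(&k.as_ref()))
        .map(|(k, v)| (k.into_owned(), v.into_owned()))
        .collect();

    url.set_query(None);
    if !kept.is_empty() {
        url.query_pairs_mut().extend_pairs(kept);
    }
    Ok(url.to_string())
}

fn main() -> Result<(), url::ParseError> {
    let filtered = ["Signature", "Expires"];
    let a = strip_filtered_params("http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io", &filtered)?;
    let b = strip_filtered_params("http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io", &filtered)?;
    // Both URLs keep only ns=docker.io, so they map to the same task id.
    assert_eq!(a, b);
    println!("{}", a);
    Ok(())
}
```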
- // If registry use self-signed cert, the client should set the - // cert for the registry mirror. + /// certs is the client certs path with PEM format for the registry. + /// If registry use self-signed cert, the client should set the + /// cert for the registry mirror. pub certs: Option, } -// RegistryMirror implements Default. +/// RegistryMirror implements Default. impl Default for RegistryMirror { fn default() -> Self { Self { @@ -799,32 +799,32 @@ impl Default for RegistryMirror { } } -// Proxy is the proxy configuration for dfdaemon. +/// Proxy is the proxy configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Proxy { - // server is the proxy server configuration for dfdaemon. + /// server is the proxy server configuration for dfdaemon. pub server: ProxyServer, - // rules is the proxy rules. + /// rules is the proxy rules. pub rules: Option>, - // registry_mirror is implementation of the registry mirror in the proxy. + /// registry_mirror is implementation of the registry mirror in the proxy. pub registry_mirror: RegistryMirror, - // disable_back_to_source indicates whether disable to download back-to-source - // when download failed. + /// disable_back_to_source indicates whether disable to download back-to-source + /// when download failed. pub disable_back_to_source: bool, - // prefetch pre-downloads full of the task when download with range request. + /// prefetch pre-downloads full of the task when download with range request. pub prefetch: bool, - // read_buffer_size is the buffer size for reading piece from disk, default is 1KB. + /// read_buffer_size is the buffer size for reading piece from disk, default is 1KB. #[serde(default = "default_proxy_read_buffer_size")] pub read_buffer_size: usize, } -// Proxy implements Default. +/// Proxy implements Default. impl Default for Proxy { fn default() -> Self { Self { @@ -838,35 +838,35 @@ impl Default for Proxy { } } -// Security is the security configuration for dfdaemon. +/// Security is the security configuration for dfdaemon. #[derive(Debug, Clone, Default, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Security { - // enable indicates whether enable security. + /// enable indicates whether enable security. pub enable: bool, } -// Network is the network configuration for dfdaemon. +/// Network is the network configuration for dfdaemon. #[derive(Debug, Clone, Default, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Network { - // enable_ipv6 indicates whether enable ipv6. + /// enable_ipv6 indicates whether enable ipv6. pub enable_ipv6: bool, } -// HealthServer is the health server configuration for dfdaemon. +/// HealthServer is the health server configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct HealthServer { - // ip is the listen ip of the health server. + /// ip is the listen ip of the health server. pub ip: Option, - // port is the port to the health server. + /// port is the port to the health server. #[serde(default = "default_health_server_port")] pub port: u16, } -// HealthServer implements Default. +/// HealthServer implements Default. impl Default for HealthServer { fn default() -> Self { Self { @@ -876,27 +876,27 @@ impl Default for HealthServer { } } -// Health is the health configuration for dfdaemon. +/// Health is the health configuration for dfdaemon. 
#[derive(Debug, Clone, Default, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Health { - // server is the health server configuration for dfdaemon. + /// server is the health server configuration for dfdaemon. pub server: HealthServer, } -// MetricsServer is the metrics server configuration for dfdaemon. +/// MetricsServer is the metrics server configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct MetricsServer { - // ip is the listen ip of the metrics server. + /// ip is the listen ip of the metrics server. pub ip: Option, - // port is the port to the metrics server. + /// port is the port to the metrics server. #[serde(default = "default_metrics_server_port")] pub port: u16, } -// MetricsServer implements Default. +/// MetricsServer implements Default. impl Default for MetricsServer { fn default() -> Self { Self { @@ -906,27 +906,27 @@ impl Default for MetricsServer { } } -// Metrics is the metrics configuration for dfdaemon. +/// Metrics is the metrics configuration for dfdaemon. #[derive(Debug, Clone, Default, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Metrics { - // server is the metrics server configuration for dfdaemon. + /// server is the metrics server configuration for dfdaemon. pub server: MetricsServer, } -// StatsServer is the stats server configuration for dfdaemon. +/// StatsServer is the stats server configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct StatsServer { - // ip is the listen ip of the stats server. + /// ip is the listen ip of the stats server. pub ip: Option, - // port is the port to the stats server. + /// port is the port to the stats server. #[serde(default = "default_stats_server_port")] pub port: u16, } -// StatsServer implements Default. +/// StatsServer implements Default. impl Default for StatsServer { fn default() -> Self { Self { @@ -936,101 +936,101 @@ impl Default for StatsServer { } } -// Stats is the stats configuration for dfdaemon. +/// Stats is the stats configuration for dfdaemon. #[derive(Debug, Clone, Default, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Stats { - // server is the stats server configuration for dfdaemon. + /// server is the stats server configuration for dfdaemon. pub server: StatsServer, } -// Tracing is the tracing configuration for dfdaemon. +/// Tracing is the tracing configuration for dfdaemon. #[derive(Debug, Clone, Default, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Tracing { - // addr is the address to report tracing log. + /// addr is the address to report tracing log. pub addr: Option, - // flamegraph indicates whether enable flamegraph tracing. + /// flamegraph indicates whether enable flamegraph tracing. pub flamegraph: bool, } -// Config is the configuration for dfdaemon. +/// Config is the configuration for dfdaemon. #[derive(Debug, Clone, Default, Validate, Deserialize)] #[serde(default, rename_all = "camelCase")] pub struct Config { - // host is the host configuration for dfdaemon. + /// host is the host configuration for dfdaemon. #[validate] pub host: Host, - // server is the server configuration for dfdaemon. + /// server is the server configuration for dfdaemon. #[validate] pub server: Server, - // download is the download configuration for dfdaemon. + /// download is the download configuration for dfdaemon. 
#[validate] pub download: Download, - // upload is the upload configuration for dfdaemon. + /// upload is the upload configuration for dfdaemon. #[validate] pub upload: Upload, - // manager is the manager configuration for dfdaemon. + /// manager is the manager configuration for dfdaemon. #[validate] pub manager: Manager, - // scheduler is the scheduler configuration for dfdaemon. + /// scheduler is the scheduler configuration for dfdaemon. #[validate] pub scheduler: Scheduler, - // seed_peer is the seed peer configuration for dfdaemon. + /// seed_peer is the seed peer configuration for dfdaemon. #[validate] pub seed_peer: SeedPeer, - // dynconfig is the dynconfig configuration for dfdaemon. + /// dynconfig is the dynconfig configuration for dfdaemon. #[validate] pub dynconfig: Dynconfig, - // storage is the storage configuration for dfdaemon. + /// storage is the storage configuration for dfdaemon. #[validate] pub storage: Storage, - // gc is the gc configuration for dfdaemon. + /// gc is the gc configuration for dfdaemon. #[validate] pub gc: GC, - // proxy is the proxy configuration for dfdaemon. + /// proxy is the proxy configuration for dfdaemon. #[validate] pub proxy: Proxy, - // security is the security configuration for dfdaemon. + /// security is the security configuration for dfdaemon. #[validate] pub security: Security, - // health is the health configuration for dfdaemon. + /// health is the health configuration for dfdaemon. #[validate] pub health: Health, - // metrics is the metrics configuration for dfdaemon. + /// metrics is the metrics configuration for dfdaemon. #[validate] pub metrics: Metrics, - // stats is the stats configuration for dfdaemon. + /// stats is the stats configuration for dfdaemon. #[validate] pub stats: Stats, - // tracing is the tracing configuration for dfdaemon. + /// tracing is the tracing configuration for dfdaemon. #[validate] pub tracing: Tracing, - // network is the network configuration for dfdaemon. + /// network is the network configuration for dfdaemon. #[validate] pub network: Network, } -// Config implements the config operation of dfdaemon. +/// Config implements the config operation of dfdaemon. impl Config { - // load loads configuration from file. + /// load loads configuration from file. #[instrument(skip_all)] pub async fn load(path: &PathBuf) -> Result { // Load configuration from file. @@ -1046,7 +1046,7 @@ impl Config { Ok(config) } - // convert converts the configuration. + /// convert converts the configuration. #[instrument(skip_all)] fn convert(&mut self) { // Convert advertise ip. diff --git a/dragonfly-client-config/src/dfget.rs b/dragonfly-client-config/src/dfget.rs index 48e97ddd..2e87dbde 100644 --- a/dragonfly-client-config/src/dfget.rs +++ b/dragonfly-client-config/src/dfget.rs @@ -16,10 +16,10 @@ use std::path::PathBuf; -// NAME is the name of dfget. +/// NAME is the name of dfget. pub const NAME: &str = "dfget"; -// default_dfget_log_dir is the default log directory for dfget. +/// default_dfget_log_dir is the default log directory for dfget. pub fn default_dfget_log_dir() -> PathBuf { crate::default_log_dir().join(NAME) } diff --git a/dragonfly-client-config/src/dfinit.rs b/dragonfly-client-config/src/dfinit.rs index 6c355761..d66931bc 100644 --- a/dragonfly-client-config/src/dfinit.rs +++ b/dragonfly-client-config/src/dfinit.rs @@ -24,40 +24,40 @@ use std::path::PathBuf; use tracing::{info, instrument}; use validator::Validate; -// NAME is the name of dfinit. +/// NAME is the name of dfinit. 
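// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the read-then-deserialize flow
// behind `Config::load` above, in standalone form. `ConfigSketch`, its
// `verbose` field, and the file name are placeholders; the real method returns
// the crate's own Result and also converts and validates the loaded values.
use serde::Deserialize;
use std::path::Path;

#[derive(Debug, Default, Deserialize)]
#[serde(default, rename_all = "camelCase")]
struct ConfigSketch {
    verbose: bool,
}

async fn load_sketch(path: &Path) -> Result<ConfigSketch, Box<dyn std::error::Error>> {
    // Load configuration from file, then deserialize the YAML into a typed config.
    let content = tokio::fs::read_to_string(path).await?;
    Ok(serde_yaml::from_str(&content)?)
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    tokio::fs::write("dfdaemon-sketch.yaml", "verbose: true\n").await?;
    let config = load_sketch(Path::new("dfdaemon-sketch.yaml")).await?;
    assert!(config.verbose);
    Ok(())
}
// ---------------------------------------------------------------------------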
pub const NAME: &str = "dfinit"; -// default_dfinit_config_path is the default config path for dfinit. +/// default_dfinit_config_path is the default config path for dfinit. #[inline] pub fn default_dfinit_config_path() -> PathBuf { crate::default_config_dir().join("dfinit.yaml") } -// default_dfinit_log_dir is the default log directory for dfinit. +/// default_dfinit_log_dir is the default log directory for dfinit. pub fn default_dfinit_log_dir() -> PathBuf { crate::default_log_dir().join(NAME) } -// default_container_runtime_containerd_config_path is the default containerd configuration path. +/// default_container_runtime_containerd_config_path is the default containerd configuration path. #[inline] fn default_container_runtime_containerd_config_path() -> PathBuf { PathBuf::from("/etc/containerd/config.toml") } -// default_container_runtime_docker_config_path is the default docker configuration path. +/// default_container_runtime_docker_config_path is the default docker configuration path. #[inline] fn default_container_runtime_docker_config_path() -> PathBuf { PathBuf::from("/etc/docker/daemon.json") } -// default_container_runtime_crio_config_path is the default cri-o configuration path. +/// default_container_runtime_crio_config_path is the default cri-o configuration path. #[inline] fn default_container_runtime_crio_config_path() -> PathBuf { PathBuf::from("/etc/containers/registries.conf") } -// default_container_runtime_crio_unqualified_search_registries is the default unqualified search registries of cri-o, -// refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#global-settings. +/// default_container_runtime_crio_unqualified_search_registries is the default unqualified search registries of cri-o, +/// refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#global-settings. #[inline] fn default_container_runtime_crio_unqualified_search_registries() -> Vec { vec![ @@ -67,7 +67,7 @@ fn default_container_runtime_crio_unqualified_search_registries() -> Vec ] } -// default_proxy_addr is the default proxy address of dfdaemon. +/// default_proxy_addr is the default proxy address of dfdaemon. #[inline] fn default_proxy_addr() -> String { format!( @@ -77,95 +77,95 @@ fn default_proxy_addr() -> String { ) } -// default_container_runtime_containerd_registry_host_capabilities is the default -// capabilities of the containerd registry. +/// default_container_runtime_containerd_registry_host_capabilities is the default +/// capabilities of the containerd registry. #[inline] fn default_container_runtime_containerd_registry_capabilities() -> Vec { vec!["pull".to_string(), "resolve".to_string()] } -// Registry is the registry configuration for containerd. +/// Registry is the registry configuration for containerd. #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)] #[serde(default, rename_all = "camelCase")] pub struct ContainerdRegistry { - // host_namespace is the location where container images and artifacts are sourced, - // refer to https://github.com/containerd/containerd/blob/main/docs/hosts.md#registry-host-namespace. - // The registry host namespace portion is [registry_host_name|IP address][:port], such as - // docker.io, ghcr.io, gcr.io, etc. + /// host_namespace is the location where container images and artifacts are sourced, + /// refer to https://github.com/containerd/containerd/blob/main/docs/hosts.md#registry-host-namespace. 
+ /// The registry host namespace portion is [registry_host_name|IP address][:port], such as + /// docker.io, ghcr.io, gcr.io, etc. pub host_namespace: String, - // server_addr specifies the default server for this registry host namespace, refer to - // https://github.com/containerd/containerd/blob/main/docs/hosts.md#server-field. + /// server_addr specifies the default server for this registry host namespace, refer to + /// https://github.com/containerd/containerd/blob/main/docs/hosts.md#server-field. pub server_addr: String, - // capabilities is the list of capabilities in containerd configuration, refer to - // https://github.com/containerd/containerd/blob/main/docs/hosts.md#capabilities-field. + /// capabilities is the list of capabilities in containerd configuration, refer to + /// https://github.com/containerd/containerd/blob/main/docs/hosts.md#capabilities-field. #[serde(default = "default_container_runtime_containerd_registry_capabilities")] pub capabilities: Vec, - // skip_verify is the flag to skip verifying the server's certificate, refer to - // https://github.com/containerd/containerd/blob/main/docs/hosts.md#bypass-tls-verification-example. + /// skip_verify is the flag to skip verifying the server's certificate, refer to + /// https://github.com/containerd/containerd/blob/main/docs/hosts.md#bypass-tls-verification-example. pub skip_verify: Option, - // ca (Certificate Authority Certification) can be set to a path or an array of paths each pointing - // to a ca file for use in authenticating with the registry namespace, refer to - // https://github.com/containerd/containerd/blob/main/docs/hosts.md#ca-field. + /// ca (Certificate Authority Certification) can be set to a path or an array of paths each pointing + /// to a ca file for use in authenticating with the registry namespace, refer to + /// https://github.com/containerd/containerd/blob/main/docs/hosts.md#ca-field. pub ca: Option>, } -// Containerd is the containerd configuration for dfinit. +/// Containerd is the containerd configuration for dfinit. #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)] #[serde(default, rename_all = "camelCase")] pub struct Containerd { - // config_path is the path of containerd configuration file. + /// config_path is the path of containerd configuration file. #[serde(default = "default_container_runtime_containerd_config_path")] pub config_path: PathBuf, - // registries is the list of containerd registries. + /// registries is the list of containerd registries. pub registries: Vec, } -// CRIORegistry is the registry configuration for cri-o. +/// CRIORegistry is the registry configuration for cri-o. #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize, PartialEq, Eq)] #[serde(default, rename_all = "camelCase")] pub struct CRIORegistry { - // prefix is the prefix of the user-specified image name, refer to - // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table. + /// prefix is the prefix of the user-specified image name, refer to + /// https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table. pub prefix: String, - // location accepts the same format as the prefix field, and specifies the physical location of the prefix-rooted namespace, - // refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#remapping-and-mirroring-registries. 
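// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the hosts.toml shape that a
// ContainerdRegistry entry (host_namespace, server_addr, capabilities,
// skip_verify) corresponds to, following the containerd hosts.md linked above.
// The mirror address 127.0.0.1:65001 is a placeholder; toml_edit is used only
// to confirm the document parses, as this repository does elsewhere.
use toml_edit::DocumentMut;

fn main() {
    let hosts_toml = r#"
server = "https://registry-1.docker.io"

[host."http://127.0.0.1:65001"]
capabilities = ["pull", "resolve"]
skip_verify = true
"#;

    let doc: DocumentMut = hosts_toml.parse().expect("valid hosts.toml");
    println!("{doc}");
}
// ---------------------------------------------------------------------------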
+ /// location accepts the same format as the prefix field, and specifies the physical location of the prefix-rooted namespace, + /// refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#remapping-and-mirroring-registries. pub location: String, } -// CRIO is the cri-o configuration for dfinit. +/// CRIO is the cri-o configuration for dfinit. #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)] #[serde(default, rename_all = "camelCase")] pub struct CRIO { - // config_path is the path of cri-o registries's configuration file. + /// config_path is the path of cri-o registries's configuration file. #[serde(default = "default_container_runtime_crio_config_path")] pub config_path: PathBuf, - // unqualified_search_registries is an array of host[:port] registries to try when pulling an unqualified image, in order. - // Refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#global-settings. + /// unqualified_search_registries is an array of host[:port] registries to try when pulling an unqualified image, in order. + /// Refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#global-settings. #[serde(default = "default_container_runtime_crio_unqualified_search_registries")] pub unqualified_search_registries: Vec, - // registries is the list of cri-o registries, refer to - // https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#namespaced-registry-settings. + /// registries is the list of cri-o registries, refer to + /// https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#namespaced-registry-settings. pub registries: Vec, } -// Docker is the docker configuration for dfinit. +/// Docker is the docker configuration for dfinit. #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)] #[serde(default, rename_all = "camelCase")] pub struct Docker { - // config_path is the path of docker configuration file. + /// config_path is the path of docker configuration file. #[serde(default = "default_container_runtime_docker_config_path")] pub config_path: PathBuf, } -// ContainerRuntime is the container runtime configuration for dfinit. +/// ContainerRuntime is the container runtime configuration for dfinit. #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)] #[serde(default, rename_all = "camelCase")] pub struct ContainerRuntime { @@ -173,7 +173,7 @@ pub struct ContainerRuntime { pub config: Option, } -// ContainerRuntimeConfig is the container runtime configuration for dfinit. +/// ContainerRuntimeConfig is the container runtime configuration for dfinit. #[derive(Debug, Clone)] pub enum ContainerRuntimeConfig { Containerd(Containerd), @@ -181,7 +181,7 @@ pub enum ContainerRuntimeConfig { CRIO(CRIO), } -// Serialize is the implementation of the Serialize trait for ContainerRuntimeConfig. +/// Serialize is the implementation of the Serialize trait for ContainerRuntimeConfig. impl Serialize for ContainerRuntimeConfig { fn serialize(&self, serializer: S) -> std::prelude::v1::Result where @@ -207,7 +207,7 @@ impl Serialize for ContainerRuntimeConfig { } } -// Deserialize is the implementation of the Deserialize trait for ContainerRuntimeConfig. +/// Deserialize is the implementation of the Deserialize trait for ContainerRuntimeConfig. 
impl<'de> Deserialize<'de> for ContainerRuntimeConfig { fn deserialize(deserializer: D) -> std::prelude::v1::Result where @@ -241,7 +241,7 @@ impl<'de> Deserialize<'de> for ContainerRuntimeConfig { } } -// Proxy is the proxy server configuration for dfdaemon. +/// Proxy is the proxy server configuration for dfdaemon. #[derive(Debug, Clone, Validate, Deserialize, Serialize)] #[serde(default, rename_all = "camelCase")] pub struct Proxy { @@ -250,7 +250,7 @@ pub struct Proxy { pub addr: String, } -// Proxy implements Default. +/// Proxy implements Default. impl Default for Proxy { fn default() -> Self { Self { @@ -259,22 +259,22 @@ impl Default for Proxy { } } -// Config is the configuration for dfinit. +/// Config is the configuration for dfinit. #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)] #[serde(default, rename_all = "camelCase")] pub struct Config { - // proxy is the configuration of the dfdaemon's HTTP/HTTPS proxy. + /// proxy is the configuration of the dfdaemon's HTTP/HTTPS proxy. #[validate] pub proxy: Proxy, - // container_runtime is the container runtime configuration. + /// container_runtime is the container runtime configuration. #[validate] pub container_runtime: ContainerRuntime, } -// Config implements the config operation of dfinit. +/// Config implements the config operation of dfinit. impl Config { - // load loads configuration from file. + /// load loads configuration from file. #[instrument(skip_all)] pub fn load(path: &PathBuf) -> Result { // Load configuration from file. diff --git a/dragonfly-client-config/src/dfstore.rs b/dragonfly-client-config/src/dfstore.rs index f2e27e65..b411a4f6 100644 --- a/dragonfly-client-config/src/dfstore.rs +++ b/dragonfly-client-config/src/dfstore.rs @@ -16,10 +16,10 @@ use std::path::PathBuf; -// NAME is the name of dfstore. +/// NAME is the name of dfstore. pub const NAME: &str = "dfstore"; -// default_dfstore_log_dir is the default log directory for dfstore. +/// default_dfstore_log_dir is the default log directory for dfstore. pub fn default_dfstore_log_dir() -> PathBuf { crate::default_log_dir().join(NAME) } diff --git a/dragonfly-client-config/src/lib.rs b/dragonfly-client-config/src/lib.rs index b7b34efe..6e734099 100644 --- a/dragonfly-client-config/src/lib.rs +++ b/dragonfly-client-config/src/lib.rs @@ -22,22 +22,22 @@ pub mod dfget; pub mod dfinit; pub mod dfstore; -// SERVICE_NAME is the name of the service. +/// SERVICE_NAME is the name of the service. pub const SERVICE_NAME: &str = "dragonfly"; -// NAME is the name of the package. +/// NAME is the name of the package. pub const NAME: &str = "client"; -// CARGO_PKG_VERSION is the version of the cargo package. +/// CARGO_PKG_VERSION is the version of the cargo package. pub const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); -// CARGO_PKG_RUSTC_VERSION is the minimum Rust version supported by the package, not the current Rust version. +/// CARGO_PKG_RUSTC_VERSION is the minimum Rust version supported by the package, not the current Rust version. pub const CARGO_PKG_RUSTC_VERSION: &str = env!("CARGO_PKG_RUST_VERSION"); -// GIT_HASH is the git hash of the package. +/// GIT_HASH is the git hash of the package. pub const GIT_HASH: Option<&str> = option_env!("GIT_HASH"); -// default_root_dir is the default root directory for client. +/// default_root_dir is the default root directory for client. 
pub fn default_root_dir() -> PathBuf { #[cfg(target_os = "linux")] return PathBuf::from("/var/run/dragonfly/"); @@ -46,7 +46,7 @@ pub fn default_root_dir() -> PathBuf { return home::home_dir().unwrap().join(".dragonfly"); } -// default_config_dir is the default config directory for client. +/// default_config_dir is the default config directory for client. pub fn default_config_dir() -> PathBuf { #[cfg(target_os = "linux")] return PathBuf::from("/etc/dragonfly/"); @@ -55,7 +55,7 @@ pub fn default_config_dir() -> PathBuf { return home::home_dir().unwrap().join(".dragonfly").join("config"); } -// default_log_dir is the default log directory for client. +/// default_log_dir is the default log directory for client. pub fn default_log_dir() -> PathBuf { #[cfg(target_os = "linux")] return PathBuf::from("/var/log/dragonfly/"); @@ -64,7 +64,7 @@ pub fn default_log_dir() -> PathBuf { return home::home_dir().unwrap().join(".dragonfly").join("logs"); } -// default_storage_dir is the default storage directory for client. +/// default_storage_dir is the default storage directory for client. pub fn default_storage_dir() -> PathBuf { #[cfg(target_os = "linux")] return PathBuf::from("/var/lib/dragonfly/"); @@ -73,7 +73,7 @@ pub fn default_storage_dir() -> PathBuf { return home::home_dir().unwrap().join(".dragonfly").join("storage"); } -// default_lock_dir is the default lock directory for client. +/// default_lock_dir is the default lock directory for client. pub fn default_lock_dir() -> PathBuf { #[cfg(target_os = "linux")] return PathBuf::from("/var/lock/dragonfly/"); @@ -82,7 +82,7 @@ pub fn default_lock_dir() -> PathBuf { return home::home_dir().unwrap().join(".dragonfly"); } -// default_plugin_dir is the default plugin directory for client. +/// default_plugin_dir is the default plugin directory for client. pub fn default_plugin_dir() -> PathBuf { #[cfg(target_os = "linux")] return PathBuf::from("/var/lib/dragonfly/plugins/"); @@ -91,7 +91,7 @@ pub fn default_plugin_dir() -> PathBuf { return home::home_dir().unwrap().join(".dragonfly").join("plugins"); } -// default_cache_dir is the default cache directory for client. +/// default_cache_dir is the default cache directory for client. pub fn default_cache_dir() -> PathBuf { #[cfg(target_os = "linux")] return PathBuf::from("/var/cache/dragonfly/"); diff --git a/dragonfly-client-core/src/error/errors.rs b/dragonfly-client-core/src/error/errors.rs index 74b79d50..32fd2197 100644 --- a/dragonfly-client-core/src/error/errors.rs +++ b/dragonfly-client-core/src/error/errors.rs @@ -18,7 +18,7 @@ use std::{error::Error as ErrorTrait, fmt}; use super::message::Message; -// ErrorType is the type of the error. +/// ErrorType is the type of the error. #[derive(Debug, PartialEq, Eq, Clone)] pub enum ErrorType { StorageError, @@ -34,9 +34,9 @@ pub enum ErrorType { PluginError, } -// ErrorType implements the display for the error type. +/// ErrorType implements the display for the error type. impl ErrorType { - // as_str returns the string of the error type. + /// as_str returns the string of the error type. pub fn as_str(&self) -> &'static str { match self { ErrorType::StorageError => "StorageError", @@ -54,7 +54,7 @@ impl ErrorType { } } -// ExternalError is the external error. +/// ExternalError is the external error. #[derive(Debug)] pub struct ExternalError { pub etype: ErrorType, @@ -62,9 +62,9 @@ pub struct ExternalError { pub context: Option, } -// ExternalError implements the error trait. +/// ExternalError implements the error trait. 
impl ExternalError { - // new returns a new ExternalError. + /// new returns a new ExternalError. pub fn new(etype: ErrorType) -> Self { ExternalError { etype, @@ -73,19 +73,19 @@ impl ExternalError { } } - // with_context returns a new ExternalError with the context. + /// with_context returns a new ExternalError with the context. pub fn with_context(mut self, message: impl Into) -> Self { self.context = Some(message.into()); self } - // with_cause returns a new ExternalError with the cause. + /// with_cause returns a new ExternalError with the cause. pub fn with_cause(mut self, cause: Box) -> Self { self.cause = Some(cause); self } - // chain_display returns the display of the error with the previous error. + /// chain_display returns the display of the error with the previous error. fn chain_display( &self, previous: Option<&ExternalError>, @@ -112,17 +112,17 @@ impl ExternalError { } } -// ExternalError implements the display for the error. +/// ExternalError implements the display for the error. impl fmt::Display for ExternalError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.chain_display(None, f) } } -// ExternalError implements the error trait. +/// ExternalError implements the error trait. impl ErrorTrait for ExternalError {} -// OrErr is the trait to extend the result with error. +/// OrErr is the trait to extend the result with error. pub trait OrErr { /// Wrap the E in [Result] with new [ErrorType] and context, the existing E will be the cause. /// @@ -136,7 +136,7 @@ pub trait OrErr { E: Into>; } -// OrErr implements the OrErr for Result. +/// OrErr implements the OrErr for Result. impl OrErr for Result { fn or_err(self, et: ErrorType) -> Result where @@ -157,28 +157,28 @@ impl OrErr for Result { } } -// BackendError is the error for backend. +/// BackendError is the error for backend. #[derive(Debug, thiserror::Error)] #[error("backend error {message}")] pub struct BackendError { - // message is the error message. + /// message is the error message. pub message: String, - // status_code is the status code of the response. + /// status_code is the status code of the response. pub status_code: Option, - // header is the headers of the response. + /// header is the headers of the response. pub header: Option, } -// DownloadFromRemotePeerFailed is the error when the download from remote peer is failed. +/// DownloadFromRemotePeerFailed is the error when the download from remote peer is failed. #[derive(Debug, thiserror::Error)] #[error("download piece {piece_number} from remote peer {parent_id} failed")] pub struct DownloadFromRemotePeerFailed { - // piece_number is the number of the piece. + /// piece_number is the number of the piece. pub piece_number: u32, - // parent_id is the parent id of the piece. + /// parent_id is the parent id of the piece. pub parent_id: String, } diff --git a/dragonfly-client-core/src/error/message.rs b/dragonfly-client-core/src/error/message.rs index 9cd32ea0..536fb85b 100644 --- a/dragonfly-client-core/src/error/message.rs +++ b/dragonfly-client-core/src/error/message.rs @@ -16,29 +16,29 @@ use std::borrow::Cow; -// Message is the message for the error. +/// Message is the message for the error. #[derive(Debug)] pub struct Message(Cow<'static, str>); -// From<&'static str> for Message implements the conversion from &'static str to Message. +/// From<&'static str> for Message implements the conversion from &'static str to Message. impl From<&'static str> for Message { - // from returns the message from the string. 
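// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): a stripped-down version of the
// context/cause builder that ExternalError and OrErr implement above. The type
// and messages here are invented stand-ins; only the shape (construct, then
// with_context, then with_cause, then Display as a chain) mirrors the real code.
use std::{error::Error, fmt};

#[derive(Debug, Default)]
struct SketchError {
    context: Option<String>,
    cause: Option<Box<dyn Error + Send + Sync>>,
}

impl SketchError {
    fn with_context(mut self, message: impl Into<String>) -> Self {
        self.context = Some(message.into());
        self
    }

    fn with_cause(mut self, cause: Box<dyn Error + Send + Sync>) -> Self {
        self.cause = Some(cause);
        self
    }
}

impl fmt::Display for SketchError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.context.as_deref().unwrap_or("sketch error"))?;
        if let Some(cause) = &self.cause {
            write!(f, " caused by: {cause}")?;
        }
        Ok(())
    }
}

impl Error for SketchError {}

fn main() {
    let io_err = std::io::Error::new(std::io::ErrorKind::NotFound, "config.yaml missing");
    let err = SketchError::default()
        .with_context("failed to load configuration")
        .with_cause(Box::new(io_err));
    println!("{err}");
}
// ---------------------------------------------------------------------------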
+ /// from returns the message from the string. fn from(s: &'static str) -> Self { Message(Cow::Borrowed(s)) } } -// From for Message implements the conversion from String to Message. +/// From for Message implements the conversion from String to Message. impl From for Message { - // from returns the message from the string. + /// from returns the message from the string. fn from(s: String) -> Self { Message(Cow::Owned(s)) } } -// Message implements the message for the error. +/// Message implements the message for the error. impl Message { - // as_str returns the string of the message. + /// as_str returns the string of the message. pub fn as_str(&self) -> &str { &self.0 } diff --git a/dragonfly-client-core/src/error/mod.rs b/dragonfly-client-core/src/error/mod.rs index 30a838ec..65b1f732 100644 --- a/dragonfly-client-core/src/error/mod.rs +++ b/dragonfly-client-core/src/error/mod.rs @@ -23,181 +23,181 @@ pub use errors::ExternalError; pub use errors::OrErr; pub use errors::{BackendError, DownloadFromRemotePeerFailed}; -// DFError is the error for dragonfly. +/// DFError is the error for dragonfly. #[derive(thiserror::Error, Debug)] pub enum DFError { - // IO is the error for IO operation. + /// IO is the error for IO operation. #[error(transparent)] IO(#[from] std::io::Error), - // MpscSend is the error for send. + /// MpscSend is the error for send. #[error("mpsc send: {0}")] MpscSend(String), - // SendTimeout is the error for send timeout. + /// SendTimeout is the error for send timeout. #[error("send timeout")] SendTimeout, - // HashRing is the error for hashring. + /// HashRing is the error for hashring. #[error{"hashring {0} is failed"}] HashRing(String), - // HostNotFound is the error when the host is not found. + /// HostNotFound is the error when the host is not found. #[error{"host {0} not found"}] HostNotFound(String), - // TaskNotFound is the error when the task is not found. + /// TaskNotFound is the error when the task is not found. #[error{"task {0} not found"}] TaskNotFound(String), - // PieceNotFound is the error when the piece is not found. + /// PieceNotFound is the error when the piece is not found. #[error{"piece {0} not found"}] PieceNotFound(String), - // PieceStateIsFailed is the error when the piece state is failed. + /// PieceStateIsFailed is the error when the piece state is failed. #[error{"piece {0} state is failed"}] PieceStateIsFailed(String), - // WaitForPieceFinishedTimeout is the error when the wait for piece finished timeout. + /// WaitForPieceFinishedTimeout is the error when the wait for piece finished timeout. #[error{"wait for piece {0} finished timeout"}] WaitForPieceFinishedTimeout(String), - // AvailableManagerNotFound is the error when the available manager is not found. + /// AvailableManagerNotFound is the error when the available manager is not found. #[error{"available manager not found"}] AvailableManagerNotFound, - // AvailableSchedulersNotFound is the error when the available schedulers is not found. + /// AvailableSchedulersNotFound is the error when the available schedulers is not found. #[error{"available schedulers not found"}] AvailableSchedulersNotFound, - // DownloadFromRemotePeerFailed is the error when the download from remote peer is failed. + /// DownloadFromRemotePeerFailed is the error when the download from remote peer is failed. #[error(transparent)] DownloadFromRemotePeerFailed(DownloadFromRemotePeerFailed), - // ColumnFamilyNotFound is the error when the column family is not found. 
+ /// ColumnFamilyNotFound is the error when the column family is not found. #[error{"column family {0} not found"}] ColumnFamilyNotFound(String), - // InvalidStateTransition is the error when the state transition is invalid. + /// InvalidStateTransition is the error when the state transition is invalid. #[error{"can not transit from {0} to {1}"}] InvalidStateTransition(String, String), - // InvalidState is the error when the state is invalid. + /// InvalidState is the error when the state is invalid. #[error{"invalid state {0}"}] InvalidState(String), - // InvalidURI is the error when the uri is invalid. + /// InvalidURI is the error when the uri is invalid. #[error("invalid uri {0}")] InvalidURI(String), - // InvalidPeer is the error when the peer is invalid. + /// InvalidPeer is the error when the peer is invalid. #[error("invalid peer {0}")] InvalidPeer(String), - // SchedulerClientNotFound is the error when the scheduler client is not found. + /// SchedulerClientNotFound is the error when the scheduler client is not found. #[error{"scheduler client not found"}] SchedulerClientNotFound, - // UnexpectedResponse is the error when the response is unexpected. + /// UnexpectedResponse is the error when the response is unexpected. #[error{"unexpected response"}] UnexpectedResponse, - // DigestMismatch is the error when the digest is mismatch. + /// DigestMismatch is the error when the digest is mismatch. #[error{"digest mismatch expected: {0}, actual: {1}"}] DigestMismatch(String, String), - // ContentLengthMismatch is the error when the content length is mismatch. + /// ContentLengthMismatch is the error when the content length is mismatch. #[error("content length mismatch expected: {0}, actual: {1}")] ContentLengthMismatch(u64, u64), - // MaxScheduleCountExceeded is the error when the max schedule count is exceeded. + /// MaxScheduleCountExceeded is the error when the max schedule count is exceeded. #[error("max schedule count {0} exceeded")] MaxScheduleCountExceeded(u32), - // InvalidContentLength is the error when the content length is invalid. + /// InvalidContentLength is the error when the content length is invalid. #[error("invalid content length")] InvalidContentLength, - // InvalidPieceLength is the error when the piece length is invalid. + /// InvalidPieceLength is the error when the piece length is invalid. #[error("invalid piece length")] InvalidPieceLength, - // InvalidParameter is the error when the parameter is invalid. + /// InvalidParameter is the error when the parameter is invalid. #[error("invalid parameter")] InvalidParameter, #[error(transparent)] Utf8(#[from] std::str::Utf8Error), - // Unknown is the error when the error is unknown. + /// Unknown is the error when the error is unknown. #[error("unknown {0}")] Unknown(String), - // Unimplemented is the error when the feature is not implemented. + /// Unimplemented is the error when the feature is not implemented. #[error{"unimplemented"}] Unimplemented, - // EmptyHTTPRangeError is the error when the range fallback error is empty. + /// EmptyHTTPRangeError is the error when the range fallback error is empty. #[error{"RangeUnsatisfiable: Failed to parse range fallback error, please file an issue"}] EmptyHTTPRangeError, - // TonicStatus is the error for tonic status. + /// TonicStatus is the error for tonic status. #[error(transparent)] TonicStatus(#[from] tonic::Status), - // TonicStreamElapsed is the error for tonic stream elapsed. + /// TonicStreamElapsed is the error for tonic stream elapsed. 
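// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the thiserror pattern the
// DFError variants above rely on. The variant names and messages below are
// invented for illustration; the real set is defined in this module.
use thiserror::Error;

#[derive(Debug, Error)]
enum SketchError {
    // A formatted message that interpolates the variant's field.
    #[error("task {0} not found")]
    TaskNotFound(String),

    // Delegates Display and source() to the wrapped error and derives
    // From<std::io::Error> via #[from].
    #[error(transparent)]
    IO(#[from] std::io::Error),
}

fn main() {
    let err = SketchError::TaskNotFound("example-task".to_string());
    println!("{err}"); // task example-task not found

    let io: SketchError = std::io::Error::new(std::io::ErrorKind::Other, "disk full").into();
    println!("{io}"); // disk full
}
// ---------------------------------------------------------------------------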
#[error(transparent)] TokioStreamElapsed(#[from] tokio_stream::Elapsed), - // ReqwestError is the error for reqwest. + /// ReqwestError is the error for reqwest. #[error(transparent)] ReqwesError(#[from] reqwest::Error), - // OpenDALError is the error for opendal. + /// OpenDALError is the error for opendal. #[error(transparent)] OpenDALError(#[from] opendal::Error), - // HyperError is the error for hyper. + /// HyperError is the error for hyper. #[error(transparent)] HyperError(#[from] hyper::Error), - // BackendError is the error for backend. + /// BackendError is the error for backend. #[error(transparent)] BackendError(BackendError), - // HyperUtilClientLegacyError is the error for hyper util client legacy. + /// HyperUtilClientLegacyError is the error for hyper util client legacy. #[error(transparent)] HyperUtilClientLegacyError(#[from] hyper_util::client::legacy::Error), - // ExternalError is the error for external error. + /// ExternalError is the error for external error. #[error(transparent)] ExternalError(#[from] ExternalError), - // MaxDownloadFilesExceeded is the error for max download files exceeded. + /// MaxDownloadFilesExceeded is the error for max download files exceeded. #[error("max number of files to download exceeded: {0}")] MaxDownloadFilesExceeded(usize), - // Unsupported is the error for unsupported. + /// Unsupported is the error for unsupported. #[error("unsupported {0}")] Unsupported(String), - // TokioJoinError is the error for tokio join. + /// TokioJoinError is the error for tokio join. #[error(transparent)] TokioJoinError(tokio::task::JoinError), - // ValidationError is the error for validate. + /// ValidationError is the error for validate. #[error("validate failed: {0}")] ValidationError(String), } -// SendError is the error for send. +/// SendError is the error for send. impl From> for DFError { fn from(e: tokio::sync::mpsc::error::SendError) -> Self { Self::MpscSend(e.to_string()) } } -// SendTimeoutError is the error for send timeout. +/// SendTimeoutError is the error for send timeout. impl From> for DFError { fn from(err: tokio::sync::mpsc::error::SendTimeoutError) -> Self { match err { diff --git a/dragonfly-client-init/src/bin/main.rs b/dragonfly-client-init/src/bin/main.rs index 20ad1233..49803369 100644 --- a/dragonfly-client-init/src/bin/main.rs +++ b/dragonfly-client-init/src/bin/main.rs @@ -92,6 +92,7 @@ async fn main() -> Result<(), anyhow::Error> { error!("failed to load config: {}", err); err })?; + // Handle features of the container runtime. let container_runtime = container_runtime::ContainerRuntime::new(&config); container_runtime.run().await.map_err(|err| { diff --git a/dragonfly-client-init/src/container_runtime/containerd.rs b/dragonfly-client-init/src/container_runtime/containerd.rs index c824a2ee..a2137761 100644 --- a/dragonfly-client-init/src/container_runtime/containerd.rs +++ b/dragonfly-client-init/src/container_runtime/containerd.rs @@ -25,20 +25,20 @@ use tokio::{self, fs}; use toml_edit::{value, Array, DocumentMut, Item, Table, Value}; use tracing::{info, instrument}; -// Containerd represents the containerd runtime manager. +/// Containerd represents the containerd runtime manager. #[derive(Debug, Clone)] pub struct Containerd { - // config is the configuration for initializing - // runtime environment for the dfdaemon. + /// config is the configuration for initializing + /// runtime environment for the dfdaemon. config: dfinit::Containerd, - // proxy_config is the configuration for the dfdaemon's proxy server. 
+ /// proxy_config is the configuration for the dfdaemon's proxy server. proxy_config: dfinit::Proxy, } -// Containerd implements the containerd runtime manager. +/// Containerd implements the containerd runtime manager. impl Containerd { - // new creates a new containerd runtime manager. + /// new creates a new containerd runtime manager. #[instrument(skip_all)] pub fn new(config: dfinit::Containerd, proxy_config: dfinit::Proxy) -> Self { Self { @@ -47,8 +47,8 @@ impl Containerd { } } - // run runs the containerd runtime to initialize - // runtime environment for the dfdaemon. + /// run runs the containerd runtime to initialize + /// runtime environment for the dfdaemon. #[instrument(skip_all)] pub async fn run(&self) -> Result<()> { let content = fs::read_to_string(&self.config.config_path).await?; @@ -114,8 +114,8 @@ impl Containerd { Ok(()) } - // add_registries adds registries to the containerd configuration, when containerd supports - // config_path mode and config_path is not empty. + /// add_registries adds registries to the containerd configuration, when containerd supports + /// config_path mode and config_path is not empty. #[instrument(skip_all)] pub async fn add_registries( &self, diff --git a/dragonfly-client-init/src/container_runtime/crio.rs b/dragonfly-client-init/src/container_runtime/crio.rs index b6bc448e..15276ccb 100644 --- a/dragonfly-client-init/src/container_runtime/crio.rs +++ b/dragonfly-client-init/src/container_runtime/crio.rs @@ -24,20 +24,20 @@ use toml_edit::{value, Array, ArrayOfTables, Item, Table, Value}; use tracing::{info, instrument}; use url::Url; -// CRIO represents the cri-o runtime manager. +/// CRIO represents the cri-o runtime manager. #[derive(Debug, Clone)] pub struct CRIO { - // config is the configuration for initializing - // runtime environment for the dfdaemon. + /// config is the configuration for initializing + /// runtime environment for the dfdaemon. config: dfinit::CRIO, - // proxy_config is the configuration for the dfdaemon's proxy server. + /// proxy_config is the configuration for the dfdaemon's proxy server. proxy_config: dfinit::Proxy, } -// CRIO implements the cri-o runtime manager. +/// CRIO implements the cri-o runtime manager. impl CRIO { - // new creates a new cri-o runtime manager. + /// new creates a new cri-o runtime manager. #[instrument(skip_all)] pub fn new(config: dfinit::CRIO, proxy_config: dfinit::Proxy) -> Self { Self { @@ -46,8 +46,8 @@ impl CRIO { } } - // run runs the cri-o runtime to initialize - // runtime environment for the dfdaemon. + /// run runs the cri-o runtime to initialize + /// runtime environment for the dfdaemon. #[instrument(skip_all)] pub async fn run(&self) -> Result<()> { let mut registries_config_table = toml_edit::DocumentMut::new(); diff --git a/dragonfly-client-init/src/container_runtime/docker.rs b/dragonfly-client-init/src/container_runtime/docker.rs index 9d67bca4..2c2a884d 100644 --- a/dragonfly-client-init/src/container_runtime/docker.rs +++ b/dragonfly-client-init/src/container_runtime/docker.rs @@ -18,20 +18,20 @@ use dragonfly_client_config::dfinit; use dragonfly_client_core::{Error, Result}; use tracing::{info, instrument}; -// Docker represents the docker runtime manager. +/// Docker represents the docker runtime manager. #[derive(Debug, Clone)] pub struct Docker { - // config is the configuration for initializing - // runtime environment for the dfdaemon. + /// config is the configuration for initializing + /// runtime environment for the dfdaemon. 
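// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the read-modify-write flow the
// containerd and cri-o managers above perform, using toml_edit so comments and
// formatting in the existing file survive. The table path and the certs.d
// directory follow the containerd docs linked above, but treat them as
// assumptions here; the real add_registries logic lives in containerd.rs.
use toml_edit::{value, DocumentMut};

fn main() {
    let original = r#"
version = 2

[plugins."io.containerd.grpc.v1.cri".registry]
config_path = ""
"#;

    let mut doc: DocumentMut = original.parse().expect("valid containerd config");
    doc["plugins"]["io.containerd.grpc.v1.cri"]["registry"]["config_path"] =
        value("/etc/containerd/certs.d");

    // The real manager writes this string back to config.config_path.
    println!("{doc}");
}
// ---------------------------------------------------------------------------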
config: dfinit::Docker, - // proxy_config is the configuration for the dfdaemon's proxy server. + /// proxy_config is the configuration for the dfdaemon's proxy server. proxy_config: dfinit::Proxy, } -// Docker implements the docker runtime manager. +/// Docker implements the docker runtime manager. impl Docker { - // new creates a new docker runtime manager. + /// new creates a new docker runtime manager. #[instrument(skip_all)] pub fn new(config: dfinit::Docker, proxy_config: dfinit::Proxy) -> Self { Self { @@ -40,10 +40,10 @@ impl Docker { } } - // TODO: Implement the run method for Docker. - // - // run runs the docker runtime to initialize - // runtime environment for the dfdaemon. + /// TODO: Implement the run method for Docker. + /// + /// run runs the docker runtime to initialize + /// runtime environment for the dfdaemon. #[instrument(skip_all)] pub async fn run(&self) -> Result<()> { info!( diff --git a/dragonfly-client-init/src/container_runtime/mod.rs b/dragonfly-client-init/src/container_runtime/mod.rs index 09f1c5ec..d81cad68 100644 --- a/dragonfly-client-init/src/container_runtime/mod.rs +++ b/dragonfly-client-init/src/container_runtime/mod.rs @@ -22,7 +22,7 @@ pub mod containerd; pub mod crio; pub mod docker; -// Engine represents config of the container runtime engine. +/// Engine represents config of the container runtime engine. #[derive(Debug, Clone)] enum Engine { Containerd(containerd::Containerd), @@ -30,14 +30,14 @@ enum Engine { Crio(crio::CRIO), } -// ContainerRuntime represents the container runtime manager. +/// ContainerRuntime represents the container runtime manager. pub struct ContainerRuntime { engine: Option, } -// ContainerRuntime implements the container runtime manager. +/// ContainerRuntime implements the container runtime manager. impl ContainerRuntime { - // new creates a new container runtime manager. + /// new creates a new container runtime manager. #[instrument(skip_all)] pub fn new(config: &Config) -> Self { Self { @@ -45,7 +45,7 @@ impl ContainerRuntime { } } - // run runs the container runtime to initialize runtime environment for the dfdaemon. + /// run runs the container runtime to initialize runtime environment for the dfdaemon. #[instrument(skip_all)] pub async fn run(&self) -> Result<()> { // If containerd is enabled, override the default containerd @@ -58,7 +58,7 @@ impl ContainerRuntime { } } - // get_engine returns the runtime engine from the config. + /// get_engine returns the runtime engine from the config. #[instrument(skip_all)] fn get_engine(config: &Config) -> Option { if let Some(ref container_runtime_config) = config.container_runtime.config { diff --git a/dragonfly-client-storage/src/content.rs b/dragonfly-client-storage/src/content.rs index a72987c6..e6a39f78 100644 --- a/dragonfly-client-storage/src/content.rs +++ b/dragonfly-client-storage/src/content.rs @@ -25,39 +25,39 @@ use tokio::io::{self, AsyncRead, AsyncReadExt, AsyncSeekExt, BufReader, SeekFrom use tokio_util::io::InspectReader; use tracing::{error, info, instrument, warn}; -// DEFAULT_DIR_NAME is the default directory name to store content. +/// DEFAULT_DIR_NAME is the default directory name to store content. const DEFAULT_DIR_NAME: &str = "content"; -// Content is the content of a piece. +/// Content is the content of a piece. pub struct Content { - // config is the configuration of the dfdaemon. + /// config is the configuration of the dfdaemon. config: Arc, - // dir is the directory to store content. + /// dir is the directory to store content. 
dir: PathBuf, } -// WritePieceResponse is the response of writing a piece. +/// WritePieceResponse is the response of writing a piece. pub struct WritePieceResponse { - // length is the length of the piece. + /// length is the length of the piece. pub length: u64, - // hash is the hash of the piece. + /// hash is the hash of the piece. pub hash: String, } -// WriteCacheTaskResponse is the response of writing a cache task. +/// WriteCacheTaskResponse is the response of writing a cache task. pub struct WriteCacheTaskResponse { - // length is the length of the cache task. + /// length is the length of the cache task. pub length: u64, - // hash is the hash of the cache task. + /// hash is the hash of the cache task. pub hash: String, } -// Content implements the content storage. +/// Content implements the content storage. impl Content { - // new returns a new content. + /// new returns a new content. #[instrument(skip_all)] pub async fn new(config: Arc, dir: &Path) -> Result { let dir = dir.join(DEFAULT_DIR_NAME); @@ -75,7 +75,7 @@ impl Content { Ok(Content { config, dir }) } - // hard_link_or_copy_task hard links or copies the task content to the destination. + /// hard_link_or_copy_task hard links or copies the task content to the destination. #[instrument(skip_all)] pub async fn hard_link_or_copy_task( &self, @@ -144,14 +144,14 @@ impl Content { Ok(()) } - // hard_link_task hard links the task content. + /// hard_link_task hard links the task content. #[instrument(skip_all)] async fn hard_link_task(&self, task_id: &str, link: &Path) -> Result<()> { fs::hard_link(self.dir.join(task_id), link).await?; Ok(()) } - // copy_task copies the task content to the destination. + /// copy_task copies the task content to the destination. #[instrument(skip_all)] async fn copy_task(&self, task_id: &str, to: &Path) -> Result<()> { // Ensure the parent directory of the destination exists. @@ -168,7 +168,7 @@ impl Content { Ok(()) } - // copy_task_by_range copies the task content to the destination by range. + /// copy_task_by_range copies the task content to the destination by range. #[instrument(skip_all)] async fn copy_task_by_range(&self, task_id: &str, to: &Path, range: Range) -> Result<()> { // Ensure the parent directory of the destination exists. @@ -200,7 +200,7 @@ impl Content { Ok(()) } - // read_task reads the task content by range. + /// read_task reads the task content by range. #[instrument(skip_all)] pub async fn read_task_by_range(&self, task_id: &str, range: Range) -> Result { let task_path = self.dir.join(task_id); @@ -221,7 +221,7 @@ impl Content { Ok(range_reader) } - // delete_task deletes the task content. + /// delete_task deletes the task content. #[instrument(skip_all)] pub async fn delete_task(&self, task_id: &str) -> Result<()> { info!("delete task content: {}", task_id); @@ -233,7 +233,7 @@ impl Content { Ok(()) } - // read_piece reads the piece from the content. + /// read_piece reads the piece from the content. #[instrument(skip_all)] pub async fn read_piece( &self, @@ -274,7 +274,7 @@ impl Content { Ok(f.take(length)) } - // write_piece writes the piece to the content. + /// write_piece writes the piece to the content. #[instrument(skip_all)] pub async fn write_piece( &self, @@ -326,7 +326,7 @@ impl Content { }) } - // hard_link_or_copy_cache_task hard links or copies the task content to the destination. + /// hard_link_or_copy_cache_task hard links or copies the task content to the destination. 
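// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the hard-link-with-copy-fallback
// control flow that hard_link_or_copy_task above suggests. File names are
// placeholders, and the real method also handles ranges and resolves paths
// inside the storage directory.
use std::path::Path;
use tokio::fs;

async fn hard_link_or_copy(from: &Path, to: &Path) -> std::io::Result<()> {
    // Hard linking avoids copying bytes, but fails across filesystems (or if the
    // target already exists), so fall back to a plain copy.
    if fs::hard_link(from, to).await.is_err() {
        fs::copy(from, to).await?;
    }
    Ok(())
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    fs::write("piece-src.bin", b"piece content").await?;
    hard_link_or_copy(Path::new("piece-src.bin"), Path::new("piece-dst.bin")).await?;
    Ok(())
}
// ---------------------------------------------------------------------------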
#[instrument(skip_all)] pub async fn hard_link_or_copy_cache_task( &self, @@ -379,7 +379,7 @@ impl Content { Ok(()) } - // copy_cache_task copies the cache task content to the destination. + /// copy_cache_task copies the cache task content to the destination. #[instrument(skip_all)] pub async fn write_cache_task( &self, @@ -426,7 +426,7 @@ impl Content { }) } - // delete_task deletes the cache task content. + /// delete_task deletes the cache task content. #[instrument(skip_all)] pub async fn delete_cache_task(&self, cache_task_id: &str) -> Result<()> { info!("delete cache task content: {}", cache_task_id); diff --git a/dragonfly-client-storage/src/lib.rs b/dragonfly-client-storage/src/lib.rs index dc0b9fb5..ab145db4 100644 --- a/dragonfly-client-storage/src/lib.rs +++ b/dragonfly-client-storage/src/lib.rs @@ -30,24 +30,24 @@ pub mod content; pub mod metadata; pub mod storage_engine; -// DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL is the default interval for waiting for the piece to be finished. +/// DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL is the default interval for waiting for the piece to be finished. pub const DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL: Duration = Duration::from_millis(500); -// Storage is the storage of the task. +/// Storage is the storage of the task. pub struct Storage { - // config is the configuration of the dfdaemon. + /// config is the configuration of the dfdaemon. config: Arc, - // metadata implements the metadata storage. + /// metadata implements the metadata storage. metadata: metadata::Metadata, - // content implements the content storage. + /// content implements the content storage. content: content::Content, } -// Storage implements the storage. +/// Storage implements the storage. impl Storage { - // new returns a new storage. + /// new returns a new storage. #[instrument(skip_all)] pub async fn new(config: Arc, dir: &Path, log_dir: PathBuf) -> Result { let metadata = metadata::Metadata::new(config.clone(), dir, &log_dir)?; @@ -59,7 +59,7 @@ impl Storage { }) } - // hard_link_or_copy_task hard links or copies the task content to the destination. + /// hard_link_or_copy_task hard links or copies the task content to the destination. #[instrument(skip_all)] pub async fn hard_link_or_copy_task( &self, @@ -70,7 +70,7 @@ impl Storage { self.content.hard_link_or_copy_task(task, to, range).await } - // read_task_by_range returns the reader of the task by range. + /// read_task_by_range returns the reader of the task by range. #[instrument(skip_all)] pub async fn read_task_by_range( &self, @@ -80,7 +80,7 @@ impl Storage { self.content.read_task_by_range(task_id, range).await } - // download_task_started updates the metadata of the task when the task downloads started. + /// download_task_started updates the metadata of the task when the task downloads started. #[instrument(skip_all)] pub fn download_task_started( &self, @@ -93,49 +93,49 @@ impl Storage { .download_task_started(id, piece_length, content_length, response_header) } - // download_task_finished updates the metadata of the task when the task downloads finished. + /// download_task_finished updates the metadata of the task when the task downloads finished. #[instrument(skip_all)] pub fn download_task_finished(&self, id: &str) -> Result { self.metadata.download_task_finished(id) } - // download_task_failed updates the metadata of the task when the task downloads failed. + /// download_task_failed updates the metadata of the task when the task downloads failed. 
#[instrument(skip_all)] pub async fn download_task_failed(&self, id: &str) -> Result { self.metadata.download_task_failed(id) } - // prefetch_task_started updates the metadata of the task when the task prefetches started. + /// prefetch_task_started updates the metadata of the task when the task prefetches started. #[instrument(skip_all)] pub async fn prefetch_task_started(&self, id: &str) -> Result { self.metadata.prefetch_task_started(id) } - // prefetch_task_failed updates the metadata of the task when the task prefetches failed. + /// prefetch_task_failed updates the metadata of the task when the task prefetches failed. #[instrument(skip_all)] pub async fn prefetch_task_failed(&self, id: &str) -> Result { self.metadata.prefetch_task_failed(id) } - // upload_task_finished updates the metadata of the task when task uploads finished. + /// upload_task_finished updates the metadata of the task when task uploads finished. #[instrument(skip_all)] pub fn upload_task_finished(&self, id: &str) -> Result { self.metadata.upload_task_finished(id) } - // get_task returns the task metadata. + /// get_task returns the task metadata. #[instrument(skip_all)] pub fn get_task(&self, id: &str) -> Result> { self.metadata.get_task(id) } - // get_tasks returns the task metadatas. + /// get_tasks returns the task metadatas. #[instrument(skip_all)] pub fn get_tasks(&self) -> Result> { self.metadata.get_tasks() } - // delete_task deletes the task metadatas, task content and piece metadatas. + /// delete_task deletes the task metadatas, task content and piece metadatas. #[instrument(skip_all)] pub async fn delete_task(&self, id: &str) { self.metadata @@ -151,7 +151,7 @@ impl Storage { }); } - // hard_link_or_copy_cache_task hard links or copies the cache task content to the destination. + /// hard_link_or_copy_cache_task hard links or copies the cache task content to the destination. #[instrument(skip_all)] pub async fn hard_link_or_copy_cache_task( &self, @@ -161,7 +161,7 @@ impl Storage { self.content.hard_link_or_copy_cache_task(task, to).await } - // create_persistent_cache_task creates a new persistent cache task. + /// create_persistent_cache_task creates a new persistent cache task. #[instrument(skip_all)] pub async fn create_persistent_cache_task( &self, @@ -190,7 +190,7 @@ impl Storage { ) } - // download_cache_task_started updates the metadata of the cache task when the cache task downloads started. + /// download_cache_task_started updates the metadata of the cache task when the cache task downloads started. #[instrument(skip_all)] pub fn download_cache_task_started( &self, @@ -204,37 +204,37 @@ impl Storage { .download_cache_task_started(id, ttl, persistent, piece_length, content_length) } - // download_cache_task_finished updates the metadata of the cache task when the cache task downloads finished. + /// download_cache_task_finished updates the metadata of the cache task when the cache task downloads finished. #[instrument(skip_all)] pub fn download_cache_task_finished(&self, id: &str) -> Result { self.metadata.download_cache_task_finished(id) } - // download_cache_task_failed updates the metadata of the cache task when the cache task downloads failed. + /// download_cache_task_failed updates the metadata of the cache task when the cache task downloads failed. 
#[instrument(skip_all)] pub async fn download_cache_task_failed(&self, id: &str) -> Result { self.metadata.download_cache_task_failed(id) } - // upload_cache_task_finished updates the metadata of the cahce task when cache task uploads finished. + /// upload_cache_task_finished updates the metadata of the cahce task when cache task uploads finished. #[instrument(skip_all)] pub fn upload_cache_task_finished(&self, id: &str) -> Result { self.metadata.upload_cache_task_finished(id) } - // get_cache_task returns the cache task metadata. + /// get_cache_task returns the cache task metadata. #[instrument(skip_all)] pub fn get_cache_task(&self, id: &str) -> Result> { self.metadata.get_cache_task(id) } - // get_tasks returns the task metadatas. + /// get_tasks returns the task metadatas. #[instrument(skip_all)] pub fn get_cache_tasks(&self) -> Result> { self.metadata.get_cache_tasks() } - // delete_cache_task deletes the cache task metadatas, cache task content and piece metadatas. + /// delete_cache_task deletes the cache task metadatas, cache task content and piece metadatas. #[instrument(skip_all)] pub async fn delete_cache_task(&self, id: &str) { self.metadata.delete_cache_task(id).unwrap_or_else(|err| { @@ -249,8 +249,8 @@ impl Storage { }); } - // download_piece_started updates the metadata of the piece and writes - // the data of piece to file when the piece downloads started. + /// download_piece_started updates the metadata of the piece and writes + /// the data of piece to file when the piece downloads started. #[instrument(skip_all)] pub async fn download_piece_started( &self, @@ -265,7 +265,7 @@ impl Storage { } } - // download_piece_from_source_finished is used for downloading piece from source. + /// download_piece_from_source_finished is used for downloading piece from source. #[instrument(skip_all)] pub async fn download_piece_from_source_finished( &self, @@ -288,7 +288,7 @@ impl Storage { ) } - // download_piece_from_remote_peer_finished is used for downloading piece from remote peer. + /// download_piece_from_remote_peer_finished is used for downloading piece from remote peer. #[instrument(skip_all)] pub async fn download_piece_from_remote_peer_finished( &self, @@ -321,14 +321,14 @@ impl Storage { ) } - // download_piece_failed updates the metadata of the piece when the piece downloads failed. + /// download_piece_failed updates the metadata of the piece when the piece downloads failed. #[instrument(skip_all)] pub fn download_piece_failed(&self, task_id: &str, number: u32) -> Result<()> { self.metadata.download_piece_failed(task_id, number) } - // upload_piece updates the metadata of the piece and - // returns the data of the piece. + /// upload_piece updates the metadata of the piece and + /// returns the data of the piece. #[instrument(skip_all)] pub async fn upload_piece( &self, @@ -394,24 +394,24 @@ impl Storage { } } - // get_piece returns the piece metadata. + /// get_piece returns the piece metadata. #[instrument(skip_all)] pub fn get_piece(&self, task_id: &str, number: u32) -> Result> { self.metadata.get_piece(task_id, number) } - // get_pieces returns the piece metadatas. + /// get_pieces returns the piece metadatas. pub fn get_pieces(&self, task_id: &str) -> Result> { self.metadata.get_pieces(task_id) } - // piece_id returns the piece id. + /// piece_id returns the piece id. #[instrument(skip_all)] pub fn piece_id(&self, task_id: &str, number: u32) -> String { self.metadata.piece_id(task_id, number) } - // wait_for_piece_finished waits for the piece to be finished. 
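// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the poll-until-finished loop
// that wait_for_piece_finished (shown just below) implements, paced by the
// 500ms DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL above. The overall timeout
// and the is_finished closure are placeholders; the real method checks the
// piece metadata on each tick.
use std::time::Duration;
use tokio::time::{interval, timeout};

async fn wait_until(mut is_finished: impl FnMut() -> bool) -> Result<(), &'static str> {
    let mut ticker = interval(Duration::from_millis(500));
    let wait = async {
        loop {
            ticker.tick().await;
            if is_finished() {
                return;
            }
        }
    };

    timeout(Duration::from_secs(30), wait)
        .await
        .map_err(|_| "wait for piece finished timeout")
}

#[tokio::main]
async fn main() {
    let mut polls = 0;
    let result = wait_until(|| {
        polls += 1;
        polls >= 3
    })
    .await;
    assert!(result.is_ok());
}
// ---------------------------------------------------------------------------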
+ /// wait_for_piece_finished waits for the piece to be finished. #[instrument(skip_all)] async fn wait_for_piece_finished(&self, task_id: &str, number: u32) -> Result { // Initialize the timeout of piece. diff --git a/dragonfly-client-storage/src/metadata.rs b/dragonfly-client-storage/src/metadata.rs index e99626a1..5656d532 100644 --- a/dragonfly-client-storage/src/metadata.rs +++ b/dragonfly-client-storage/src/metadata.rs @@ -30,83 +30,83 @@ use tracing::{error, info, instrument}; use crate::storage_engine::{rocksdb::RocksdbStorageEngine, DatabaseObject, StorageEngineOwned}; -// Task is the metadata of the task. +/// Task is the metadata of the task. #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] pub struct Task { - // id is the task id. + /// id is the task id. pub id: String, - // piece_length is the length of the piece. + /// piece_length is the length of the piece. pub piece_length: Option, - // content_length is the length of the content. + /// content_length is the length of the content. pub content_length: Option, - // header is the header of the response. + /// header is the header of the response. pub response_header: HashMap, - // uploading_count is the count of the task being uploaded by other peers. + /// uploading_count is the count of the task being uploaded by other peers. pub uploading_count: u64, - // uploaded_count is the count of the task has been uploaded by other peers. + /// uploaded_count is the count of the task has been uploaded by other peers. pub uploaded_count: u64, - // updated_at is the time when the task metadata is updated. If the task is downloaded - // by other peers, it will also update updated_at. + /// updated_at is the time when the task metadata is updated. If the task is downloaded + /// by other peers, it will also update updated_at. pub updated_at: NaiveDateTime, - // created_at is the time when the task metadata is created. + /// created_at is the time when the task metadata is created. pub created_at: NaiveDateTime, - // prefetched_at is the time when the task prefetched. + /// prefetched_at is the time when the task prefetched. pub prefetched_at: Option, - // failed_at is the time when the task downloads failed. + /// failed_at is the time when the task downloads failed. pub failed_at: Option, - // finished_at is the time when the task downloads finished. + /// finished_at is the time when the task downloads finished. pub finished_at: Option, } -// Task implements the task database object. +/// Task implements the task database object. impl DatabaseObject for Task { - // NAMESPACE is the namespace of [Task] objects. + /// NAMESPACE is the namespace of [Task] objects. const NAMESPACE: &'static str = "task"; } -// Task implements the task metadata. +/// Task implements the task metadata. impl Task { - // is_started returns whether the task downloads started. + /// is_started returns whether the task downloads started. pub fn is_started(&self) -> bool { self.finished_at.is_none() } - // is_downloading returns whether the task is downloading. + /// is_downloading returns whether the task is downloading. pub fn is_uploading(&self) -> bool { self.uploading_count > 0 } - // is_expired returns whether the task is expired. + /// is_expired returns whether the task is expired. pub fn is_expired(&self, ttl: Duration) -> bool { self.updated_at + ttl < Utc::now().naive_utc() } - // is_prefetched returns whether the task is prefetched. + /// is_prefetched returns whether the task is prefetched. 
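// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the TTL comparison behind
// Task::is_expired above, written against chrono. chrono::Duration is used
// here for simplicity; the crate's ttl value may be a std::time::Duration,
// but the `updated_at + ttl < now` shape is the same.
use chrono::{Duration, NaiveDateTime, Utc};

fn is_expired(updated_at: NaiveDateTime, ttl: Duration) -> bool {
    updated_at + ttl < Utc::now().naive_utc()
}

fn main() {
    let two_hours_ago = Utc::now().naive_utc() - Duration::hours(2);
    assert!(is_expired(two_hours_ago, Duration::hours(1)));
    assert!(!is_expired(two_hours_ago, Duration::hours(3)));
}
// ---------------------------------------------------------------------------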
pub fn is_prefetched(&self) -> bool { self.prefetched_at.is_some() } - // is_failed returns whether the task downloads failed. + /// is_failed returns whether the task downloads failed. pub fn is_failed(&self) -> bool { self.failed_at.is_some() } - // is_finished returns whether the task downloads finished. + /// is_finished returns whether the task downloads finished. pub fn is_finished(&self) -> bool { self.finished_at.is_some() } - // is_empty returns whether the task is empty. + /// is_empty returns whether the task is empty. pub fn is_empty(&self) -> bool { if let Some(content_length) = self.content_length() { if content_length == 0 { @@ -117,79 +117,79 @@ impl Task { false } - // piece_length returns the piece length of the task. + /// piece_length returns the piece length of the task. pub fn piece_length(&self) -> Option { self.piece_length } - // content_length returns the content length of the task. + /// content_length returns the content length of the task. pub fn content_length(&self) -> Option { self.content_length } } -// CacheTask is the metadata of the cache task. +/// CacheTask is the metadata of the cache task. #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] pub struct CacheTask { - // id is the task id. + /// id is the task id. pub id: String, - // persistent represents whether the cache task is persistent. - // If the cache task is persistent, the cache peer will - // not be deleted when dfdamon runs garbage collection. + /// persistent represents whether the cache task is persistent. + /// If the cache task is persistent, the cache peer will + /// not be deleted when dfdaemon runs garbage collection. pub persistent: bool, - // ttl is the time to live of the cache task. + /// ttl is the time to live of the cache task. pub ttl: Duration, - // digests is the digests of the cache task. + /// digest is the digest of the cache task. pub digest: String, - // piece_length is the length of the piece. + /// piece_length is the length of the piece. pub piece_length: u64, - // content_length is the length of the content. + /// content_length is the length of the content. pub content_length: u64, - // uploading_count is the count of the task being uploaded by other peers. + /// uploading_count is the count of the task being uploaded by other peers. pub uploading_count: u64, - // uploaded_count is the count of the task has been uploaded by other peers. + /// uploaded_count is the count of the task has been uploaded by other peers. pub uploaded_count: u64, - // updated_at is the time when the task metadata is updated. If the task is downloaded - // by other peers, it will also update updated_at. + /// updated_at is the time when the task metadata is updated. If the task is downloaded + /// by other peers, it will also update updated_at. pub updated_at: NaiveDateTime, - // created_at is the time when the task metadata is created. + /// created_at is the time when the task metadata is created. pub created_at: NaiveDateTime, - // failed_at is the time when the task downloads failed. + /// failed_at is the time when the task downloads failed. pub failed_at: Option, - // finished_at is the time when the task downloads finished. + /// finished_at is the time when the task downloads finished. pub finished_at: Option, } -// CacheTask implements the cache task database object. +/// CacheTask implements the cache task database object. impl DatabaseObject for CacheTask { - // NAMESPACE is the namespace of [CacheTask] objects.
+ /// NAMESPACE is the namespace of [CacheTask] objects. const NAMESPACE: &'static str = "cache_task"; } -// CacheTask implements the cache task metadata. +/// CacheTask implements the cache task metadata. impl CacheTask { - // is_started returns whether the cache task downloads started. + /// is_started returns whether the cache task downloads started. pub fn is_started(&self) -> bool { self.finished_at.is_none() } - // is_downloading returns whether the cache task is downloading. + /// is_downloading returns whether the cache task is downloading. pub fn is_uploading(&self) -> bool { self.uploading_count > 0 } - // is_expired returns whether the cache task is expired. + /// is_expired returns whether the cache task is expired. pub fn is_expired(&self) -> bool { // When scheduler runs garbage collection, it will trigger dfdaemon to evict the cache task. // But sometimes the dfdaemon may not evict the cache task in time, so we select the ttl * 1.2 @@ -197,17 +197,17 @@ impl CacheTask { self.created_at + self.ttl * 2 < Utc::now().naive_utc() } - // is_failed returns whether the cache task downloads failed. + /// is_failed returns whether the cache task downloads failed. pub fn is_failed(&self) -> bool { self.failed_at.is_some() } - // is_finished returns whether the cache task downloads finished. + /// is_finished returns whether the cache task downloads finished. pub fn is_finished(&self) -> bool { self.finished_at.is_some() } - // is_empty returns whether the cache task is empty. + /// is_empty returns whether the cache task is empty. pub fn is_empty(&self) -> bool { if self.content_length == 0 { return true; @@ -216,76 +216,76 @@ impl CacheTask { false } - // is_persistent returns whether the cache task is persistent. + /// is_persistent returns whether the cache task is persistent. pub fn is_persistent(&self) -> bool { self.persistent } - // piece_length returns the piece length of the cache task. + /// piece_length returns the piece length of the cache task. pub fn piece_length(&self) -> u64 { self.piece_length } - // content_length returns the content length of the cache task. + /// content_length returns the content length of the cache task. pub fn content_length(&self) -> u64 { self.content_length } } -// Piece is the metadata of the piece. +/// Piece is the metadata of the piece. #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] pub struct Piece { - // number is the piece number. + /// number is the piece number. pub number: u32, - // offset is the offset of the piece in the task. + /// offset is the offset of the piece in the task. pub offset: u64, - // length is the length of the piece. + /// length is the length of the piece. pub length: u64, - // digest is the digest of the piece. + /// digest is the digest of the piece. pub digest: String, - // parent_id is the parent id of the piece. + /// parent_id is the parent id of the piece. pub parent_id: Option, - // uploading_count is the count of the piece being uploaded by other peers. + /// uploading_count is the count of the piece being uploaded by other peers. pub uploading_count: u64, - // uploaded_count is the count of the piece has been uploaded by other peers. + /// uploaded_count is the count of the piece has been uploaded by other peers. pub uploaded_count: u64, - // updated_at is the time when the piece metadata is updated. If the piece is downloaded - // by other peers, it will also update updated_at. + /// updated_at is the time when the piece metadata is updated. 
If the piece is downloaded + /// by other peers, it will also update updated_at. pub updated_at: NaiveDateTime, - // created_at is the time when the piece metadata is created. + /// created_at is the time when the piece metadata is created. pub created_at: NaiveDateTime, - // finished_at is the time when the piece downloads finished. + /// finished_at is the time when the piece downloads finished. pub finished_at: Option, } -// Piece implements the piece database object. +/// Piece implements the piece database object. impl DatabaseObject for Piece { - // NAMESPACE is the namespace of [Piece] objects. + /// NAMESPACE is the namespace of [Piece] objects. const NAMESPACE: &'static str = "piece"; } -// Piece implements the piece metadata. +/// Piece implements the piece metadata. impl Piece { - // is_started returns whether the piece downloads started. + /// is_started returns whether the piece downloads started. pub fn is_started(&self) -> bool { self.finished_at.is_none() } - // is_finished returns whether the piece downloads finished. + /// is_finished returns whether the piece downloads finished. pub fn is_finished(&self) -> bool { self.finished_at.is_some() } - // cost returns the cost of the piece downloaded. + /// cost returns the cost of the piece downloaded. pub fn cost(&self) -> Option { match self .finished_at @@ -302,7 +302,7 @@ impl Piece { } } - // prost_cost returns the prost cost of the piece downloaded. + /// prost_cost returns the prost cost of the piece downloaded. pub fn prost_cost(&self) -> Option { match self.cost() { Some(cost) => match prost_wkt_types::Duration::try_from(cost) { @@ -317,17 +317,17 @@ impl Piece { } } -// Metadata manages the metadata of [Task], [Piece] and [CacheTask]. +/// Metadata manages the metadata of [Task], [Piece] and [CacheTask]. pub struct Metadata where E: StorageEngineOwned, { - // db is the underlying storage engine instance. + /// db is the underlying storage engine instance. db: E, } impl Metadata { - // download_task_started updates the metadata of the task when the task downloads started. + /// download_task_started updates the metadata of the task when the task downloads started. #[instrument(skip_all)] pub fn download_task_started( &self, @@ -381,7 +381,7 @@ impl Metadata { Ok(task) } - // download_task_finished updates the metadata of the task when the task downloads finished. + /// download_task_finished updates the metadata of the task when the task downloads finished. #[instrument(skip_all)] pub fn download_task_finished(&self, id: &str) -> Result { let task = match self.db.get::(id.as_bytes())? { @@ -398,7 +398,7 @@ impl Metadata { Ok(task) } - // download_task_failed updates the metadata of the task when the task downloads failed. + /// download_task_failed updates the metadata of the task when the task downloads failed. #[instrument(skip_all)] pub fn download_task_failed(&self, id: &str) -> Result { let task = match self.db.get::(id.as_bytes())? { @@ -414,7 +414,7 @@ impl Metadata { Ok(task) } - // prefetch_task_started updates the metadata of the task when the task prefetch started. + /// prefetch_task_started updates the metadata of the task when the task prefetch started. #[instrument(skip_all)] pub fn prefetch_task_started(&self, id: &str) -> Result { let task = match self.db.get::(id.as_bytes())? { @@ -436,7 +436,7 @@ impl Metadata { Ok(task) } - // prefetch_task_failed updates the metadata of the task when the task prefetch failed. + /// prefetch_task_failed updates the metadata of the task when the task prefetch failed. 
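Piece::cost above reduces to the elapsed time between created_at and finished_at, None while the piece is still in flight; a simplified sketch of that calculation with chrono (the real method's exact return type and the conversion to a protobuf Duration via prost_cost are elided in this hunk):

    use chrono::{Duration, NaiveDateTime, Utc};

    /// Simplified Piece::cost: elapsed download time, None until the piece finishes.
    fn cost(created_at: NaiveDateTime, finished_at: Option<NaiveDateTime>) -> Option<Duration> {
        finished_at.map(|finished_at| finished_at.signed_duration_since(created_at))
    }

    fn main() {
        let created_at = Utc::now().naive_utc();
        let finished_at = created_at + Duration::milliseconds(350);
        assert_eq!(cost(created_at, Some(finished_at)), Some(Duration::milliseconds(350)));
        assert_eq!(cost(created_at, None), None);
    }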
#[instrument(skip_all)] pub fn prefetch_task_failed(&self, id: &str) -> Result { let task = match self.db.get::(id.as_bytes())? { @@ -453,7 +453,7 @@ impl Metadata { Ok(task) } - // upload_task_started updates the metadata of the task when task uploads started. + /// upload_task_started updates the metadata of the task when task uploads started. #[instrument(skip_all)] pub fn upload_task_started(&self, id: &str) -> Result { let task = match self.db.get::(id.as_bytes())? { @@ -469,7 +469,7 @@ impl Metadata { Ok(task) } - // upload_task_finished updates the metadata of the task when task uploads finished. + /// upload_task_finished updates the metadata of the task when task uploads finished. #[instrument(skip_all)] pub fn upload_task_finished(&self, id: &str) -> Result { let task = match self.db.get::(id.as_bytes())? { @@ -486,7 +486,7 @@ impl Metadata { Ok(task) } - // upload_task_failed updates the metadata of the task when the task uploads failed. + /// upload_task_failed updates the metadata of the task when the task uploads failed. #[instrument(skip_all)] pub fn upload_task_failed(&self, id: &str) -> Result { let task = match self.db.get::(id.as_bytes())? { @@ -502,13 +502,13 @@ impl Metadata { Ok(task) } - // get_task gets the task metadata. + /// get_task gets the task metadata. #[instrument(skip_all)] pub fn get_task(&self, id: &str) -> Result> { self.db.get(id.as_bytes()) } - // get_tasks gets the task metadatas. + /// get_tasks gets the task metadatas. #[instrument(skip_all)] pub fn get_tasks(&self) -> Result> { let tasks = self @@ -526,16 +526,16 @@ impl Metadata { .collect() } - // delete_task deletes the task metadata. + /// delete_task deletes the task metadata. #[instrument(skip_all)] pub fn delete_task(&self, id: &str) -> Result<()> { info!("delete task metadata {}", id); self.db.delete::(id.as_bytes()) } - // create_persistent_cache_task creates a new persistent cache task. - // If the cache task imports the content to the dfdaemon finished, - // the dfdaemon will create a persistent cache task metadata. + /// create_persistent_cache_task creates a new persistent cache task. + /// If the cache task imports the content to the dfdaemon finished, + /// the dfdaemon will create a persistent cache task metadata. #[instrument(skip_all)] pub fn create_persistent_cache_task( &self, @@ -562,9 +562,9 @@ impl Metadata { Ok(task) } - // download_cache_task_started updates the metadata of the cache task when - // the cache task downloads started. If the cache task downloaded by scheduler - // to create persistent cache task, the persistent should be set to true. + /// download_cache_task_started updates the metadata of the cache task when + /// the cache task downloads started. If the cache task downloaded by scheduler + /// to create persistent cache task, the persistent should be set to true. #[instrument(skip_all)] pub fn download_cache_task_started( &self, @@ -597,7 +597,7 @@ impl Metadata { Ok(task) } - // download_cache_task_finished updates the metadata of the cache task when the cache task downloads finished. + /// download_cache_task_finished updates the metadata of the cache task when the cache task downloads finished. #[instrument(skip_all)] pub fn download_cache_task_finished(&self, id: &str) -> Result { let task = match self.db.get::(id.as_bytes())? { @@ -619,7 +619,7 @@ impl Metadata { Ok(task) } - // download_cache_task_failed updates the metadata of the cache task when the cache task downloads failed. 
+ /// download_cache_task_failed updates the metadata of the cache task when the cache task downloads failed. #[instrument(skip_all)] pub fn download_cache_task_failed(&self, id: &str) -> Result { let task = match self.db.get::(id.as_bytes())? { @@ -635,7 +635,7 @@ impl Metadata { Ok(task) } - // upload_cache_task_started updates the metadata of the cache task when cache task uploads started. + /// upload_cache_task_started updates the metadata of the cache task when cache task uploads started. #[instrument(skip_all)] pub fn upload_cache_task_started(&self, id: &str) -> Result { let task = match self.db.get::(id.as_bytes())? { @@ -651,7 +651,7 @@ impl Metadata { Ok(task) } - // upload_cache_task_finished updates the metadata of the cache task when cache task uploads finished. + /// upload_cache_task_finished updates the metadata of the cache task when cache task uploads finished. #[instrument(skip_all)] pub fn upload_cache_task_finished(&self, id: &str) -> Result { let task = match self.db.get::(id.as_bytes())? { @@ -668,7 +668,7 @@ impl Metadata { Ok(task) } - // upload_cache_task_failed updates the metadata of the cache task when the cache task uploads failed. + /// upload_cache_task_failed updates the metadata of the cache task when the cache task uploads failed. #[instrument(skip_all)] pub fn upload_cache_task_failed(&self, id: &str) -> Result { let task = match self.db.get::(id.as_bytes())? { @@ -684,27 +684,27 @@ impl Metadata { Ok(task) } - // get_cache_task gets the cache task metadata. + /// get_cache_task gets the cache task metadata. #[instrument(skip_all)] pub fn get_cache_task(&self, id: &str) -> Result> { self.db.get(id.as_bytes()) } - // get_cache_tasks gets the cache task metadatas. + /// get_cache_tasks gets the cache task metadatas. #[instrument(skip_all)] pub fn get_cache_tasks(&self) -> Result> { let iter = self.db.iter::()?; iter.map(|ele| ele.map(|(_, task)| task)).collect() } - // delete_cache_task deletes the cache task metadata. + /// delete_cache_task deletes the cache task metadata. #[instrument(skip_all)] pub fn delete_cache_task(&self, id: &str) -> Result<()> { info!("delete cache task metadata {}", id); self.db.delete::(id.as_bytes()) } - // download_piece_started updates the metadata of the piece when the piece downloads started. + /// download_piece_started updates the metadata of the piece when the piece downloads started. #[instrument(skip_all)] pub fn download_piece_started(&self, task_id: &str, number: u32) -> Result { // Construct the piece metadata. @@ -721,7 +721,7 @@ impl Metadata { Ok(piece) } - // download_piece_finished updates the metadata of the piece when the piece downloads finished. + /// download_piece_finished updates the metadata of the piece when the piece downloads finished. #[instrument(skip_all)] pub fn download_piece_finished( &self, @@ -751,19 +751,19 @@ impl Metadata { Ok(piece) } - // download_piece_failed updates the metadata of the piece when the piece downloads failed. + /// download_piece_failed updates the metadata of the piece when the piece downloads failed. #[instrument(skip_all)] pub fn download_piece_failed(&self, task_id: &str, number: u32) -> Result<()> { self.delete_piece(task_id, number) } - // wait_for_piece_finished_failed waits for the piece to be finished or failed. + /// wait_for_piece_finished_failed waits for the piece to be finished or failed. 
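The piece metadata below is keyed by task id and piece number ("{task_id}-{number}", see piece_id), which is what makes per-task operations such as get_pieces and delete_pieces simple prefix scans. A minimal sketch of that keying scheme, with a BTreeMap standing in for the real storage engine:

    use std::collections::BTreeMap;

    /// Mirrors Metadata::piece_id: pieces are keyed by task id and piece number.
    fn piece_id(task_id: &str, number: u32) -> String {
        format!("{}-{}", task_id, number)
    }

    fn main() {
        // BTreeMap stands in for the real storage engine in this sketch.
        let mut db: BTreeMap<String, &str> = BTreeMap::new();
        db.insert(piece_id("task-a", 0), "piece 0 metadata");
        db.insert(piece_id("task-a", 1), "piece 1 metadata");
        db.insert(piece_id("task-b", 0), "piece 0 of another task");

        // Listing every piece of task-a is a prefix scan over "task-a-".
        let pieces: Vec<_> = db
            .range("task-a-".to_string()..)
            .take_while(|(key, _)| key.starts_with("task-a-"))
            .collect();
        assert_eq!(pieces.len(), 2);
    }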
#[instrument(skip_all)] pub fn wait_for_piece_finished_failed(&self, task_id: &str, number: u32) -> Result<()> { self.delete_piece(task_id, number) } - // upload_piece_started updates the metadata of the piece when piece uploads started. + /// upload_piece_started updates the metadata of the piece when piece uploads started. #[instrument(skip_all)] pub fn upload_piece_started(&self, task_id: &str, number: u32) -> Result { // Get the piece id. @@ -781,7 +781,7 @@ impl Metadata { Ok(piece) } - // upload_piece_finished updates the metadata of the piece when piece uploads finished. + /// upload_piece_finished updates the metadata of the piece when piece uploads finished. #[instrument(skip_all)] pub fn upload_piece_finished(&self, task_id: &str, number: u32) -> Result { // Get the piece id. @@ -800,7 +800,7 @@ impl Metadata { Ok(piece) } - // upload_piece_failed updates the metadata of the piece when the piece uploads failed. + /// upload_piece_failed updates the metadata of the piece when the piece uploads failed. #[instrument(skip_all)] pub fn upload_piece_failed(&self, task_id: &str, number: u32) -> Result { // Get the piece id. @@ -818,13 +818,13 @@ impl Metadata { Ok(piece) } - // get_piece gets the piece metadata. + /// get_piece gets the piece metadata. #[instrument(skip_all)] pub fn get_piece(&self, task_id: &str, number: u32) -> Result> { self.db.get(self.piece_id(task_id, number).as_bytes()) } - // get_pieces gets the piece metadatas. + /// get_pieces gets the piece metadatas. pub fn get_pieces(&self, task_id: &str) -> Result> { let pieces = self .db @@ -841,7 +841,7 @@ impl Metadata { .collect() } - // delete_piece deletes the piece metadata. + /// delete_piece deletes the piece metadata. #[instrument(skip_all)] pub fn delete_piece(&self, task_id: &str, number: u32) -> Result<()> { info!("delete piece metadata {}", self.piece_id(task_id, number)); @@ -849,7 +849,7 @@ impl Metadata { .delete::(self.piece_id(task_id, number).as_bytes()) } - // delete_pieces deletes the piece metadatas. + /// delete_pieces deletes the piece metadatas. #[instrument(skip_all)] pub fn delete_pieces(&self, task_id: &str) -> Result<()> { let piece_ids = self @@ -878,16 +878,16 @@ impl Metadata { Ok(()) } - // piece_id returns the piece id. + /// piece_id returns the piece id. #[instrument(skip_all)] pub fn piece_id(&self, task_id: &str, number: u32) -> String { format!("{}-{}", task_id, number) } } -// Metadata implements the metadata of the storage engine. +/// Metadata implements the metadata of the storage engine. impl Metadata { - // new creates a new metadata instance. + /// new creates a new metadata instance. #[instrument(skip_all)] pub fn new( config: Arc, diff --git a/dragonfly-client-storage/src/storage_engine/rocksdb.rs b/dragonfly-client-storage/src/storage_engine/rocksdb.rs index 1449a08d..2e2a9f5b 100644 --- a/dragonfly-client-storage/src/storage_engine/rocksdb.rs +++ b/dragonfly-client-storage/src/storage_engine/rocksdb.rs @@ -32,18 +32,18 @@ pub struct RocksdbStorageEngine { inner: rocksdb::DB, } -// RocksdbStorageEngine implements deref of the storage engine. +/// RocksdbStorageEngine implements deref of the storage engine. impl Deref for RocksdbStorageEngine { - // Target is the inner rocksdb DB. + /// Target is the inner rocksdb DB. type Target = rocksdb::DB; - // deref returns the inner rocksdb DB. + /// deref returns the inner rocksdb DB. fn deref(&self) -> &Self::Target { &self.inner } } -// RocksdbStorageEngine implements the storage engine of the rocksdb. 
+/// RocksdbStorageEngine implements the storage engine of the rocksdb. impl RocksdbStorageEngine { /// DEFAULT_DIR_NAME is the default directory name to store metadata. const DEFAULT_DIR_NAME: &'static str = "metadata"; @@ -60,10 +60,10 @@ impl RocksdbStorageEngine { /// DEFAULT_CACHE_SIZE is the default cache size for rocksdb, default is 512MB. const DEFAULT_CACHE_SIZE: usize = 512 * 1024 * 1024; - // DEFAULT_LOG_MAX_SIZE is the default max log size for rocksdb, default is 64MB. + /// DEFAULT_LOG_MAX_SIZE is the default max log size for rocksdb, default is 64MB. const DEFAULT_LOG_MAX_SIZE: usize = 64 * 1024 * 1024; - // DEFAULT_LOG_MAX_FILES is the default max log files for rocksdb. + /// DEFAULT_LOG_MAX_FILES is the default max log files for rocksdb. const DEFAULT_LOG_MAX_FILES: usize = 10; /// open opens a rocksdb storage engine with the given directory and column families. @@ -124,9 +124,9 @@ impl RocksdbStorageEngine { } } -// RocksdbStorageEngine implements the storage engine operations. +/// RocksdbStorageEngine implements the storage engine operations. impl Operations for RocksdbStorageEngine { - // get gets the object by key. + /// get gets the object by key. #[instrument(skip_all)] fn get(&self, key: &[u8]) -> Result> { let cf = cf_handle::(self)?; @@ -142,7 +142,7 @@ impl Operations for RocksdbStorageEngine { } } - // put puts the object by key. + /// put puts the object by key. #[instrument(skip_all)] fn put(&self, key: &[u8], value: &O) -> Result<()> { let cf = cf_handle::(self)?; @@ -155,7 +155,7 @@ impl Operations for RocksdbStorageEngine { Ok(()) } - // delete deletes the object by key. + /// delete deletes the object by key. #[instrument(skip_all)] fn delete(&self, key: &[u8]) -> Result<()> { let cf = cf_handle::(self)?; @@ -167,7 +167,7 @@ impl Operations for RocksdbStorageEngine { Ok(()) } - // iter iterates all objects. + /// iter iterates all objects. #[instrument(skip_all)] fn iter(&self) -> Result, O)>>> { let cf = cf_handle::(self)?; @@ -178,7 +178,7 @@ impl Operations for RocksdbStorageEngine { })) } - // iter_raw iterates all objects without serialization. + /// iter_raw iterates all objects without serialization. #[instrument(skip_all)] fn iter_raw( &self, @@ -192,7 +192,7 @@ impl Operations for RocksdbStorageEngine { })) } - // prefix_iter iterates all objects with prefix. + /// prefix_iter iterates all objects with prefix. #[instrument(skip_all)] fn prefix_iter( &self, @@ -206,7 +206,7 @@ impl Operations for RocksdbStorageEngine { })) } - // prefix_iter_raw iterates all objects with prefix without serialization. + /// prefix_iter_raw iterates all objects with prefix without serialization. #[instrument(skip_all)] fn prefix_iter_raw( &self, @@ -219,7 +219,7 @@ impl Operations for RocksdbStorageEngine { })) } - // batch_delete deletes objects by keys. + /// batch_delete deletes objects by keys. #[instrument(skip_all)] fn batch_delete(&self, keys: Vec<&[u8]>) -> Result<()> { let cf = cf_handle::(self)?; @@ -236,7 +236,7 @@ impl Operations for RocksdbStorageEngine { } } -// RocksdbStorageEngine implements the rocksdb of the storage engine. +/// RocksdbStorageEngine implements the rocksdb of the storage engine. impl<'db> StorageEngine<'db> for RocksdbStorageEngine {} /// cf_handle returns the column family handle for the given object. 
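Each DatabaseObject namespace ("task", "piece", "cache_task") maps to its own rocksdb column family, and cf_handle resolves that handle before every operation. A minimal sketch of the same layout using the rust-rocksdb crate directly (the path, keys, and values here are placeholders):

    use rocksdb::{Options, DB};

    fn main() -> Result<(), rocksdb::Error> {
        let mut options = Options::default();
        options.create_if_missing(true);
        options.create_missing_column_families(true);

        // One column family per object namespace.
        let db = DB::open_cf(&options, "/tmp/metadata-example", ["task", "piece"])?;

        // Reads and writes go through the column family handle for the namespace.
        let cf = db.cf_handle("task").expect("column family was created above");
        db.put_cf(cf, b"task-id", b"serialized task metadata")?;
        assert!(db.get_cf(cf, b"task-id")?.is_some());
        Ok(())
    }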
diff --git a/dragonfly-client-util/src/digest/mod.rs b/dragonfly-client-util/src/digest/mod.rs index 7a371ecd..80308ee8 100644 --- a/dragonfly-client-util/src/digest/mod.rs +++ b/dragonfly-client-util/src/digest/mod.rs @@ -22,28 +22,28 @@ use std::path::Path; use std::str::FromStr; use tracing::instrument; -// SEPARATOR is the separator of digest. +/// SEPARATOR is the separator of digest. pub const SEPARATOR: &str = ":"; -// Algorithm is an enum of the algorithm that is used to generate digest. +/// Algorithm is an enum of the algorithm that is used to generate digest. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Algorithm { - // Crc32 is crc32 algorithm for generate digest. + /// Crc32 is crc32 algorithm for generate digest. Crc32, - // Blake3 is blake3 algorithm for generate digest. + /// Blake3 is blake3 algorithm for generate digest. Blake3, - // Sha256 is sha256 algorithm for generate digest. + /// Sha256 is sha256 algorithm for generate digest. Sha256, - // Sha512 is sha512 algorithm for generate digest. + /// Sha512 is sha512 algorithm for generate digest. Sha512, } -// Algorithm implements the Display. +/// Algorithm implements the Display. impl fmt::Display for Algorithm { - // fmt formats the value using the given formatter. + /// fmt formats the value using the given formatter. fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Algorithm::Crc32 => write!(f, "crc32"), @@ -54,11 +54,11 @@ impl fmt::Display for Algorithm { } } -// Algorithm implements the FromStr. +/// Algorithm implements the FromStr. impl FromStr for Algorithm { type Err = String; - // from_str parses an algorithm string. + /// from_str parses an algorithm string. fn from_str(s: &str) -> Result { match s { "crc32" => Ok(Algorithm::Crc32), @@ -70,23 +70,23 @@ impl FromStr for Algorithm { } } -// Digest is a struct that is used to generate digest. +/// Digest is a struct that is used to generate digest. pub struct Digest { - // algorithm is the algorithm that is used to generate digest. + /// algorithm is the algorithm that is used to generate digest. algorithm: Algorithm, - // encoded is the encoded digest. + /// encoded is the encoded digest. encoded: String, } -// Digest implements the Digest. +/// Digest implements the Digest. impl Digest { - // new returns a new Digest. + /// new returns a new Digest. pub fn new(algorithm: Algorithm, encoded: String) -> Self { Self { algorithm, encoded } } - // algorithm returns the algorithm of the digest. + /// algorithm returns the algorithm of the digest. pub fn algorithm(&self) -> Algorithm { self.algorithm } @@ -97,19 +97,19 @@ impl Digest { } } -// Digest implements the Display. +/// Digest implements the Display. impl fmt::Display for Digest { - // fmt formats the value using the given formatter. + /// fmt formats the value using the given formatter. fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}{}{}", self.algorithm, SEPARATOR, self.encoded) } } -// Digest implements the FromStr. +/// Digest implements the FromStr. impl FromStr for Digest { type Err = String; - // from_str parses a digest string. + /// from_str parses a digest string. fn from_str(s: &str) -> Result { let parts: Vec<&str> = s.splitn(2, SEPARATOR).collect(); if parts.len() != 2 { @@ -128,7 +128,7 @@ impl FromStr for Digest { } } -// calculate_file_hash calculates the hash of a file. +/// calculate_file_hash calculates the hash of a file. 
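A digest string is simply the algorithm name and the encoded value joined by SEPARATOR, so it round-trips through FromStr and Display. A short usage sketch, assuming the crate is available as dragonfly_client_util:

    use dragonfly_client_util::digest::{Algorithm, Digest};

    fn main() -> Result<(), String> {
        // FromStr splits "algorithm:encoded" on the separator.
        let digest = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
            .parse::<Digest>()?;
        assert_eq!(digest.algorithm(), Algorithm::Sha256);

        // Display joins the parts back into the same "algorithm:encoded" form.
        println!("{}", digest);
        Ok(())
    }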
#[instrument(skip_all)] pub fn calculate_file_hash(algorithm: Algorithm, path: &Path) -> ClientResult { let f = std::fs::File::open(path)?; diff --git a/dragonfly-client-util/src/http/mod.rs b/dragonfly-client-util/src/http/mod.rs index 641b2bae..61cdc0b9 100644 --- a/dragonfly-client-util/src/http/mod.rs +++ b/dragonfly-client-util/src/http/mod.rs @@ -23,7 +23,7 @@ use reqwest::header::{HeaderMap, HeaderValue}; use std::collections::HashMap; use tracing::{error, instrument}; -// reqwest_headermap_to_hashmap converts a reqwest headermap to a hashmap. +/// reqwest_headermap_to_hashmap converts a reqwest headermap to a hashmap. #[instrument(skip_all)] pub fn reqwest_headermap_to_hashmap(header: &HeaderMap) -> HashMap { let mut hashmap: HashMap = HashMap::new(); @@ -38,7 +38,7 @@ pub fn reqwest_headermap_to_hashmap(header: &HeaderMap) -> HashMap< hashmap } -// hashmap_to_reqwest_headermap converts a hashmap to a reqwest headermap. +/// hashmap_to_reqwest_headermap converts a hashmap to a reqwest headermap. #[instrument(skip_all)] pub fn hashmap_to_reqwest_headermap( header: &HashMap, @@ -47,7 +47,7 @@ pub fn hashmap_to_reqwest_headermap( Ok(header) } -// hashmap_to_hyper_header_map converts a hashmap to a hyper header map. +/// hashmap_to_hyper_header_map converts a hashmap to a hyper header map. #[instrument(skip_all)] pub fn hashmap_to_hyper_header_map( header: &HashMap, @@ -56,10 +56,10 @@ pub fn hashmap_to_hyper_header_map( Ok(header) } -// TODO: Remove the conversion after the http crate version is the same. -// Convert the Reqwest header to the Hyper header, because of the http crate -// version is different. Reqwest header depends on the http crate -// version 0.2, but the Hyper header depends on the http crate version 0.1. +/// TODO: Remove the conversion after the http crate version is the same. +/// Convert the Reqwest header to the Hyper header, because of the http crate +/// version is different. Reqwest header depends on the http crate +/// version 0.2, but the Hyper header depends on the http crate version 0.1. #[instrument(skip_all)] pub fn hyper_headermap_to_reqwest_headermap( hyper_header: &hyper::header::HeaderMap, @@ -95,7 +95,7 @@ pub fn hyper_headermap_to_reqwest_headermap( reqwest_header } -// header_vec_to_hashmap converts a vector of header string to a hashmap. +/// header_vec_to_hashmap converts a vector of header string to a hashmap. #[instrument(skip_all)] pub fn header_vec_to_hashmap(raw_header: Vec) -> Result> { let mut header = HashMap::new(); @@ -109,7 +109,7 @@ pub fn header_vec_to_hashmap(raw_header: Vec) -> Result, @@ -117,7 +117,7 @@ pub fn header_vec_to_reqwest_headermap( hashmap_to_reqwest_headermap(&header_vec_to_hashmap(raw_header)?) } -// get_range gets the range from http header. +/// get_range gets the range from http header. #[instrument(skip_all)] pub fn get_range(header: &HeaderMap, content_length: u64) -> Result> { match header.get(reqwest::header::RANGE) { @@ -129,9 +129,9 @@ pub fn get_range(header: &HeaderMap, content_length: u64) -> Result Result { let parsed_ranges = diff --git a/dragonfly-client-util/src/id_generator/mod.rs b/dragonfly-client-util/src/id_generator/mod.rs index 1cc6ddb5..28875c3f 100644 --- a/dragonfly-client-util/src/id_generator/mod.rs +++ b/dragonfly-client-util/src/id_generator/mod.rs @@ -25,31 +25,31 @@ use tracing::instrument; use url::Url; use uuid::Uuid; -// SEED_PEER_KEY is the key of the seed peer. +/// SEED_PEER_KEY is the key of the seed peer. 
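These helpers shuttle headers between reqwest's typed HeaderMap and the plain string map used elsewhere in the client. A usage sketch, assuming the crate is available as dragonfly_client_util and that the map side is HashMap<String, String>:

    use dragonfly_client_util::http::{hashmap_to_reqwest_headermap, reqwest_headermap_to_hashmap};
    use reqwest::header::{HeaderMap, HeaderValue, CONTENT_TYPE};

    fn main() {
        let mut headers = HeaderMap::new();
        headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));

        // HeaderMap -> HashMap<String, String> and back again.
        let hashmap = reqwest_headermap_to_hashmap(&headers);
        let headers_again =
            hashmap_to_reqwest_headermap(&hashmap).expect("valid header names and values");
        assert_eq!(
            headers_again.get(CONTENT_TYPE),
            Some(&HeaderValue::from_static("application/json"))
        );
    }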
const SEED_PEER_KEY: &str = "seed"; -// CACHE_KEY is the key of the cache. +/// CACHE_KEY is the key of the cache. const CACHE_KEY: &str = "cache"; -// PERSISTENT_CACHE_KEY is the key of the persistent cache. +/// PERSISTENT_CACHE_KEY is the key of the persistent cache. const PERSISTENT_CACHE_KEY: &str = "persistent"; -// IDGenerator is used to generate the id for the resources. +/// IDGenerator is used to generate the id for the resources. #[derive(Debug)] pub struct IDGenerator { - // ip is the ip of the host. + /// ip is the ip of the host. ip: String, - // hostname is the hostname of the host. + /// hostname is the hostname of the host. hostname: String, - // is_seed_peer indicates whether the host is a seed peer. + /// is_seed_peer indicates whether the host is a seed peer. is_seed_peer: bool, } -// IDGenerator implements the IDGenerator. +/// IDGenerator implements the IDGenerator. impl IDGenerator { - // new creates a new IDGenerator. + /// new creates a new IDGenerator. #[instrument(skip_all)] pub fn new(ip: String, hostname: String, is_seed_peer: bool) -> Self { IDGenerator { @@ -59,7 +59,7 @@ impl IDGenerator { } } - // host_id generates the host id. + /// host_id generates the host id. #[instrument(skip_all)] pub fn host_id(&self) -> String { if self.is_seed_peer { @@ -69,7 +69,7 @@ impl IDGenerator { format!("{}-{}", self.ip, self.hostname) } - // task_id generates the task id. + /// task_id generates the task id. #[instrument(skip_all)] pub fn task_id( &self, @@ -113,7 +113,7 @@ impl IDGenerator { Ok(hex::encode(hasher.finalize())) } - // cache_task_id generates the cache task id. + /// cache_task_id generates the cache task id. #[instrument(skip_all)] pub fn cache_task_id( &self, @@ -142,7 +142,7 @@ impl IDGenerator { Ok(hasher.finalize().to_hex().to_string()) } - // peer_id generates the peer id. + /// peer_id generates the peer id. #[instrument(skip_all)] pub fn peer_id(&self) -> String { if self.is_seed_peer { @@ -158,7 +158,7 @@ impl IDGenerator { format!("{}-{}-{}", self.ip, self.hostname, Uuid::new_v4()) } - // cache_peer_id generates the cache peer id. + /// cache_peer_id generates the cache peer id. #[instrument(skip_all)] pub fn cache_peer_id(&self, persistent: bool) -> String { if persistent { @@ -181,7 +181,7 @@ impl IDGenerator { ) } - // task_type generates the task type by the task id. + /// task_type generates the task type by the task id. #[instrument(skip_all)] pub fn task_type(&self, id: &str) -> TaskType { if id.contains(CACHE_KEY) { diff --git a/dragonfly-client-util/src/tls/mod.rs b/dragonfly-client-util/src/tls/mod.rs index ee028c3e..87704e1b 100644 --- a/dragonfly-client-util/src/tls/mod.rs +++ b/dragonfly-client-util/src/tls/mod.rs @@ -24,22 +24,22 @@ use std::vec::Vec; use std::{fs, io}; use tracing::instrument; -// NoVerifier is a verifier that does not verify the server certificate. -// It is used for testing and should not be used in production. +/// NoVerifier is a verifier that does not verify the server certificate. +/// It is used for testing and should not be used in production. #[derive(Debug)] pub struct NoVerifier(Arc); -// Implement the NoVerifier. +/// Implement the NoVerifier. impl NoVerifier { - // new creates a new NoVerifier. + /// new creates a new NoVerifier. pub fn new() -> Arc { Arc::new(Self(Arc::new(rustls::crypto::ring::default_provider()))) } } -// Implement the ServerCertVerifier trait for NoVerifier. +/// Implement the ServerCertVerifier trait for NoVerifier. 
impl rustls::client::danger::ServerCertVerifier for NoVerifier { - // verify_server_cert verifies the server certificate. + /// verify_server_cert verifies the server certificate. fn verify_server_cert( &self, _end_entity: &CertificateDer<'_>, @@ -51,7 +51,7 @@ impl rustls::client::danger::ServerCertVerifier for NoVerifier { Ok(rustls::client::danger::ServerCertVerified::assertion()) } - // verify_tls12_signature verifies the TLS 1.2 signature. + /// verify_tls12_signature verifies the TLS 1.2 signature. fn verify_tls12_signature( &self, message: &[u8], @@ -66,7 +66,7 @@ impl rustls::client::danger::ServerCertVerifier for NoVerifier { ) } - // verify_tls13_signature verifies the TLS 1.3 signature. + /// verify_tls13_signature verifies the TLS 1.3 signature. fn verify_tls13_signature( &self, message: &[u8], @@ -81,15 +81,15 @@ impl rustls::client::danger::ServerCertVerifier for NoVerifier { ) } - // supported_verify_schemes returns the supported signature schemes. + /// supported_verify_schemes returns the supported signature schemes. fn supported_verify_schemes(&self) -> Vec { self.0.signature_verification_algorithms.supported_schemes() } } -// Generate a CA certificate from PEM format files. -// Generate CA by openssl with PEM format files: -// openssl req -x509 -sha256 -days 36500 -nodes -newkey rsa:4096 -keyout ca.key -out ca.crt +/// Generate a CA certificate from PEM format files. +/// Generate CA by openssl with PEM format files: +/// openssl req -x509 -sha256 -days 36500 -nodes -newkey rsa:4096 -keyout ca.key -out ca.crt #[instrument(skip_all)] pub fn generate_ca_cert_from_pem( ca_cert_path: &PathBuf, @@ -110,7 +110,7 @@ pub fn generate_ca_cert_from_pem( Ok(ca_cert) } -// Generate certificates from PEM format files. +/// Generate certificates from PEM format files. #[instrument(skip_all)] pub fn generate_certs_from_pem(cert_path: &PathBuf) -> ClientResult>> { let f = fs::File::open(cert_path)?; @@ -119,8 +119,8 @@ pub fn generate_certs_from_pem(cert_path: &PathBuf) -> ClientResult>, @@ -162,7 +162,7 @@ pub fn generate_simple_self_signed_certs( Ok((certs, key)) } -// certs_to_raw_certs converts DER format of the certificates to raw certificates. +/// certs_to_raw_certs converts DER format of the certificates to raw certificates. #[instrument(skip_all)] pub fn certs_to_raw_certs(certs: Vec>) -> Vec> { certs @@ -171,7 +171,7 @@ pub fn certs_to_raw_certs(certs: Vec>) -> Vec> { .collect() } -// raw_certs_to_certs converts raw certificates to DER format of certificates. +/// raw_certs_to_certs converts raw certificates to DER format of certificates. #[instrument(skip_all)] pub fn raw_certs_to_certs(raw_certs: Vec>) -> Vec> { raw_certs.into_iter().map(|cert| cert.into()).collect() diff --git a/dragonfly-client/build.rs b/dragonfly-client/build.rs index 8fb5b200..509f4801 100644 --- a/dragonfly-client/build.rs +++ b/dragonfly-client/build.rs @@ -18,7 +18,7 @@ use std::env; use std::process::Command; use std::time::{SystemTime, UNIX_EPOCH}; -// git_commit_hash returns the short hash of the current git commit. +/// git_commit_hash returns the short hash of the current git commit. 
fn git_commit_hash() -> String { if let Ok(output) = Command::new("git") .args(["rev-parse", "--short", "HEAD"]) diff --git a/dragonfly-client/src/announcer/mod.rs b/dragonfly-client/src/announcer/mod.rs index 712cae2e..d4acbd59 100644 --- a/dragonfly-client/src/announcer/mod.rs +++ b/dragonfly-client/src/announcer/mod.rs @@ -31,24 +31,24 @@ use sysinfo::System; use tokio::sync::mpsc; use tracing::{error, info, instrument}; -// ManagerAnnouncer is used to announce the dfdaemon information to the manager. +/// ManagerAnnouncer is used to announce the dfdaemon information to the manager. pub struct ManagerAnnouncer { - // config is the configuration of the dfdaemon. + /// config is the configuration of the dfdaemon. config: Arc, - // manager_client is the grpc client of the manager. + /// manager_client is the grpc client of the manager. manager_client: Arc, - // shutdown is used to shutdown the announcer. + /// shutdown is used to shutdown the announcer. shutdown: shutdown::Shutdown, - // _shutdown_complete is used to notify the announcer is shutdown. + /// _shutdown_complete is used to notify the announcer is shutdown. _shutdown_complete: mpsc::UnboundedSender<()>, } -// ManagerAnnouncer implements the manager announcer of the dfdaemon. +/// ManagerAnnouncer implements the manager announcer of the dfdaemon. impl ManagerAnnouncer { - // new creates a new manager announcer. + /// new creates a new manager announcer. #[instrument(skip_all)] pub fn new( config: Arc, @@ -64,7 +64,7 @@ impl ManagerAnnouncer { } } - // run announces the dfdaemon information to the manager. + /// run announces the dfdaemon information to the manager. #[instrument(skip_all)] pub async fn run(&self) -> Result<()> { // Clone the shutdown channel. @@ -110,27 +110,27 @@ impl ManagerAnnouncer { } } -// Announcer is used to announce the dfdaemon information to the manager and scheduler. +/// Announcer is used to announce the dfdaemon information to the manager and scheduler. pub struct SchedulerAnnouncer { - // config is the configuration of the dfdaemon. + /// config is the configuration of the dfdaemon. config: Arc, - // host_id is the id of the host. + /// host_id is the id of the host. host_id: String, - // scheduler_client is the grpc client of the scheduler. + /// scheduler_client is the grpc client of the scheduler. scheduler_client: Arc, - // shutdown is used to shutdown the announcer. + /// shutdown is used to shutdown the announcer. shutdown: shutdown::Shutdown, - // _shutdown_complete is used to notify the announcer is shutdown. + /// _shutdown_complete is used to notify the announcer is shutdown. _shutdown_complete: mpsc::UnboundedSender<()>, } -// SchedulerAnnouncer implements the scheduler announcer of the dfdaemon. +/// SchedulerAnnouncer implements the scheduler announcer of the dfdaemon. impl SchedulerAnnouncer { - // new creates a new scheduler announcer. + /// new creates a new scheduler announcer. #[instrument(skip_all)] pub async fn new( config: Arc, @@ -155,7 +155,7 @@ impl SchedulerAnnouncer { Ok(announcer) } - // run announces the dfdaemon information to the scheduler. + /// run announces the dfdaemon information to the scheduler. #[instrument(skip_all)] pub async fn run(&self) { // Clone the shutdown channel. @@ -193,7 +193,7 @@ impl SchedulerAnnouncer { } } - // make_announce_host_request makes the announce host request. + /// make_announce_host_request makes the announce host request. 
#[instrument(skip_all)] fn make_announce_host_request(&self) -> Result { // If the seed peer is enabled, we should announce the seed peer to the scheduler. diff --git a/dragonfly-client/src/bin/dfcache/export.rs b/dragonfly-client/src/bin/dfcache/export.rs index 10fbf9e9..6738a5c2 100644 --- a/dragonfly-client/src/bin/dfcache/export.rs +++ b/dragonfly-client/src/bin/dfcache/export.rs @@ -31,7 +31,7 @@ use tracing::{error, info}; use super::*; -// ExportCommand is the subcommand of export. +/// ExportCommand is the subcommand of export. #[derive(Debug, Clone, Parser)] pub struct ExportCommand { #[arg(help = "Specify the cache task ID to export")] @@ -67,9 +67,9 @@ pub struct ExportCommand { timeout: Duration, } -// Implement the execute for ExportCommand. +/// Implement the execute for ExportCommand. impl ExportCommand { - // execute executes the export command. + /// execute executes the export command. pub async fn execute(&self, endpoint: &Path) -> Result<()> { // Validate the command line arguments. if let Err(err) = self.validate_args() { @@ -358,7 +358,7 @@ impl ExportCommand { Ok(()) } - // run runs the export command. + /// run runs the export command. async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> { // Get the absolute path of the output file. let absolute_path = Path::new(&self.output).absolutize()?; @@ -428,7 +428,7 @@ impl ExportCommand { Ok(()) } - // validate_args validates the command line arguments. + /// validate_args validates the command line arguments. fn validate_args(&self) -> Result<()> { let absolute_path = Path::new(&self.output).absolutize()?; match absolute_path.parent() { diff --git a/dragonfly-client/src/bin/dfcache/import.rs b/dragonfly-client/src/bin/dfcache/import.rs index 5d2a75f2..d07d6866 100644 --- a/dragonfly-client/src/bin/dfcache/import.rs +++ b/dragonfly-client/src/bin/dfcache/import.rs @@ -28,10 +28,10 @@ use termion::{color, style}; use super::*; -// DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL is the default steady tick interval of progress bar. +/// DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL is the default steady tick interval of progress bar. const DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL: Duration = Duration::from_millis(80); -// ImportCommand is the subcommand of import. +/// ImportCommand is the subcommand of import. #[derive(Debug, Clone, Parser)] pub struct ImportCommand { #[arg(help = "Specify the path of the file to import")] @@ -75,9 +75,9 @@ pub struct ImportCommand { timeout: Duration, } -// Implement the execute for ImportCommand. +/// Implement the execute for ImportCommand. impl ImportCommand { - // execute executes the import sub command. + /// execute executes the import sub command. pub async fn execute(&self, endpoint: &Path) -> Result<()> { // Validate the command line arguments. if let Err(err) = self.validate_args() { @@ -257,7 +257,7 @@ impl ImportCommand { Ok(()) } - // run runs the import sub command. + /// run runs the import sub command. async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> { let pb = ProgressBar::new_spinner(); pb.enable_steady_tick(DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL); @@ -288,7 +288,7 @@ impl ImportCommand { Ok(()) } - // validate_args validates the command line arguments. + /// validate_args validates the command line arguments. 
fn validate_args(&self) -> Result<()> { if self.path.is_dir() { return Err(Error::ValidationError(format!( diff --git a/dragonfly-client/src/bin/dfcache/main.rs b/dragonfly-client/src/bin/dfcache/main.rs index 33ba45aa..ceab0add 100644 --- a/dragonfly-client/src/bin/dfcache/main.rs +++ b/dragonfly-client/src/bin/dfcache/main.rs @@ -119,7 +119,7 @@ pub enum Command { Remove(remove::RemoveCommand), } -// Implement the execute for Command. +/// Implement the execute for Command. impl Command { #[allow(unused)] pub async fn execute(self, endpoint: &Path) -> Result<()> { @@ -154,7 +154,7 @@ async fn main() -> anyhow::Result<()> { Ok(()) } -// get_and_check_dfdaemon_download_client gets a dfdaemon download client and checks its health. +/// get_and_check_dfdaemon_download_client gets a dfdaemon download client and checks its health. pub async fn get_dfdaemon_download_client(endpoint: PathBuf) -> Result { // Check dfdaemon's health. let health_client = HealthClient::new_unix(endpoint.clone()).await?; diff --git a/dragonfly-client/src/bin/dfcache/remove.rs b/dragonfly-client/src/bin/dfcache/remove.rs index b2f38f50..b4d7af84 100644 --- a/dragonfly-client/src/bin/dfcache/remove.rs +++ b/dragonfly-client/src/bin/dfcache/remove.rs @@ -24,19 +24,19 @@ use termion::{color, style}; use super::*; -// DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL is the default steady tick interval of progress bar. +/// DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL is the default steady tick interval of progress bar. const DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL: Duration = Duration::from_millis(80); -// RemoveCommand is the subcommand of remove. +/// RemoveCommand is the subcommand of remove. #[derive(Debug, Clone, Parser)] pub struct RemoveCommand { #[arg(help = "Specify the cache task ID to remove")] id: String, } -// Implement the execute for RemoveCommand. +/// Implement the execute for RemoveCommand. impl RemoveCommand { - // execute executes the delete command. + /// execute executes the delete command. pub async fn execute(&self, endpoint: &Path) -> Result<()> { // Get dfdaemon download client. let dfdaemon_download_client = @@ -178,7 +178,7 @@ impl RemoveCommand { Ok(()) } - // run runs the delete command. + /// run runs the delete command. async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> { let pb = ProgressBar::new_spinner(); pb.enable_steady_tick(DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL); diff --git a/dragonfly-client/src/bin/dfcache/stat.rs b/dragonfly-client/src/bin/dfcache/stat.rs index 6dcc1395..0624fb7f 100644 --- a/dragonfly-client/src/bin/dfcache/stat.rs +++ b/dragonfly-client/src/bin/dfcache/stat.rs @@ -32,16 +32,16 @@ use termion::{color, style}; use super::*; -// StatCommand is the subcommand of stat. +/// StatCommand is the subcommand of stat. #[derive(Debug, Clone, Parser)] pub struct StatCommand { #[arg(help = "Specify the cache task ID to stat")] id: String, } -// Implement the execute for StatCommand. +/// Implement the execute for StatCommand. impl StatCommand { - // execute executes the stat command. + /// execute executes the stat command. pub async fn execute(&self, endpoint: &Path) -> Result<()> { // Get dfdaemon download client. let dfdaemon_download_client = @@ -183,7 +183,7 @@ impl StatCommand { Ok(()) } - // run runs the stat command. + /// run runs the stat command. 
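get_dfdaemon_download_client above checks dfdaemon's health over its unix domain socket before handing back a download client. A rough sketch of that flow (the module paths and the exact health-check call are assumptions; only parts of them appear in this diff):

    use std::path::PathBuf;

    use dragonfly_client::grpc::dfdaemon_download::DfdaemonDownloadClient;
    use dragonfly_client::grpc::health::HealthClient;

    /// Hypothetical helper mirroring get_dfdaemon_download_client: check health, then connect.
    async fn connect(endpoint: PathBuf) -> anyhow::Result<DfdaemonDownloadClient> {
        let health_client = HealthClient::new_unix(endpoint.clone()).await?;
        health_client.check().await?; // assumed method name; the actual call is elided here
        let client = DfdaemonDownloadClient::new_unix(endpoint).await?;
        Ok(client)
    }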
async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> { let task = dfdaemon_download_client .stat_cache_task(StatCacheTaskRequest { diff --git a/dragonfly-client/src/bin/dfget/main.rs b/dragonfly-client/src/bin/dfget/main.rs index 3c846589..2b441aa4 100644 --- a/dragonfly-client/src/bin/dfget/main.rs +++ b/dragonfly-client/src/bin/dfget/main.rs @@ -546,7 +546,7 @@ async fn main() -> anyhow::Result<()> { Ok(()) } -// run runs the dfget command. +/// run runs the dfget command. async fn run(mut args: Args, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> { // Get the absolute path of the output file. args.output = Path::new(&args.output).absolutize()?.into(); @@ -567,7 +567,7 @@ async fn run(mut args: Args, dfdaemon_download_client: DfdaemonDownloadClient) - download(args, ProgressBar::new(0), dfdaemon_download_client).await } -// download_dir downloads all files in the directory. +/// download_dir downloads all files in the directory. async fn download_dir(args: Args, download_client: DfdaemonDownloadClient) -> Result<()> { // Initalize the object storage. let object_storage = Some(ObjectStorage { @@ -657,7 +657,7 @@ async fn download_dir(args: Args, download_client: DfdaemonDownloadClient) -> Re Ok(()) } -// download downloads the single file. +/// download downloads the single file. async fn download( args: Args, progress_bar: ProgressBar, @@ -759,7 +759,7 @@ async fn download( Ok(()) } -// get_entries gets all entries in the directory. +/// get_entries gets all entries in the directory. async fn get_entries(args: Args, object_storage: Option) -> Result> { // Initialize backend factory and build backend. let backend_factory = BackendFactory::new(None)?; @@ -818,7 +818,7 @@ async fn get_entries(args: Args, object_storage: Option) -> Resul Ok(response.entries) } -// make_output_by_entry makes the output path by the entry information. +/// make_output_by_entry makes the output path by the entry information. fn make_output_by_entry(url: Url, output: &Path, entry: DirEntry) -> Result { // Get the root directory of the download directory and the output root directory. let root_dir = url.path().to_string(); @@ -836,7 +836,7 @@ fn make_output_by_entry(url: Url, output: &Path, entry: DirEntry) -> Result Result { // Check dfdaemon's health. let health_client = HealthClient::new_unix(endpoint.clone()).await?; @@ -847,7 +847,7 @@ async fn get_dfdaemon_download_client(endpoint: PathBuf) -> Result Result<()> { // If the URL is a directory, the output path should be a directory. if args.url.path().ends_with('/') && !args.output.is_dir() { diff --git a/dragonfly-client/src/bin/dfstore/main.rs b/dragonfly-client/src/bin/dfstore/main.rs index 0d55262f..09c10e98 100644 --- a/dragonfly-client/src/bin/dfstore/main.rs +++ b/dragonfly-client/src/bin/dfstore/main.rs @@ -95,11 +95,11 @@ pub enum Command { Remove(RemoveCommand), } -// Download or upload files using object storage in Dragonfly. +/// Download or upload files using object storage in Dragonfly. #[derive(Debug, Clone, Parser)] pub struct CopyCommand {} -// Remove a file from Dragonfly object storage. +/// Remove a file from Dragonfly object storage. 
#[derive(Debug, Clone, Parser)] pub struct RemoveCommand {} diff --git a/dragonfly-client/src/dynconfig/mod.rs b/dragonfly-client/src/dynconfig/mod.rs index 7a305dce..8ba716b8 100644 --- a/dragonfly-client/src/dynconfig/mod.rs +++ b/dragonfly-client/src/dynconfig/mod.rs @@ -27,43 +27,43 @@ use tokio::sync::{mpsc, Mutex, RwLock}; use tonic_health::pb::health_check_response::ServingStatus; use tracing::{error, info, instrument}; -// Data is the dynamic configuration of the dfdaemon. +/// Data is the dynamic configuration of the dfdaemon. #[derive(Default)] pub struct Data { - // schedulers is the schedulers of the dfdaemon. + /// schedulers is the schedulers of the dfdaemon. pub schedulers: ListSchedulersResponse, - // available_schedulers is the available schedulers of the dfdaemon. + /// available_schedulers is the available schedulers of the dfdaemon. pub available_schedulers: Vec, - // available_scheduler_cluster_id is the id of the available scheduler cluster of the dfdaemon. + /// available_scheduler_cluster_id is the id of the available scheduler cluster of the dfdaemon. pub available_scheduler_cluster_id: Option, } -// Dynconfig supports dynamic configuration of the client. +/// Dynconfig supports dynamic configuration of the client. pub struct Dynconfig { - // data is the dynamic configuration of the dfdaemon. + /// data is the dynamic configuration of the dfdaemon. pub data: RwLock, - // config is the configuration of the dfdaemon. + /// config is the configuration of the dfdaemon. config: Arc, - // manager_client is the grpc client of the manager. + /// manager_client is the grpc client of the manager. manager_client: Arc, - // mutex is used to protect refresh. + /// mutex is used to protect refresh. mutex: Mutex<()>, - // shutdown is used to shutdown the dynconfig. + /// shutdown is used to shutdown the dynconfig. shutdown: shutdown::Shutdown, - // _shutdown_complete is used to notify the dynconfig is shutdown. + /// _shutdown_complete is used to notify the dynconfig is shutdown. _shutdown_complete: mpsc::UnboundedSender<()>, } -// Dynconfig is the implementation of Dynconfig. +/// Dynconfig is the implementation of Dynconfig. impl Dynconfig { - // new creates a new Dynconfig. + /// new creates a new Dynconfig. #[instrument(skip_all)] pub async fn new( config: Arc, @@ -86,7 +86,7 @@ impl Dynconfig { Ok(dc) } - // run starts the dynconfig server. + /// run starts the dynconfig server. #[instrument(skip_all)] pub async fn run(&self) { // Clone the shutdown channel. @@ -110,7 +110,7 @@ impl Dynconfig { } } - // refresh refreshes the dynamic configuration of the dfdaemon. + /// refresh refreshes the dynamic configuration of the dfdaemon. #[instrument(skip_all)] pub async fn refresh(&self) -> Result<()> { // Only one refresh can be running at a time. @@ -142,7 +142,7 @@ impl Dynconfig { Ok(()) } - // list_schedulers lists the schedulers from the manager. + /// list_schedulers lists the schedulers from the manager. #[instrument(skip_all)] async fn list_schedulers(&self) -> Result { // Get the source type. @@ -166,7 +166,7 @@ impl Dynconfig { .await } - // get_available_schedulers gets the available schedulers. + /// get_available_schedulers gets the available schedulers. 
#[instrument(skip_all)] async fn get_available_schedulers(&self, schedulers: &[Scheduler]) -> Result> { let mut available_schedulers: Vec = Vec::new(); diff --git a/dragonfly-client/src/gc/mod.rs b/dragonfly-client/src/gc/mod.rs index 18b0f251..48d3cb86 100644 --- a/dragonfly-client/src/gc/mod.rs +++ b/dragonfly-client/src/gc/mod.rs @@ -24,29 +24,29 @@ use std::sync::Arc; use tokio::sync::mpsc; use tracing::{error, info, instrument}; -// GC is the garbage collector of dfdaemon. +/// GC is the garbage collector of dfdaemon. pub struct GC { - // config is the configuration of the dfdaemon. + /// config is the configuration of the dfdaemon. config: Arc, - // host_id is the id of the host. + /// host_id is the id of the host. host_id: String, - // storage is the local storage. + /// storage is the local storage. storage: Arc, - // scheduler_client is the grpc client of the scheduler. + /// scheduler_client is the grpc client of the scheduler. scheduler_client: Arc, - // shutdown is used to shutdown the garbage collector. + /// shutdown is used to shutdown the garbage collector. shutdown: shutdown::Shutdown, - // _shutdown_complete is used to notify the garbage collector is shutdown. + /// _shutdown_complete is used to notify the garbage collector is shutdown. _shutdown_complete: mpsc::UnboundedSender<()>, } impl GC { - // new creates a new GC. + /// new creates a new GC. #[instrument(skip_all)] pub fn new( config: Arc, @@ -66,7 +66,7 @@ impl GC { } } - // run runs the garbage collector. + /// run runs the garbage collector. #[instrument(skip_all)] pub async fn run(&self) { // Clone the shutdown channel. @@ -106,7 +106,7 @@ impl GC { } } - // evict_task_by_ttl evicts the task by ttl. + /// evict_task_by_ttl evicts the task by ttl. #[instrument(skip_all)] async fn evict_task_by_ttl(&self) -> Result<()> { info!("start to evict by task ttl"); @@ -124,7 +124,7 @@ impl GC { Ok(()) } - // evict_task_by_disk_usage evicts the task by disk usage. + /// evict_task_by_disk_usage evicts the task by disk usage. #[instrument(skip_all)] async fn evict_task_by_disk_usage(&self) -> Result<()> { let stats = fs2::statvfs(self.config.storage.dir.as_path())?; @@ -153,7 +153,7 @@ impl GC { Ok(()) } - // evict_task_space evicts the task by the given space. + /// evict_task_space evicts the task by the given space. #[instrument(skip_all)] async fn evict_task_space(&self, need_evict_space: u64) -> Result<()> { let mut tasks = self.storage.get_tasks()?; @@ -190,7 +190,7 @@ impl GC { Ok(()) } - // delete_task_from_scheduler deletes the task from the scheduler. + /// delete_task_from_scheduler deletes the task from the scheduler. #[instrument(skip_all)] async fn delete_task_from_scheduler(&self, task: metadata::Task) { self.scheduler_client @@ -204,7 +204,7 @@ impl GC { }); } - // evict_cache_task_by_ttl evicts the cache task by ttl. + /// evict_cache_task_by_ttl evicts the cache task by ttl. #[instrument(skip_all)] async fn evict_cache_task_by_ttl(&self) -> Result<()> { info!("start to evict by cache task ttl * 2"); @@ -222,7 +222,7 @@ impl GC { Ok(()) } - // evict_cache_task_by_disk_usage evicts the cache task by disk usage. + /// evict_cache_task_by_disk_usage evicts the cache task by disk usage. #[instrument(skip_all)] async fn evict_cache_task_by_disk_usage(&self) -> Result<()> { let stats = fs2::statvfs(self.config.storage.dir.as_path())?; @@ -251,7 +251,7 @@ impl GC { Ok(()) } - // evict_cache_task_space evicts the cache task by the given space. + /// evict_cache_task_space evicts the cache task by the given space. 
#[instrument(skip_all)] async fn evict_cache_task_space(&self, need_evict_space: u64) -> Result<()> { let mut tasks = self.storage.get_cache_tasks()?; @@ -286,7 +286,7 @@ impl GC { Ok(()) } - // delete_cache_task_from_scheduler deletes the cache task from the scheduler. + /// delete_cache_task_from_scheduler deletes the cache task from the scheduler. #[instrument(skip_all)] async fn delete_cache_task_from_scheduler(&self, task: metadata::CacheTask) { self.scheduler_client diff --git a/dragonfly-client/src/grpc/dfdaemon_download.rs b/dragonfly-client/src/grpc/dfdaemon_download.rs index b0d17bfe..88616afb 100644 --- a/dragonfly-client/src/grpc/dfdaemon_download.rs +++ b/dragonfly-client/src/grpc/dfdaemon_download.rs @@ -60,24 +60,24 @@ use tonic::{ use tower::service_fn; use tracing::{error, info, instrument, Instrument, Span}; -// DfdaemonDownloadServer is the grpc unix server of the download. +/// DfdaemonDownloadServer is the grpc unix server of the download. pub struct DfdaemonDownloadServer { - // socket_path is the path of the unix domain socket. + /// socket_path is the path of the unix domain socket. socket_path: PathBuf, - // service is the grpc service of the dfdaemon. + /// service is the grpc service of the dfdaemon. service: DfdaemonDownloadGRPCServer, - // shutdown is used to shutdown the grpc server. + /// shutdown is used to shutdown the grpc server. shutdown: shutdown::Shutdown, - // _shutdown_complete is used to notify the grpc server is shutdown. + /// _shutdown_complete is used to notify the grpc server is shutdown. _shutdown_complete: mpsc::UnboundedSender<()>, } -// DfdaemonDownloadServer implements the grpc server of the download. +/// DfdaemonDownloadServer implements the grpc server of the download. impl DfdaemonDownloadServer { - // new creates a new DfdaemonServer. + /// new creates a new DfdaemonServer. #[instrument(skip_all)] pub fn new( socket_path: PathBuf, @@ -105,7 +105,7 @@ impl DfdaemonDownloadServer { } } - // run starts the download server with unix domain socket. + /// run starts the download server with unix domain socket. #[instrument(skip_all)] pub async fn run(&mut self) { // Register the reflection service. @@ -156,25 +156,25 @@ impl DfdaemonDownloadServer { } } -// DfdaemonDownloadServerHandler is the handler of the dfdaemon download grpc service. +/// DfdaemonDownloadServerHandler is the handler of the dfdaemon download grpc service. pub struct DfdaemonDownloadServerHandler { - // socket_path is the path of the unix domain socket. + /// socket_path is the path of the unix domain socket. socket_path: PathBuf, - // task is the task manager. + /// task is the task manager. task: Arc, - // cache_task is the cache task manager. + /// cache_task is the cache task manager. cache_task: Arc, } -// DfdaemonDownloadServerHandler implements the dfdaemon download grpc service. +/// DfdaemonDownloadServerHandler implements the dfdaemon download grpc service. #[tonic::async_trait] impl DfdaemonDownload for DfdaemonDownloadServerHandler { - // DownloadTaskStream is the stream of the download task response. + /// DownloadTaskStream is the stream of the download task response. type DownloadTaskStream = ReceiverStream>; - // download_task tells the dfdaemon to download the task. + /// download_task tells the dfdaemon to download the task. 
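The `run` method of `DfdaemonDownloadServer` above serves the gRPC service over a unix domain socket and registers the reflection service. A rough, self-contained sketch of that wiring with tokio and tonic follows; the socket path is a placeholder, and a stand-in health service takes the place of the generated `DfdaemonDownloadGRPCServer` and the other registrations done in the real code.

```rust
use tokio::net::UnixListener;
use tokio_stream::wrappers::UnixListenerStream;
use tonic::transport::Server;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical socket path; the daemon takes it from its configuration.
    let socket_path = "/tmp/dfdaemon.sock";
    let _ = std::fs::remove_file(socket_path);

    // Accept connections on the unix socket and feed them to tonic as an incoming stream.
    let uds = UnixListener::bind(socket_path)?;
    let incoming = UnixListenerStream::new(uds);

    // Stand-in service so the sketch compiles without generated protobuf code.
    let (_health_reporter, health_service) = tonic_health::server::health_reporter();

    Server::builder()
        .add_service(health_service)
        .serve_with_incoming(incoming)
        .await?;
    Ok(())
}
```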
#[instrument(skip_all, fields(host_id, task_id, peer_id))] async fn download_task( &self, @@ -544,7 +544,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler { Ok(Response::new(ReceiverStream::new(out_stream_rx))) } - // stat_task gets the status of the task. + /// stat_task gets the status of the task. #[instrument(skip_all, fields(host_id, task_id))] async fn stat_task( &self, @@ -582,7 +582,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler { Ok(Response::new(task)) } - // delete_task calls the dfdaemon to delete the task. + /// delete_task calls the dfdaemon to delete the task. #[instrument(skip_all, fields(host_id, task_id))] async fn delete_task( &self, @@ -619,7 +619,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler { Ok(Response::new(())) } - // delete_host calls the scheduler to delete the host. + /// delete_host calls the scheduler to delete the host. #[instrument(skip_all, fields(host_id))] async fn delete_host(&self, _: Request<()>) -> Result, Status> { // Generate the host id. @@ -646,10 +646,10 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler { Ok(Response::new(())) } - // DownloadCacheTaskStream is the stream of the download cache task response. + /// DownloadCacheTaskStream is the stream of the download cache task response. type DownloadCacheTaskStream = ReceiverStream>; - // download_cache_task downloads the cache task. + /// download_cache_task downloads the cache task. #[instrument(skip_all, fields(host_id, task_id, peer_id))] async fn download_cache_task( &self, @@ -818,7 +818,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler { Ok(Response::new(ReceiverStream::new(out_stream_rx))) } - // upload_cache_task uploads the cache task. + /// upload_cache_task uploads the cache task. #[instrument(skip_all, fields(host_id, task_id, peer_id))] async fn upload_cache_task( &self, @@ -912,7 +912,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler { Ok(Response::new(task)) } - // stat_cache_task stats the cache task. + /// stat_cache_task stats the cache task. #[instrument(skip_all, fields(host_id, task_id))] async fn stat_cache_task( &self, @@ -949,7 +949,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler { Ok(Response::new(task)) } - // delete_cache_task deletes the cache task. + /// delete_cache_task deletes the cache task. #[instrument(skip_all, fields(host_id, task_id))] async fn delete_cache_task( &self, @@ -986,16 +986,16 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler { } } -// DfdaemonDownloadClient is a wrapper of DfdaemonDownloadGRPCClient. +/// DfdaemonDownloadClient is a wrapper of DfdaemonDownloadGRPCClient. #[derive(Clone)] pub struct DfdaemonDownloadClient { - // client is the grpc client of the dfdaemon. + /// client is the grpc client of the dfdaemon. pub client: DfdaemonDownloadGRPCClient, } -// DfdaemonDownloadClient implements the grpc client of the dfdaemon download. +/// DfdaemonDownloadClient implements the grpc client of the dfdaemon download. impl DfdaemonDownloadClient { - // new_unix creates a new DfdaemonDownloadClient with unix domain socket. + /// new_unix creates a new DfdaemonDownloadClient with unix domain socket. #[instrument(skip_all)] pub async fn new_unix(socket_path: PathBuf) -> ClientResult { // Ignore the uri because it is not used. @@ -1024,7 +1024,7 @@ impl DfdaemonDownloadClient { Ok(Self { client }) } - // download_task tells the dfdaemon to download the task. + /// download_task tells the dfdaemon to download the task. 
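`new_unix` above builds a channel that dials the daemon's unix domain socket instead of a TCP address; the source itself notes that the URI is ignored. The usual tonic pattern for this is a custom connector, sketched below. This is written against tonic 0.11-style connectors; newer releases wrap the stream in `TokioIo` first.

```rust
use std::path::PathBuf;

use tokio::net::UnixStream;
use tonic::transport::{Channel, Endpoint, Uri};
use tower::service_fn;

/// Sketch of the new_unix pattern: dial a unix socket through a custom connector.
async fn connect_unix(socket_path: PathBuf) -> Result<Channel, Box<dyn std::error::Error>> {
    let channel = Endpoint::try_from("http://[::]:50051")?
        .connect_with_connector(service_fn(move |_: Uri| {
            // The URI is unused; every connection goes to the unix domain socket.
            UnixStream::connect(socket_path.clone())
        }))
        .await?;
    Ok(channel)
}
```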
#[instrument(skip_all)] pub async fn download_task( &self, @@ -1050,7 +1050,7 @@ impl DfdaemonDownloadClient { Ok(response) } - // stat_task gets the status of the task. + /// stat_task gets the status of the task. #[instrument(skip_all)] pub async fn stat_task(&self, request: DfdaemonStatTaskRequest) -> ClientResult { let request = Self::make_request(request); @@ -1058,7 +1058,7 @@ impl DfdaemonDownloadClient { Ok(response.into_inner()) } - // delete_task tells the dfdaemon to delete the task. + /// delete_task tells the dfdaemon to delete the task. #[instrument(skip_all)] pub async fn delete_task(&self, request: DeleteTaskRequest) -> ClientResult<()> { let request = Self::make_request(request); @@ -1066,7 +1066,7 @@ impl DfdaemonDownloadClient { Ok(()) } - // download_cache_task downloads the cache task. + /// download_cache_task downloads the cache task. #[instrument(skip_all)] pub async fn download_cache_task( &self, @@ -1090,7 +1090,7 @@ impl DfdaemonDownloadClient { Ok(response) } - // upload_cache_task uploads the cache task. + /// upload_cache_task uploads the cache task. #[instrument(skip_all)] pub async fn upload_cache_task( &self, @@ -1114,7 +1114,7 @@ impl DfdaemonDownloadClient { Ok(response.into_inner()) } - // stat_cache_task stats the cache task. + /// stat_cache_task stats the cache task. #[instrument(skip_all)] pub async fn stat_cache_task(&self, request: StatCacheTaskRequest) -> ClientResult { let mut request = tonic::Request::new(request); @@ -1124,7 +1124,7 @@ impl DfdaemonDownloadClient { Ok(response.into_inner()) } - // delete_cache_task deletes the cache task. + /// delete_cache_task deletes the cache task. #[instrument(skip_all)] pub async fn delete_cache_task(&self, request: DeleteCacheTaskRequest) -> ClientResult<()> { let request = Self::make_request(request); @@ -1132,7 +1132,7 @@ impl DfdaemonDownloadClient { Ok(()) } - // make_request creates a new request with timeout. + /// make_request creates a new request with timeout. #[instrument(skip_all)] fn make_request(request: T) -> tonic::Request { let mut request = tonic::Request::new(request); diff --git a/dragonfly-client/src/grpc/dfdaemon_upload.rs b/dragonfly-client/src/grpc/dfdaemon_upload.rs index a1e28d1c..5618d7f1 100644 --- a/dragonfly-client/src/grpc/dfdaemon_upload.rs +++ b/dragonfly-client/src/grpc/dfdaemon_upload.rs @@ -54,24 +54,24 @@ use tonic::{ }; use tracing::{error, info, instrument, Instrument, Span}; -// DfdaemonUploadServer is the grpc server of the upload. +/// DfdaemonUploadServer is the grpc server of the upload. pub struct DfdaemonUploadServer { - // addr is the address of the grpc server. + /// addr is the address of the grpc server. addr: SocketAddr, - // service is the grpc service of the dfdaemon upload. + /// service is the grpc service of the dfdaemon upload. service: DfdaemonUploadGRPCServer, - // shutdown is used to shutdown the grpc server. + /// shutdown is used to shutdown the grpc server. shutdown: shutdown::Shutdown, - // _shutdown_complete is used to notify the grpc server is shutdown. + /// _shutdown_complete is used to notify the grpc server is shutdown. _shutdown_complete: mpsc::UnboundedSender<()>, } -// DfdaemonUploadServer implements the grpc server of the upload. +/// DfdaemonUploadServer implements the grpc server of the upload. impl DfdaemonUploadServer { - // new creates a new DfdaemonUploadServer. + /// new creates a new DfdaemonUploadServer. 
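Both wrapper clients in these hunks end with a small `make_request` helper that attaches the request timeout before a call goes out. Judging from the visible lines it is likely along the following lines; `REQUEST_TIMEOUT` mirrors the constant defined in `grpc/mod.rs` later in this diff.

```rust
use std::time::Duration;

/// Mirrors the REQUEST_TIMEOUT constant from grpc/mod.rs.
const REQUEST_TIMEOUT: Duration = Duration::from_secs(10);

/// Wrap a message in a tonic request and attach a per-request deadline,
/// which tonic sends as the grpc-timeout header.
fn make_request<T>(request: T) -> tonic::Request<T> {
    let mut request = tonic::Request::new(request);
    request.set_timeout(REQUEST_TIMEOUT);
    request
}
```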
#[instrument(skip_all)] pub fn new( config: Arc, @@ -100,7 +100,7 @@ impl DfdaemonUploadServer { } } - // run starts the upload server. + /// run starts the upload server. #[instrument(skip_all)] pub async fn run(&mut self) { // Register the reflection service. @@ -139,25 +139,25 @@ impl DfdaemonUploadServer { } } -// DfdaemonUploadServerHandler is the handler of the dfdaemon upload grpc service. +/// DfdaemonUploadServerHandler is the handler of the dfdaemon upload grpc service. pub struct DfdaemonUploadServerHandler { - // socket_path is the path of the unix domain socket. + /// socket_path is the path of the unix domain socket. socket_path: PathBuf, - // task is the task manager. + /// task is the task manager. task: Arc, - // cache_task is the cache task manager. + /// cache_task is the cache task manager. cache_task: Arc, } -// DfdaemonUploadServerHandler implements the dfdaemon upload grpc service. +/// DfdaemonUploadServerHandler implements the dfdaemon upload grpc service. #[tonic::async_trait] impl DfdaemonUpload for DfdaemonUploadServerHandler { - // DownloadTaskStream is the stream of the download task response. + /// DownloadTaskStream is the stream of the download task response. type DownloadTaskStream = ReceiverStream>; - // download_task downloads the task. + /// download_task downloads the task. #[instrument(skip_all, fields(host_id, task_id, peer_id))] async fn download_task( &self, @@ -530,7 +530,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler { Ok(Response::new(ReceiverStream::new(out_stream_rx))) } - // stat_task stats the task. + /// stat_task stats the task. #[instrument(skip_all, fields(host_id, task_id))] async fn stat_task(&self, request: Request) -> Result, Status> { // Clone the request. @@ -565,7 +565,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler { Ok(Response::new(task)) } - // delete_task deletes the task. + /// delete_task deletes the task. #[instrument(skip_all, fields(host_id, task_id))] async fn delete_task( &self, @@ -602,10 +602,10 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler { Ok(Response::new(())) } - // SyncPiecesStream is the stream of the sync pieces response. + /// SyncPiecesStream is the stream of the sync pieces response. type SyncPiecesStream = ReceiverStream>; - // sync_pieces provides the piece metadata for remote peer. + /// sync_pieces provides the piece metadata for remote peer. #[instrument(skip_all, fields(host_id, remote_host_id, task_id))] async fn sync_pieces( &self, @@ -734,7 +734,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler { Ok(Response::new(ReceiverStream::new(out_stream_rx))) } - // download_piece provides the piece content for remote peer. + /// download_piece provides the piece content for remote peer. #[instrument(skip_all, fields(host_id, remote_host_id, task_id, piece_id))] async fn download_piece( &self, @@ -829,10 +829,10 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler { })) } - // DownloadCacheTaskStream is the stream of the download cache task response. + /// DownloadCacheTaskStream is the stream of the download cache task response. type DownloadCacheTaskStream = ReceiverStream>; - // download_cache_task downloads the cache task. + /// download_cache_task downloads the cache task. #[instrument(skip_all, fields(host_id, task_id, peer_id))] async fn download_cache_task( &self, @@ -1001,7 +1001,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler { Ok(Response::new(ReceiverStream::new(out_stream_rx))) } - // stat_cache_task stats the cache task. 
+ /// stat_cache_task stats the cache task. #[instrument(skip_all, fields(host_id, task_id))] async fn stat_cache_task( &self, @@ -1038,7 +1038,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler { Ok(Response::new(task)) } - // delete_cache_task deletes the cache task. + /// delete_cache_task deletes the cache task. #[instrument(skip_all, fields(host_id, task_id))] async fn delete_cache_task( &self, @@ -1075,16 +1075,16 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler { } } -// DfdaemonUploadClient is a wrapper of DfdaemonUploadGRPCClient. +/// DfdaemonUploadClient is a wrapper of DfdaemonUploadGRPCClient. #[derive(Clone)] pub struct DfdaemonUploadClient { - // client is the grpc client of the dfdaemon upload. + /// client is the grpc client of the dfdaemon upload. pub client: DfdaemonUploadGRPCClient, } -// DfdaemonUploadClient implements the dfdaemon upload grpc client. +/// DfdaemonUploadClient implements the dfdaemon upload grpc client. impl DfdaemonUploadClient { - // new creates a new DfdaemonUploadClient. + /// new creates a new DfdaemonUploadClient. #[instrument(skip_all)] pub async fn new(addr: String) -> ClientResult { let channel = Channel::from_static(Box::leak(addr.clone().into_boxed_str())) @@ -1106,7 +1106,7 @@ impl DfdaemonUploadClient { Ok(Self { client }) } - // download_task downloads the task. + /// download_task downloads the task. #[instrument(skip_all)] pub async fn download_task( &self, @@ -1132,7 +1132,7 @@ impl DfdaemonUploadClient { Ok(response) } - // sync_pieces provides the piece metadata for remote peer. + /// sync_pieces provides the piece metadata for remote peer. #[instrument(skip_all)] pub async fn sync_pieces( &self, @@ -1143,7 +1143,7 @@ impl DfdaemonUploadClient { Ok(response) } - // download_piece provides the piece content for remote peer. + /// download_piece provides the piece content for remote peer. #[instrument(skip_all)] pub async fn download_piece( &self, @@ -1157,7 +1157,7 @@ impl DfdaemonUploadClient { Ok(response.into_inner()) } - // download_cache_task downloads the cache task. + /// download_cache_task downloads the cache task. #[instrument(skip_all)] pub async fn download_cache_task( &self, @@ -1181,7 +1181,7 @@ impl DfdaemonUploadClient { Ok(response) } - // stat_cache_task stats the cache task. + /// stat_cache_task stats the cache task. #[instrument(skip_all)] pub async fn stat_cache_task(&self, request: StatCacheTaskRequest) -> ClientResult { let request = Self::make_request(request); @@ -1189,7 +1189,7 @@ impl DfdaemonUploadClient { Ok(response.into_inner()) } - // delete_cache_task deletes the cache task. + /// delete_cache_task deletes the cache task. #[instrument(skip_all)] pub async fn delete_cache_task(&self, request: DeleteCacheTaskRequest) -> ClientResult<()> { let request = Self::make_request(request); @@ -1197,7 +1197,7 @@ impl DfdaemonUploadClient { Ok(()) } - // make_request creates a new request with timeout. + /// make_request creates a new request with timeout. #[instrument(skip_all)] fn make_request(request: T) -> tonic::Request { let mut request = tonic::Request::new(request); diff --git a/dragonfly-client/src/grpc/health.rs b/dragonfly-client/src/grpc/health.rs index c90a11d2..07676cad 100644 --- a/dragonfly-client/src/grpc/health.rs +++ b/dragonfly-client/src/grpc/health.rs @@ -28,16 +28,16 @@ use tonic_health::pb::{ use tower::service_fn; use tracing::{error, instrument}; -// HealthClient is a wrapper of HealthGRPCClient. +/// HealthClient is a wrapper of HealthGRPCClient. 
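`HealthClient` below wraps the generated `HealthGRPCClient` for the standard gRPC health protocol. A hypothetical caller might look like this sketch; the module path and the address are assumptions based on the file layout, not taken from this diff.

```rust
use tonic_health::pb::health_check_response::ServingStatus;

// Assumed module path for the wrapper shown below.
use dragonfly_client::grpc::health::HealthClient;

/// Returns true when the dfdaemon upload service reports SERVING.
async fn upload_is_serving(addr: &str) -> bool {
    match HealthClient::new(addr).await {
        Ok(client) => match client.check_dfdaemon_upload().await {
            Ok(response) => response.status == ServingStatus::Serving as i32,
            Err(_) => false,
        },
        Err(_) => false,
    }
}
```

For example, `upload_is_serving("http://127.0.0.1:4000").await` would probe a locally running daemon; the port is illustrative.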
#[derive(Clone)] pub struct HealthClient { - // client is the grpc client of the certificate. + /// client is the grpc client of the health. client: HealthGRPCClient, } -// HealthClient implements the grpc client of the health. +/// HealthClient implements the grpc client of the health. impl HealthClient { - // new creates a new HealthClient. + /// new creates a new HealthClient. #[instrument(skip_all)] pub async fn new(addr: &str) -> Result { let channel = Channel::from_shared(addr.to_string()) @@ -60,7 +60,7 @@ impl HealthClient { Ok(Self { client }) } - // new_unix creates a new HealthClient with unix domain socket. + /// new_unix creates a new HealthClient with unix domain socket. #[instrument(skip_all)] pub async fn new_unix(socket_path: PathBuf) -> Result { // Ignore the uri because it is not used. @@ -86,7 +86,7 @@ impl HealthClient { Ok(Self { client }) } - // check checks the health of the grpc service without service name. + /// check checks the health of the grpc service without service name. #[instrument(skip_all)] pub async fn check(&self) -> Result { let request = Self::make_request(HealthCheckRequest { @@ -96,7 +96,7 @@ impl HealthClient { Ok(response.into_inner()) } - // check_service checks the health of the grpc service with service name. + /// check_service checks the health of the grpc service with service name. #[instrument(skip_all)] pub async fn check_service(&self, service: String) -> Result { let request = Self::make_request(HealthCheckRequest { service }); @@ -104,21 +104,21 @@ impl HealthClient { Ok(response.into_inner()) } - // check_dfdaemon_download checks the health of the dfdaemon download service. + /// check_dfdaemon_download checks the health of the dfdaemon download service. #[instrument(skip_all)] pub async fn check_dfdaemon_download(&self) -> Result { self.check_service("dfdaemon.v2.DfdaemonDownload".to_string()) .await } - // check_dfdaemon_upload checks the health of the dfdaemon upload service. + /// check_dfdaemon_upload checks the health of the dfdaemon upload service. #[instrument(skip_all)] pub async fn check_dfdaemon_upload(&self) -> Result { self.check_service("dfdaemon.v2.DfdaemonUpload".to_string()) .await } - // make_request creates a new request with timeout. + /// make_request creates a new request with timeout. #[instrument(skip_all)] fn make_request(request: T) -> tonic::Request { let mut request = tonic::Request::new(request); diff --git a/dragonfly-client/src/grpc/manager.rs b/dragonfly-client/src/grpc/manager.rs index 10a31847..2363be27 100644 --- a/dragonfly-client/src/grpc/manager.rs +++ b/dragonfly-client/src/grpc/manager.rs @@ -27,16 +27,16 @@ use tonic::transport::Channel; use tonic_health::pb::health_check_response::ServingStatus; use tracing::{error, info, instrument, warn}; -// ManagerClient is a wrapper of ManagerGRPCClient. +/// ManagerClient is a wrapper of ManagerGRPCClient. #[derive(Clone)] pub struct ManagerClient { - // client is the grpc client of the manager. + /// client is the grpc client of the manager. pub client: ManagerGRPCClient, } -// ManagerClient implements the grpc client of the manager. +/// ManagerClient implements the grpc client of the manager. impl ManagerClient { - // new creates a new ManagerClient. + /// new creates a new ManagerClient. #[instrument(skip_all)] pub async fn new(addrs: Vec) -> Result { // Find the available manager address. @@ -91,7 +91,7 @@ impl ManagerClient { Ok(Self { client }) } - // list_schedulers lists all schedulers that best match the client.
+ /// list_schedulers lists all schedulers that best match the client. #[instrument(skip_all)] pub async fn list_schedulers( &self, @@ -102,7 +102,7 @@ impl ManagerClient { Ok(response.into_inner()) } - // update_seed_peer updates the seed peer information. + /// update_seed_peer updates the seed peer information. #[instrument(skip_all)] pub async fn update_seed_peer(&self, request: UpdateSeedPeerRequest) -> Result { let request = Self::make_request(request); @@ -110,7 +110,7 @@ impl ManagerClient { Ok(response.into_inner()) } - // delete_seed_peer deletes the seed peer information. + /// delete_seed_peer deletes the seed peer information. #[instrument(skip_all)] pub async fn delete_seed_peer(&self, request: DeleteSeedPeerRequest) -> Result<()> { let request = Self::make_request(request); @@ -118,7 +118,7 @@ impl ManagerClient { Ok(()) } - // make_request creates a new request with timeout. + /// make_request creates a new request with timeout. #[instrument(skip_all)] fn make_request(request: T) -> tonic::Request { let mut request = tonic::Request::new(request); diff --git a/dragonfly-client/src/grpc/mod.rs b/dragonfly-client/src/grpc/mod.rs index 8330957d..22dc67c5 100644 --- a/dragonfly-client/src/grpc/mod.rs +++ b/dragonfly-client/src/grpc/mod.rs @@ -31,31 +31,31 @@ pub mod manager; pub mod scheduler; pub mod security; -// CONNECT_TIMEOUT is the timeout for GRPC connection. +/// CONNECT_TIMEOUT is the timeout for GRPC connection. pub const CONNECT_TIMEOUT: Duration = Duration::from_secs(2); -// REQUEST_TIMEOUT is the timeout for GRPC requests. +/// REQUEST_TIMEOUT is the timeout for GRPC requests. pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(10); -// TCP_KEEPALIVE is the keepalive duration for TCP connection. +/// TCP_KEEPALIVE is the keepalive duration for TCP connection. pub const TCP_KEEPALIVE: Duration = Duration::from_secs(3600); -// HTTP2_KEEP_ALIVE_INTERVAL is the interval for HTTP2 keep alive. +/// HTTP2_KEEP_ALIVE_INTERVAL is the interval for HTTP2 keep alive. pub const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(300); -// HTTP2_KEEP_ALIVE_TIMEOUT is the timeout for HTTP2 keep alive. +/// HTTP2_KEEP_ALIVE_TIMEOUT is the timeout for HTTP2 keep alive. pub const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(20); -// MAX_FRAME_SIZE is the max frame size for GRPC, default is 12MB. +/// MAX_FRAME_SIZE is the max frame size for GRPC, default is 12MB. pub const MAX_FRAME_SIZE: u32 = 12 * 1024 * 1024; -// INITIAL_WINDOW_SIZE is the initial window size for GRPC, default is 12MB. +/// INITIAL_WINDOW_SIZE is the initial window size for GRPC, default is 12MB. pub const INITIAL_WINDOW_SIZE: u32 = 12 * 1024 * 1024; -// BUFFER_SIZE is the buffer size for GRPC, default is 64KB. +/// BUFFER_SIZE is the buffer size for GRPC, default is 64KB. pub const BUFFER_SIZE: usize = 64 * 1024; -// prefetch_task prefetches the task if prefetch flag is true. +/// prefetch_task prefetches the task if prefetch flag is true. #[instrument(skip_all)] pub async fn prefetch_task( socket_path: PathBuf, diff --git a/dragonfly-client/src/grpc/scheduler.rs b/dragonfly-client/src/grpc/scheduler.rs index 2e825baf..13b625ae 100644 --- a/dragonfly-client/src/grpc/scheduler.rs +++ b/dragonfly-client/src/grpc/scheduler.rs @@ -37,40 +37,40 @@ use tokio::task::JoinSet; use tonic::transport::Channel; use tracing::{error, info, instrument, Instrument}; -// VNode is the virtual node of the hashring. +/// VNode is the virtual node of the hashring. 
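The connection constants above (`CONNECT_TIMEOUT`, `REQUEST_TIMEOUT`, `TCP_KEEPALIVE`, the HTTP/2 keep-alive pair, and the window and buffer sizes) are applied when the clients build their channels. A hedged sketch of how such an endpoint is typically configured with tonic's builder follows; the exact set of builder calls in the real clients may differ.

```rust
use std::time::Duration;

use tonic::transport::{Channel, Endpoint};

// Values mirror the constants defined in grpc/mod.rs above.
const CONNECT_TIMEOUT: Duration = Duration::from_secs(2);
const REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
const TCP_KEEPALIVE: Duration = Duration::from_secs(3600);
const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(300);
const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(20);
const INITIAL_WINDOW_SIZE: u32 = 12 * 1024 * 1024;
const BUFFER_SIZE: usize = 64 * 1024;

/// Dial an address with the shared connection settings applied.
async fn dial(addr: String) -> Result<Channel, tonic::transport::Error> {
    Endpoint::from_shared(addr)?
        .connect_timeout(CONNECT_TIMEOUT)
        .timeout(REQUEST_TIMEOUT)
        .tcp_keepalive(Some(TCP_KEEPALIVE))
        .http2_keep_alive_interval(HTTP2_KEEP_ALIVE_INTERVAL)
        .keep_alive_timeout(HTTP2_KEEP_ALIVE_TIMEOUT)
        .initial_stream_window_size(INITIAL_WINDOW_SIZE)
        .buffer_size(BUFFER_SIZE)
        .connect()
        .await
}
```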
#[derive(Debug, Copy, Clone, Hash, PartialEq)] struct VNode { - // addr is the address of the virtual node. + /// addr is the address of the virtual node. addr: SocketAddr, } -// VNode implements the Display trait. +/// VNode implements the Display trait. impl std::fmt::Display for VNode { - // fmt formats the virtual node. + /// fmt formats the virtual node. fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.addr) } } -// SchedulerClient is a wrapper of SchedulerGRPCClient. +/// SchedulerClient is a wrapper of SchedulerGRPCClient. #[derive(Clone)] pub struct SchedulerClient { - // dynconfig is the dynamic configuration of the dfdaemon. + /// dynconfig is the dynamic configuration of the dfdaemon. dynconfig: Arc, - // available_schedulers is the available schedulers. + /// available_schedulers is the available schedulers. available_schedulers: Arc>>, - // available_scheduler_addrs is the addresses of available schedulers. + /// available_scheduler_addrs is the addresses of available schedulers. available_scheduler_addrs: Arc>>, - // hashring is the hashring of the scheduler. + /// hashring is the hashring of the scheduler. hashring: Arc>>, } -// SchedulerClient implements the grpc client of the scheduler. +/// SchedulerClient implements the grpc client of the scheduler. impl SchedulerClient { - // new creates a new SchedulerClient. + /// new creates a new SchedulerClient. #[instrument(skip_all)] pub async fn new(dynconfig: Arc) -> Result { let client = Self { @@ -84,7 +84,7 @@ impl SchedulerClient { Ok(client) } - // announce_peer announces the peer to the scheduler. + /// announce_peer announces the peer to the scheduler. #[instrument(skip_all)] pub async fn announce_peer( &self, @@ -100,7 +100,7 @@ impl SchedulerClient { Ok(response) } - // stat_peer gets the status of the peer. + /// stat_peer gets the status of the peer. #[instrument(skip(self))] pub async fn stat_peer(&self, request: StatPeerRequest) -> Result { let task_id = request.task_id.clone(); @@ -113,7 +113,7 @@ impl SchedulerClient { Ok(response.into_inner()) } - // delete_peer tells the scheduler that the peer is deleting. + /// delete_peer tells the scheduler that the peer is deleting. #[instrument(skip(self))] pub async fn delete_peer(&self, request: DeletePeerRequest) -> Result<()> { let task_id = request.task_id.clone(); @@ -125,7 +125,7 @@ impl SchedulerClient { Ok(()) } - // stat_task gets the status of the task. + /// stat_task gets the status of the task. #[instrument(skip(self))] pub async fn stat_task(&self, request: StatTaskRequest) -> Result { let task_id = request.task_id.clone(); @@ -138,7 +138,7 @@ impl SchedulerClient { Ok(response.into_inner()) } - // delete_task tells the scheduler that the task is deleting. + /// delete_task tells the scheduler that the task is deleting. #[instrument(skip(self))] pub async fn delete_task(&self, request: DeleteTaskRequest) -> Result<()> { let task_id = request.task_id.clone(); @@ -150,7 +150,7 @@ impl SchedulerClient { Ok(()) } - // announce_host announces the host to the scheduler. + /// announce_host announces the host to the scheduler. #[instrument(skip(self))] pub async fn announce_host(&self, request: AnnounceHostRequest) -> Result<()> { // Update scheduler addresses of the client. @@ -208,7 +208,7 @@ impl SchedulerClient { Ok(()) } - // init_announce_host announces the host to the scheduler. + /// init_announce_host announces the host to the scheduler. 
#[instrument(skip(self))] pub async fn init_announce_host(&self, request: AnnounceHostRequest) -> Result<()> { let mut join_set = JoinSet::new(); @@ -263,7 +263,7 @@ impl SchedulerClient { Ok(()) } - // delete_host tells the scheduler that the host is deleting. + /// delete_host tells the scheduler that the host is deleting. #[instrument(skip(self))] pub async fn delete_host(&self, request: DeleteHostRequest) -> Result<()> { // Update scheduler addresses of the client. @@ -321,7 +321,7 @@ impl SchedulerClient { Ok(()) } - // announce_cache_peer announces the cache peer to the scheduler. + /// announce_cache_peer announces the cache peer to the scheduler. #[instrument(skip_all)] pub async fn announce_cache_peer( &self, @@ -337,7 +337,7 @@ impl SchedulerClient { Ok(response) } - // stat_cache_peer gets the status of the cache peer. + /// stat_cache_peer gets the status of the cache peer. #[instrument(skip(self))] pub async fn stat_cache_peer(&self, request: StatCachePeerRequest) -> Result { let task_id = request.task_id.clone(); @@ -350,7 +350,7 @@ impl SchedulerClient { Ok(response.into_inner()) } - // delete_cache_peer tells the scheduler that the cache peer is deleting. + /// delete_cache_peer tells the scheduler that the cache peer is deleting. #[instrument(skip(self))] pub async fn delete_cache_peer(&self, request: DeleteCachePeerRequest) -> Result<()> { let task_id = request.task_id.clone(); @@ -362,7 +362,7 @@ impl SchedulerClient { Ok(()) } - // upload_cache_task_started uploads the metadata of the cache task started. + /// upload_cache_task_started uploads the metadata of the cache task started. #[instrument(skip(self))] pub async fn upload_cache_task_started( &self, @@ -377,7 +377,7 @@ impl SchedulerClient { Ok(()) } - // upload_cache_task_finished uploads the metadata of the cache task finished. + /// upload_cache_task_finished uploads the metadata of the cache task finished. #[instrument(skip_all)] pub async fn upload_cache_task_finished( &self, @@ -393,7 +393,7 @@ impl SchedulerClient { Ok(response.into_inner()) } - // upload_cache_task_failed uploads the metadata of the cache task failed. + /// upload_cache_task_failed uploads the metadata of the cache task failed. #[instrument(skip_all)] pub async fn upload_cache_task_failed( &self, @@ -408,7 +408,7 @@ impl SchedulerClient { Ok(()) } - // stat_cache_task gets the status of the cache task. + /// stat_cache_task gets the status of the cache task. #[instrument(skip(self))] pub async fn stat_cache_task(&self, request: StatCacheTaskRequest) -> Result { let task_id = request.task_id.clone(); @@ -421,7 +421,7 @@ impl SchedulerClient { Ok(response.into_inner()) } - // delete_cache_task tells the scheduler that the cache task is deleting. + /// delete_cache_task tells the scheduler that the cache task is deleting. #[instrument(skip(self))] pub async fn delete_cache_task(&self, request: DeleteCacheTaskRequest) -> Result<()> { let task_id = request.task_id.clone(); @@ -433,7 +433,7 @@ impl SchedulerClient { Ok(()) } - // client gets the grpc client of the scheduler. + /// client gets the grpc client of the scheduler. #[instrument(skip(self))] async fn client( &self, @@ -480,7 +480,7 @@ impl SchedulerClient { .max_encoding_message_size(usize::MAX)) } - // update_available_scheduler_addrs updates the addresses of available schedulers. + /// update_available_scheduler_addrs updates the addresses of available schedulers. 
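`client()` above resolves which scheduler to talk to; the `hashring` field and the `VNode` type earlier in this file point to consistent hashing over the scheduler addresses, typically keyed by task id so a task keeps reaching the same scheduler while it stays available. A small illustrative sketch with the `hashring` crate (API assumed from its public documentation):

```rust
use std::net::SocketAddr;

use hashring::HashRing;

/// Mirrors the VNode struct above: the ring stores scheduler addresses.
#[derive(Debug, Copy, Clone, Hash, PartialEq)]
struct VNode {
    addr: SocketAddr,
}

/// Rebuild the ring from the available scheduler addresses and pick one per task id.
fn pick_scheduler(addrs: &[SocketAddr], task_id: &str) -> Option<SocketAddr> {
    let mut ring: HashRing<VNode> = HashRing::new();
    for addr in addrs {
        ring.add(VNode { addr: *addr });
    }

    ring.get(&task_id).map(|vnode| vnode.addr)
}
```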
#[instrument(skip(self))] async fn update_available_scheduler_addrs(&self) -> Result<()> { // Get the endpoints of available schedulers. @@ -566,7 +566,7 @@ impl SchedulerClient { Ok(()) } - // refresh_available_scheduler_addrs refreshes addresses of available schedulers. + /// refresh_available_scheduler_addrs refreshes addresses of available schedulers. #[instrument(skip(self))] async fn refresh_available_scheduler_addrs(&self) -> Result<()> { // Refresh the dynamic configuration. @@ -576,7 +576,7 @@ impl SchedulerClient { self.update_available_scheduler_addrs().await } - // make_request creates a new request with timeout. + /// make_request creates a new request with timeout. #[instrument(skip_all)] fn make_request(request: T) -> tonic::Request { let mut request = tonic::Request::new(request); diff --git a/dragonfly-client/src/grpc/security.rs b/dragonfly-client/src/grpc/security.rs index cc860465..28305118 100644 --- a/dragonfly-client/src/grpc/security.rs +++ b/dragonfly-client/src/grpc/security.rs @@ -25,16 +25,16 @@ use dragonfly_client_core::{ use tonic::transport::Channel; use tracing::instrument; -// CertificateClient is a wrapper of CertificateGRPCClient. +/// CertificateClient is a wrapper of CertificateGRPCClient. #[derive(Clone)] pub struct CertificateClient { - // client is the grpc client of the certificate. + /// client is the grpc client of the certificate. pub client: CertificateGRPCClient, } -// CertificateClient implements the grpc client of the certificate. +/// CertificateClient implements the grpc client of the certificate. impl CertificateClient { - // new creates a new CertificateClient. + /// new creates a new CertificateClient. #[instrument(skip_all)] pub async fn new(addr: String) -> Result { let channel = Channel::from_static(Box::leak(addr.into_boxed_str())) @@ -49,7 +49,7 @@ impl CertificateClient { Ok(Self { client }) } - // issue_certificate issues a certificate for the peer. + /// issue_certificate issues a certificate for the peer. #[instrument(skip_all)] pub async fn issue_certificate( &self, @@ -60,7 +60,7 @@ impl CertificateClient { Ok(response.into_inner()) } - // make_request creates a new request with timeout. + /// make_request creates a new request with timeout. #[instrument(skip_all)] fn make_request(request: T) -> tonic::Request { let mut request = tonic::Request::new(request); diff --git a/dragonfly-client/src/health/mod.rs b/dragonfly-client/src/health/mod.rs index ac333fea..f03eb7a7 100644 --- a/dragonfly-client/src/health/mod.rs +++ b/dragonfly-client/src/health/mod.rs @@ -20,22 +20,22 @@ use tokio::sync::mpsc; use tracing::{info, instrument}; use warp::{Filter, Rejection, Reply}; -// Health is the health server. +/// Health is the health server. #[derive(Debug)] pub struct Health { - // addr is the address of the health server. + /// addr is the address of the health server. addr: SocketAddr, - // shutdown is used to shutdown the health server. + /// shutdown is used to shutdown the health server. shutdown: shutdown::Shutdown, - // _shutdown_complete is used to notify the health server is shutdown. + /// _shutdown_complete is used to notify the health server is shutdown. _shutdown_complete: mpsc::UnboundedSender<()>, } -// Health implements the health server. +/// Health implements the health server. impl Health { - // new creates a new Health. + /// new creates a new Health. #[instrument(skip_all)] pub fn new( addr: SocketAddr, @@ -49,7 +49,7 @@ impl Health { } } - // run starts the health server. + /// run starts the health server. 
#[instrument(skip_all)] pub async fn run(&self) { // Clone the shutdown channel. @@ -75,7 +75,7 @@ impl Health { } } - // health_handler handles the health check request. + /// health_handler handles the health check request. #[instrument(skip_all)] async fn health_handler() -> Result { Ok(warp::reply()) diff --git a/dragonfly-client/src/metrics/mod.rs b/dragonfly-client/src/metrics/mod.rs index ca3fcac7..3bccfcb5 100644 --- a/dragonfly-client/src/metrics/mod.rs +++ b/dragonfly-client/src/metrics/mod.rs @@ -31,201 +31,201 @@ use tokio::sync::mpsc; use tracing::{error, info, instrument, warn}; use warp::{Filter, Rejection, Reply}; -// DOWNLOAD_TASK_LEVEL1_DURATION_THRESHOLD is the threshold of download task level1 duration for -// recording slow download task. +/// DOWNLOAD_TASK_LEVEL1_DURATION_THRESHOLD is the threshold of download task level1 duration for +/// recording slow download task. const DOWNLOAD_TASK_LEVEL1_DURATION_THRESHOLD: Duration = Duration::from_millis(500); -// UPLOAD_TASK_LEVEL1_DURATION_THRESHOLD is the threshold of upload task level1 duration for -// recording slow upload task. +/// UPLOAD_TASK_LEVEL1_DURATION_THRESHOLD is the threshold of upload task level1 duration for +/// recording slow upload task. const UPLOAD_TASK_LEVEL1_DURATION_THRESHOLD: Duration = Duration::from_millis(500); lazy_static! { - // REGISTRY is used to register all metrics. + /// REGISTRY is used to register all metrics. pub static ref REGISTRY: Registry = Registry::new(); - // VERSION_GAUGE is used to record the version info of the service. + /// VERSION_GAUGE is used to record the version info of the service. pub static ref VERSION_GAUGE: IntGaugeVec = IntGaugeVec::new( Opts::new("version", "Version info of the service.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["git_version", "git_commit", "platform", "build_time"] ).expect("metric can be created"); - // UPLOAD_TASK_COUNT is used to count the number of upload tasks. + /// UPLOAD_TASK_COUNT is used to count the number of upload tasks. pub static ref UPLOAD_TASK_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("upload_task_total", "Counter of the number of the upload task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["type", "tag", "app"] ).expect("metric can be created"); - // UPLOAD_TASK_FAILURE_COUNT is used to count the failed number of upload tasks. + /// UPLOAD_TASK_FAILURE_COUNT is used to count the failed number of upload tasks. pub static ref UPLOAD_TASK_FAILURE_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("upload_task_failure_total", "Counter of the number of failed of the upload task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["type", "tag", "app"] ).expect("metric can be created"); - // CONCURRENT_UPLOAD_TASK_GAUGE is used to gauge the number of concurrent upload tasks. + /// CONCURRENT_UPLOAD_TASK_GAUGE is used to gauge the number of concurrent upload tasks. pub static ref CONCURRENT_UPLOAD_TASK_GAUGE: IntGaugeVec = IntGaugeVec::new( Opts::new("concurrent_upload_task_total", "Gauge of the number of concurrent of the upload task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["type", "tag", "app"] ).expect("metric can be created"); - // UPLOAD_TASK_DURATION is used to record the upload task duration. + /// UPLOAD_TASK_DURATION is used to record the upload task duration. 
pub static ref UPLOAD_TASK_DURATION: HistogramVec = HistogramVec::new( HistogramOpts::new("upload_task_duration_milliseconds", "Histogram of the upload task duration.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME).buckets(exponential_buckets(1.0, 2.0, 24).unwrap()), &["task_type", "task_size_level"] ).expect("metric can be created"); - // DOWNLOAD_TASK_COUNT is used to count the number of download tasks. + /// DOWNLOAD_TASK_COUNT is used to count the number of download tasks. pub static ref DOWNLOAD_TASK_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("download_task_total", "Counter of the number of the download task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["type", "tag", "app", "priority"] ).expect("metric can be created"); - // DOWNLOAD_TASK_FAILURE_COUNT is used to count the failed number of download tasks. + /// DOWNLOAD_TASK_FAILURE_COUNT is used to count the failed number of download tasks. pub static ref DOWNLOAD_TASK_FAILURE_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("download_task_failure_total", "Counter of the number of failed of the download task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["type", "tag", "app", "priority"] ).expect("metric can be created"); - // PREFETCH_TASK_COUNT is used to count the number of prefetch tasks. + /// PREFETCH_TASK_COUNT is used to count the number of prefetch tasks. pub static ref PREFETCH_TASK_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("prefetch_task_total", "Counter of the number of the prefetch task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["type", "tag", "app", "priority"] ).expect("metric can be created"); - // PREFETCH_TASK_FAILURE_COUNT is used to count the failed number of prefetch tasks. + /// PREFETCH_TASK_FAILURE_COUNT is used to count the failed number of prefetch tasks. pub static ref PREFETCH_TASK_FAILURE_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("prefetch_task_failure_total", "Counter of the number of failed of the prefetch task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["type", "tag", "app", "priority"] ).expect("metric can be created"); - // CONCURRENT_DOWNLOAD_TASK_GAUGE is used to gauge the number of concurrent download tasks. + /// CONCURRENT_DOWNLOAD_TASK_GAUGE is used to gauge the number of concurrent download tasks. pub static ref CONCURRENT_DOWNLOAD_TASK_GAUGE: IntGaugeVec = IntGaugeVec::new( Opts::new("concurrent_download_task_total", "Gauge of the number of concurrent of the download task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["type", "tag", "app", "priority"] ).expect("metric can be created"); - // CONCURRENT_UPLOAD_PIECE_GAUGE is used to gauge the number of concurrent upload pieces. + /// CONCURRENT_UPLOAD_PIECE_GAUGE is used to gauge the number of concurrent upload pieces. pub static ref CONCURRENT_UPLOAD_PIECE_GAUGE: IntGaugeVec = IntGaugeVec::new( Opts::new("concurrent_upload_piece_total", "Gauge of the number of concurrent of the upload piece.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &[] ).expect("metric can be created"); - // DOWNLOAD_TRAFFIC is used to count the download traffic. + /// DOWNLOAD_TRAFFIC is used to count the download traffic. 
pub static ref DOWNLOAD_TRAFFIC: IntCounterVec = IntCounterVec::new( Opts::new("download_traffic", "Counter of the number of the download traffic.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["type", "task_type"] ).expect("metric can be created"); - // UPLOAD_TRAFFIC is used to count the upload traffic. + /// UPLOAD_TRAFFIC is used to count the upload traffic. pub static ref UPLOAD_TRAFFIC: IntCounterVec = IntCounterVec::new( Opts::new("upload_traffic", "Counter of the number of the upload traffic.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["task_type"] ).expect("metric can be created"); - // DOWNLOAD_TASK_DURATION is used to record the download task duration. + /// DOWNLOAD_TASK_DURATION is used to record the download task duration. pub static ref DOWNLOAD_TASK_DURATION: HistogramVec = HistogramVec::new( HistogramOpts::new("download_task_duration_milliseconds", "Histogram of the download task duration.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME).buckets(exponential_buckets(1.0, 2.0, 24).unwrap()), &["task_type", "task_size_level"] ).expect("metric can be created"); - // BACKEND_REQUEST_COUNT is used to count the number of backend requset. + /// BACKEND_REQUEST_COUNT is used to count the number of backend requests. pub static ref BACKEND_REQUEST_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("backend_request_total", "Counter of the number of the backend request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["scheme", "method"] ).expect("metric can be created"); - // BACKEND_REQUEST_FAILURE_COUNT is used to count the failed number of backend request. + /// BACKEND_REQUEST_FAILURE_COUNT is used to count the failed number of backend request. pub static ref BACKEND_REQUEST_FAILURE_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("backend_request_failure_total", "Counter of the number of failed of the backend request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["scheme", "method"] ).expect("metric can be created"); - // BACKEND_REQUEST_DURATION is used to record the backend request duration. + /// BACKEND_REQUEST_DURATION is used to record the backend request duration. pub static ref BACKEND_REQUEST_DURATION: HistogramVec = HistogramVec::new( HistogramOpts::new("backend_request_duration_milliseconds", "Histogram of the backend request duration.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME).buckets(exponential_buckets(1.0, 2.0, 24).unwrap()), &["scheme", "method"] ).expect("metric can be created"); - // PROXY_REQUEST_COUNT is used to count the number of proxy requset. + /// PROXY_REQUEST_COUNT is used to count the number of proxy requests. pub static ref PROXY_REQUEST_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("proxy_request_total", "Counter of the number of the proxy request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &[] ).expect("metric can be created"); - // PROXY_REQUEST_FAILURE_COUNT is used to count the failed number of proxy request. + /// PROXY_REQUEST_FAILURE_COUNT is used to count the failed number of proxy request.
pub static ref PROXY_REQUEST_FAILURE_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("proxy_request_failure_total", "Counter of the number of failed of the proxy request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &[] ).expect("metric can be created"); - // STAT_TASK_COUNT is used to count the number of stat tasks. + /// STAT_TASK_COUNT is used to count the number of stat tasks. pub static ref STAT_TASK_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("stat_task_total", "Counter of the number of the stat task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["type"] ).expect("metric can be created"); - // STAT_TASK_FAILURE_COUNT is used to count the failed number of stat tasks. + /// STAT_TASK_FAILURE_COUNT is used to count the failed number of stat tasks. pub static ref STAT_TASK_FAILURE_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("stat_task_failure_total", "Counter of the number of failed of the stat task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["type"] ).expect("metric can be created"); - // DELETE_TASK_COUNT is used to count the number of delete tasks. + /// DELETE_TASK_COUNT is used to count the number of delete tasks. pub static ref DELETE_TASK_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("delete_task_total", "Counter of the number of the delete task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["type"] ).expect("metric can be created"); - // DELETE_TASK_FAILURE_COUNT is used to count the failed number of delete tasks. + /// DELETE_TASK_FAILURE_COUNT is used to count the failed number of delete tasks. pub static ref DELETE_TASK_FAILURE_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("delete_task_failure_total", "Counter of the number of failed of the delete task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &["type"] ).expect("metric can be created"); - // DELETE_HOST_COUNT is used to count the number of delete host. + /// DELETE_HOST_COUNT is used to count the number of delete host. pub static ref DELETE_HOST_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("delete_host_total", "Counter of the number of the delete host.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &[] ).expect("metric can be created"); - // DELETE_HOST_FAILURE_COUNT is used to count the failed number of delete host. + /// DELETE_HOST_FAILURE_COUNT is used to count the failed number of delete host. pub static ref DELETE_HOST_FAILURE_COUNT: IntCounterVec = IntCounterVec::new( Opts::new("delete_host_failure_total", "Counter of the number of failed of the delete host.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &[] ).expect("metric can be created"); - // DISK_SPACE is used to count of the disk space. + /// DISK_SPACE is used to count of the disk space. pub static ref DISK_SPACE: IntGaugeVec = IntGaugeVec::new( Opts::new("disk_space_total", "Gauge of the disk space in bytes").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), &[] ).expect("metric can be created"); - // DISK_USAGE_SPACE is used to count of the disk usage space. + /// DISK_USAGE_SPACE is used to count of the disk usage space. 
pub static ref DISK_USAGE_SPACE: IntGaugeVec = IntGaugeVec::new( Opts::new("disk_usage_space_total", "Gauge of the disk usage space in bytes").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), @@ -233,76 +233,76 @@ lazy_static! { ).expect("metric can be created"); } -// TaskSize represents the size of the task. +/// TaskSize represents the size of the task. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum TaskSize { - // Level0 represents unknown size. + /// Level0 represents unknown size. Level0, - // Level0 represents size range is from 0 to 1M. + /// Level1 represents the size range from 0 to 1M. Level1, - // Level1 represents size range is from 1M to 4M. + /// Level2 represents the size range from 1M to 4M. Level2, - // Level2 represents size range is from 4M to 8M. + /// Level3 represents the size range from 4M to 8M. Level3, - // Level3 represents size range is from 8M to 16M. + /// Level4 represents the size range from 8M to 16M. Level4, - // Level4 represents size range is from 16M to 32M. + /// Level5 represents the size range from 16M to 32M. Level5, - // Level5 represents size range is from 32M to 64M. + /// Level6 represents the size range from 32M to 64M. Level6, - // Level6 represents size range is from 64M to 128M. + /// Level7 represents the size range from 64M to 128M. Level7, - // Level7 represents size range is from 128M to 256M. + /// Level8 represents the size range from 128M to 256M. Level8, - // Level8 represents size range is from 256M to 512M. + /// Level9 represents the size range from 256M to 512M. Level9, - // Level9 represents size range is from 512M to 1G. + /// Level10 represents the size range from 512M to 1G. Level10, - // Level10 represents size range is from 1G to 4G. + /// Level11 represents the size range from 1G to 4G. Level11, - // Level11 represents size range is from 4G to 8G. + /// Level12 represents the size range from 4G to 8G. Level12, - // Level12 represents size range is from 8G to 16G. + /// Level13 represents the size range from 8G to 16G. Level13, - // Level13 represents size range is from 16G to 32G. + /// Level14 represents the size range from 16G to 32G. Level14, - // Level14 represents size range is from 32G to 64G. + /// Level15 represents the size range from 32G to 64G. Level15, - // Level15 represents size range is from 64G to 128G. + /// Level16 represents the size range from 64G to 128G. Level16, - // Level16 represents size range is from 128G to 256G. + /// Level17 represents the size range from 128G to 256G. Level17, - // Level17 represents size range is from 256G to 512G. + /// Level18 represents the size range from 256G to 512G. Level18, - // Level18 represents size range is from 512G to 1T. + /// Level19 represents the size range from 512G to 1T. Level19, - // Level20 represents size is greater than 1T. + /// Level20 represents a size greater than 1T. Level20, } -// TaskSize implements the Display trait. +/// TaskSize implements the Display trait. impl std::fmt::Display for TaskSize { - // fmt formats the TaskSize. + /// fmt formats the TaskSize. fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { TaskSize::Level0 => write!(f, "0"), @@ -330,9 +330,9 @@ impl std::fmt::Display for TaskSize { } } -// TaskSize implements the TaskSize. +/// TaskSize implements the size level calculation. impl TaskSize { - // calculate_size_level calculates the size level according to the size. + /// calculate_size_level calculates the size level according to the size.
pub fn calculate_size_level(size: u64) -> Self { match size { 0 => TaskSize::Level0, @@ -360,7 +360,7 @@ impl TaskSize { } } -// collect_upload_task_started_metrics collects the upload task started metrics. +/// collect_upload_task_started_metrics collects the upload task started metrics. pub fn collect_upload_task_started_metrics(typ: i32, tag: &str, app: &str) { UPLOAD_TASK_COUNT .with_label_values(&[typ.to_string().as_str(), tag, app]) @@ -371,7 +371,7 @@ pub fn collect_upload_task_started_metrics(typ: i32, tag: &str, app: &str) { .inc(); } -// collect_upload_task_finished_metrics collects the upload task finished metrics. +/// collect_upload_task_finished_metrics collects the upload task finished metrics. pub fn collect_upload_task_finished_metrics( typ: i32, tag: &str, @@ -399,7 +399,7 @@ pub fn collect_upload_task_finished_metrics( .dec(); } -// collect_upload_task_failure_metrics collects the upload task failure metrics. +/// collect_upload_task_failure_metrics collects the upload task failure metrics. pub fn collect_upload_task_failure_metrics(typ: i32, tag: &str, app: &str) { UPLOAD_TASK_FAILURE_COUNT .with_label_values(&[typ.to_string().as_str(), tag, app]) @@ -410,7 +410,7 @@ pub fn collect_upload_task_failure_metrics(typ: i32, tag: &str, app: &str) { .dec(); } -// collect_download_task_started_metrics collects the download task started metrics. +/// collect_download_task_started_metrics collects the download task started metrics. pub fn collect_download_task_started_metrics(typ: i32, tag: &str, app: &str, priority: &str) { DOWNLOAD_TASK_COUNT .with_label_values(&[typ.to_string().as_str(), tag, app, priority]) @@ -421,7 +421,7 @@ pub fn collect_download_task_started_metrics(typ: i32, tag: &str, app: &str, pri .inc(); } -// collect_download_task_finished_metrics collects the download task finished metrics. +/// collect_download_task_finished_metrics collects the download task finished metrics. pub fn collect_download_task_finished_metrics( typ: i32, tag: &str, @@ -457,7 +457,7 @@ pub fn collect_download_task_finished_metrics( .dec(); } -// collect_download_task_failure_metrics collects the download task failure metrics. +/// collect_download_task_failure_metrics collects the download task failure metrics. pub fn collect_download_task_failure_metrics(typ: i32, tag: &str, app: &str, priority: &str) { DOWNLOAD_TASK_FAILURE_COUNT .with_label_values(&[typ.to_string().as_str(), tag, app, priority]) @@ -468,119 +468,119 @@ pub fn collect_download_task_failure_metrics(typ: i32, tag: &str, app: &str, pri .dec(); } -// collect_prefetch_task_started_metrics collects the prefetch task started metrics. +/// collect_prefetch_task_started_metrics collects the prefetch task started metrics. pub fn collect_prefetch_task_started_metrics(typ: i32, tag: &str, app: &str, priority: &str) { PREFETCH_TASK_COUNT .with_label_values(&[typ.to_string().as_str(), tag, app, priority]) .inc(); } -// collect_prefetch_task_failure_metrics collects the prefetch task failure metrics. +/// collect_prefetch_task_failure_metrics collects the prefetch task failure metrics. pub fn collect_prefetch_task_failure_metrics(typ: i32, tag: &str, app: &str, priority: &str) { PREFETCH_TASK_FAILURE_COUNT .with_label_values(&[typ.to_string().as_str(), tag, app, priority]) .inc(); } -// collect_download_piece_traffic_metrics collects the download piece traffic metrics. +/// collect_download_piece_traffic_metrics collects the download piece traffic metrics. 
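`calculate_size_level` above maps a byte count onto one of the `TaskSize` levels, and the finished-task collectors pair that level with the duration histograms. A condensed, self-contained sketch of the same idea; the metric name and the truncated level mapping are illustrative, not the crate's real definitions.

```rust
use std::time::Duration;

use lazy_static::lazy_static;
use prometheus::{exponential_buckets, HistogramOpts, HistogramVec};

lazy_static! {
    // Simplified stand-in for DOWNLOAD_TASK_DURATION.
    static ref TASK_DURATION: HistogramVec = HistogramVec::new(
        HistogramOpts::new("task_duration_milliseconds", "Histogram of the task duration.")
            .buckets(exponential_buckets(1.0, 2.0, 24).unwrap()),
        &["task_type", "task_size_level"]
    )
    .expect("metric can be created");
}

/// Coarse size-level mapping in the spirit of calculate_size_level (truncated).
fn size_level(size: u64) -> &'static str {
    const MIB: u64 = 1024 * 1024;
    match size {
        0 => "0",
        s if s <= MIB => "1",
        s if s <= 4 * MIB => "2",
        _ => "3+",
    }
}

/// Observe one finished task, labelled by its type and size level.
fn observe_task(task_type: i32, content_length: u64, cost: Duration) {
    TASK_DURATION
        .with_label_values(&[task_type.to_string().as_str(), size_level(content_length)])
        .observe(cost.as_millis() as f64);
}
```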
pub fn collect_download_piece_traffic_metrics(typ: &TrafficType, task_type: i32, length: u64) { DOWNLOAD_TRAFFIC .with_label_values(&[typ.as_str_name(), task_type.to_string().as_str()]) .inc_by(length); } -// collect_upload_piece_started_metrics collects the upload piece started metrics. +/// collect_upload_piece_started_metrics collects the upload piece started metrics. pub fn collect_upload_piece_started_metrics() { CONCURRENT_UPLOAD_PIECE_GAUGE.with_label_values(&[]).inc(); } -// collect_upload_piece_finished_metrics collects the upload piece finished metrics. +/// collect_upload_piece_finished_metrics collects the upload piece finished metrics. pub fn collect_upload_piece_finished_metrics() { CONCURRENT_UPLOAD_PIECE_GAUGE.with_label_values(&[]).dec(); } -// collect_upload_piece_traffic_metrics collects the upload piece traffic metrics. +/// collect_upload_piece_traffic_metrics collects the upload piece traffic metrics. pub fn collect_upload_piece_traffic_metrics(task_type: i32, length: u64) { UPLOAD_TRAFFIC .with_label_values(&[task_type.to_string().as_str()]) .inc_by(length); } -// collect_upload_piece_failure_metrics collects the upload piece failure metrics. +/// collect_upload_piece_failure_metrics collects the upload piece failure metrics. pub fn collect_upload_piece_failure_metrics() { CONCURRENT_UPLOAD_PIECE_GAUGE.with_label_values(&[]).dec(); } -// collect_backend_request_started_metrics collects the backend request started metrics. +/// collect_backend_request_started_metrics collects the backend request started metrics. pub fn collect_backend_request_started_metrics(scheme: &str, method: &str) { BACKEND_REQUEST_COUNT .with_label_values(&[scheme, method]) .inc(); } -// collect_backend_request_failure_metrics collects the backend request failure metrics. +/// collect_backend_request_failure_metrics collects the backend request failure metrics. pub fn collect_backend_request_failure_metrics(scheme: &str, method: &str) { BACKEND_REQUEST_FAILURE_COUNT .with_label_values(&[scheme, method]) .inc(); } -// collect_backend_request_finished_metrics collects the backend request finished metrics. +/// collect_backend_request_finished_metrics collects the backend request finished metrics. pub fn collect_backend_request_finished_metrics(scheme: &str, method: &str, cost: Duration) { BACKEND_REQUEST_DURATION .with_label_values(&[scheme, method]) .observe(cost.as_millis() as f64); } -// collect_proxy_request_started_metrics collects the proxy request started metrics. +/// collect_proxy_request_started_metrics collects the proxy request started metrics. pub fn collect_proxy_request_started_metrics() { PROXY_REQUEST_COUNT.with_label_values(&[]).inc(); } -// collect_proxy_request_failure_metrics collects the proxy request failure metrics. +/// collect_proxy_request_failure_metrics collects the proxy request failure metrics. pub fn collect_proxy_request_failure_metrics() { PROXY_REQUEST_FAILURE_COUNT.with_label_values(&[]).inc(); } -// collect_stat_task_started_metrics collects the stat task started metrics. +/// collect_stat_task_started_metrics collects the stat task started metrics. pub fn collect_stat_task_started_metrics(typ: i32) { STAT_TASK_COUNT .with_label_values(&[typ.to_string().as_str()]) .inc(); } -// collect_stat_task_failure_metrics collects the stat task failure metrics. +/// collect_stat_task_failure_metrics collects the stat task failure metrics. 
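The `collect_*` helpers in these hunks only increment or observe the registered metrics; serving them is left to the `metrics_handler` further below, which renders the `REGISTRY`. Exposing a prometheus `Registry` in the text exposition format generally looks like the following sketch (not the exact handler from this crate):

```rust
use prometheus::{Encoder, Registry, TextEncoder};

/// Render every metric registered in the registry into the Prometheus text format.
fn render_metrics(registry: &Registry) -> Result<String, prometheus::Error> {
    let mut buffer = Vec::new();
    let encoder = TextEncoder::new();
    encoder.encode(&registry.gather(), &mut buffer)?;
    Ok(String::from_utf8(buffer).unwrap_or_default())
}
```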
pub fn collect_stat_task_failure_metrics(typ: i32) { STAT_TASK_FAILURE_COUNT .with_label_values(&[typ.to_string().as_str()]) .inc(); } -// collect_delete_task_started_metrics collects the delete task started metrics. +/// collect_delete_task_started_metrics collects the delete task started metrics. pub fn collect_delete_task_started_metrics(typ: i32) { DELETE_TASK_COUNT .with_label_values(&[typ.to_string().as_str()]) .inc(); } -// collect_delete_task_failure_metrics collects the delete task failure metrics. +/// collect_delete_task_failure_metrics collects the delete task failure metrics. pub fn collect_delete_task_failure_metrics(typ: i32) { DELETE_TASK_FAILURE_COUNT .with_label_values(&[typ.to_string().as_str()]) .inc(); } -// collect_delete_host_started_metrics collects the delete host started metrics. +/// collect_delete_host_started_metrics collects the delete host started metrics. pub fn collect_delete_host_started_metrics() { DELETE_HOST_COUNT.with_label_values(&[]).inc(); } -// collect_delete_host_failure_metrics collects the delete host failure metrics. +/// collect_delete_host_failure_metrics collects the delete host failure metrics. pub fn collect_delete_host_failure_metrics() { DELETE_HOST_FAILURE_COUNT.with_label_values(&[]).inc(); } -// collect_disk_space_metrics collects the disk space metrics. +/// collect_disk_space_metrics collects the disk space metrics. pub fn collect_disk_space_metrics(path: &Path) { let stats = match fs2::statvfs(path) { Ok(stats) => stats, @@ -599,22 +599,22 @@ pub fn collect_disk_space_metrics(path: &Path) { .set(usage_space as i64); } -// Metrics is the metrics server. +/// Metrics is the metrics server. #[derive(Debug)] pub struct Metrics { - // config is the configuration of the dfdaemon. + /// config is the configuration of the dfdaemon. config: Arc, - // shutdown is used to shutdown the metrics server. + /// shutdown is used to shutdown the metrics server. shutdown: shutdown::Shutdown, - // _shutdown_complete is used to notify the metrics server is shutdown. + /// _shutdown_complete is used to notify the metrics server is shutdown. _shutdown_complete: mpsc::UnboundedSender<()>, } -// Metrics implements the metrics server. +/// Metrics implements the metrics server. impl Metrics { - // new creates a new Metrics. + /// new creates a new Metrics. #[instrument(skip_all)] pub fn new( config: Arc, @@ -628,7 +628,7 @@ impl Metrics { } } - // run starts the metrics server. + /// run starts the metrics server. #[instrument(skip_all)] pub async fn run(&self) { // Clone the shutdown channel. @@ -680,7 +680,7 @@ impl Metrics { } } - // register_custom_metrics registers all custom metrics. + /// register_custom_metrics registers all custom metrics. #[instrument(skip_all)] fn register_custom_metrics(&self) { REGISTRY @@ -776,7 +776,7 @@ impl Metrics { .expect("metric can be registered"); } - // metrics_handler handles the metrics request. + /// metrics_handler handles the metrics request. #[instrument(skip_all)] async fn metrics_handler(config: Arc) -> Result { // Collect the disk space metrics. diff --git a/dragonfly-client/src/proxy/header.rs b/dragonfly-client/src/proxy/header.rs index fb9aefe4..bc5ccd3f 100644 --- a/dragonfly-client/src/proxy/header.rs +++ b/dragonfly-client/src/proxy/header.rs @@ -18,34 +18,34 @@ use dragonfly_api::common::v2::Priority; use reqwest::header::HeaderMap; use tracing::{error, instrument}; -// DRAGONFLY_TAG_HEADER is the header key of tag in http request. +/// DRAGONFLY_TAG_HEADER is the header key of tag in http request. 
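register_custom_metrics above registers the counters and gauges against REGISTRY, and metrics_handler then serves whatever that registry has gathered. Assuming REGISTRY is a `prometheus::Registry` (its definition is not shown in this diff), the scrape path reduces to the usual gather-and-encode pattern:

```rust
use prometheus::{Encoder, TextEncoder};

/// Sketch of the gather-and-encode step behind a Prometheus scrape endpoint.
/// The real metrics_handler in this crate may wrap this differently.
fn encode_registry() -> Result<String, prometheus::Error> {
    let encoder = TextEncoder::new();
    // Gather every metric family registered via register_custom_metrics.
    let metric_families = REGISTRY.gather();
    let mut buffer = Vec::new();
    encoder.encode(&metric_families, &mut buffer)?;
    // The buffer is plain text in the Prometheus exposition format.
    Ok(String::from_utf8(buffer).unwrap_or_default())
}
```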
pub const DRAGONFLY_TAG_HEADER: &str = "X-Dragonfly-Tag"; -// DRAGONFLY_APPLICATION_HEADER is the header key of application in http request. +/// DRAGONFLY_APPLICATION_HEADER is the header key of application in http request. pub const DRAGONFLY_APPLICATION_HEADER: &str = "X-Dragonfly-Application"; -// DRAGONFLY_PRIORITY_HEADER is the header key of priority in http request, -// refer to https://github.com/dragonflyoss/api/blob/main/proto/common.proto#L67. +/// DRAGONFLY_PRIORITY_HEADER is the header key of priority in http request, +/// refer to https://github.com/dragonflyoss/api/blob/main/proto/common.proto#L67. pub const DRAGONFLY_PRIORITY_HEADER: &str = "X-Dragonfly-Priority"; -// DRAGONFLY_REGISTRY_HEADER is the header key of custom address of container registry. +/// DRAGONFLY_REGISTRY_HEADER is the header key of custom address of container registry. pub const DRAGONFLY_REGISTRY_HEADER: &str = "X-Dragonfly-Registry"; -// DRAGONFLY_FILTERS_HEADER is the header key of filters in http request, -// it is the filtered query params to generate the task id. -// When filter is "X-Dragonfly-Filtered-Query-Params: Signature,Expires,ns" for example: -// http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io -// will generate the same task id. -// Default value includes the filtered query params of s3, gcs, oss, obs, cos. +/// DRAGONFLY_FILTERS_HEADER is the header key of filters in http request, +/// it is the filtered query params to generate the task id. +/// When filter is "X-Dragonfly-Filtered-Query-Params: Signature,Expires,ns" for example: +/// http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io +/// will generate the same task id. +/// Default value includes the filtered query params of s3, gcs, oss, obs, cos. pub const DRAGONFLY_FILTERED_QUERY_PARAMS_HEADER: &str = "X-Dragonfly-Filtered-Query-Params"; -// DRAGONFLY_USE_P2P_HEADER is the header key of use p2p in http request. -// If the value is "true", the request will use P2P technology to distribute -// the content. If the value is "false", but url matches the regular expression in proxy config. -// The request will also use P2P technology to distribute the content. +/// DRAGONFLY_USE_P2P_HEADER is the header key of use p2p in http request. +/// If the value is "true", the request will use P2P technology to distribute +/// the content. If the value is "false", but url matches the regular expression in proxy config. +/// The request will also use P2P technology to distribute the content. pub const DRAGONFLY_USE_P2P_HEADER: &str = "X-Dragonfly-Use-P2P"; -// get_tag gets the tag from http header. +/// get_tag gets the tag from http header. #[instrument(skip_all)] pub fn get_tag(header: &HeaderMap) -> Option { match header.get(DRAGONFLY_TAG_HEADER) { @@ -60,7 +60,7 @@ pub fn get_tag(header: &HeaderMap) -> Option { } } -// get_application gets the application from http header. +/// get_application gets the application from http header. #[instrument(skip_all)] pub fn get_application(header: &HeaderMap) -> Option { match header.get(DRAGONFLY_APPLICATION_HEADER) { @@ -75,7 +75,7 @@ pub fn get_application(header: &HeaderMap) -> Option { } } -// get_priority gets the priority from http header. +/// get_priority gets the priority from http header. 
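Because the constants above are plain string header keys, a client opts into Dragonfly-specific behaviour simply by setting them on the outgoing request. A minimal sketch using the `reqwest` header types already imported by this file; the literal values are invented for illustration:

```rust
use reqwest::header::{HeaderMap, HeaderName, HeaderValue};

/// Build a header map carrying Dragonfly proxy hints. The values
/// ("build-cache", "ci-runner", "true") are examples only.
fn example_proxy_headers() -> Result<HeaderMap, Box<dyn std::error::Error>> {
    let mut headers = HeaderMap::new();
    // HeaderName::from_bytes normalizes the mixed-case constants into valid
    // lowercase header names.
    headers.insert(
        HeaderName::from_bytes(DRAGONFLY_TAG_HEADER.as_bytes())?,
        HeaderValue::from_static("build-cache"),
    );
    headers.insert(
        HeaderName::from_bytes(DRAGONFLY_APPLICATION_HEADER.as_bytes())?,
        HeaderValue::from_static("ci-runner"),
    );
    headers.insert(
        HeaderName::from_bytes(DRAGONFLY_USE_P2P_HEADER.as_bytes())?,
        HeaderValue::from_static("true"),
    );
    Ok(headers)
}
```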
#[instrument(skip_all)] pub fn get_priority(header: &HeaderMap) -> i32 { let default_priority = Priority::Level6 as i32; @@ -97,7 +97,7 @@ pub fn get_priority(header: &HeaderMap) -> i32 { } } -// get_registry gets the custom address of container registry from http header. +/// get_registry gets the custom address of container registry from http header. #[instrument(skip_all)] pub fn get_registry(header: &HeaderMap) -> Option { match header.get(DRAGONFLY_REGISTRY_HEADER) { @@ -112,7 +112,7 @@ pub fn get_registry(header: &HeaderMap) -> Option { } } -// get_filters gets the filters from http header. +/// get_filters gets the filters from http header. #[instrument(skip_all)] pub fn get_filtered_query_params( header: &HeaderMap, @@ -130,7 +130,7 @@ pub fn get_filtered_query_params( } } -// get_use_p2p gets the use p2p from http header. +/// get_use_p2p gets the use p2p from http header. #[instrument(skip_all)] pub fn get_use_p2p(header: &HeaderMap) -> bool { match header.get(DRAGONFLY_USE_P2P_HEADER) { diff --git a/dragonfly-client/src/proxy/mod.rs b/dragonfly-client/src/proxy/mod.rs index 1ad4b8fc..986a2ffc 100644 --- a/dragonfly-client/src/proxy/mod.rs +++ b/dragonfly-client/src/proxy/mod.rs @@ -67,37 +67,37 @@ use tracing::{error, info, instrument, Span}; pub mod header; -// Response is the response of the proxy server. +/// Response is the response of the proxy server. pub type Response = hyper::Response>; -// Proxy is the proxy server. +/// Proxy is the proxy server. pub struct Proxy { - // config is the configuration of the dfdaemon. + /// config is the configuration of the dfdaemon. config: Arc, - // task is the task manager. + /// task is the task manager. task: Arc, - // addr is the address of the proxy server. + /// addr is the address of the proxy server. addr: SocketAddr, - // registry_certs is the certificate of the client for the registry. + /// registry_certs is the certificate of the client for the registry. registry_certs: Arc>>>, - // server_ca_cert is the CA certificate of the proxy server to - // sign the self-signed certificate. + /// server_ca_cert is the CA certificate of the proxy server to + /// sign the self-signed certificate. server_ca_cert: Arc>, - // shutdown is used to shutdown the proxy server. + /// shutdown is used to shutdown the proxy server. shutdown: shutdown::Shutdown, - // _shutdown_complete is used to notify the proxy server is shutdown. + /// _shutdown_complete is used to notify the proxy server is shutdown. _shutdown_complete: mpsc::UnboundedSender<()>, } -// Proxy implements the proxy server. +/// Proxy implements the proxy server. impl Proxy { - // new creates a new Proxy. + /// new creates a new Proxy. #[instrument(skip_all)] pub fn new( config: Arc, @@ -168,7 +168,7 @@ impl Proxy { proxy } - // run starts the proxy server. + /// run starts the proxy server. #[instrument(skip_all)] pub async fn run(&self) -> ClientResult<()> { let listener = TcpListener::bind(self.addr).await?; @@ -223,7 +223,7 @@ impl Proxy { } } -// handler handles the request from the client. +/// handler handles the request from the client. #[instrument(skip_all, fields(uri, method))] pub async fn handler( config: Arc, @@ -289,7 +289,7 @@ pub async fn handler( .await } -// registry_mirror_http_handler handles the http request for the registry mirror by client. +/// registry_mirror_http_handler handles the http request for the registry mirror by client. 
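On the receiving side, the getters above turn the raw header map back into typed values with sensible defaults (for example, get_priority falls back to Priority::Level6). A small illustrative sketch, assuming the string-valued getters return `Option<String>`; the `ProxyHints` struct is hypothetical and exists only for this example:

```rust
use reqwest::header::HeaderMap;

/// Hypothetical summary of the Dragonfly hints carried by a proxied request.
struct ProxyHints {
    tag: Option<String>,
    application: Option<String>,
    priority: i32,
    use_p2p: bool,
}

/// Read the Dragonfly headers with the getters documented above.
fn read_proxy_hints(header: &HeaderMap) -> ProxyHints {
    ProxyHints {
        tag: get_tag(header),
        application: get_application(header),
        // Defaults to Priority::Level6 when the header is missing or invalid.
        priority: get_priority(header),
        use_p2p: get_use_p2p(header),
    }
}
```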
#[instrument(skip_all)] pub async fn registry_mirror_http_handler( config: Arc, @@ -309,7 +309,7 @@ pub async fn registry_mirror_http_handler( .await; } -// registry_mirror_https_handler handles the https request for the registry mirror by client. +/// registry_mirror_https_handler handles the https request for the registry mirror by client. #[instrument(skip_all)] pub async fn registry_mirror_https_handler( config: Arc, @@ -331,7 +331,7 @@ pub async fn registry_mirror_https_handler( .await; } -// http_handler handles the http request by client. +/// http_handler handles the http request by client. #[instrument(skip_all)] pub async fn http_handler( config: Arc, @@ -397,7 +397,7 @@ pub async fn http_handler( return proxy_http(request).await; } -// https_handler handles the https request by client. +/// https_handler handles the https request by client. #[instrument(skip_all)] pub async fn https_handler( config: Arc, @@ -439,9 +439,9 @@ pub async fn https_handler( } } -// upgraded_tunnel handles the upgraded connection. If the ca_cert is not set, use the -// self-signed certificate. Otherwise, use the CA certificate to sign the -// self-signed certificate. +/// upgraded_tunnel handles the upgraded connection. If the ca_cert is not set, use the +/// self-signed certificate. Otherwise, use the CA certificate to sign the +/// self-signed certificate. #[instrument(skip_all)] async fn upgraded_tunnel( config: Arc, @@ -503,7 +503,7 @@ async fn upgraded_tunnel( Ok(()) } -// upgraded_handler handles the upgraded https request from the client. +/// upgraded_handler handles the upgraded https request from the client. #[instrument(skip_all, fields(uri, method))] pub async fn upgraded_handler( config: Arc, @@ -579,7 +579,7 @@ pub async fn upgraded_handler( return proxy_http(request).await; } -// proxy_by_dfdaemon proxies the request via the dfdaemon. +/// proxy_by_dfdaemon proxies the request via the dfdaemon. #[instrument(skip_all)] async fn proxy_by_dfdaemon( config: Arc, @@ -839,7 +839,7 @@ async fn proxy_by_dfdaemon( } } -// proxy_http proxies the HTTP request directly to the remote server. +/// proxy_http proxies the HTTP request directly to the remote server. #[instrument(skip_all)] async fn proxy_http(request: Request) -> ClientResult { let Some(host) = request.uri().host() else { @@ -866,7 +866,7 @@ async fn proxy_http(request: Request) -> ClientResult, @@ -904,7 +904,7 @@ async fn proxy_https( Ok(response.map(|b| b.map_err(ClientError::from).boxed())) } -// make_registry_mirror_request makes a registry mirror request by the request. +/// make_registry_mirror_request makes a registry mirror request by the request. #[instrument(skip_all)] fn make_registry_mirror_request( config: Arc, @@ -940,7 +940,7 @@ fn make_registry_mirror_request( Ok(request) } -// make_download_task_request makes a download task request by the request. +/// make_download_task_request makes a download task request by the request. #[instrument(skip_all)] fn make_download_task_request( config: Arc, @@ -983,7 +983,7 @@ fn make_download_task_request( }) } -// make_download_url makes a download url by the given uri. +/// make_download_url makes a download url by the given uri. #[instrument(skip_all)] fn make_download_url( uri: &hyper::Uri, @@ -1009,7 +1009,7 @@ fn make_download_url( .to_string()) } -// make_response_headers makes the response headers. +/// make_response_headers makes the response headers. 
#[instrument(skip_all)] fn make_response_headers( mut download_task_started_response: DownloadTaskStartedResponse, @@ -1035,14 +1035,14 @@ fn make_response_headers( hashmap_to_hyper_header_map(&download_task_started_response.response_header) } -// find_matching_rule returns whether the dfdaemon should be used to download the task. -// If the dfdaemon should be used, return the matched rule. +/// find_matching_rule returns whether the dfdaemon should be used to download the task. +/// If the dfdaemon should be used, return the matched rule. #[instrument(skip_all)] fn find_matching_rule(rules: Option>, url: &str) -> Option { rules?.iter().find(|rule| rule.regex.is_match(url)).cloned() } -// make_error_response makes an error response with the given status and message. +/// make_error_response makes an error response with the given status and message. #[instrument(skip_all)] fn make_error_response(status: http::StatusCode, header: Option) -> Response { let mut response = Response::new(empty()); @@ -1056,7 +1056,7 @@ fn make_error_response(status: http::StatusCode, header: Option response } -// empty returns an empty body. +/// empty returns an empty body. #[instrument(skip_all)] fn empty() -> BoxBody { Empty::::new() diff --git a/dragonfly-client/src/resource/cache_task.rs b/dragonfly-client/src/resource/cache_task.rs index b623f7b8..678ce86f 100644 --- a/dragonfly-client/src/resource/cache_task.rs +++ b/dragonfly-client/src/resource/cache_task.rs @@ -54,27 +54,27 @@ use tracing::{error, info, instrument, Instrument}; use super::*; -// CacheTask represents a cache task manager. +/// CacheTask represents a cache task manager. pub struct CacheTask { - // config is the configuration of the dfdaemon. + /// config is the configuration of the dfdaemon. config: Arc, - // id_generator is the id generator. + /// id_generator is the id generator. pub id_generator: Arc, - // storage is the local storage. + /// storage is the local storage. storage: Arc, - // scheduler_client is the grpc client of the scheduler. + /// scheduler_client is the grpc client of the scheduler. pub scheduler_client: Arc, - // piece is the piece manager. + /// piece is the piece manager. pub piece: Arc, } -// CacheTask is the implementation of CacheTask. +/// CacheTask is the implementation of CacheTask. impl CacheTask { - // new creates a new CacheTask. + /// new creates a new CacheTask. #[instrument(skip_all)] pub fn new( config: Arc, @@ -100,7 +100,7 @@ impl CacheTask { } } - // create_persistent creates a persistent cache task from local. + /// create_persistent creates a persistent cache task from local. #[instrument(skip_all)] pub async fn create_persistent( &self, @@ -224,7 +224,7 @@ impl CacheTask { } } - // download_started updates the metadata of the cache task when the cache task downloads started. + /// download_started updates the metadata of the cache task when the cache task downloads started. #[instrument(skip_all)] pub async fn download_started( &self, @@ -253,20 +253,20 @@ impl CacheTask { ) } - // download_finished updates the metadata of the cache task when the task downloads finished. + /// download_finished updates the metadata of the cache task when the task downloads finished. #[instrument(skip_all)] pub fn download_finished(&self, id: &str) -> ClientResult { self.storage.download_cache_task_finished(id) } - // download_failed updates the metadata of the cache task when the task downloads failed. + /// download_failed updates the metadata of the cache task when the task downloads failed. 
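The download_started/download_finished/download_failed helpers above are meant to bracket a cache-task download: started before the pieces are fetched, then exactly one of finished or failed depending on the outcome. A rough sketch of the outcome step, assuming this module's usual imports (`CacheTask`, `ClientResult`); the actual piece-download loop is elided and `succeeded` stands in for its result:

```rust
/// Sketch: record the outcome of a cache-task download with the helpers
/// documented above.
async fn finish_or_fail(cache_task: &CacheTask, task_id: &str, succeeded: bool) -> ClientResult<()> {
    if succeeded {
        // Marks the task metadata in local storage as finished.
        cache_task.download_finished(task_id)?;
    } else {
        // Marks the task metadata as failed so the task can be retried or reclaimed.
        cache_task.download_failed(task_id).await?;
    }
    Ok(())
}
```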
#[instrument(skip_all)] pub async fn download_failed(&self, id: &str) -> ClientResult<()> { let _ = self.storage.download_cache_task_failed(id).await?; Ok(()) } - // hard_link_or_copy hard links or copies the cache task content to the destination. + /// hard_link_or_copy hard links or copies the cache task content to the destination. #[instrument(skip_all)] pub async fn hard_link_or_copy( &self, @@ -276,7 +276,7 @@ impl CacheTask { self.storage.hard_link_or_copy_cache_task(task, to).await } - // download downloads a cache task. + /// download downloads a cache task. #[allow(clippy::too_many_arguments)] #[instrument(skip_all)] pub async fn download( @@ -455,7 +455,7 @@ impl CacheTask { Ok(()) } - // download_partial_with_scheduler downloads a partial cache task with scheduler. + /// download_partial_with_scheduler downloads a partial cache task with scheduler. #[allow(clippy::too_many_arguments)] #[instrument(skip_all)] async fn download_partial_with_scheduler( @@ -758,7 +758,7 @@ impl CacheTask { Ok(finished_pieces) } - // download_partial_with_scheduler_from_remote_peer downloads a partial cache task with scheduler from a remote peer. + /// download_partial_with_scheduler_from_remote_peer downloads a partial cache task with scheduler from a remote peer. #[allow(clippy::too_many_arguments)] #[instrument(skip_all)] async fn download_partial_with_scheduler_from_remote_peer( @@ -984,7 +984,7 @@ impl CacheTask { Ok(finished_pieces) } - // download_partial_from_local_peer downloads a partial cache task from a local peer. + /// download_partial_from_local_peer downloads a partial cache task from a local peer. #[allow(clippy::too_many_arguments)] #[instrument(skip_all)] async fn download_partial_from_local_peer( @@ -1073,7 +1073,7 @@ impl CacheTask { Ok(finished_pieces) } - // stat stats the cache task from the scheduler. + /// stat stats the cache task from the scheduler. #[instrument(skip_all)] pub async fn stat(&self, task_id: &str, host_id: &str) -> ClientResult { self.scheduler_client @@ -1084,7 +1084,7 @@ impl CacheTask { .await } - // delete_cache_task deletes a cache task. + /// delete deletes a cache task. #[instrument(skip_all)] pub async fn delete(&self, task_id: &str, host_id: &str) -> ClientResult<()> { self.scheduler_client diff --git a/dragonfly-client/src/resource/piece.rs b/dragonfly-client/src/resource/piece.rs index e1bf1233..2c23b830 100644 --- a/dragonfly-client/src/resource/piece.rs +++ b/dragonfly-client/src/resource/piece.rs @@ -38,48 +38,48 @@ use tracing::{error, info, instrument, Span}; use super::*; -// MAX_PIECE_COUNT is the maximum piece count. If the piece count is upper -// than MAX_PIECE_COUNT, the piece length will be optimized by the file length. -// When piece length becames the MAX_PIECE_LENGTH, the piece piece count -// probably will be upper than MAX_PIECE_COUNT. +/// MAX_PIECE_COUNT is the maximum piece count. If the piece count is greater +/// than MAX_PIECE_COUNT, the piece length will be optimized by the file length. +/// When the piece length becomes MAX_PIECE_LENGTH, the piece count +/// will probably be greater than MAX_PIECE_COUNT. const MAX_PIECE_COUNT: u64 = 500; -// MIN_PIECE_LENGTH is the minimum piece length. +/// MIN_PIECE_LENGTH is the minimum piece length. const MIN_PIECE_LENGTH: u64 = 4 * 1024 * 1024; -// MAX_PIECE_LENGTH is the maximum piece length. +/// MAX_PIECE_LENGTH is the maximum piece length. const MAX_PIECE_LENGTH: u64 = 16 * 1024 * 1024; -// PieceLengthStrategy sets the optimization strategy of piece length. 
+/// PieceLengthStrategy sets the optimization strategy of piece length. pub enum PieceLengthStrategy { - // OptimizeByFileLength optimizes the piece length by the file length. + /// OptimizeByFileLength optimizes the piece length by the file length. OptimizeByFileLength, } -// Piece represents a piece manager. +/// Piece represents a piece manager. pub struct Piece { - // config is the configuration of the dfdaemon. + /// config is the configuration of the dfdaemon. config: Arc, - // id_generator is the id generator. + /// id_generator is the id generator. id_generator: Arc, - // storage is the local storage. + /// storage is the local storage. storage: Arc, - // backend_factory is the backend factory. + /// backend_factory is the backend factory. backend_factory: Arc, - // download_rate_limiter is the rate limiter of the download speed in bps(bytes per second). + /// download_rate_limiter is the rate limiter of the download speed in bps(bytes per second). download_rate_limiter: Arc, - // upload_rate_limiter is the rate limiter of the upload speed in bps(bytes per second). + /// upload_rate_limiter is the rate limiter of the upload speed in bps(bytes per second). upload_rate_limiter: Arc, } -// Piece implements the piece manager. +/// Piece implements the piece manager. impl Piece { - // new returns a new Piece. + /// new returns a new Piece. #[instrument(skip_all)] pub fn new( config: Arc, @@ -110,13 +110,13 @@ impl Piece { } } - // get gets a piece from the local storage. + /// get gets a piece from the local storage. #[instrument(skip_all)] pub fn get(&self, task_id: &str, number: u32) -> Result> { self.storage.get_piece(task_id, number) } - // calculate_interested calculates the interested pieces by content_length and range. + /// calculate_interested calculates the interested pieces by content_length and range. #[instrument(skip_all)] pub fn calculate_interested( &self, @@ -230,7 +230,7 @@ impl Piece { Ok(pieces) } - // remove_finished_from_interested removes the finished pieces from interested pieces. + /// remove_finished_from_interested removes the finished pieces from interested pieces. #[instrument(skip_all)] pub fn remove_finished_from_interested( &self, @@ -248,7 +248,7 @@ impl Piece { .collect::>() } - // merge_finished_pieces merges the finished pieces and has finished pieces. + /// merge_finished_pieces merges the finished pieces and has finished pieces. #[instrument(skip_all)] pub fn merge_finished_pieces( &self, @@ -269,7 +269,7 @@ impl Piece { pieces.into_values().collect() } - // calculate_piece_size calculates the piece size by content_length. + /// calculate_piece_size calculates the piece size by content_length. pub fn calculate_piece_length( &self, strategy: PieceLengthStrategy, @@ -292,7 +292,7 @@ impl Piece { } } - // upload_from_local_peer_into_async_read uploads a single piece from a local peer. + /// upload_from_local_peer_into_async_read uploads a single piece from a local peer. #[instrument(skip_all, fields(piece_id))] pub async fn upload_from_local_peer_into_async_read( &self, @@ -323,7 +323,7 @@ impl Piece { }) } - // download_from_local_peer_into_async_read downloads a single piece from a local peer. + /// download_from_local_peer_into_async_read downloads a single piece from a local peer. #[instrument(skip_all, fields(piece_id))] pub async fn download_from_local_peer_into_async_read( &self, @@ -345,8 +345,8 @@ impl Piece { self.storage.upload_piece(task_id, number, range).await } - // download_from_local_peer downloads a single piece from a local peer. 
Fake the download piece - // from the local peer, just collect the metrics. + /// download_from_local_peer downloads a single piece from a local peer. Fake the download piece + /// from the local peer, just collect the metrics. #[instrument(skip_all)] pub fn download_from_local_peer(&self, task_id: &str, length: u64) { collect_download_piece_traffic_metrics( @@ -356,7 +356,7 @@ impl Piece { ); } - // download_from_remote_peer downloads a single piece from a remote peer. + /// download_from_remote_peer downloads a single piece from a remote peer. #[instrument(skip_all, fields(piece_id))] pub async fn download_from_remote_peer( &self, @@ -482,7 +482,7 @@ impl Piece { }) } - // download_from_source downloads a single piece from the source. + /// download_from_source downloads a single piece from the source. #[allow(clippy::too_many_arguments)] #[instrument(skip_all, fields(piece_id))] pub async fn download_from_source( diff --git a/dragonfly-client/src/resource/piece_collector.rs b/dragonfly-client/src/resource/piece_collector.rs index 54e575a9..2cdd62e2 100644 --- a/dragonfly-client/src/resource/piece_collector.rs +++ b/dragonfly-client/src/resource/piece_collector.rs @@ -29,51 +29,51 @@ use tokio::task::JoinSet; use tokio_stream::StreamExt; use tracing::{error, info, instrument, Instrument}; -// CollectedParent is the parent peer collected from the remote peer. +/// CollectedParent is the parent peer collected from the remote peer. #[derive(Clone, Debug)] pub struct CollectedParent { - // id is the id of the parent. + /// id is the id of the parent. pub id: String, - // host is the host of the parent. + /// host is the host of the parent. pub host: Option, } -// CollectedPiece is the piece collected from a peer. +/// CollectedPiece is the piece collected from a peer. pub struct CollectedPiece { - // number is the piece number. + /// number is the piece number. pub number: u32, - // length is the piece length. + /// length is the piece length. pub length: u64, - // parent is the parent peer. + /// parent is the parent peer. pub parent: CollectedParent, } -// PieceCollector is used to collect pieces from peers. +/// PieceCollector is used to collect pieces from peers. pub struct PieceCollector { - // config is the configuration of the dfdaemon. + /// config is the configuration of the dfdaemon. config: Arc, - // host_id is the id of the host. + /// host_id is the id of the host. host_id: String, - // task_id is the id of the task. + /// task_id is the id of the task. task_id: String, - // parents is the parent peers. + /// parents is the parent peers. parents: Vec, - // interested_pieces is the pieces interested by the collector. + /// interested_pieces is the pieces interested by the collector. interested_pieces: Vec, - // collected_pieces is the pieces collected from peers. + /// collected_pieces is the pieces collected from peers. collected_pieces: Arc>, } impl PieceCollector { - // new creates a new PieceCollector. + /// new creates a new PieceCollector. #[instrument(skip_all)] pub fn new( config: Arc, @@ -100,7 +100,7 @@ impl PieceCollector { } } - // run runs the piece collector. + /// run runs the piece collector. #[instrument(skip_all)] pub async fn run(&self) -> Receiver { let host_id = self.host_id.clone(); @@ -132,7 +132,7 @@ impl PieceCollector { collected_piece_rx } - // collect_from_remote_peers collects pieces from remote peers. + /// collect_from_remote_peers collects pieces from remote peers. 
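Reading the piece constants together with the calculate_piece_length signature above, the OptimizeByFileLength strategy appears to aim the piece length at `content_length / MAX_PIECE_COUNT`, clamped to the `[MIN_PIECE_LENGTH, MAX_PIECE_LENGTH]` range. The sketch below is one plausible reading of that heuristic, with the constants repeated so it stands alone; it is not the crate's exact implementation:

```rust
const MAX_PIECE_COUNT: u64 = 500;
const MIN_PIECE_LENGTH: u64 = 4 * 1024 * 1024;
const MAX_PIECE_LENGTH: u64 = 16 * 1024 * 1024;

/// Illustrative piece-length heuristic: keep the piece count near
/// MAX_PIECE_COUNT while clamping the length between the min and max bounds.
fn piece_length_by_file_length(content_length: u64) -> u64 {
    (content_length / MAX_PIECE_COUNT).clamp(MIN_PIECE_LENGTH, MAX_PIECE_LENGTH)
}

// Example: a 10 GiB file gives 10 GiB / 500 ≈ 20 MiB, which is clamped down to
// 16 MiB, so the resulting piece count ends up above MAX_PIECE_COUNT exactly as
// the MAX_PIECE_COUNT comment warns.
```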
#[instrument(skip_all)] async fn collect_from_remote_peers( host_id: String, diff --git a/dragonfly-client/src/resource/task.rs b/dragonfly-client/src/resource/task.rs index dece63cd..0b377d54 100644 --- a/dragonfly-client/src/resource/task.rs +++ b/dragonfly-client/src/resource/task.rs @@ -64,30 +64,30 @@ use tracing::{error, info, instrument, Instrument}; use super::*; -// Task represents a task manager. +/// Task represents a task manager. pub struct Task { - // config is the configuration of the dfdaemon. + /// config is the configuration of the dfdaemon. config: Arc, - // id_generator is the id generator. + /// id_generator is the id generator. pub id_generator: Arc, - // storage is the local storage. + /// storage is the local storage. storage: Arc, - // scheduler_client is the grpc client of the scheduler. + /// scheduler_client is the grpc client of the scheduler. pub scheduler_client: Arc, - // backend_factory is the backend factory. + /// backend_factory is the backend factory. pub backend_factory: Arc, - // piece is the piece manager. + /// piece is the piece manager. pub piece: Arc, } -// Task implements the task manager. +/// Task implements the task manager. impl Task { - // new returns a new Task. + /// new returns a new Task. #[instrument(skip_all)] pub fn new( config: Arc, @@ -114,7 +114,7 @@ impl Task { } } - // download_started updates the metadata of the task when the task downloads started. + /// download_started updates the metadata of the task when the task downloads started. #[instrument(skip_all)] pub async fn download_started( &self, @@ -208,31 +208,31 @@ impl Task { ) } - // download_finished updates the metadata of the task when the task downloads finished. + /// download_finished updates the metadata of the task when the task downloads finished. #[instrument(skip_all)] pub fn download_finished(&self, id: &str) -> ClientResult { self.storage.download_task_finished(id) } - // download_failed updates the metadata of the task when the task downloads failed. + /// download_failed updates the metadata of the task when the task downloads failed. #[instrument(skip_all)] pub async fn download_failed(&self, id: &str) -> ClientResult<()> { self.storage.download_task_failed(id).await.map(|_| ()) } - // prefetch_task_started updates the metadata of the task when the task prefetch started. + /// prefetch_task_started updates the metadata of the task when the task prefetch started. #[instrument(skip_all)] pub async fn prefetch_task_started(&self, id: &str) -> ClientResult { self.storage.prefetch_task_started(id).await } - // prefetch_task_failed updates the metadata of the task when the task prefetch failed. + /// prefetch_task_failed updates the metadata of the task when the task prefetch failed. #[instrument(skip_all)] pub async fn prefetch_task_failed(&self, id: &str) -> ClientResult { self.storage.prefetch_task_failed(id).await } - // hard_link_or_copy hard links or copies the task content to the destination. + /// hard_link_or_copy hard links or copies the task content to the destination. #[instrument(skip_all)] pub async fn hard_link_or_copy( &self, @@ -243,7 +243,7 @@ impl Task { self.storage.hard_link_or_copy_task(task, to, range).await } - // download downloads a task. + /// download downloads a task. #[allow(clippy::too_many_arguments)] #[instrument(skip_all)] pub async fn download( @@ -462,7 +462,7 @@ impl Task { Ok(()) } - // download_partial_with_scheduler downloads a partial task with scheduler. 
+ /// download_partial_with_scheduler downloads a partial task with scheduler. #[allow(clippy::too_many_arguments)] #[instrument(skip_all)] async fn download_partial_with_scheduler( @@ -894,7 +894,7 @@ impl Task { Ok(finished_pieces) } - // download_partial_with_scheduler_from_remote_peer downloads a partial task with scheduler from a remote peer. + /// download_partial_with_scheduler_from_remote_peer downloads a partial task with scheduler from a remote peer. #[allow(clippy::too_many_arguments)] #[instrument(skip_all)] async fn download_partial_with_scheduler_from_remote_peer( @@ -1160,7 +1160,7 @@ impl Task { Ok(finished_pieces) } - // download_partial_with_scheduler_from_source downloads a partial task with scheduler from the source. + /// download_partial_with_scheduler_from_source downloads a partial task with scheduler from the source. #[allow(clippy::too_many_arguments)] #[instrument(skip_all)] async fn download_partial_with_scheduler_from_source( @@ -1407,7 +1407,7 @@ impl Task { Ok(finished_pieces) } - // download_partial_from_local_peer downloads a partial task from a local peer. + /// download_partial_from_local_peer downloads a partial task from a local peer. #[allow(clippy::too_many_arguments)] #[instrument(skip_all)] async fn download_partial_from_local_peer( @@ -1500,7 +1500,7 @@ impl Task { Ok(finished_pieces) } - // download_partial_from_source downloads a partial task from the source. + /// download_partial_from_source downloads a partial task from the source. #[allow(clippy::too_many_arguments)] #[instrument(skip_all)] async fn download_partial_from_source( @@ -1663,7 +1663,7 @@ impl Task { )) } - // stat_task returns the task metadata. + /// stat_task returns the task metadata. #[instrument(skip_all)] pub async fn stat(&self, task_id: &str, host_id: &str) -> ClientResult { let task = self @@ -1681,7 +1681,7 @@ impl Task { Ok(task) } - // Delete a task and reclaim local storage. + /// Delete a task and reclaim local storage. #[instrument(skip_all)] pub async fn delete(&self, task_id: &str, host_id: &str) -> ClientResult<()> { let task = self.storage.get_task(task_id).map_err(|err| { diff --git a/dragonfly-client/src/shutdown.rs b/dragonfly-client/src/shutdown.rs index e26d6320..04dc81d0 100644 --- a/dragonfly-client/src/shutdown.rs +++ b/dragonfly-client/src/shutdown.rs @@ -18,22 +18,22 @@ use tokio::signal::unix::{signal, SignalKind}; use tokio::sync::broadcast; use tracing::info; -// Shutdown is a signal to shutdown. +/// Shutdown is a signal to shutdown. #[derive(Debug)] pub struct Shutdown { - // is_shutdown is true if the shutdown signal has been received. + /// is_shutdown is true if the shutdown signal has been received. is_shutdown: bool, - // sender is used to send the shutdown signal. + /// sender is used to send the shutdown signal. sender: broadcast::Sender<()>, - // receiver is used to receive the shutdown signal. + /// receiver is used to receive the shutdown signal. receiver: broadcast::Receiver<()>, } -// Shutdown implements the shutdown signal. +/// Shutdown implements the shutdown signal. impl Shutdown { - // new creates a new Shutdown. + /// new creates a new Shutdown. pub fn new() -> Shutdown { let (sender, receiver) = broadcast::channel(1); Self { @@ -43,17 +43,17 @@ impl Shutdown { } } - // is_shutdown returns true if the shutdown signal has been received. + /// is_shutdown returns true if the shutdown signal has been received. pub fn is_shutdown(&self) -> bool { self.is_shutdown } - // trigger triggers the shutdown signal. 
+ /// trigger triggers the shutdown signal. pub fn trigger(&self) { let _ = self.sender.send(()); } - // recv waits for the shutdown signal. + /// recv waits for the shutdown signal. pub async fn recv(&mut self) { // Return immediately if the shutdown signal has already been received. if self.is_shutdown { @@ -76,9 +76,9 @@ impl Default for Shutdown { } } -// Clone implements the Clone trait. +/// Clone implements the Clone trait. impl Clone for Shutdown { - // clone returns a new Shutdown. + /// clone returns a new Shutdown. fn clone(&self) -> Self { let sender = self.sender.clone(); let receiver = self.sender.subscribe(); @@ -90,8 +90,8 @@ impl Clone for Shutdown { } } -// shutdown_signal returns a future that will resolve when a SIGINT, SIGTERM or SIGQUIT signal is -// received by the process. +/// shutdown_signal returns a future that will resolve when a SIGINT, SIGTERM or SIGQUIT signal is +/// received by the process. pub async fn shutdown_signal() { let mut sigint = signal(SignalKind::interrupt()).unwrap(); let mut sigterm = signal(SignalKind::terminate()).unwrap(); diff --git a/dragonfly-client/src/stats/mod.rs b/dragonfly-client/src/stats/mod.rs index f98353ed..0960d11e 100644 --- a/dragonfly-client/src/stats/mod.rs +++ b/dragonfly-client/src/stats/mod.rs @@ -24,24 +24,24 @@ use tokio::sync::mpsc; use tracing::{error, info, instrument}; use warp::{Filter, Rejection, Reply}; -// DEFAULT_PROFILER_SECONDS is the default seconds to start profiling. +/// DEFAULT_PROFILER_SECONDS is the default seconds to start profiling. const DEFAULT_PROFILER_SECONDS: u64 = 10; -// DEFAULT_PROFILER_FREQUENCY is the default frequency to start profiling. +/// DEFAULT_PROFILER_FREQUENCY is the default frequency to start profiling. const DEFAULT_PROFILER_FREQUENCY: i32 = 1000; -// PProfProfileQueryParams is the query params to start profiling. +/// PProfProfileQueryParams is the query params to start profiling. #[derive(Deserialize, Serialize)] #[serde(default)] pub struct PProfProfileQueryParams { - // seconds is the seconds to start profiling. + /// seconds is the seconds to start profiling. pub seconds: u64, - // frequency is the frequency to start profiling. + /// frequency is the frequency to start profiling. pub frequency: i32, } -// PProfProfileQueryParams implements the default. +/// PProfProfileQueryParams implements the default. impl Default for PProfProfileQueryParams { fn default() -> Self { Self { @@ -51,22 +51,22 @@ impl Default for PProfProfileQueryParams { } } -// Stats is the stats server. +/// Stats is the stats server. #[derive(Debug)] pub struct Stats { - // addr is the address of the stats server. + /// addr is the address of the stats server. addr: SocketAddr, - // shutdown is used to shutdown the stats server. + /// shutdown is used to shutdown the stats server. shutdown: shutdown::Shutdown, - // _shutdown_complete is used to notify the stats server is shutdown. + /// _shutdown_complete is used to notify the stats server is shutdown. _shutdown_complete: mpsc::UnboundedSender<()>, } -// Stats implements the stats server. +/// Stats implements the stats server. impl Stats { - // new creates a new Stats. + /// new creates a new Stats. #[instrument(skip_all)] pub fn new( addr: SocketAddr, @@ -80,7 +80,7 @@ impl Stats { } } - // run starts the stats server. + /// run starts the stats server. #[instrument(skip_all)] pub async fn run(&self) { // Clone the shutdown channel. @@ -114,7 +114,7 @@ impl Stats { } } - // stats_handler handles the stats request. 
+ /// pprof_profile_handler handles the pprof profile request. #[instrument(skip_all)] async fn pprof_profile_handler( query_params: PProfProfileQueryParams, @@ -149,7 +149,7 @@ impl Stats { Ok(body) } - // pprof_heap_handler handles the pprof heap request. + /// pprof_heap_handler handles the pprof heap request. #[instrument(skip_all)] async fn pprof_heap_handler() -> Result { info!("start heap profiling"); diff --git a/dragonfly-client/src/tracing/mod.rs b/dragonfly-client/src/tracing/mod.rs index eb66ce3b..546a952d 100644 --- a/dragonfly-client/src/tracing/mod.rs +++ b/dragonfly-client/src/tracing/mod.rs @@ -31,7 +31,7 @@ use tracing_subscriber::{ EnvFilter, Registry, }; -// init_tracing initializes the tracing system. +/// init_tracing initializes the tracing system. #[allow(clippy::too_many_arguments)] pub fn init_tracing( name: &str, @@ -141,7 +141,7 @@ pub fn init_tracing( guards } -// redirect_stderr_to_file redirects stderr to a file. +/// redirect_stderr_to_file redirects stderr to a file. fn redirect_stderr_to_file(log_dir: PathBuf) { let log_path = log_dir.join("stderr.log"); let file = OpenOptions::new()