refactor: change comments to rust style (#742)

Signed-off-by: Gaius <gaius.qi@gmail.com>
Authored by Gaius on 2024-09-20 20:15:51 +08:00, committed by GitHub
parent d9f15bdbfe
commit e2209a8e61
GPG Key ID: B5690EEEBB952194
55 changed files with 1257 additions and 1256 deletions
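
The change is mechanical: item-level `//` line comments become `///` doc comments, which rustdoc attaches to the item that follows and renders via `cargo doc`; regular `//` comments inside function bodies are left untouched. A minimal illustrative sketch of the distinction (the item names below are placeholders, not taken from this commit):

// An ordinary line comment: visible only in the source, ignored by rustdoc.
struct PlainItem;

/// A doc comment: attached to `DocumentedItem` and emitted as its API
/// documentation when `cargo doc` runs.
struct DocumentedItem;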

View File

@@ -17,39 +17,39 @@
 use dragonfly_client_backend::{Backend, Body, GetRequest, GetResponse, HeadRequest, HeadResponse};
 use dragonfly_client_core::{Error, Result};
-// Hdfs is a struct that implements the Backend trait
+/// Hdfs is a struct that implements the Backend trait
 struct Hdfs;
-// Hdfs implements the Backend trait
+/// Hdfs implements the Backend trait
 impl Hdfs {
 pub fn new() -> Self {
 Self {}
 }
 }
-// Implement the Backend trait for Hdfs.
+/// Implement the Backend trait for Hdfs.
 #[tonic::async_trait]
 impl Backend for Hdfs {
-// scheme returns the scheme of the backend.
+/// scheme returns the scheme of the backend.
 fn scheme(&self) -> String {
 "hdfs".to_string()
 }
-// head is an async function that takes a HeadRequest and returns a HeadResponse.
+/// head is an async function that takes a HeadRequest and returns a HeadResponse.
 async fn head(&self, request: HeadRequest) -> Result<HeadResponse> {
 println!("HDFS head url: {}", request.url);
 Err(Error::Unimplemented)
 }
-// get is an async function that takes a GetRequest and returns a GetResponse.
+/// get is an async function that takes a GetRequest and returns a GetResponse.
 async fn get(&self, request: GetRequest) -> Result<GetResponse<Body>> {
 println!("HDFS get url: {}", request.url);
 Err(Error::Unimplemented)
 }
 }
-// register_plugin is a function that returns a Box<dyn Backend + Send + Sync>.
-// This function is used to register the HDFS plugin to the Backend.
+/// register_plugin is a function that returns a Box<dyn Backend + Send + Sync>.
+/// This function is used to register the HDFS plugin to the Backend.
 #[no_mangle]
 pub fn register_plugin() -> Box<dyn Backend + Send + Sync> {
 Box::new(Hdfs::new())

View File

@@ -22,15 +22,15 @@ use std::io::{Error as IOError, ErrorKind};
 use tokio_util::io::StreamReader;
 use tracing::{error, info, instrument};
-// HTTP is the HTTP backend.
+/// HTTP is the HTTP backend.
 pub struct HTTP {
-// scheme is the scheme of the HTTP backend.
+/// scheme is the scheme of the HTTP backend.
 scheme: String,
 }
-// HTTP implements the http interface.
+/// HTTP implements the http interface.
 impl HTTP {
-// new returns a new HTTP.
+/// new returns a new HTTP.
 #[instrument(skip_all)]
 pub fn new(scheme: &str) -> HTTP {
 Self {
@@ -38,7 +38,7 @@ impl HTTP {
 }
 }
-// client returns a new reqwest client.
+/// client returns a new reqwest client.
 #[instrument(skip_all)]
 fn client(
 &self,
@@ -68,16 +68,16 @@ impl HTTP {
 }
 }
-// Backend implements the Backend trait.
+/// Backend implements the Backend trait.
 #[tonic::async_trait]
 impl super::Backend for HTTP {
-// scheme returns the scheme of the HTTP backend.
+/// scheme returns the scheme of the HTTP backend.
 #[instrument(skip_all)]
 fn scheme(&self) -> String {
 self.scheme.clone()
 }
-// head gets the header of the request.
+/// head gets the header of the request.
 #[instrument(skip_all)]
 async fn head(&self, request: super::HeadRequest) -> Result<super::HeadResponse> {
 info!(
@@ -124,7 +124,7 @@ impl super::Backend for HTTP {
 })
 }
-// get gets the content of the request.
+/// get gets the content of the request.
 #[instrument(skip_all)]
 async fn get(&self, request: super::GetRequest) -> Result<super::GetResponse<super::Body>> {
 info!(
@@ -171,9 +171,9 @@ impl super::Backend for HTTP {
 }
 }
-// Default implements the Default trait.
+/// Default implements the Default trait.
 impl Default for HTTP {
-// default returns a new default HTTP.
+/// default returns a new default HTTP.
 fn default() -> Self {
 Self::new("http")
 }

View File

@@ -32,104 +32,104 @@ use url::Url;
 pub mod http;
 pub mod object_storage;
-// NAME is the name of the package.
+/// NAME is the name of the package.
 pub const NAME: &str = "backend";
-// Body is the body of the response.
+/// Body is the body of the response.
 pub type Body = Box<dyn AsyncRead + Send + Unpin>;
-// HeadRequest is the head request for backend.
+/// HeadRequest is the head request for backend.
 pub struct HeadRequest {
-// task_id is the id of the task.
+/// task_id is the id of the task.
 pub task_id: String,
-// url is the url of the request.
+/// url is the url of the request.
 pub url: String,
-// http_header is the headers of the request.
+/// http_header is the headers of the request.
 pub http_header: Option<HeaderMap>,
-// timeout is the timeout of the request.
+/// timeout is the timeout of the request.
 pub timeout: Duration,
-// client_certs is the client certificates for the request.
+/// client_certs is the client certificates for the request.
 pub client_certs: Option<Vec<CertificateDer<'static>>>,
-// object_storage is the object storage related information.
+/// object_storage is the object storage related information.
 pub object_storage: Option<ObjectStorage>,
 }
-// HeadResponse is the head response for backend.
+/// HeadResponse is the head response for backend.
 #[derive(Debug)]
 pub struct HeadResponse {
-// success is the success of the response.
+/// success is the success of the response.
 pub success: bool,
-// content_length is the content length of the response.
+/// content_length is the content length of the response.
 pub content_length: Option<u64>,
-// http_header is the headers of the response.
+/// http_header is the headers of the response.
 pub http_header: Option<HeaderMap>,
-// http_status_code is the status code of the response.
+/// http_status_code is the status code of the response.
 pub http_status_code: Option<reqwest::StatusCode>,
-// Entries is the information of the entries in the directory.
+/// Entries is the information of the entries in the directory.
 pub entries: Vec<DirEntry>,
-// error_message is the error message of the response.
+/// error_message is the error message of the response.
 pub error_message: Option<String>,
 }
-// GetRequest is the get request for backend.
+/// GetRequest is the get request for backend.
 pub struct GetRequest {
-// task_id is the id of the task.
+/// task_id is the id of the task.
 pub task_id: String,
-// piece_id is the id of the piece.
+/// piece_id is the id of the piece.
 pub piece_id: String,
-// url is the url of the request.
+/// url is the url of the request.
 pub url: String,
-// range is the range of the request.
+/// range is the range of the request.
 pub range: Option<Range>,
-// http_header is the headers of the request.
+/// http_header is the headers of the request.
 pub http_header: Option<HeaderMap>,
-// timeout is the timeout of the request.
+/// timeout is the timeout of the request.
 pub timeout: Duration,
-// client_certs is the client certificates for the request.
+/// client_certs is the client certificates for the request.
 pub client_certs: Option<Vec<CertificateDer<'static>>>,
-// the object storage related information.
+/// the object storage related information.
 pub object_storage: Option<ObjectStorage>,
 }
-// GetResponse is the get response for backend.
+/// GetResponse is the get response for backend.
 pub struct GetResponse<R>
 where
 R: AsyncRead + Unpin,
 {
-// success is the success of the response.
+/// success is the success of the response.
 pub success: bool,
-// http_header is the headers of the response.
+/// http_header is the headers of the response.
 pub http_header: Option<HeaderMap>,
-// http_status_code is the status code of the response.
+/// http_status_code is the status code of the response.
 pub http_status_code: Option<reqwest::StatusCode>,
-// body is the content of the response.
+/// body is the content of the response.
 pub reader: R,
-// error_message is the error message of the response.
+/// error_message is the error message of the response.
 pub error_message: Option<String>,
 }
-// GetResponse implements the response functions.
+/// GetResponse implements the response functions.
 impl<R> GetResponse<R>
 where
 R: AsyncRead + Unpin,
@@ -146,64 +146,64 @@ where
 /// The File Entry of a directory, including some relevant file metadata.
 #[derive(Debug, PartialEq, Eq)]
 pub struct DirEntry {
-// url is the url of the entry.
+/// url is the url of the entry.
 pub url: String,
-// content_length is the content length of the entry.
+/// content_length is the content length of the entry.
 pub content_length: usize,
-// is_dir is the flag of the entry is a directory.
+/// is_dir is the flag of the entry is a directory.
 pub is_dir: bool,
 }
-// Backend is the interface of the backend.
+/// Backend is the interface of the backend.
 #[tonic::async_trait]
 pub trait Backend {
-// scheme returns the scheme of the backend.
+/// scheme returns the scheme of the backend.
 fn scheme(&self) -> String;
-// head gets the header of the request.
+/// head gets the header of the request.
 async fn head(&self, request: HeadRequest) -> Result<HeadResponse>;
-// get gets the content of the request.
+/// get gets the content of the request.
 async fn get(&self, request: GetRequest) -> Result<GetResponse<Body>>;
 }
-// BackendFactory is the factory of the backend.
+/// BackendFactory is the factory of the backend.
 #[derive(Default)]
 pub struct BackendFactory {
-// backends is the backends of the factory, including the plugin backends and
-// the builtin backends.
+/// backends is the backends of the factory, including the plugin backends and
+/// the builtin backends.
 backends: HashMap<String, Box<dyn Backend + Send + Sync>>,
-// libraries is used to store the plugin's dynamic library, because when not saving the `Library`,
-// it will drop when out of scope, resulting in the null pointer error.
+/// libraries is used to store the plugin's dynamic library, because when not saving the `Library`,
+/// it will drop when out of scope, resulting in the null pointer error.
 libraries: Vec<Library>,
 }
-// BackendFactory implements the factory of the backend. It supports loading builtin
-// backends and plugin backends.
-//
-// The builtin backends are http, https, etc, which are implemented
-// by the HTTP struct.
-//
-// The plugin backends are shared libraries, which are loaded
-// by the `register_plugin` function. The file name of the shared
-// library is the scheme of the backend. The shared library
-// should implement the Backend trait. Default plugin directory
-// is `/var/lib/dragonfly/plugins/` in linux and `~/.dragonfly/plugins`
-// in macos. The plugin directory can be set by the dfdaemon configuration.
-//
-// For example:
-// If implement a plugin backend named `hdfs`, the shared library
-// should be named `libhdfs.so` or `libhdfs.dylib` and move the file to the backend plugin directory
-// `/var/lib/dragonfly/plugins/backend/` in linux or `~/.dragonfly/plugins/backend/`
-// in macos. When the dfdaemon starts, it will load the `hdfs` plugin backend in the
-// backend plugin directory. So the dfdaemon or dfget can use the `hdfs` plugin backend
-// to download the file by the url `hdfs://example.com/file`.
-// The backend plugin implementation can refer to
-// https://github.com/dragonflyoss/client/tree/main/dragonfly-client-backend/examples/plugin/.
+/// BackendFactory implements the factory of the backend. It supports loading builtin
+/// backends and plugin backends.
+///
+/// The builtin backends are http, https, etc, which are implemented
+/// by the HTTP struct.
+///
+/// The plugin backends are shared libraries, which are loaded
+/// by the `register_plugin` function. The file name of the shared
+/// library is the scheme of the backend. The shared library
+/// should implement the Backend trait. Default plugin directory
+/// is `/var/lib/dragonfly/plugins/` in linux and `~/.dragonfly/plugins`
+/// in macos. The plugin directory can be set by the dfdaemon configuration.
+///
+/// For example:
+/// If implement a plugin backend named `hdfs`, the shared library
+/// should be named `libhdfs.so` or `libhdfs.dylib` and move the file to the backend plugin directory
+/// `/var/lib/dragonfly/plugins/backend/` in linux or `~/.dragonfly/plugins/backend/`
+/// in macos. When the dfdaemon starts, it will load the `hdfs` plugin backend in the
+/// backend plugin directory. So the dfdaemon or dfget can use the `hdfs` plugin backend
+/// to download the file by the url `hdfs://example.com/file`.
+/// The backend plugin implementation can refer to
+/// https://github.com/dragonflyoss/client/tree/main/dragonfly-client-backend/examples/plugin/.
 impl BackendFactory {
-// new returns a new BackendFactory.
+/// new returns a new BackendFactory.
 #[instrument(skip_all)]
 pub fn new(plugin_dir: Option<&Path>) -> Result<Self> {
 let mut backend_factory = Self::default();
@@ -220,7 +220,7 @@ impl BackendFactory {
 Ok(backend_factory)
 }
-// build returns the backend by the scheme of the url.
+/// build returns the backend by the scheme of the url.
 #[instrument(skip_all)]
 pub fn build(&self, url: &str) -> Result<&(dyn Backend + Send + Sync)> {
 let url = Url::parse(url).or_err(ErrorType::ParseError)?;
@@ -231,7 +231,7 @@ impl BackendFactory {
 .ok_or(Error::InvalidParameter)
 }
-// load_builtin_backends loads the builtin backends.
+/// load_builtin_backends loads the builtin backends.
 #[instrument(skip_all)]
 fn load_builtin_backends(&mut self) {
 self.backends
@@ -291,7 +291,7 @@ impl BackendFactory {
 info!("load [cos] builtin backend");
 }
-// load_plugin_backends loads the plugin backends.
+/// load_plugin_backends loads the plugin backends.
 #[instrument(skip_all)]
 fn load_plugin_backends(&mut self, plugin_dir: &Path) -> Result<()> {
 let backend_plugin_dir = plugin_dir.join(NAME);

View File

@@ -27,31 +27,31 @@ use tokio_util::io::StreamReader;
 use tracing::{error, info, instrument};
 use url::Url;
-// Scheme is the scheme of the object storage.
+/// Scheme is the scheme of the object storage.
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub enum Scheme {
-// S3 is the Amazon Simple Storage Service.
+/// S3 is the Amazon Simple Storage Service.
 S3,
-// GCS is the Google Cloud Storage Service.
+/// GCS is the Google Cloud Storage Service.
 GCS,
-// ABS is the Azure Blob Storage Service.
+/// ABS is the Azure Blob Storage Service.
 ABS,
-// OSS is the Aliyun Object Storage Service.
+/// OSS is the Aliyun Object Storage Service.
 OSS,
-// OBS is the Huawei Cloud Object Storage Service.
+/// OBS is the Huawei Cloud Object Storage Service.
 OBS,
-// COS is the Tencent Cloud Object Storage Service.
+/// COS is the Tencent Cloud Object Storage Service.
 COS,
 }
-// Scheme implements the Display.
+/// Scheme implements the Display.
 impl fmt::Display for Scheme {
-// fmt formats the value using the given formatter.
+/// fmt formats the value using the given formatter.
 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 match self {
 Scheme::S3 => write!(f, "s3"),
@@ -64,11 +64,11 @@ impl fmt::Display for Scheme {
 }
 }
-// Scheme implements the FromStr.
+/// Scheme implements the FromStr.
 impl FromStr for Scheme {
 type Err = String;
-// from_str parses an scheme string.
+/// from_str parses an scheme string.
 fn from_str(s: &str) -> Result<Self, Self::Err> {
 match s {
 "s3" => Ok(Scheme::S3),
@@ -82,30 +82,30 @@ impl FromStr for Scheme {
 }
 }
-// ParsedURL is a struct that contains the parsed URL, bucket, and path.
+/// ParsedURL is a struct that contains the parsed URL, bucket, and path.
 #[derive(Debug)]
 pub struct ParsedURL {
-// url is the requested URL of the object storage.
+/// url is the requested URL of the object storage.
 pub url: Url,
-// scheme is the scheme of the object storage.
+/// scheme is the scheme of the object storage.
 pub scheme: Scheme,
-// bucket is the bucket of the object storage.
+/// bucket is the bucket of the object storage.
 pub bucket: String,
-// key is the key of the object storage.
+/// key is the key of the object storage.
 pub key: String,
 }
-// ParsedURL implements the ParsedURL trait.
+/// ParsedURL implements the ParsedURL trait.
 impl ParsedURL {
-// is_dir returns true if the URL path ends with a slash.
+/// is_dir returns true if the URL path ends with a slash.
 pub fn is_dir(&self) -> bool {
 self.url.path().ends_with('/')
 }
-// make_url_by_entry_path makes a URL by the entry path when the URL is a directory.
+/// make_url_by_entry_path makes a URL by the entry path when the URL is a directory.
 pub fn make_url_by_entry_path(&self, entry_path: &str) -> Url {
 let mut url = self.url.clone();
 url.set_path(entry_path);
@@ -113,13 +113,13 @@ impl ParsedURL {
 }
 }
-// ParsedURL implements the TryFrom trait for the URL.
-//
-// The object storage URL should be in the format of `scheme://<bucket>/<path>`.
+/// ParsedURL implements the TryFrom trait for the URL.
+///
+/// The object storage URL should be in the format of `scheme://<bucket>/<path>`.
 impl TryFrom<Url> for ParsedURL {
 type Error = ClientError;
-// try_from parses the URL and returns a ParsedURL.
+/// try_from parses the URL and returns a ParsedURL.
 fn try_from(url: Url) -> Result<Self, Self::Error> {
 // Get the bucket from the URL host.
 let bucket = url
@@ -150,7 +150,7 @@ impl TryFrom<Url> for ParsedURL {
 }
 }
-// make_need_fields_message makes a message for the need fields in the object storage.
+/// make_need_fields_message makes a message for the need fields in the object storage.
 macro_rules! make_need_fields_message {
 ($var:ident {$($field:ident),*}) => {{
 let mut need_fields: Vec<&'static str> = vec![];
@@ -165,21 +165,21 @@ macro_rules! make_need_fields_message {
 }};
 }
-// ObjectStorage is a struct that implements the backend trait.
+/// ObjectStorage is a struct that implements the backend trait.
 pub struct ObjectStorage {
-// scheme is the scheme of the object storage.
+/// scheme is the scheme of the object storage.
 scheme: Scheme,
 }
-// ObjectStorage implements the ObjectStorage trait.
+/// ObjectStorage implements the ObjectStorage trait.
 impl ObjectStorage {
-// Returns ObjectStorage that implements the Backend trait.
+/// Returns ObjectStorage that implements the Backend trait.
 #[instrument(skip_all)]
 pub fn new(scheme: Scheme) -> ObjectStorage {
 Self { scheme }
 }
-// operator initializes the operator with the parsed URL and object storage.
+/// operator initializes the operator with the parsed URL and object storage.
 #[instrument(skip_all)]
 pub fn operator(
 &self,
@@ -206,7 +206,7 @@ impl ObjectStorage {
 }
 }
-// s3_operator initializes the S3 operator with the parsed URL and object storage.
+/// s3_operator initializes the S3 operator with the parsed URL and object storage.
 #[instrument(skip_all)]
 pub fn s3_operator(
 &self,
@@ -260,7 +260,7 @@ impl ObjectStorage {
 Ok(Operator::new(builder)?.finish())
 }
-// gcs_operator initializes the GCS operator with the parsed URL and object storage.
+/// gcs_operator initializes the GCS operator with the parsed URL and object storage.
 #[instrument(skip_all)]
 pub fn gcs_operator(
 &self,
@@ -296,7 +296,7 @@ impl ObjectStorage {
 Ok(Operator::new(builder)?.finish())
 }
-// abs_operator initializes the ABS operator with the parsed URL and object storage.
+/// abs_operator initializes the ABS operator with the parsed URL and object storage.
 #[instrument(skip_all)]
 pub fn abs_operator(
 &self,
@@ -340,7 +340,7 @@ impl ObjectStorage {
 Ok(Operator::new(builder)?.finish())
 }
-// oss_operator initializes the OSS operator with the parsed URL and object storage.
+/// oss_operator initializes the OSS operator with the parsed URL and object storage.
 #[instrument(skip_all)]
 pub fn oss_operator(
 &self,
@@ -385,7 +385,7 @@ impl ObjectStorage {
 Ok(Operator::new(builder)?.finish())
 }
-// obs_operator initializes the OBS operator with the parsed URL and object storage.
+/// obs_operator initializes the OBS operator with the parsed URL and object storage.
 #[instrument(skip_all)]
 pub fn obs_operator(
 &self,
@@ -429,7 +429,7 @@ impl ObjectStorage {
 Ok(Operator::new(builder)?.finish())
 }
-// cos_operator initializes the COS operator with the parsed URL and object storage.
+/// cos_operator initializes the COS operator with the parsed URL and object storage.
 pub fn cos_operator(
 &self,
 parsed_url: &super::object_storage::ParsedURL,
@@ -473,16 +473,16 @@ impl ObjectStorage {
 }
 }
-// Backend implements the Backend trait.
+/// Backend implements the Backend trait.
 #[tonic::async_trait]
 impl crate::Backend for ObjectStorage {
-// scheme returns the scheme of the object storage.
+/// scheme returns the scheme of the object storage.
 #[instrument(skip_all)]
 fn scheme(&self) -> String {
 self.scheme.to_string()
 }
-//head gets the header of the request.
+/// head gets the header of the request.
 #[instrument(skip_all)]
 async fn head(&self, request: super::HeadRequest) -> ClientResult<super::HeadResponse> {
 info!(
@@ -568,7 +568,7 @@ impl crate::Backend for ObjectStorage {
 })
 }
-// Returns content of requested file.
+/// Returns content of requested file.
 #[instrument(skip_all)]
 async fn get(
 &self,

View File

@@ -16,16 +16,16 @@
 use std::path::PathBuf;
-// NAME is the name of dfcache.
+/// NAME is the name of dfcache.
 pub const NAME: &str = "dfcache";
-// default_dfcache_log_dir is the default log directory for dfcache.
+/// default_dfcache_log_dir is the default log directory for dfcache.
 #[inline]
 pub fn default_dfcache_log_dir() -> PathBuf {
 crate::default_log_dir().join(NAME)
 }
-// default_dfcache_persistent_replica_count is the default replica count of the persistent cache task.
+/// default_dfcache_persistent_replica_count is the default replica count of the persistent cache task.
 #[inline]
 pub fn default_dfcache_persistent_replica_count() -> u64 {
 2

File diff suppressed because it is too large.

View File

@@ -16,10 +16,10 @@
 use std::path::PathBuf;
-// NAME is the name of dfget.
+/// NAME is the name of dfget.
 pub const NAME: &str = "dfget";
-// default_dfget_log_dir is the default log directory for dfget.
+/// default_dfget_log_dir is the default log directory for dfget.
 pub fn default_dfget_log_dir() -> PathBuf {
 crate::default_log_dir().join(NAME)
 }

View File

@@ -24,40 +24,40 @@ use std::path::PathBuf;
 use tracing::{info, instrument};
 use validator::Validate;
-// NAME is the name of dfinit.
+/// NAME is the name of dfinit.
 pub const NAME: &str = "dfinit";
-// default_dfinit_config_path is the default config path for dfinit.
+/// default_dfinit_config_path is the default config path for dfinit.
 #[inline]
 pub fn default_dfinit_config_path() -> PathBuf {
 crate::default_config_dir().join("dfinit.yaml")
 }
-// default_dfinit_log_dir is the default log directory for dfinit.
+/// default_dfinit_log_dir is the default log directory for dfinit.
 pub fn default_dfinit_log_dir() -> PathBuf {
 crate::default_log_dir().join(NAME)
 }
-// default_container_runtime_containerd_config_path is the default containerd configuration path.
+/// default_container_runtime_containerd_config_path is the default containerd configuration path.
 #[inline]
 fn default_container_runtime_containerd_config_path() -> PathBuf {
 PathBuf::from("/etc/containerd/config.toml")
 }
-// default_container_runtime_docker_config_path is the default docker configuration path.
+/// default_container_runtime_docker_config_path is the default docker configuration path.
 #[inline]
 fn default_container_runtime_docker_config_path() -> PathBuf {
 PathBuf::from("/etc/docker/daemon.json")
 }
-// default_container_runtime_crio_config_path is the default cri-o configuration path.
+/// default_container_runtime_crio_config_path is the default cri-o configuration path.
 #[inline]
 fn default_container_runtime_crio_config_path() -> PathBuf {
 PathBuf::from("/etc/containers/registries.conf")
 }
-// default_container_runtime_crio_unqualified_search_registries is the default unqualified search registries of cri-o,
-// refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#global-settings.
+/// default_container_runtime_crio_unqualified_search_registries is the default unqualified search registries of cri-o,
+/// refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#global-settings.
 #[inline]
 fn default_container_runtime_crio_unqualified_search_registries() -> Vec<String> {
 vec![
@@ -67,7 +67,7 @@ fn default_container_runtime_crio_unqualified_search_registries() -> Vec<String>
 ]
 }
-// default_proxy_addr is the default proxy address of dfdaemon.
+/// default_proxy_addr is the default proxy address of dfdaemon.
 #[inline]
 fn default_proxy_addr() -> String {
 format!(
@@ -77,95 +77,95 @@ fn default_proxy_addr() -> String {
 )
 }
-// default_container_runtime_containerd_registry_host_capabilities is the default
-// capabilities of the containerd registry.
+/// default_container_runtime_containerd_registry_host_capabilities is the default
+/// capabilities of the containerd registry.
 #[inline]
 fn default_container_runtime_containerd_registry_capabilities() -> Vec<String> {
 vec!["pull".to_string(), "resolve".to_string()]
 }
-// Registry is the registry configuration for containerd.
+/// Registry is the registry configuration for containerd.
 #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)]
 #[serde(default, rename_all = "camelCase")]
 pub struct ContainerdRegistry {
-// host_namespace is the location where container images and artifacts are sourced,
-// refer to https://github.com/containerd/containerd/blob/main/docs/hosts.md#registry-host-namespace.
-// The registry host namespace portion is [registry_host_name|IP address][:port], such as
-// docker.io, ghcr.io, gcr.io, etc.
+/// host_namespace is the location where container images and artifacts are sourced,
+/// refer to https://github.com/containerd/containerd/blob/main/docs/hosts.md#registry-host-namespace.
+/// The registry host namespace portion is [registry_host_name|IP address][:port], such as
+/// docker.io, ghcr.io, gcr.io, etc.
 pub host_namespace: String,
-// server_addr specifies the default server for this registry host namespace, refer to
-// https://github.com/containerd/containerd/blob/main/docs/hosts.md#server-field.
+/// server_addr specifies the default server for this registry host namespace, refer to
+/// https://github.com/containerd/containerd/blob/main/docs/hosts.md#server-field.
 pub server_addr: String,
-// capabilities is the list of capabilities in containerd configuration, refer to
-// https://github.com/containerd/containerd/blob/main/docs/hosts.md#capabilities-field.
+/// capabilities is the list of capabilities in containerd configuration, refer to
+/// https://github.com/containerd/containerd/blob/main/docs/hosts.md#capabilities-field.
 #[serde(default = "default_container_runtime_containerd_registry_capabilities")]
 pub capabilities: Vec<String>,
-// skip_verify is the flag to skip verifying the server's certificate, refer to
-// https://github.com/containerd/containerd/blob/main/docs/hosts.md#bypass-tls-verification-example.
+/// skip_verify is the flag to skip verifying the server's certificate, refer to
+/// https://github.com/containerd/containerd/blob/main/docs/hosts.md#bypass-tls-verification-example.
 pub skip_verify: Option<bool>,
-// ca (Certificate Authority Certification) can be set to a path or an array of paths each pointing
-// to a ca file for use in authenticating with the registry namespace, refer to
-// https://github.com/containerd/containerd/blob/main/docs/hosts.md#ca-field.
+/// ca (Certificate Authority Certification) can be set to a path or an array of paths each pointing
+/// to a ca file for use in authenticating with the registry namespace, refer to
+/// https://github.com/containerd/containerd/blob/main/docs/hosts.md#ca-field.
 pub ca: Option<Vec<String>>,
 }
-// Containerd is the containerd configuration for dfinit.
+/// Containerd is the containerd configuration for dfinit.
 #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)]
 #[serde(default, rename_all = "camelCase")]
 pub struct Containerd {
-// config_path is the path of containerd configuration file.
+/// config_path is the path of containerd configuration file.
 #[serde(default = "default_container_runtime_containerd_config_path")]
 pub config_path: PathBuf,
-// registries is the list of containerd registries.
+/// registries is the list of containerd registries.
 pub registries: Vec<ContainerdRegistry>,
 }
-// CRIORegistry is the registry configuration for cri-o.
+/// CRIORegistry is the registry configuration for cri-o.
 #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize, PartialEq, Eq)]
 #[serde(default, rename_all = "camelCase")]
 pub struct CRIORegistry {
-// prefix is the prefix of the user-specified image name, refer to
-// https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table.
+/// prefix is the prefix of the user-specified image name, refer to
+/// https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#choosing-a-registry-toml-table.
 pub prefix: String,
-// location accepts the same format as the prefix field, and specifies the physical location of the prefix-rooted namespace,
-// refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#remapping-and-mirroring-registries.
+/// location accepts the same format as the prefix field, and specifies the physical location of the prefix-rooted namespace,
+/// refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#remapping-and-mirroring-registries.
 pub location: String,
 }
-// CRIO is the cri-o configuration for dfinit.
+/// CRIO is the cri-o configuration for dfinit.
 #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)]
 #[serde(default, rename_all = "camelCase")]
 pub struct CRIO {
-// config_path is the path of cri-o registries's configuration file.
+/// config_path is the path of cri-o registries's configuration file.
 #[serde(default = "default_container_runtime_crio_config_path")]
 pub config_path: PathBuf,
-// unqualified_search_registries is an array of host[:port] registries to try when pulling an unqualified image, in order.
-// Refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#global-settings.
+/// unqualified_search_registries is an array of host[:port] registries to try when pulling an unqualified image, in order.
+/// Refer to https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#global-settings.
 #[serde(default = "default_container_runtime_crio_unqualified_search_registries")]
 pub unqualified_search_registries: Vec<String>,
-// registries is the list of cri-o registries, refer to
-// https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#namespaced-registry-settings.
+/// registries is the list of cri-o registries, refer to
+/// https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md#namespaced-registry-settings.
 pub registries: Vec<CRIORegistry>,
 }
-// Docker is the docker configuration for dfinit.
+/// Docker is the docker configuration for dfinit.
 #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)]
 #[serde(default, rename_all = "camelCase")]
 pub struct Docker {
-// config_path is the path of docker configuration file.
+/// config_path is the path of docker configuration file.
 #[serde(default = "default_container_runtime_docker_config_path")]
 pub config_path: PathBuf,
 }
-// ContainerRuntime is the container runtime configuration for dfinit.
+/// ContainerRuntime is the container runtime configuration for dfinit.
 #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)]
 #[serde(default, rename_all = "camelCase")]
 pub struct ContainerRuntime {
@@ -173,7 +173,7 @@ pub struct ContainerRuntime {
 pub config: Option<ContainerRuntimeConfig>,
 }
-// ContainerRuntimeConfig is the container runtime configuration for dfinit.
+/// ContainerRuntimeConfig is the container runtime configuration for dfinit.
 #[derive(Debug, Clone)]
 pub enum ContainerRuntimeConfig {
 Containerd(Containerd),
@@ -181,7 +181,7 @@ pub enum ContainerRuntimeConfig {
 CRIO(CRIO),
 }
-// Serialize is the implementation of the Serialize trait for ContainerRuntimeConfig.
+/// Serialize is the implementation of the Serialize trait for ContainerRuntimeConfig.
 impl Serialize for ContainerRuntimeConfig {
 fn serialize<S>(&self, serializer: S) -> std::prelude::v1::Result<S::Ok, S::Error>
 where
@@ -207,7 +207,7 @@ impl Serialize for ContainerRuntimeConfig {
 }
 }
-// Deserialize is the implementation of the Deserialize trait for ContainerRuntimeConfig.
+/// Deserialize is the implementation of the Deserialize trait for ContainerRuntimeConfig.
 impl<'de> Deserialize<'de> for ContainerRuntimeConfig {
 fn deserialize<D>(deserializer: D) -> std::prelude::v1::Result<Self, D::Error>
 where
@@ -241,7 +241,7 @@ impl<'de> Deserialize<'de> for ContainerRuntimeConfig {
 }
 }
-// Proxy is the proxy server configuration for dfdaemon.
+/// Proxy is the proxy server configuration for dfdaemon.
 #[derive(Debug, Clone, Validate, Deserialize, Serialize)]
 #[serde(default, rename_all = "camelCase")]
 pub struct Proxy {
@@ -250,7 +250,7 @@ pub struct Proxy {
 pub addr: String,
 }
-// Proxy implements Default.
+/// Proxy implements Default.
 impl Default for Proxy {
 fn default() -> Self {
 Self {
@@ -259,22 +259,22 @@ impl Default for Proxy {
 }
 }
-// Config is the configuration for dfinit.
+/// Config is the configuration for dfinit.
 #[derive(Debug, Clone, Default, Validate, Deserialize, Serialize)]
 #[serde(default, rename_all = "camelCase")]
 pub struct Config {
-// proxy is the configuration of the dfdaemon's HTTP/HTTPS proxy.
+/// proxy is the configuration of the dfdaemon's HTTP/HTTPS proxy.
 #[validate]
 pub proxy: Proxy,
-// container_runtime is the container runtime configuration.
+/// container_runtime is the container runtime configuration.
 #[validate]
 pub container_runtime: ContainerRuntime,
 }
-// Config implements the config operation of dfinit.
+/// Config implements the config operation of dfinit.
 impl Config {
-// load loads configuration from file.
+/// load loads configuration from file.
 #[instrument(skip_all)]
 pub fn load(path: &PathBuf) -> Result<Config> {
 // Load configuration from file.

View File

@@ -16,10 +16,10 @@
 use std::path::PathBuf;
-// NAME is the name of dfstore.
+/// NAME is the name of dfstore.
 pub const NAME: &str = "dfstore";
-// default_dfstore_log_dir is the default log directory for dfstore.
+/// default_dfstore_log_dir is the default log directory for dfstore.
 pub fn default_dfstore_log_dir() -> PathBuf {
 crate::default_log_dir().join(NAME)
 }

View File

@@ -22,22 +22,22 @@ pub mod dfget;
 pub mod dfinit;
 pub mod dfstore;
-// SERVICE_NAME is the name of the service.
+/// SERVICE_NAME is the name of the service.
 pub const SERVICE_NAME: &str = "dragonfly";
-// NAME is the name of the package.
+/// NAME is the name of the package.
 pub const NAME: &str = "client";
-// CARGO_PKG_VERSION is the version of the cargo package.
+/// CARGO_PKG_VERSION is the version of the cargo package.
 pub const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION");
-// CARGO_PKG_RUSTC_VERSION is the minimum Rust version supported by the package, not the current Rust version.
+/// CARGO_PKG_RUSTC_VERSION is the minimum Rust version supported by the package, not the current Rust version.
 pub const CARGO_PKG_RUSTC_VERSION: &str = env!("CARGO_PKG_RUST_VERSION");
-// GIT_HASH is the git hash of the package.
+/// GIT_HASH is the git hash of the package.
 pub const GIT_HASH: Option<&str> = option_env!("GIT_HASH");
-// default_root_dir is the default root directory for client.
+/// default_root_dir is the default root directory for client.
 pub fn default_root_dir() -> PathBuf {
 #[cfg(target_os = "linux")]
 return PathBuf::from("/var/run/dragonfly/");
@@ -46,7 +46,7 @@ pub fn default_root_dir() -> PathBuf {
 return home::home_dir().unwrap().join(".dragonfly");
 }
-// default_config_dir is the default config directory for client.
+/// default_config_dir is the default config directory for client.
 pub fn default_config_dir() -> PathBuf {
 #[cfg(target_os = "linux")]
 return PathBuf::from("/etc/dragonfly/");
@@ -55,7 +55,7 @@ pub fn default_config_dir() -> PathBuf {
 return home::home_dir().unwrap().join(".dragonfly").join("config");
 }
-// default_log_dir is the default log directory for client.
+/// default_log_dir is the default log directory for client.
 pub fn default_log_dir() -> PathBuf {
 #[cfg(target_os = "linux")]
 return PathBuf::from("/var/log/dragonfly/");
@@ -64,7 +64,7 @@ pub fn default_log_dir() -> PathBuf {
 return home::home_dir().unwrap().join(".dragonfly").join("logs");
 }
-// default_storage_dir is the default storage directory for client.
+/// default_storage_dir is the default storage directory for client.
 pub fn default_storage_dir() -> PathBuf {
 #[cfg(target_os = "linux")]
 return PathBuf::from("/var/lib/dragonfly/");
@@ -73,7 +73,7 @@ pub fn default_storage_dir() -> PathBuf {
 return home::home_dir().unwrap().join(".dragonfly").join("storage");
 }
-// default_lock_dir is the default lock directory for client.
+/// default_lock_dir is the default lock directory for client.
 pub fn default_lock_dir() -> PathBuf {
 #[cfg(target_os = "linux")]
 return PathBuf::from("/var/lock/dragonfly/");
@@ -82,7 +82,7 @@ pub fn default_lock_dir() -> PathBuf {
 return home::home_dir().unwrap().join(".dragonfly");
 }
-// default_plugin_dir is the default plugin directory for client.
+/// default_plugin_dir is the default plugin directory for client.
 pub fn default_plugin_dir() -> PathBuf {
 #[cfg(target_os = "linux")]
 return PathBuf::from("/var/lib/dragonfly/plugins/");
@@ -91,7 +91,7 @@ pub fn default_plugin_dir() -> PathBuf {
 return home::home_dir().unwrap().join(".dragonfly").join("plugins");
 }
-// default_cache_dir is the default cache directory for client.
+/// default_cache_dir is the default cache directory for client.
 pub fn default_cache_dir() -> PathBuf {
 #[cfg(target_os = "linux")]
 return PathBuf::from("/var/cache/dragonfly/");

View File

@@ -18,7 +18,7 @@ use std::{error::Error as ErrorTrait, fmt};
 use super::message::Message;
-// ErrorType is the type of the error.
+/// ErrorType is the type of the error.
 #[derive(Debug, PartialEq, Eq, Clone)]
 pub enum ErrorType {
 StorageError,
@@ -34,9 +34,9 @@ pub enum ErrorType {
 PluginError,
 }
-// ErrorType implements the display for the error type.
+/// ErrorType implements the display for the error type.
 impl ErrorType {
-// as_str returns the string of the error type.
+/// as_str returns the string of the error type.
 pub fn as_str(&self) -> &'static str {
 match self {
 ErrorType::StorageError => "StorageError",
@@ -54,7 +54,7 @@ impl ErrorType {
 }
 }
-// ExternalError is the external error.
+/// ExternalError is the external error.
 #[derive(Debug)]
 pub struct ExternalError {
 pub etype: ErrorType,
@@ -62,9 +62,9 @@ pub struct ExternalError {
 pub context: Option<Message>,
 }
-// ExternalError implements the error trait.
+/// ExternalError implements the error trait.
 impl ExternalError {
-// new returns a new ExternalError.
+/// new returns a new ExternalError.
 pub fn new(etype: ErrorType) -> Self {
 ExternalError {
 etype,
@@ -73,19 +73,19 @@ impl ExternalError {
 }
 }
-// with_context returns a new ExternalError with the context.
+/// with_context returns a new ExternalError with the context.
 pub fn with_context(mut self, message: impl Into<Message>) -> Self {
 self.context = Some(message.into());
 self
 }
-// with_cause returns a new ExternalError with the cause.
+/// with_cause returns a new ExternalError with the cause.
 pub fn with_cause(mut self, cause: Box<dyn ErrorTrait + Send + Sync>) -> Self {
 self.cause = Some(cause);
 self
 }
-// chain_display returns the display of the error with the previous error.
+/// chain_display returns the display of the error with the previous error.
 fn chain_display(
 &self,
 previous: Option<&ExternalError>,
@@ -112,17 +112,17 @@ impl ExternalError {
 }
 }
-// ExternalError implements the display for the error.
+/// ExternalError implements the display for the error.
 impl fmt::Display for ExternalError {
 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 self.chain_display(None, f)
 }
 }
-// ExternalError implements the error trait.
+/// ExternalError implements the error trait.
 impl ErrorTrait for ExternalError {}
-// OrErr is the trait to extend the result with error.
+/// OrErr is the trait to extend the result with error.
 pub trait OrErr<T, E> {
 /// Wrap the E in [Result] with new [ErrorType] and context, the existing E will be the cause.
 ///
@@ -136,7 +136,7 @@ pub trait OrErr<T, E> {
 E: Into<Box<dyn ErrorTrait + Send + Sync>>;
 }
-// OrErr implements the OrErr for Result.
+/// OrErr implements the OrErr for Result.
 impl<T, E> OrErr<T, E> for Result<T, E> {
 fn or_err(self, et: ErrorType) -> Result<T, ExternalError>
 where
@@ -157,28 +157,28 @@ impl<T, E> OrErr<T, E> for Result<T, E> {
 }
 }
-// BackendError is the error for backend.
+/// BackendError is the error for backend.
 #[derive(Debug, thiserror::Error)]
 #[error("backend error {message}")]
 pub struct BackendError {
-// message is the error message.
+/// message is the error message.
 pub message: String,
-// status_code is the status code of the response.
+/// status_code is the status code of the response.
 pub status_code: Option<reqwest::StatusCode>,
-// header is the headers of the response.
+/// header is the headers of the response.
 pub header: Option<reqwest::header::HeaderMap>,
 }
-// DownloadFromRemotePeerFailed is the error when the download from remote peer is failed.
+/// DownloadFromRemotePeerFailed is the error when the download from remote peer is failed.
 #[derive(Debug, thiserror::Error)]
 #[error("download piece {piece_number} from remote peer {parent_id} failed")]
 pub struct DownloadFromRemotePeerFailed {
-// piece_number is the number of the piece.
+/// piece_number is the number of the piece.
 pub piece_number: u32,
-// parent_id is the parent id of the piece.
+/// parent_id is the parent id of the piece.
 pub parent_id: String,
 }

View File

@@ -16,29 +16,29 @@
 use std::borrow::Cow;
-// Message is the message for the error.
+/// Message is the message for the error.
 #[derive(Debug)]
 pub struct Message(Cow<'static, str>);
-// From<&'static str> for Message implements the conversion from &'static str to Message.
+/// From<&'static str> for Message implements the conversion from &'static str to Message.
 impl From<&'static str> for Message {
-// from returns the message from the string.
+/// from returns the message from the string.
 fn from(s: &'static str) -> Self {
 Message(Cow::Borrowed(s))
 }
 }
-// From<String> for Message implements the conversion from String to Message.
+/// From<String> for Message implements the conversion from String to Message.
 impl From<String> for Message {
-// from returns the message from the string.
+/// from returns the message from the string.
 fn from(s: String) -> Self {
 Message(Cow::Owned(s))
 }
 }
-// Message implements the message for the error.
+/// Message implements the message for the error.
 impl Message {
-// as_str returns the string of the message.
+/// as_str returns the string of the message.
 pub fn as_str(&self) -> &str {
 &self.0
 }

View File

@@ -23,181 +23,181 @@ pub use errors::ExternalError;
 pub use errors::OrErr;
 pub use errors::{BackendError, DownloadFromRemotePeerFailed};
-// DFError is the error for dragonfly.
+/// DFError is the error for dragonfly.
 #[derive(thiserror::Error, Debug)]
 pub enum DFError {
-// IO is the error for IO operation.
+/// IO is the error for IO operation.
 #[error(transparent)]
 IO(#[from] std::io::Error),
-// MpscSend is the error for send.
+/// MpscSend is the error for send.
 #[error("mpsc send: {0}")]
 MpscSend(String),
-// SendTimeout is the error for send timeout.
+/// SendTimeout is the error for send timeout.
 #[error("send timeout")]
 SendTimeout,
-// HashRing is the error for hashring.
+/// HashRing is the error for hashring.
 #[error{"hashring {0} is failed"}]
 HashRing(String),
-// HostNotFound is the error when the host is not found.
+/// HostNotFound is the error when the host is not found.
 #[error{"host {0} not found"}]
 HostNotFound(String),
-// TaskNotFound is the error when the task is not found.
+/// TaskNotFound is the error when the task is not found.
 #[error{"task {0} not found"}]
 TaskNotFound(String),
-// PieceNotFound is the error when the piece is not found.
+/// PieceNotFound is the error when the piece is not found.
 #[error{"piece {0} not found"}]
 PieceNotFound(String),
-// PieceStateIsFailed is the error when the piece state is failed.
+/// PieceStateIsFailed is the error when the piece state is failed.
 #[error{"piece {0} state is failed"}]
 PieceStateIsFailed(String),
-// WaitForPieceFinishedTimeout is the error when the wait for piece finished timeout.
+/// WaitForPieceFinishedTimeout is the error when the wait for piece finished timeout.
 #[error{"wait for piece {0} finished timeout"}]
 WaitForPieceFinishedTimeout(String),
-// AvailableManagerNotFound is the error when the available manager is not found.
+/// AvailableManagerNotFound is the error when the available manager is not found.
 #[error{"available manager not found"}]
 AvailableManagerNotFound,
-// AvailableSchedulersNotFound is the error when the available schedulers is not found.
+/// AvailableSchedulersNotFound is the error when the available schedulers is not found.
 #[error{"available schedulers not found"}]
 AvailableSchedulersNotFound,
-// DownloadFromRemotePeerFailed is the error when the download from remote peer is failed.
+/// DownloadFromRemotePeerFailed is the error when the download from remote peer is failed.
 #[error(transparent)]
 DownloadFromRemotePeerFailed(DownloadFromRemotePeerFailed),
-// ColumnFamilyNotFound is the error when the column family is not found.
+/// ColumnFamilyNotFound is the error when the column family is not found.
 #[error{"column family {0} not found"}]
 ColumnFamilyNotFound(String),
-// InvalidStateTransition is the error when the state transition is invalid.
+/// InvalidStateTransition is the error when the state transition is invalid.
 #[error{"can not transit from {0} to {1}"}]
 InvalidStateTransition(String, String),
-// InvalidState is the error when the state is invalid.
+/// InvalidState is the error when the state is invalid.
 #[error{"invalid state {0}"}]
 InvalidState(String),
-// InvalidURI is the error when the uri is invalid.
+/// InvalidURI is the error when the uri is invalid.
 #[error("invalid uri {0}")]
 InvalidURI(String),
-// InvalidPeer is the error when the peer is invalid.
+/// InvalidPeer is the error when the peer is invalid.
 #[error("invalid peer {0}")]
 InvalidPeer(String),
-// SchedulerClientNotFound is the error when the scheduler client is not found.
+/// SchedulerClientNotFound is the error when the scheduler client is not found.
#[error{"scheduler client not found"}] #[error{"scheduler client not found"}]
SchedulerClientNotFound, SchedulerClientNotFound,
// UnexpectedResponse is the error when the response is unexpected. /// UnexpectedResponse is the error when the response is unexpected.
#[error{"unexpected response"}] #[error{"unexpected response"}]
UnexpectedResponse, UnexpectedResponse,
// DigestMismatch is the error when the digest is mismatch. /// DigestMismatch is the error when the digest is mismatch.
#[error{"digest mismatch expected: {0}, actual: {1}"}] #[error{"digest mismatch expected: {0}, actual: {1}"}]
DigestMismatch(String, String), DigestMismatch(String, String),
// ContentLengthMismatch is the error when the content length is mismatch. /// ContentLengthMismatch is the error when the content length is mismatch.
#[error("content length mismatch expected: {0}, actual: {1}")] #[error("content length mismatch expected: {0}, actual: {1}")]
ContentLengthMismatch(u64, u64), ContentLengthMismatch(u64, u64),
// MaxScheduleCountExceeded is the error when the max schedule count is exceeded. /// MaxScheduleCountExceeded is the error when the max schedule count is exceeded.
#[error("max schedule count {0} exceeded")] #[error("max schedule count {0} exceeded")]
MaxScheduleCountExceeded(u32), MaxScheduleCountExceeded(u32),
// InvalidContentLength is the error when the content length is invalid. /// InvalidContentLength is the error when the content length is invalid.
#[error("invalid content length")] #[error("invalid content length")]
InvalidContentLength, InvalidContentLength,
// InvalidPieceLength is the error when the piece length is invalid. /// InvalidPieceLength is the error when the piece length is invalid.
#[error("invalid piece length")] #[error("invalid piece length")]
InvalidPieceLength, InvalidPieceLength,
// InvalidParameter is the error when the parameter is invalid. /// InvalidParameter is the error when the parameter is invalid.
#[error("invalid parameter")] #[error("invalid parameter")]
InvalidParameter, InvalidParameter,
#[error(transparent)] #[error(transparent)]
Utf8(#[from] std::str::Utf8Error), Utf8(#[from] std::str::Utf8Error),
// Unknown is the error when the error is unknown. /// Unknown is the error when the error is unknown.
#[error("unknown {0}")] #[error("unknown {0}")]
Unknown(String), Unknown(String),
// Unimplemented is the error when the feature is not implemented. /// Unimplemented is the error when the feature is not implemented.
#[error{"unimplemented"}] #[error{"unimplemented"}]
Unimplemented, Unimplemented,
// EmptyHTTPRangeError is the error when the range fallback error is empty. /// EmptyHTTPRangeError is the error when the range fallback error is empty.
#[error{"RangeUnsatisfiable: Failed to parse range fallback error, please file an issue"}] #[error{"RangeUnsatisfiable: Failed to parse range fallback error, please file an issue"}]
EmptyHTTPRangeError, EmptyHTTPRangeError,
// TonicStatus is the error for tonic status. /// TonicStatus is the error for tonic status.
#[error(transparent)] #[error(transparent)]
TonicStatus(#[from] tonic::Status), TonicStatus(#[from] tonic::Status),
// TonicStreamElapsed is the error for tonic stream elapsed. /// TonicStreamElapsed is the error for tonic stream elapsed.
#[error(transparent)] #[error(transparent)]
TokioStreamElapsed(#[from] tokio_stream::Elapsed), TokioStreamElapsed(#[from] tokio_stream::Elapsed),
// ReqwestError is the error for reqwest. /// ReqwestError is the error for reqwest.
#[error(transparent)] #[error(transparent)]
ReqwesError(#[from] reqwest::Error), ReqwesError(#[from] reqwest::Error),
// OpenDALError is the error for opendal. /// OpenDALError is the error for opendal.
#[error(transparent)] #[error(transparent)]
OpenDALError(#[from] opendal::Error), OpenDALError(#[from] opendal::Error),
// HyperError is the error for hyper. /// HyperError is the error for hyper.
#[error(transparent)] #[error(transparent)]
HyperError(#[from] hyper::Error), HyperError(#[from] hyper::Error),
// BackendError is the error for backend. /// BackendError is the error for backend.
#[error(transparent)] #[error(transparent)]
BackendError(BackendError), BackendError(BackendError),
// HyperUtilClientLegacyError is the error for hyper util client legacy. /// HyperUtilClientLegacyError is the error for hyper util client legacy.
#[error(transparent)] #[error(transparent)]
HyperUtilClientLegacyError(#[from] hyper_util::client::legacy::Error), HyperUtilClientLegacyError(#[from] hyper_util::client::legacy::Error),
// ExternalError is the error for external error. /// ExternalError is the error for external error.
#[error(transparent)] #[error(transparent)]
ExternalError(#[from] ExternalError), ExternalError(#[from] ExternalError),
// MaxDownloadFilesExceeded is the error for max download files exceeded. /// MaxDownloadFilesExceeded is the error for max download files exceeded.
#[error("max number of files to download exceeded: {0}")] #[error("max number of files to download exceeded: {0}")]
MaxDownloadFilesExceeded(usize), MaxDownloadFilesExceeded(usize),
// Unsupported is the error for unsupported. /// Unsupported is the error for unsupported.
#[error("unsupported {0}")] #[error("unsupported {0}")]
Unsupported(String), Unsupported(String),
// TokioJoinError is the error for tokio join. /// TokioJoinError is the error for tokio join.
#[error(transparent)] #[error(transparent)]
TokioJoinError(tokio::task::JoinError), TokioJoinError(tokio::task::JoinError),
// ValidationError is the error for validate. /// ValidationError is the error for validate.
#[error("validate failed: {0}")] #[error("validate failed: {0}")]
ValidationError(String), ValidationError(String),
} }
// SendError is the error for send. /// SendError is the error for send.
impl<T> From<tokio::sync::mpsc::error::SendError<T>> for DFError { impl<T> From<tokio::sync::mpsc::error::SendError<T>> for DFError {
fn from(e: tokio::sync::mpsc::error::SendError<T>) -> Self { fn from(e: tokio::sync::mpsc::error::SendError<T>) -> Self {
Self::MpscSend(e.to_string()) Self::MpscSend(e.to_string())
} }
} }
// SendTimeoutError is the error for send timeout. /// SendTimeoutError is the error for send timeout.
impl<T> From<tokio::sync::mpsc::error::SendTimeoutError<T>> for DFError { impl<T> From<tokio::sync::mpsc::error::SendTimeoutError<T>> for DFError {
fn from(err: tokio::sync::mpsc::error::SendTimeoutError<T>) -> Self { fn from(err: tokio::sync::mpsc::error::SendTimeoutError<T>) -> Self {
match err { match err {
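
The conversions above exist so `?` can fold channel errors straight into DFError variants. A simplified sketch of that pattern, trimmed to a single variant and using tokio's mpsc (not the repo's code):

    use tokio::sync::mpsc;

    #[derive(Debug, thiserror::Error)]
    enum DFError {
        #[error("mpsc send: {0}")]
        MpscSend(String),
    }

    impl<T> From<mpsc::error::SendError<T>> for DFError {
        fn from(e: mpsc::error::SendError<T>) -> Self {
            Self::MpscSend(e.to_string())
        }
    }

    async fn send(tx: &mpsc::Sender<u32>) -> Result<(), DFError> {
        // `?` uses the From impl above to turn SendError<u32> into DFError::MpscSend.
        tx.send(1).await?;
        Ok(())
    }

    #[tokio::main]
    async fn main() {
        let (tx, rx) = mpsc::channel::<u32>(1);
        drop(rx); // a closed receiver makes the next send fail
        println!("{:?}", send(&tx).await); // Err(MpscSend("channel closed"))
    }
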


@ -92,6 +92,7 @@ async fn main() -> Result<(), anyhow::Error> {
error!("failed to load config: {}", err); error!("failed to load config: {}", err);
err err
})?; })?;
// Handle features of the container runtime. // Handle features of the container runtime.
let container_runtime = container_runtime::ContainerRuntime::new(&config); let container_runtime = container_runtime::ContainerRuntime::new(&config);
container_runtime.run().await.map_err(|err| { container_runtime.run().await.map_err(|err| {


@ -25,20 +25,20 @@ use tokio::{self, fs};
use toml_edit::{value, Array, DocumentMut, Item, Table, Value}; use toml_edit::{value, Array, DocumentMut, Item, Table, Value};
use tracing::{info, instrument}; use tracing::{info, instrument};
// Containerd represents the containerd runtime manager. /// Containerd represents the containerd runtime manager.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct Containerd { pub struct Containerd {
// config is the configuration for initializing /// config is the configuration for initializing
// runtime environment for the dfdaemon. /// runtime environment for the dfdaemon.
config: dfinit::Containerd, config: dfinit::Containerd,
// proxy_config is the configuration for the dfdaemon's proxy server. /// proxy_config is the configuration for the dfdaemon's proxy server.
proxy_config: dfinit::Proxy, proxy_config: dfinit::Proxy,
} }
// Containerd implements the containerd runtime manager. /// Containerd implements the containerd runtime manager.
impl Containerd { impl Containerd {
// new creates a new containerd runtime manager. /// new creates a new containerd runtime manager.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new(config: dfinit::Containerd, proxy_config: dfinit::Proxy) -> Self { pub fn new(config: dfinit::Containerd, proxy_config: dfinit::Proxy) -> Self {
Self { Self {
@ -47,8 +47,8 @@ impl Containerd {
} }
} }
// run runs the containerd runtime to initialize /// run runs the containerd runtime to initialize
// runtime environment for the dfdaemon. /// runtime environment for the dfdaemon.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn run(&self) -> Result<()> { pub async fn run(&self) -> Result<()> {
let content = fs::read_to_string(&self.config.config_path).await?; let content = fs::read_to_string(&self.config.config_path).await?;
@ -114,8 +114,8 @@ impl Containerd {
Ok(()) Ok(())
} }
// add_registries adds registries to the containerd configuration, when containerd supports /// add_registries adds registries to the containerd configuration, when containerd supports
// config_path mode and config_path is not empty. /// config_path mode and config_path is not empty.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn add_registries( pub async fn add_registries(
&self, &self,
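
add_registries edits containerd's config.toml with toml_edit. A hedged sketch of that editing pattern, with a made-up input document and an assumed certs.d path (not the repo's actual registry layout):

    use toml_edit::{value, DocumentMut};

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        // A made-up config.toml; in dfinit the content is read from config_path.
        let content = r#"
    version = 2

    [plugins."io.containerd.grpc.v1.cri"]
      sandbox_image = "registry.k8s.io/pause:3.9"
    "#;

        let mut doc: DocumentMut = content.parse()?;
        // Point the CRI registry section at a hosts.toml directory (path is illustrative).
        doc["plugins"]["io.containerd.grpc.v1.cri"]["registry"]["config_path"] =
            value("/etc/containerd/certs.d");

        println!("{}", doc);
        Ok(())
    }
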


@ -24,20 +24,20 @@ use toml_edit::{value, Array, ArrayOfTables, Item, Table, Value};
use tracing::{info, instrument}; use tracing::{info, instrument};
use url::Url; use url::Url;
// CRIO represents the cri-o runtime manager. /// CRIO represents the cri-o runtime manager.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct CRIO { pub struct CRIO {
// config is the configuration for initializing /// config is the configuration for initializing
// runtime environment for the dfdaemon. /// runtime environment for the dfdaemon.
config: dfinit::CRIO, config: dfinit::CRIO,
// proxy_config is the configuration for the dfdaemon's proxy server. /// proxy_config is the configuration for the dfdaemon's proxy server.
proxy_config: dfinit::Proxy, proxy_config: dfinit::Proxy,
} }
// CRIO implements the cri-o runtime manager. /// CRIO implements the cri-o runtime manager.
impl CRIO { impl CRIO {
// new creates a new cri-o runtime manager. /// new creates a new cri-o runtime manager.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new(config: dfinit::CRIO, proxy_config: dfinit::Proxy) -> Self { pub fn new(config: dfinit::CRIO, proxy_config: dfinit::Proxy) -> Self {
Self { Self {
@ -46,8 +46,8 @@ impl CRIO {
} }
} }
// run runs the cri-o runtime to initialize /// run runs the cri-o runtime to initialize
// runtime environment for the dfdaemon. /// runtime environment for the dfdaemon.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn run(&self) -> Result<()> { pub async fn run(&self) -> Result<()> {
let mut registries_config_table = toml_edit::DocumentMut::new(); let mut registries_config_table = toml_edit::DocumentMut::new();
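
For cri-o, run builds a registries.conf-style document from scratch, which is what the DocumentMut::new() above starts. A simplified sketch of assembling [[registry]] entries with toml_edit's ArrayOfTables (field names follow containers-registries.conf conventions; values are illustrative, not the repo's output):

    use toml_edit::{value, ArrayOfTables, DocumentMut, Item, Table};

    fn main() {
        let mut doc = DocumentMut::new();

        // One mirror entry, e.g. a local proxy address (illustrative).
        let mut mirror = Table::new();
        mirror.insert("location", value("127.0.0.1:4001"));
        let mut mirrors = ArrayOfTables::new();
        mirrors.push(mirror);

        // One [[registry]] entry pointing docker.io at the mirror.
        let mut registry = Table::new();
        registry.insert("prefix", value("docker.io"));
        registry.insert("location", value("docker.io"));
        registry.insert("mirror", Item::ArrayOfTables(mirrors));

        let mut registries = ArrayOfTables::new();
        registries.push(registry);
        doc.insert("registry", Item::ArrayOfTables(registries));

        println!("{}", doc);
    }
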


@ -18,20 +18,20 @@ use dragonfly_client_config::dfinit;
use dragonfly_client_core::{Error, Result}; use dragonfly_client_core::{Error, Result};
use tracing::{info, instrument}; use tracing::{info, instrument};
// Docker represents the docker runtime manager. /// Docker represents the docker runtime manager.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct Docker { pub struct Docker {
// config is the configuration for initializing /// config is the configuration for initializing
// runtime environment for the dfdaemon. /// runtime environment for the dfdaemon.
config: dfinit::Docker, config: dfinit::Docker,
// proxy_config is the configuration for the dfdaemon's proxy server. /// proxy_config is the configuration for the dfdaemon's proxy server.
proxy_config: dfinit::Proxy, proxy_config: dfinit::Proxy,
} }
// Docker implements the docker runtime manager. /// Docker implements the docker runtime manager.
impl Docker { impl Docker {
// new creates a new docker runtime manager. /// new creates a new docker runtime manager.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new(config: dfinit::Docker, proxy_config: dfinit::Proxy) -> Self { pub fn new(config: dfinit::Docker, proxy_config: dfinit::Proxy) -> Self {
Self { Self {
@ -40,10 +40,10 @@ impl Docker {
} }
} }
// TODO: Implement the run method for Docker. /// TODO: Implement the run method for Docker.
// ///
// run runs the docker runtime to initialize /// run runs the docker runtime to initialize
// runtime environment for the dfdaemon. /// runtime environment for the dfdaemon.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn run(&self) -> Result<()> { pub async fn run(&self) -> Result<()> {
info!( info!(


@ -22,7 +22,7 @@ pub mod containerd;
pub mod crio; pub mod crio;
pub mod docker; pub mod docker;
// Engine represents config of the container runtime engine. /// Engine represents config of the container runtime engine.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
enum Engine { enum Engine {
Containerd(containerd::Containerd), Containerd(containerd::Containerd),
@ -30,14 +30,14 @@ enum Engine {
Crio(crio::CRIO), Crio(crio::CRIO),
} }
// ContainerRuntime represents the container runtime manager. /// ContainerRuntime represents the container runtime manager.
pub struct ContainerRuntime { pub struct ContainerRuntime {
engine: Option<Engine>, engine: Option<Engine>,
} }
// ContainerRuntime implements the container runtime manager. /// ContainerRuntime implements the container runtime manager.
impl ContainerRuntime { impl ContainerRuntime {
// new creates a new container runtime manager. /// new creates a new container runtime manager.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new(config: &Config) -> Self { pub fn new(config: &Config) -> Self {
Self { Self {
@ -45,7 +45,7 @@ impl ContainerRuntime {
} }
} }
// run runs the container runtime to initialize runtime environment for the dfdaemon. /// run runs the container runtime to initialize runtime environment for the dfdaemon.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn run(&self) -> Result<()> { pub async fn run(&self) -> Result<()> {
// If containerd is enabled, override the default containerd // If containerd is enabled, override the default containerd
@ -58,7 +58,7 @@ impl ContainerRuntime {
} }
} }
// get_engine returns the runtime engine from the config. /// get_engine returns the runtime engine from the config.
#[instrument(skip_all)] #[instrument(skip_all)]
fn get_engine(config: &Config) -> Option<Engine> { fn get_engine(config: &Config) -> Option<Engine> {
if let Some(ref container_runtime_config) = config.container_runtime.config { if let Some(ref container_runtime_config) = config.container_runtime.config {
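
The Engine enum plus get_engine is a plain dispatch: pick at most one runtime from the config and run it, or do nothing. A stripped-down sketch of that shape (String stands in for the per-runtime config types):

    enum Engine {
        Containerd(String),
        Docker(String),
        Crio(String),
    }

    struct ContainerRuntime {
        engine: Option<Engine>,
    }

    impl ContainerRuntime {
        fn run(&self) {
            match &self.engine {
                Some(Engine::Containerd(cfg)) => println!("init containerd via {}", cfg),
                Some(Engine::Docker(cfg)) => println!("init docker via {}", cfg),
                Some(Engine::Crio(cfg)) => println!("init cri-o via {}", cfg),
                None => println!("no container runtime configured, nothing to do"),
            }
        }
    }

    fn main() {
        let runtime = ContainerRuntime {
            engine: Some(Engine::Containerd("/etc/containerd/config.toml".to_string())),
        };
        runtime.run();
    }
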


@ -25,39 +25,39 @@ use tokio::io::{self, AsyncRead, AsyncReadExt, AsyncSeekExt, BufReader, SeekFrom
use tokio_util::io::InspectReader; use tokio_util::io::InspectReader;
use tracing::{error, info, instrument, warn}; use tracing::{error, info, instrument, warn};
// DEFAULT_DIR_NAME is the default directory name to store content. /// DEFAULT_DIR_NAME is the default directory name to store content.
const DEFAULT_DIR_NAME: &str = "content"; const DEFAULT_DIR_NAME: &str = "content";
// Content is the content of a piece. /// Content is the content of a piece.
pub struct Content { pub struct Content {
// config is the configuration of the dfdaemon. /// config is the configuration of the dfdaemon.
config: Arc<Config>, config: Arc<Config>,
// dir is the directory to store content. /// dir is the directory to store content.
dir: PathBuf, dir: PathBuf,
} }
// WritePieceResponse is the response of writing a piece. /// WritePieceResponse is the response of writing a piece.
pub struct WritePieceResponse { pub struct WritePieceResponse {
// length is the length of the piece. /// length is the length of the piece.
pub length: u64, pub length: u64,
// hash is the hash of the piece. /// hash is the hash of the piece.
pub hash: String, pub hash: String,
} }
// WriteCacheTaskResponse is the response of writing a cache task. /// WriteCacheTaskResponse is the response of writing a cache task.
pub struct WriteCacheTaskResponse { pub struct WriteCacheTaskResponse {
// length is the length of the cache task. /// length is the length of the cache task.
pub length: u64, pub length: u64,
// hash is the hash of the cache task. /// hash is the hash of the cache task.
pub hash: String, pub hash: String,
} }
// Content implements the content storage. /// Content implements the content storage.
impl Content { impl Content {
// new returns a new content. /// new returns a new content.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn new(config: Arc<Config>, dir: &Path) -> Result<Content> { pub async fn new(config: Arc<Config>, dir: &Path) -> Result<Content> {
let dir = dir.join(DEFAULT_DIR_NAME); let dir = dir.join(DEFAULT_DIR_NAME);
@ -75,7 +75,7 @@ impl Content {
Ok(Content { config, dir }) Ok(Content { config, dir })
} }
// hard_link_or_copy_task hard links or copies the task content to the destination. /// hard_link_or_copy_task hard links or copies the task content to the destination.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn hard_link_or_copy_task( pub async fn hard_link_or_copy_task(
&self, &self,
@ -144,14 +144,14 @@ impl Content {
Ok(()) Ok(())
} }
// hard_link_task hard links the task content. /// hard_link_task hard links the task content.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn hard_link_task(&self, task_id: &str, link: &Path) -> Result<()> { async fn hard_link_task(&self, task_id: &str, link: &Path) -> Result<()> {
fs::hard_link(self.dir.join(task_id), link).await?; fs::hard_link(self.dir.join(task_id), link).await?;
Ok(()) Ok(())
} }
// copy_task copies the task content to the destination. /// copy_task copies the task content to the destination.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn copy_task(&self, task_id: &str, to: &Path) -> Result<()> { async fn copy_task(&self, task_id: &str, to: &Path) -> Result<()> {
// Ensure the parent directory of the destination exists. // Ensure the parent directory of the destination exists.
@ -168,7 +168,7 @@ impl Content {
Ok(()) Ok(())
} }
// copy_task_by_range copies the task content to the destination by range. /// copy_task_by_range copies the task content to the destination by range.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn copy_task_by_range(&self, task_id: &str, to: &Path, range: Range) -> Result<()> { async fn copy_task_by_range(&self, task_id: &str, to: &Path, range: Range) -> Result<()> {
// Ensure the parent directory of the destination exists. // Ensure the parent directory of the destination exists.
@ -200,7 +200,7 @@ impl Content {
Ok(()) Ok(())
} }
// read_task reads the task content by range. /// read_task reads the task content by range.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn read_task_by_range(&self, task_id: &str, range: Range) -> Result<impl AsyncRead> { pub async fn read_task_by_range(&self, task_id: &str, range: Range) -> Result<impl AsyncRead> {
let task_path = self.dir.join(task_id); let task_path = self.dir.join(task_id);
@ -221,7 +221,7 @@ impl Content {
Ok(range_reader) Ok(range_reader)
} }
// delete_task deletes the task content. /// delete_task deletes the task content.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn delete_task(&self, task_id: &str) -> Result<()> { pub async fn delete_task(&self, task_id: &str) -> Result<()> {
info!("delete task content: {}", task_id); info!("delete task content: {}", task_id);
@ -233,7 +233,7 @@ impl Content {
Ok(()) Ok(())
} }
// read_piece reads the piece from the content. /// read_piece reads the piece from the content.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn read_piece( pub async fn read_piece(
&self, &self,
@ -274,7 +274,7 @@ impl Content {
Ok(f.take(length)) Ok(f.take(length))
} }
// write_piece writes the piece to the content. /// write_piece writes the piece to the content.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn write_piece<R: AsyncRead + Unpin + ?Sized>( pub async fn write_piece<R: AsyncRead + Unpin + ?Sized>(
&self, &self,
@ -326,7 +326,7 @@ impl Content {
}) })
} }
// hard_link_or_copy_cache_task hard links or copies the task content to the destination. /// hard_link_or_copy_cache_task hard links or copies the task content to the destination.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn hard_link_or_copy_cache_task( pub async fn hard_link_or_copy_cache_task(
&self, &self,
@ -379,7 +379,7 @@ impl Content {
Ok(()) Ok(())
} }
// copy_cache_task copies the cache task content to the destination. /// copy_cache_task copies the cache task content to the destination.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn write_cache_task( pub async fn write_cache_task(
&self, &self,
@ -426,7 +426,7 @@ impl Content {
}) })
} }
// delete_task deletes the cache task content. /// delete_task deletes the cache task content.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn delete_cache_task(&self, cache_task_id: &str) -> Result<()> { pub async fn delete_cache_task(&self, cache_task_id: &str) -> Result<()> {
info!("delete cache task content: {}", cache_task_id); info!("delete cache task content: {}", cache_task_id);


@ -30,24 +30,24 @@ pub mod content;
pub mod metadata; pub mod metadata;
pub mod storage_engine; pub mod storage_engine;
// DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL is the default interval for waiting for the piece to be finished. /// DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL is the default interval for waiting for the piece to be finished.
pub const DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL: Duration = Duration::from_millis(500); pub const DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL: Duration = Duration::from_millis(500);
// Storage is the storage of the task. /// Storage is the storage of the task.
pub struct Storage { pub struct Storage {
// config is the configuration of the dfdaemon. /// config is the configuration of the dfdaemon.
config: Arc<Config>, config: Arc<Config>,
// metadata implements the metadata storage. /// metadata implements the metadata storage.
metadata: metadata::Metadata, metadata: metadata::Metadata,
// content implements the content storage. /// content implements the content storage.
content: content::Content, content: content::Content,
} }
// Storage implements the storage. /// Storage implements the storage.
impl Storage { impl Storage {
// new returns a new storage. /// new returns a new storage.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn new(config: Arc<Config>, dir: &Path, log_dir: PathBuf) -> Result<Self> { pub async fn new(config: Arc<Config>, dir: &Path, log_dir: PathBuf) -> Result<Self> {
let metadata = metadata::Metadata::new(config.clone(), dir, &log_dir)?; let metadata = metadata::Metadata::new(config.clone(), dir, &log_dir)?;
@ -59,7 +59,7 @@ impl Storage {
}) })
} }
// hard_link_or_copy_task hard links or copies the task content to the destination. /// hard_link_or_copy_task hard links or copies the task content to the destination.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn hard_link_or_copy_task( pub async fn hard_link_or_copy_task(
&self, &self,
@ -70,7 +70,7 @@ impl Storage {
self.content.hard_link_or_copy_task(task, to, range).await self.content.hard_link_or_copy_task(task, to, range).await
} }
// read_task_by_range returns the reader of the task by range. /// read_task_by_range returns the reader of the task by range.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn read_task_by_range( pub async fn read_task_by_range(
&self, &self,
@ -80,7 +80,7 @@ impl Storage {
self.content.read_task_by_range(task_id, range).await self.content.read_task_by_range(task_id, range).await
} }
// download_task_started updates the metadata of the task when the task downloads started. /// download_task_started updates the metadata of the task when the task downloads started.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_task_started( pub fn download_task_started(
&self, &self,
@ -93,49 +93,49 @@ impl Storage {
.download_task_started(id, piece_length, content_length, response_header) .download_task_started(id, piece_length, content_length, response_header)
} }
// download_task_finished updates the metadata of the task when the task downloads finished. /// download_task_finished updates the metadata of the task when the task downloads finished.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_task_finished(&self, id: &str) -> Result<metadata::Task> { pub fn download_task_finished(&self, id: &str) -> Result<metadata::Task> {
self.metadata.download_task_finished(id) self.metadata.download_task_finished(id)
} }
// download_task_failed updates the metadata of the task when the task downloads failed. /// download_task_failed updates the metadata of the task when the task downloads failed.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download_task_failed(&self, id: &str) -> Result<metadata::Task> { pub async fn download_task_failed(&self, id: &str) -> Result<metadata::Task> {
self.metadata.download_task_failed(id) self.metadata.download_task_failed(id)
} }
// prefetch_task_started updates the metadata of the task when the task prefetches started. /// prefetch_task_started updates the metadata of the task when the task prefetches started.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn prefetch_task_started(&self, id: &str) -> Result<metadata::Task> { pub async fn prefetch_task_started(&self, id: &str) -> Result<metadata::Task> {
self.metadata.prefetch_task_started(id) self.metadata.prefetch_task_started(id)
} }
// prefetch_task_failed updates the metadata of the task when the task prefetches failed. /// prefetch_task_failed updates the metadata of the task when the task prefetches failed.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn prefetch_task_failed(&self, id: &str) -> Result<metadata::Task> { pub async fn prefetch_task_failed(&self, id: &str) -> Result<metadata::Task> {
self.metadata.prefetch_task_failed(id) self.metadata.prefetch_task_failed(id)
} }
// upload_task_finished updates the metadata of the task when task uploads finished. /// upload_task_finished updates the metadata of the task when task uploads finished.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn upload_task_finished(&self, id: &str) -> Result<metadata::Task> { pub fn upload_task_finished(&self, id: &str) -> Result<metadata::Task> {
self.metadata.upload_task_finished(id) self.metadata.upload_task_finished(id)
} }
// get_task returns the task metadata. /// get_task returns the task metadata.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_task(&self, id: &str) -> Result<Option<metadata::Task>> { pub fn get_task(&self, id: &str) -> Result<Option<metadata::Task>> {
self.metadata.get_task(id) self.metadata.get_task(id)
} }
// get_tasks returns the task metadatas. /// get_tasks returns the task metadatas.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_tasks(&self) -> Result<Vec<metadata::Task>> { pub fn get_tasks(&self) -> Result<Vec<metadata::Task>> {
self.metadata.get_tasks() self.metadata.get_tasks()
} }
// delete_task deletes the task metadatas, task content and piece metadatas. /// delete_task deletes the task metadatas, task content and piece metadatas.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn delete_task(&self, id: &str) { pub async fn delete_task(&self, id: &str) {
self.metadata self.metadata
@ -151,7 +151,7 @@ impl Storage {
}); });
} }
// hard_link_or_copy_cache_task hard links or copies the cache task content to the destination. /// hard_link_or_copy_cache_task hard links or copies the cache task content to the destination.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn hard_link_or_copy_cache_task( pub async fn hard_link_or_copy_cache_task(
&self, &self,
@ -161,7 +161,7 @@ impl Storage {
self.content.hard_link_or_copy_cache_task(task, to).await self.content.hard_link_or_copy_cache_task(task, to).await
} }
// create_persistent_cache_task creates a new persistent cache task. /// create_persistent_cache_task creates a new persistent cache task.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn create_persistent_cache_task( pub async fn create_persistent_cache_task(
&self, &self,
@ -190,7 +190,7 @@ impl Storage {
) )
} }
// download_cache_task_started updates the metadata of the cache task when the cache task downloads started. /// download_cache_task_started updates the metadata of the cache task when the cache task downloads started.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_cache_task_started( pub fn download_cache_task_started(
&self, &self,
@ -204,37 +204,37 @@ impl Storage {
.download_cache_task_started(id, ttl, persistent, piece_length, content_length) .download_cache_task_started(id, ttl, persistent, piece_length, content_length)
} }
// download_cache_task_finished updates the metadata of the cache task when the cache task downloads finished. /// download_cache_task_finished updates the metadata of the cache task when the cache task downloads finished.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_cache_task_finished(&self, id: &str) -> Result<metadata::CacheTask> { pub fn download_cache_task_finished(&self, id: &str) -> Result<metadata::CacheTask> {
self.metadata.download_cache_task_finished(id) self.metadata.download_cache_task_finished(id)
} }
// download_cache_task_failed updates the metadata of the cache task when the cache task downloads failed. /// download_cache_task_failed updates the metadata of the cache task when the cache task downloads failed.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download_cache_task_failed(&self, id: &str) -> Result<metadata::CacheTask> { pub async fn download_cache_task_failed(&self, id: &str) -> Result<metadata::CacheTask> {
self.metadata.download_cache_task_failed(id) self.metadata.download_cache_task_failed(id)
} }
    // upload_cache_task_finished updates the metadata of the cache task when cache task uploads finished. /// upload_cache_task_finished updates the metadata of the cache task when cache task uploads finished.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn upload_cache_task_finished(&self, id: &str) -> Result<metadata::CacheTask> { pub fn upload_cache_task_finished(&self, id: &str) -> Result<metadata::CacheTask> {
self.metadata.upload_cache_task_finished(id) self.metadata.upload_cache_task_finished(id)
} }
// get_cache_task returns the cache task metadata. /// get_cache_task returns the cache task metadata.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_cache_task(&self, id: &str) -> Result<Option<metadata::CacheTask>> { pub fn get_cache_task(&self, id: &str) -> Result<Option<metadata::CacheTask>> {
self.metadata.get_cache_task(id) self.metadata.get_cache_task(id)
} }
// get_tasks returns the task metadatas. /// get_tasks returns the task metadatas.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_cache_tasks(&self) -> Result<Vec<metadata::CacheTask>> { pub fn get_cache_tasks(&self) -> Result<Vec<metadata::CacheTask>> {
self.metadata.get_cache_tasks() self.metadata.get_cache_tasks()
} }
// delete_cache_task deletes the cache task metadatas, cache task content and piece metadatas. /// delete_cache_task deletes the cache task metadatas, cache task content and piece metadatas.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn delete_cache_task(&self, id: &str) { pub async fn delete_cache_task(&self, id: &str) {
self.metadata.delete_cache_task(id).unwrap_or_else(|err| { self.metadata.delete_cache_task(id).unwrap_or_else(|err| {
@ -249,8 +249,8 @@ impl Storage {
}); });
} }
// download_piece_started updates the metadata of the piece and writes /// download_piece_started updates the metadata of the piece and writes
// the data of piece to file when the piece downloads started. /// the data of piece to file when the piece downloads started.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download_piece_started( pub async fn download_piece_started(
&self, &self,
@ -265,7 +265,7 @@ impl Storage {
} }
} }
// download_piece_from_source_finished is used for downloading piece from source. /// download_piece_from_source_finished is used for downloading piece from source.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download_piece_from_source_finished<R: AsyncRead + Unpin + ?Sized>( pub async fn download_piece_from_source_finished<R: AsyncRead + Unpin + ?Sized>(
&self, &self,
@ -288,7 +288,7 @@ impl Storage {
) )
} }
// download_piece_from_remote_peer_finished is used for downloading piece from remote peer. /// download_piece_from_remote_peer_finished is used for downloading piece from remote peer.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download_piece_from_remote_peer_finished<R: AsyncRead + Unpin + ?Sized>( pub async fn download_piece_from_remote_peer_finished<R: AsyncRead + Unpin + ?Sized>(
&self, &self,
@ -321,14 +321,14 @@ impl Storage {
) )
} }
// download_piece_failed updates the metadata of the piece when the piece downloads failed. /// download_piece_failed updates the metadata of the piece when the piece downloads failed.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_piece_failed(&self, task_id: &str, number: u32) -> Result<()> { pub fn download_piece_failed(&self, task_id: &str, number: u32) -> Result<()> {
self.metadata.download_piece_failed(task_id, number) self.metadata.download_piece_failed(task_id, number)
} }
// upload_piece updates the metadata of the piece and /// upload_piece updates the metadata of the piece and
// returns the data of the piece. /// returns the data of the piece.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn upload_piece( pub async fn upload_piece(
&self, &self,
@ -394,24 +394,24 @@ impl Storage {
} }
} }
// get_piece returns the piece metadata. /// get_piece returns the piece metadata.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_piece(&self, task_id: &str, number: u32) -> Result<Option<metadata::Piece>> { pub fn get_piece(&self, task_id: &str, number: u32) -> Result<Option<metadata::Piece>> {
self.metadata.get_piece(task_id, number) self.metadata.get_piece(task_id, number)
} }
// get_pieces returns the piece metadatas. /// get_pieces returns the piece metadatas.
pub fn get_pieces(&self, task_id: &str) -> Result<Vec<metadata::Piece>> { pub fn get_pieces(&self, task_id: &str) -> Result<Vec<metadata::Piece>> {
self.metadata.get_pieces(task_id) self.metadata.get_pieces(task_id)
} }
// piece_id returns the piece id. /// piece_id returns the piece id.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn piece_id(&self, task_id: &str, number: u32) -> String { pub fn piece_id(&self, task_id: &str, number: u32) -> String {
self.metadata.piece_id(task_id, number) self.metadata.piece_id(task_id, number)
} }
// wait_for_piece_finished waits for the piece to be finished. /// wait_for_piece_finished waits for the piece to be finished.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn wait_for_piece_finished(&self, task_id: &str, number: u32) -> Result<metadata::Piece> { async fn wait_for_piece_finished(&self, task_id: &str, number: u32) -> Result<metadata::Piece> {
// Initialize the timeout of piece. // Initialize the timeout of piece.
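
wait_for_piece_finished polls piece metadata on an interval (DEFAULT_WAIT_FOR_PIECE_FINISHED_INTERVAL above) until the piece finishes or a timeout fires. A self-contained sketch of that loop shape, with a closure standing in for the metadata lookup and a made-up 5-second timeout:

    use std::time::Duration;
    use tokio::time::{interval, timeout};

    const WAIT_INTERVAL: Duration = Duration::from_millis(500);

    async fn wait_for_finished(mut is_finished: impl FnMut() -> bool) -> Result<(), &'static str> {
        timeout(Duration::from_secs(5), async {
            let mut ticker = interval(WAIT_INTERVAL);
            loop {
                ticker.tick().await;
                if is_finished() {
                    return;
                }
            }
        })
        .await
        .map_err(|_| "wait for piece finished timeout")
    }

    #[tokio::main]
    async fn main() {
        let mut polls = 0;
        let result = wait_for_finished(|| {
            polls += 1;
            polls >= 3 // pretend the piece finishes on the third poll
        })
        .await;
        println!("{:?}", result); // Ok(())
    }
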


@ -30,83 +30,83 @@ use tracing::{error, info, instrument};
use crate::storage_engine::{rocksdb::RocksdbStorageEngine, DatabaseObject, StorageEngineOwned}; use crate::storage_engine::{rocksdb::RocksdbStorageEngine, DatabaseObject, StorageEngineOwned};
// Task is the metadata of the task. /// Task is the metadata of the task.
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct Task { pub struct Task {
// id is the task id. /// id is the task id.
pub id: String, pub id: String,
// piece_length is the length of the piece. /// piece_length is the length of the piece.
pub piece_length: Option<u64>, pub piece_length: Option<u64>,
// content_length is the length of the content. /// content_length is the length of the content.
pub content_length: Option<u64>, pub content_length: Option<u64>,
// header is the header of the response. /// header is the header of the response.
pub response_header: HashMap<String, String>, pub response_header: HashMap<String, String>,
// uploading_count is the count of the task being uploaded by other peers. /// uploading_count is the count of the task being uploaded by other peers.
pub uploading_count: u64, pub uploading_count: u64,
// uploaded_count is the count of the task has been uploaded by other peers. /// uploaded_count is the count of the task has been uploaded by other peers.
pub uploaded_count: u64, pub uploaded_count: u64,
// updated_at is the time when the task metadata is updated. If the task is downloaded /// updated_at is the time when the task metadata is updated. If the task is downloaded
// by other peers, it will also update updated_at. /// by other peers, it will also update updated_at.
pub updated_at: NaiveDateTime, pub updated_at: NaiveDateTime,
// created_at is the time when the task metadata is created. /// created_at is the time when the task metadata is created.
pub created_at: NaiveDateTime, pub created_at: NaiveDateTime,
// prefetched_at is the time when the task prefetched. /// prefetched_at is the time when the task prefetched.
pub prefetched_at: Option<NaiveDateTime>, pub prefetched_at: Option<NaiveDateTime>,
// failed_at is the time when the task downloads failed. /// failed_at is the time when the task downloads failed.
pub failed_at: Option<NaiveDateTime>, pub failed_at: Option<NaiveDateTime>,
// finished_at is the time when the task downloads finished. /// finished_at is the time when the task downloads finished.
pub finished_at: Option<NaiveDateTime>, pub finished_at: Option<NaiveDateTime>,
} }
// Task implements the task database object. /// Task implements the task database object.
impl DatabaseObject for Task { impl DatabaseObject for Task {
// NAMESPACE is the namespace of [Task] objects. /// NAMESPACE is the namespace of [Task] objects.
const NAMESPACE: &'static str = "task"; const NAMESPACE: &'static str = "task";
} }
// Task implements the task metadata. /// Task implements the task metadata.
impl Task { impl Task {
// is_started returns whether the task downloads started. /// is_started returns whether the task downloads started.
pub fn is_started(&self) -> bool { pub fn is_started(&self) -> bool {
self.finished_at.is_none() self.finished_at.is_none()
} }
// is_downloading returns whether the task is downloading. /// is_downloading returns whether the task is downloading.
pub fn is_uploading(&self) -> bool { pub fn is_uploading(&self) -> bool {
self.uploading_count > 0 self.uploading_count > 0
} }
// is_expired returns whether the task is expired. /// is_expired returns whether the task is expired.
pub fn is_expired(&self, ttl: Duration) -> bool { pub fn is_expired(&self, ttl: Duration) -> bool {
self.updated_at + ttl < Utc::now().naive_utc() self.updated_at + ttl < Utc::now().naive_utc()
} }
// is_prefetched returns whether the task is prefetched. /// is_prefetched returns whether the task is prefetched.
pub fn is_prefetched(&self) -> bool { pub fn is_prefetched(&self) -> bool {
self.prefetched_at.is_some() self.prefetched_at.is_some()
} }
// is_failed returns whether the task downloads failed. /// is_failed returns whether the task downloads failed.
pub fn is_failed(&self) -> bool { pub fn is_failed(&self) -> bool {
self.failed_at.is_some() self.failed_at.is_some()
} }
// is_finished returns whether the task downloads finished. /// is_finished returns whether the task downloads finished.
pub fn is_finished(&self) -> bool { pub fn is_finished(&self) -> bool {
self.finished_at.is_some() self.finished_at.is_some()
} }
// is_empty returns whether the task is empty. /// is_empty returns whether the task is empty.
pub fn is_empty(&self) -> bool { pub fn is_empty(&self) -> bool {
if let Some(content_length) = self.content_length() { if let Some(content_length) = self.content_length() {
if content_length == 0 { if content_length == 0 {
@ -117,79 +117,79 @@ impl Task {
false false
} }
// piece_length returns the piece length of the task. /// piece_length returns the piece length of the task.
pub fn piece_length(&self) -> Option<u64> { pub fn piece_length(&self) -> Option<u64> {
self.piece_length self.piece_length
} }
// content_length returns the content length of the task. /// content_length returns the content length of the task.
pub fn content_length(&self) -> Option<u64> { pub fn content_length(&self) -> Option<u64> {
self.content_length self.content_length
} }
} }
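
The Task helpers above are small predicates over the timestamps; is_expired, for instance, compares updated_at plus the ttl against now. A trimmed sketch of that check (only the field it needs, values made up):

    use chrono::{NaiveDateTime, Utc};
    use std::time::Duration;

    struct Task {
        updated_at: NaiveDateTime,
    }

    impl Task {
        fn is_expired(&self, ttl: Duration) -> bool {
            self.updated_at + ttl < Utc::now().naive_utc()
        }
    }

    fn main() {
        let task = Task {
            updated_at: Utc::now().naive_utc() - chrono::Duration::hours(2),
        };
        // Last touched two hours ago with a one-hour ttl, so it is expired.
        println!("{}", task.is_expired(Duration::from_secs(3600)));
    }
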
// CacheTask is the metadata of the cache task. /// CacheTask is the metadata of the cache task.
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct CacheTask { pub struct CacheTask {
// id is the task id. /// id is the task id.
pub id: String, pub id: String,
// persistent represents whether the cache task is persistent. /// persistent represents whether the cache task is persistent.
// If the cache task is persistent, the cache peer will /// If the cache task is persistent, the cache peer will
    // not be deleted when dfdaemon runs garbage collection. /// not be deleted when dfdaemon runs garbage collection.
pub persistent: bool, pub persistent: bool,
// ttl is the time to live of the cache task. /// ttl is the time to live of the cache task.
pub ttl: Duration, pub ttl: Duration,
// digests is the digests of the cache task. /// digests is the digests of the cache task.
pub digest: String, pub digest: String,
// piece_length is the length of the piece. /// piece_length is the length of the piece.
pub piece_length: u64, pub piece_length: u64,
// content_length is the length of the content. /// content_length is the length of the content.
pub content_length: u64, pub content_length: u64,
// uploading_count is the count of the task being uploaded by other peers. /// uploading_count is the count of the task being uploaded by other peers.
pub uploading_count: u64, pub uploading_count: u64,
// uploaded_count is the count of the task has been uploaded by other peers. /// uploaded_count is the count of the task has been uploaded by other peers.
pub uploaded_count: u64, pub uploaded_count: u64,
// updated_at is the time when the task metadata is updated. If the task is downloaded /// updated_at is the time when the task metadata is updated. If the task is downloaded
// by other peers, it will also update updated_at. /// by other peers, it will also update updated_at.
pub updated_at: NaiveDateTime, pub updated_at: NaiveDateTime,
// created_at is the time when the task metadata is created. /// created_at is the time when the task metadata is created.
pub created_at: NaiveDateTime, pub created_at: NaiveDateTime,
// failed_at is the time when the task downloads failed. /// failed_at is the time when the task downloads failed.
pub failed_at: Option<NaiveDateTime>, pub failed_at: Option<NaiveDateTime>,
// finished_at is the time when the task downloads finished. /// finished_at is the time when the task downloads finished.
pub finished_at: Option<NaiveDateTime>, pub finished_at: Option<NaiveDateTime>,
} }
// CacheTask implements the cache task database object. /// CacheTask implements the cache task database object.
impl DatabaseObject for CacheTask { impl DatabaseObject for CacheTask {
// NAMESPACE is the namespace of [CacheTask] objects. /// NAMESPACE is the namespace of [CacheTask] objects.
const NAMESPACE: &'static str = "cache_task"; const NAMESPACE: &'static str = "cache_task";
} }
// CacheTask implements the cache task metadata. /// CacheTask implements the cache task metadata.
impl CacheTask { impl CacheTask {
// is_started returns whether the cache task downloads started. /// is_started returns whether the cache task downloads started.
pub fn is_started(&self) -> bool { pub fn is_started(&self) -> bool {
self.finished_at.is_none() self.finished_at.is_none()
} }
// is_downloading returns whether the cache task is downloading. /// is_downloading returns whether the cache task is downloading.
pub fn is_uploading(&self) -> bool { pub fn is_uploading(&self) -> bool {
self.uploading_count > 0 self.uploading_count > 0
} }
// is_expired returns whether the cache task is expired. /// is_expired returns whether the cache task is expired.
pub fn is_expired(&self) -> bool { pub fn is_expired(&self) -> bool {
// When scheduler runs garbage collection, it will trigger dfdaemon to evict the cache task. // When scheduler runs garbage collection, it will trigger dfdaemon to evict the cache task.
// But sometimes the dfdaemon may not evict the cache task in time, so we select the ttl * 1.2 // But sometimes the dfdaemon may not evict the cache task in time, so we select the ttl * 1.2
@ -197,17 +197,17 @@ impl CacheTask {
self.created_at + self.ttl * 2 < Utc::now().naive_utc() self.created_at + self.ttl * 2 < Utc::now().naive_utc()
} }
// is_failed returns whether the cache task downloads failed. /// is_failed returns whether the cache task downloads failed.
pub fn is_failed(&self) -> bool { pub fn is_failed(&self) -> bool {
self.failed_at.is_some() self.failed_at.is_some()
} }
// is_finished returns whether the cache task downloads finished. /// is_finished returns whether the cache task downloads finished.
pub fn is_finished(&self) -> bool { pub fn is_finished(&self) -> bool {
self.finished_at.is_some() self.finished_at.is_some()
} }
// is_empty returns whether the cache task is empty. /// is_empty returns whether the cache task is empty.
pub fn is_empty(&self) -> bool { pub fn is_empty(&self) -> bool {
if self.content_length == 0 { if self.content_length == 0 {
return true; return true;
@ -216,76 +216,76 @@ impl CacheTask {
false false
} }
// is_persistent returns whether the cache task is persistent. /// is_persistent returns whether the cache task is persistent.
pub fn is_persistent(&self) -> bool { pub fn is_persistent(&self) -> bool {
self.persistent self.persistent
} }
// piece_length returns the piece length of the cache task. /// piece_length returns the piece length of the cache task.
pub fn piece_length(&self) -> u64 { pub fn piece_length(&self) -> u64 {
self.piece_length self.piece_length
} }
// content_length returns the content length of the cache task. /// content_length returns the content length of the cache task.
pub fn content_length(&self) -> u64 { pub fn content_length(&self) -> u64 {
self.content_length self.content_length
} }
} }
// Piece is the metadata of the piece. /// Piece is the metadata of the piece.
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct Piece { pub struct Piece {
// number is the piece number. /// number is the piece number.
pub number: u32, pub number: u32,
// offset is the offset of the piece in the task. /// offset is the offset of the piece in the task.
pub offset: u64, pub offset: u64,
// length is the length of the piece. /// length is the length of the piece.
pub length: u64, pub length: u64,
// digest is the digest of the piece. /// digest is the digest of the piece.
pub digest: String, pub digest: String,
// parent_id is the parent id of the piece. /// parent_id is the parent id of the piece.
pub parent_id: Option<String>, pub parent_id: Option<String>,
// uploading_count is the count of the piece being uploaded by other peers. /// uploading_count is the count of the piece being uploaded by other peers.
pub uploading_count: u64, pub uploading_count: u64,
// uploaded_count is the count of the piece has been uploaded by other peers. /// uploaded_count is the count of the piece has been uploaded by other peers.
pub uploaded_count: u64, pub uploaded_count: u64,
// updated_at is the time when the piece metadata is updated. If the piece is downloaded /// updated_at is the time when the piece metadata is updated. If the piece is downloaded
// by other peers, it will also update updated_at. /// by other peers, it will also update updated_at.
pub updated_at: NaiveDateTime, pub updated_at: NaiveDateTime,
// created_at is the time when the piece metadata is created. /// created_at is the time when the piece metadata is created.
pub created_at: NaiveDateTime, pub created_at: NaiveDateTime,
// finished_at is the time when the piece downloads finished. /// finished_at is the time when the piece downloads finished.
pub finished_at: Option<NaiveDateTime>, pub finished_at: Option<NaiveDateTime>,
} }
// Piece implements the piece database object. /// Piece implements the piece database object.
impl DatabaseObject for Piece { impl DatabaseObject for Piece {
// NAMESPACE is the namespace of [Piece] objects. /// NAMESPACE is the namespace of [Piece] objects.
const NAMESPACE: &'static str = "piece"; const NAMESPACE: &'static str = "piece";
} }
// Piece implements the piece metadata. /// Piece implements the piece metadata.
impl Piece { impl Piece {
// is_started returns whether the piece downloads started. /// is_started returns whether the piece downloads started.
pub fn is_started(&self) -> bool { pub fn is_started(&self) -> bool {
self.finished_at.is_none() self.finished_at.is_none()
} }
// is_finished returns whether the piece downloads finished. /// is_finished returns whether the piece downloads finished.
pub fn is_finished(&self) -> bool { pub fn is_finished(&self) -> bool {
self.finished_at.is_some() self.finished_at.is_some()
} }
// cost returns the cost of the piece downloaded. /// cost returns the cost of the piece downloaded.
pub fn cost(&self) -> Option<Duration> { pub fn cost(&self) -> Option<Duration> {
match self match self
.finished_at .finished_at
@ -302,7 +302,7 @@ impl Piece {
} }
} }
// prost_cost returns the prost cost of the piece downloaded. /// prost_cost returns the prost cost of the piece downloaded.
pub fn prost_cost(&self) -> Option<prost_wkt_types::Duration> { pub fn prost_cost(&self) -> Option<prost_wkt_types::Duration> {
match self.cost() { match self.cost() {
Some(cost) => match prost_wkt_types::Duration::try_from(cost) { Some(cost) => match prost_wkt_types::Duration::try_from(cost) {
@ -317,17 +317,17 @@ impl Piece {
} }
} }
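
cost above is the elapsed time from created_at to finished_at, and prost_cost only converts that into a protobuf duration. A trimmed sketch of the cost part (chrono types as in the real struct, values made up):

    use chrono::{NaiveDateTime, Utc};

    struct Piece {
        created_at: NaiveDateTime,
        finished_at: Option<NaiveDateTime>,
    }

    impl Piece {
        fn cost(&self) -> Option<std::time::Duration> {
            self.finished_at
                .map(|finished_at| finished_at - self.created_at)
                .and_then(|delta| delta.to_std().ok())
        }
    }

    fn main() {
        let created_at = Utc::now().naive_utc();
        let piece = Piece {
            created_at,
            finished_at: Some(created_at + chrono::Duration::milliseconds(250)),
        };
        println!("{:?}", piece.cost()); // Some(250ms)
    }
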
// Metadata manages the metadata of [Task], [Piece] and [CacheTask]. /// Metadata manages the metadata of [Task], [Piece] and [CacheTask].
pub struct Metadata<E = RocksdbStorageEngine> pub struct Metadata<E = RocksdbStorageEngine>
where where
E: StorageEngineOwned, E: StorageEngineOwned,
{ {
// db is the underlying storage engine instance. /// db is the underlying storage engine instance.
db: E, db: E,
} }
impl<E: StorageEngineOwned> Metadata<E> { impl<E: StorageEngineOwned> Metadata<E> {
// download_task_started updates the metadata of the task when the task downloads started. /// download_task_started updates the metadata of the task when the task downloads started.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_task_started( pub fn download_task_started(
&self, &self,
@ -381,7 +381,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task) Ok(task)
} }
// download_task_finished updates the metadata of the task when the task downloads finished. /// download_task_finished updates the metadata of the task when the task downloads finished.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_task_finished(&self, id: &str) -> Result<Task> { pub fn download_task_finished(&self, id: &str) -> Result<Task> {
let task = match self.db.get::<Task>(id.as_bytes())? { let task = match self.db.get::<Task>(id.as_bytes())? {
@ -398,7 +398,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task) Ok(task)
} }
// download_task_failed updates the metadata of the task when the task downloads failed. /// download_task_failed updates the metadata of the task when the task downloads failed.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_task_failed(&self, id: &str) -> Result<Task> { pub fn download_task_failed(&self, id: &str) -> Result<Task> {
let task = match self.db.get::<Task>(id.as_bytes())? { let task = match self.db.get::<Task>(id.as_bytes())? {
@ -414,7 +414,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task) Ok(task)
} }
// prefetch_task_started updates the metadata of the task when the task prefetch started. /// prefetch_task_started updates the metadata of the task when the task prefetch started.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn prefetch_task_started(&self, id: &str) -> Result<Task> { pub fn prefetch_task_started(&self, id: &str) -> Result<Task> {
let task = match self.db.get::<Task>(id.as_bytes())? { let task = match self.db.get::<Task>(id.as_bytes())? {
@ -436,7 +436,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task) Ok(task)
} }
// prefetch_task_failed updates the metadata of the task when the task prefetch failed. /// prefetch_task_failed updates the metadata of the task when the task prefetch failed.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn prefetch_task_failed(&self, id: &str) -> Result<Task> { pub fn prefetch_task_failed(&self, id: &str) -> Result<Task> {
let task = match self.db.get::<Task>(id.as_bytes())? { let task = match self.db.get::<Task>(id.as_bytes())? {
@ -453,7 +453,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task) Ok(task)
} }
// upload_task_started updates the metadata of the task when task uploads started. /// upload_task_started updates the metadata of the task when task uploads started.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn upload_task_started(&self, id: &str) -> Result<Task> { pub fn upload_task_started(&self, id: &str) -> Result<Task> {
let task = match self.db.get::<Task>(id.as_bytes())? { let task = match self.db.get::<Task>(id.as_bytes())? {
@ -469,7 +469,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task) Ok(task)
} }
// upload_task_finished updates the metadata of the task when task uploads finished. /// upload_task_finished updates the metadata of the task when task uploads finished.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn upload_task_finished(&self, id: &str) -> Result<Task> { pub fn upload_task_finished(&self, id: &str) -> Result<Task> {
let task = match self.db.get::<Task>(id.as_bytes())? { let task = match self.db.get::<Task>(id.as_bytes())? {
@ -486,7 +486,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task) Ok(task)
} }
// upload_task_failed updates the metadata of the task when the task uploads failed. /// upload_task_failed updates the metadata of the task when the task uploads failed.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn upload_task_failed(&self, id: &str) -> Result<Task> { pub fn upload_task_failed(&self, id: &str) -> Result<Task> {
let task = match self.db.get::<Task>(id.as_bytes())? { let task = match self.db.get::<Task>(id.as_bytes())? {
@ -502,13 +502,13 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task) Ok(task)
} }
// get_task gets the task metadata. /// get_task gets the task metadata.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_task(&self, id: &str) -> Result<Option<Task>> { pub fn get_task(&self, id: &str) -> Result<Option<Task>> {
self.db.get(id.as_bytes()) self.db.get(id.as_bytes())
} }
// get_tasks gets the task metadatas. /// get_tasks gets the task metadatas.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_tasks(&self) -> Result<Vec<Task>> { pub fn get_tasks(&self) -> Result<Vec<Task>> {
let tasks = self let tasks = self
@ -526,16 +526,16 @@ impl<E: StorageEngineOwned> Metadata<E> {
.collect() .collect()
} }
// delete_task deletes the task metadata. /// delete_task deletes the task metadata.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn delete_task(&self, id: &str) -> Result<()> { pub fn delete_task(&self, id: &str) -> Result<()> {
info!("delete task metadata {}", id); info!("delete task metadata {}", id);
self.db.delete::<Task>(id.as_bytes()) self.db.delete::<Task>(id.as_bytes())
} }
// create_persistent_cache_task creates a new persistent cache task. /// create_persistent_cache_task creates a new persistent cache task.
// If the cache task imports the content to the dfdaemon finished, /// Once the cache task has finished importing its content into the dfdaemon,
// the dfdaemon will create a persistent cache task metadata. /// the dfdaemon creates the persistent cache task metadata.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn create_persistent_cache_task( pub fn create_persistent_cache_task(
&self, &self,
@ -562,9 +562,9 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task) Ok(task)
} }
// download_cache_task_started updates the metadata of the cache task when /// download_cache_task_started updates the metadata of the cache task when
// the cache task downloads started. If the cache task downloaded by scheduler /// the cache task download starts. If the cache task is downloaded by the scheduler
// to create persistent cache task, the persistent should be set to true. /// to create a persistent cache task, persistent should be set to true.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_cache_task_started( pub fn download_cache_task_started(
&self, &self,
@ -597,7 +597,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task) Ok(task)
} }
// download_cache_task_finished updates the metadata of the cache task when the cache task downloads finished. /// download_cache_task_finished updates the metadata of the cache task when the cache task downloads finished.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_cache_task_finished(&self, id: &str) -> Result<CacheTask> { pub fn download_cache_task_finished(&self, id: &str) -> Result<CacheTask> {
let task = match self.db.get::<CacheTask>(id.as_bytes())? { let task = match self.db.get::<CacheTask>(id.as_bytes())? {
@ -619,7 +619,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task) Ok(task)
} }
// download_cache_task_failed updates the metadata of the cache task when the cache task downloads failed. /// download_cache_task_failed updates the metadata of the cache task when the cache task downloads failed.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_cache_task_failed(&self, id: &str) -> Result<CacheTask> { pub fn download_cache_task_failed(&self, id: &str) -> Result<CacheTask> {
let task = match self.db.get::<CacheTask>(id.as_bytes())? { let task = match self.db.get::<CacheTask>(id.as_bytes())? {
@ -635,7 +635,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task) Ok(task)
} }
// upload_cache_task_started updates the metadata of the cache task when cache task uploads started. /// upload_cache_task_started updates the metadata of the cache task when cache task uploads started.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn upload_cache_task_started(&self, id: &str) -> Result<CacheTask> { pub fn upload_cache_task_started(&self, id: &str) -> Result<CacheTask> {
let task = match self.db.get::<CacheTask>(id.as_bytes())? { let task = match self.db.get::<CacheTask>(id.as_bytes())? {
@ -651,7 +651,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task) Ok(task)
} }
// upload_cache_task_finished updates the metadata of the cache task when cache task uploads finished. /// upload_cache_task_finished updates the metadata of the cache task when cache task uploads finished.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn upload_cache_task_finished(&self, id: &str) -> Result<CacheTask> { pub fn upload_cache_task_finished(&self, id: &str) -> Result<CacheTask> {
let task = match self.db.get::<CacheTask>(id.as_bytes())? { let task = match self.db.get::<CacheTask>(id.as_bytes())? {
@ -668,7 +668,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task) Ok(task)
} }
// upload_cache_task_failed updates the metadata of the cache task when the cache task uploads failed. /// upload_cache_task_failed updates the metadata of the cache task when the cache task uploads failed.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn upload_cache_task_failed(&self, id: &str) -> Result<CacheTask> { pub fn upload_cache_task_failed(&self, id: &str) -> Result<CacheTask> {
let task = match self.db.get::<CacheTask>(id.as_bytes())? { let task = match self.db.get::<CacheTask>(id.as_bytes())? {
@ -684,27 +684,27 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(task) Ok(task)
} }
// get_cache_task gets the cache task metadata. /// get_cache_task gets the cache task metadata.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_cache_task(&self, id: &str) -> Result<Option<CacheTask>> { pub fn get_cache_task(&self, id: &str) -> Result<Option<CacheTask>> {
self.db.get(id.as_bytes()) self.db.get(id.as_bytes())
} }
// get_cache_tasks gets the cache task metadatas. /// get_cache_tasks gets the cache task metadatas.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_cache_tasks(&self) -> Result<Vec<CacheTask>> { pub fn get_cache_tasks(&self) -> Result<Vec<CacheTask>> {
let iter = self.db.iter::<CacheTask>()?; let iter = self.db.iter::<CacheTask>()?;
iter.map(|ele| ele.map(|(_, task)| task)).collect() iter.map(|ele| ele.map(|(_, task)| task)).collect()
} }
// delete_cache_task deletes the cache task metadata. /// delete_cache_task deletes the cache task metadata.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn delete_cache_task(&self, id: &str) -> Result<()> { pub fn delete_cache_task(&self, id: &str) -> Result<()> {
info!("delete cache task metadata {}", id); info!("delete cache task metadata {}", id);
self.db.delete::<CacheTask>(id.as_bytes()) self.db.delete::<CacheTask>(id.as_bytes())
} }
// download_piece_started updates the metadata of the piece when the piece downloads started. /// download_piece_started updates the metadata of the piece when the piece downloads started.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_piece_started(&self, task_id: &str, number: u32) -> Result<Piece> { pub fn download_piece_started(&self, task_id: &str, number: u32) -> Result<Piece> {
// Construct the piece metadata. // Construct the piece metadata.
@ -721,7 +721,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(piece) Ok(piece)
} }
// download_piece_finished updates the metadata of the piece when the piece downloads finished. /// download_piece_finished updates the metadata of the piece when the piece downloads finished.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_piece_finished( pub fn download_piece_finished(
&self, &self,
@ -751,19 +751,19 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(piece) Ok(piece)
} }
// download_piece_failed updates the metadata of the piece when the piece downloads failed. /// download_piece_failed updates the metadata of the piece when the piece downloads failed.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_piece_failed(&self, task_id: &str, number: u32) -> Result<()> { pub fn download_piece_failed(&self, task_id: &str, number: u32) -> Result<()> {
self.delete_piece(task_id, number) self.delete_piece(task_id, number)
} }
// wait_for_piece_finished_failed waits for the piece to be finished or failed. /// wait_for_piece_finished_failed waits for the piece to be finished or failed.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn wait_for_piece_finished_failed(&self, task_id: &str, number: u32) -> Result<()> { pub fn wait_for_piece_finished_failed(&self, task_id: &str, number: u32) -> Result<()> {
self.delete_piece(task_id, number) self.delete_piece(task_id, number)
} }
// upload_piece_started updates the metadata of the piece when piece uploads started. /// upload_piece_started updates the metadata of the piece when piece uploads started.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn upload_piece_started(&self, task_id: &str, number: u32) -> Result<Piece> { pub fn upload_piece_started(&self, task_id: &str, number: u32) -> Result<Piece> {
// Get the piece id. // Get the piece id.
@ -781,7 +781,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(piece) Ok(piece)
} }
// upload_piece_finished updates the metadata of the piece when piece uploads finished. /// upload_piece_finished updates the metadata of the piece when piece uploads finished.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn upload_piece_finished(&self, task_id: &str, number: u32) -> Result<Piece> { pub fn upload_piece_finished(&self, task_id: &str, number: u32) -> Result<Piece> {
// Get the piece id. // Get the piece id.
@ -800,7 +800,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(piece) Ok(piece)
} }
// upload_piece_failed updates the metadata of the piece when the piece uploads failed. /// upload_piece_failed updates the metadata of the piece when the piece uploads failed.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn upload_piece_failed(&self, task_id: &str, number: u32) -> Result<Piece> { pub fn upload_piece_failed(&self, task_id: &str, number: u32) -> Result<Piece> {
// Get the piece id. // Get the piece id.
@ -818,13 +818,13 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(piece) Ok(piece)
} }
// get_piece gets the piece metadata. /// get_piece gets the piece metadata.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_piece(&self, task_id: &str, number: u32) -> Result<Option<Piece>> { pub fn get_piece(&self, task_id: &str, number: u32) -> Result<Option<Piece>> {
self.db.get(self.piece_id(task_id, number).as_bytes()) self.db.get(self.piece_id(task_id, number).as_bytes())
} }
// get_pieces gets the piece metadatas. /// get_pieces gets the piece metadatas.
pub fn get_pieces(&self, task_id: &str) -> Result<Vec<Piece>> { pub fn get_pieces(&self, task_id: &str) -> Result<Vec<Piece>> {
let pieces = self let pieces = self
.db .db
@ -841,7 +841,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
.collect() .collect()
} }
// delete_piece deletes the piece metadata. /// delete_piece deletes the piece metadata.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn delete_piece(&self, task_id: &str, number: u32) -> Result<()> { pub fn delete_piece(&self, task_id: &str, number: u32) -> Result<()> {
info!("delete piece metadata {}", self.piece_id(task_id, number)); info!("delete piece metadata {}", self.piece_id(task_id, number));
@ -849,7 +849,7 @@ impl<E: StorageEngineOwned> Metadata<E> {
.delete::<Piece>(self.piece_id(task_id, number).as_bytes()) .delete::<Piece>(self.piece_id(task_id, number).as_bytes())
} }
// delete_pieces deletes the piece metadatas. /// delete_pieces deletes the piece metadatas.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn delete_pieces(&self, task_id: &str) -> Result<()> { pub fn delete_pieces(&self, task_id: &str) -> Result<()> {
let piece_ids = self let piece_ids = self
@ -878,16 +878,16 @@ impl<E: StorageEngineOwned> Metadata<E> {
Ok(()) Ok(())
} }
// piece_id returns the piece id. /// piece_id returns the piece id.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn piece_id(&self, task_id: &str, number: u32) -> String { pub fn piece_id(&self, task_id: &str, number: u32) -> String {
format!("{}-{}", task_id, number) format!("{}-{}", task_id, number)
} }
} }
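The composite key produced by piece_id, "{task_id}-{number}", means every piece of a task shares the task id as a key prefix, which is presumably what the prefix-based lookups and deletions in the storage engine below rely on. A std-only illustration with a hypothetical task id:

fn piece_id(task_id: &str, number: u32) -> String {
    format!("{}-{}", task_id, number)
}

fn main() {
    let keys: Vec<String> = (0..3).map(|n| piece_id("example-task", n)).collect();
    assert_eq!(keys, ["example-task-0", "example-task-1", "example-task-2"]);

    // All piece keys of one task can be selected by prefix.
    assert!(keys.iter().all(|key| key.starts_with("example-task")));
}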
// Metadata implements the metadata of the storage engine. /// Metadata implements the metadata of the storage engine.
impl Metadata<RocksdbStorageEngine> { impl Metadata<RocksdbStorageEngine> {
// new creates a new metadata instance. /// new creates a new metadata instance.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new( pub fn new(
config: Arc<Config>, config: Arc<Config>,

View File

@ -32,18 +32,18 @@ pub struct RocksdbStorageEngine {
inner: rocksdb::DB, inner: rocksdb::DB,
} }
// RocksdbStorageEngine implements deref of the storage engine. /// RocksdbStorageEngine implements deref of the storage engine.
impl Deref for RocksdbStorageEngine { impl Deref for RocksdbStorageEngine {
// Target is the inner rocksdb DB. /// Target is the inner rocksdb DB.
type Target = rocksdb::DB; type Target = rocksdb::DB;
// deref returns the inner rocksdb DB. /// deref returns the inner rocksdb DB.
fn deref(&self) -> &Self::Target { fn deref(&self) -> &Self::Target {
&self.inner &self.inner
} }
} }
// RocksdbStorageEngine implements the storage engine of the rocksdb. /// RocksdbStorageEngine implements the storage engine backed by rocksdb.
impl RocksdbStorageEngine { impl RocksdbStorageEngine {
/// DEFAULT_DIR_NAME is the default directory name to store metadata. /// DEFAULT_DIR_NAME is the default directory name to store metadata.
const DEFAULT_DIR_NAME: &'static str = "metadata"; const DEFAULT_DIR_NAME: &'static str = "metadata";
@ -60,10 +60,10 @@ impl RocksdbStorageEngine {
/// DEFAULT_CACHE_SIZE is the default cache size for rocksdb, default is 512MB. /// DEFAULT_CACHE_SIZE is the default cache size for rocksdb, default is 512MB.
const DEFAULT_CACHE_SIZE: usize = 512 * 1024 * 1024; const DEFAULT_CACHE_SIZE: usize = 512 * 1024 * 1024;
// DEFAULT_LOG_MAX_SIZE is the default max log size for rocksdb, default is 64MB. /// DEFAULT_LOG_MAX_SIZE is the default max log size for rocksdb, default is 64MB.
const DEFAULT_LOG_MAX_SIZE: usize = 64 * 1024 * 1024; const DEFAULT_LOG_MAX_SIZE: usize = 64 * 1024 * 1024;
// DEFAULT_LOG_MAX_FILES is the default max log files for rocksdb. /// DEFAULT_LOG_MAX_FILES is the default max log files for rocksdb.
const DEFAULT_LOG_MAX_FILES: usize = 10; const DEFAULT_LOG_MAX_FILES: usize = 10;
/// open opens a rocksdb storage engine with the given directory and column families. /// open opens a rocksdb storage engine with the given directory and column families.
@ -124,9 +124,9 @@ impl RocksdbStorageEngine {
} }
} }
// RocksdbStorageEngine implements the storage engine operations. /// RocksdbStorageEngine implements the storage engine operations.
impl Operations for RocksdbStorageEngine { impl Operations for RocksdbStorageEngine {
// get gets the object by key. /// get gets the object by key.
#[instrument(skip_all)] #[instrument(skip_all)]
fn get<O: DatabaseObject>(&self, key: &[u8]) -> Result<Option<O>> { fn get<O: DatabaseObject>(&self, key: &[u8]) -> Result<Option<O>> {
let cf = cf_handle::<O>(self)?; let cf = cf_handle::<O>(self)?;
@ -142,7 +142,7 @@ impl Operations for RocksdbStorageEngine {
} }
} }
// put puts the object by key. /// put puts the object by key.
#[instrument(skip_all)] #[instrument(skip_all)]
fn put<O: DatabaseObject>(&self, key: &[u8], value: &O) -> Result<()> { fn put<O: DatabaseObject>(&self, key: &[u8], value: &O) -> Result<()> {
let cf = cf_handle::<O>(self)?; let cf = cf_handle::<O>(self)?;
@ -155,7 +155,7 @@ impl Operations for RocksdbStorageEngine {
Ok(()) Ok(())
} }
// delete deletes the object by key. /// delete deletes the object by key.
#[instrument(skip_all)] #[instrument(skip_all)]
fn delete<O: DatabaseObject>(&self, key: &[u8]) -> Result<()> { fn delete<O: DatabaseObject>(&self, key: &[u8]) -> Result<()> {
let cf = cf_handle::<O>(self)?; let cf = cf_handle::<O>(self)?;
@ -167,7 +167,7 @@ impl Operations for RocksdbStorageEngine {
Ok(()) Ok(())
} }
// iter iterates all objects. /// iter iterates all objects.
#[instrument(skip_all)] #[instrument(skip_all)]
fn iter<O: DatabaseObject>(&self) -> Result<impl Iterator<Item = Result<(Box<[u8]>, O)>>> { fn iter<O: DatabaseObject>(&self) -> Result<impl Iterator<Item = Result<(Box<[u8]>, O)>>> {
let cf = cf_handle::<O>(self)?; let cf = cf_handle::<O>(self)?;
@ -178,7 +178,7 @@ impl Operations for RocksdbStorageEngine {
})) }))
} }
// iter_raw iterates all objects without serialization. /// iter_raw iterates all objects without serialization.
#[instrument(skip_all)] #[instrument(skip_all)]
fn iter_raw<O: DatabaseObject>( fn iter_raw<O: DatabaseObject>(
&self, &self,
@ -192,7 +192,7 @@ impl Operations for RocksdbStorageEngine {
})) }))
} }
// prefix_iter iterates all objects with prefix. /// prefix_iter iterates all objects with prefix.
#[instrument(skip_all)] #[instrument(skip_all)]
fn prefix_iter<O: DatabaseObject>( fn prefix_iter<O: DatabaseObject>(
&self, &self,
@ -206,7 +206,7 @@ impl Operations for RocksdbStorageEngine {
})) }))
} }
// prefix_iter_raw iterates all objects with prefix without serialization. /// prefix_iter_raw iterates all objects with prefix without serialization.
#[instrument(skip_all)] #[instrument(skip_all)]
fn prefix_iter_raw<O: DatabaseObject>( fn prefix_iter_raw<O: DatabaseObject>(
&self, &self,
@ -219,7 +219,7 @@ impl Operations for RocksdbStorageEngine {
})) }))
} }
// batch_delete deletes objects by keys. /// batch_delete deletes objects by keys.
#[instrument(skip_all)] #[instrument(skip_all)]
fn batch_delete<O: DatabaseObject>(&self, keys: Vec<&[u8]>) -> Result<()> { fn batch_delete<O: DatabaseObject>(&self, keys: Vec<&[u8]>) -> Result<()> {
let cf = cf_handle::<O>(self)?; let cf = cf_handle::<O>(self)?;
@ -236,7 +236,7 @@ impl Operations for RocksdbStorageEngine {
} }
} }
// RocksdbStorageEngine implements the rocksdb of the storage engine. /// RocksdbStorageEngine implements the StorageEngine trait for rocksdb.
impl<'db> StorageEngine<'db> for RocksdbStorageEngine {} impl<'db> StorageEngine<'db> for RocksdbStorageEngine {}
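For orientation, here is a small, self-contained sketch of the raw rust-rocksdb calls that a trait like Operations wraps: put, get and delete on a column family plus a prefix scan. It assumes a recent rust-rocksdb release (0.21+ iterator API) and a throwaway path, and it is not the crate's actual engine setup, which also tunes caching, logging and prefix extraction as shown above.

use rocksdb::{Options, DB};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut opts = Options::default();
    opts.create_if_missing(true);
    opts.create_missing_column_families(true);

    // One column family per object type, mirroring the cf_handle::<O>() lookups above.
    let db = DB::open_cf(&opts, "/tmp/metadata-example", ["task", "piece"])?;
    let cf = db.cf_handle("piece").ok_or("missing column family")?;

    // put, get and delete correspond to the Operations methods of the same names.
    db.put_cf(cf, b"example-task-0", b"serialized piece metadata")?;
    assert!(db.get_cf(cf, b"example-task-0")?.is_some());

    // prefix_iterator_cf is the building block behind the prefix_iter* methods; note
    // that it only stops at the prefix boundary when a prefix extractor is configured.
    for entry in db.prefix_iterator_cf(cf, b"example-task-") {
        let (key, _value) = entry?;
        println!("found key: {}", String::from_utf8_lossy(&key));
    }

    db.delete_cf(cf, b"example-task-0")?;
    Ok(())
}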
/// cf_handle returns the column family handle for the given object. /// cf_handle returns the column family handle for the given object.

View File

@ -22,28 +22,28 @@ use std::path::Path;
use std::str::FromStr; use std::str::FromStr;
use tracing::instrument; use tracing::instrument;
// SEPARATOR is the separator of digest. /// SEPARATOR is the separator of digest.
pub const SEPARATOR: &str = ":"; pub const SEPARATOR: &str = ":";
// Algorithm is an enum of the algorithm that is used to generate digest. /// Algorithm is an enum of the algorithms used to generate a digest.
#[derive(Debug, Clone, Copy, PartialEq, Eq)] #[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Algorithm { pub enum Algorithm {
// Crc32 is crc32 algorithm for generate digest. /// Crc32 is the crc32 algorithm used to generate a digest.
Crc32, Crc32,
// Blake3 is blake3 algorithm for generate digest. /// Blake3 is the blake3 algorithm used to generate a digest.
Blake3, Blake3,
// Sha256 is sha256 algorithm for generate digest. /// Sha256 is the sha256 algorithm used to generate a digest.
Sha256, Sha256,
// Sha512 is sha512 algorithm for generate digest. /// Sha512 is the sha512 algorithm used to generate a digest.
Sha512, Sha512,
} }
// Algorithm implements the Display. /// Algorithm implements the Display.
impl fmt::Display for Algorithm { impl fmt::Display for Algorithm {
// fmt formats the value using the given formatter. /// fmt formats the value using the given formatter.
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self { match self {
Algorithm::Crc32 => write!(f, "crc32"), Algorithm::Crc32 => write!(f, "crc32"),
@ -54,11 +54,11 @@ impl fmt::Display for Algorithm {
} }
} }
// Algorithm implements the FromStr. /// Algorithm implements the FromStr.
impl FromStr for Algorithm { impl FromStr for Algorithm {
type Err = String; type Err = String;
// from_str parses an algorithm string. /// from_str parses an algorithm string.
fn from_str(s: &str) -> Result<Self, Self::Err> { fn from_str(s: &str) -> Result<Self, Self::Err> {
match s { match s {
"crc32" => Ok(Algorithm::Crc32), "crc32" => Ok(Algorithm::Crc32),
@ -70,23 +70,23 @@ impl FromStr for Algorithm {
} }
} }
// Digest is a struct that is used to generate digest. /// Digest is a struct that is used to generate digest.
pub struct Digest { pub struct Digest {
// algorithm is the algorithm that is used to generate digest. /// algorithm is the algorithm that is used to generate digest.
algorithm: Algorithm, algorithm: Algorithm,
// encoded is the encoded digest. /// encoded is the encoded digest.
encoded: String, encoded: String,
} }
// Digest implements the Digest. /// Digest implements the Digest.
impl Digest { impl Digest {
// new returns a new Digest. /// new returns a new Digest.
pub fn new(algorithm: Algorithm, encoded: String) -> Self { pub fn new(algorithm: Algorithm, encoded: String) -> Self {
Self { algorithm, encoded } Self { algorithm, encoded }
} }
// algorithm returns the algorithm of the digest. /// algorithm returns the algorithm of the digest.
pub fn algorithm(&self) -> Algorithm { pub fn algorithm(&self) -> Algorithm {
self.algorithm self.algorithm
} }
@ -97,19 +97,19 @@ impl Digest {
} }
} }
// Digest implements the Display. /// Digest implements the Display.
impl fmt::Display for Digest { impl fmt::Display for Digest {
// fmt formats the value using the given formatter. /// fmt formats the value using the given formatter.
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}{}{}", self.algorithm, SEPARATOR, self.encoded) write!(f, "{}{}{}", self.algorithm, SEPARATOR, self.encoded)
} }
} }
// Digest implements the FromStr. /// Digest implements the FromStr.
impl FromStr for Digest { impl FromStr for Digest {
type Err = String; type Err = String;
// from_str parses a digest string. /// from_str parses a digest string.
fn from_str(s: &str) -> Result<Self, Self::Err> { fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = s.splitn(2, SEPARATOR).collect(); let parts: Vec<&str> = s.splitn(2, SEPARATOR).collect();
if parts.len() != 2 { if parts.len() != 2 {
@ -128,7 +128,7 @@ impl FromStr for Digest {
} }
} }
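Concretely, a digest travels as a single string joined by SEPARATOR and is split back apart with splitn(2, ...), exactly as the Display and FromStr impls above do. A tiny std-only illustration, with a placeholder hash value rather than a real checksum:

const SEPARATOR: &str = ":";

fn main() {
    let digest = format!("sha256{}0123abcd", SEPARATOR);

    let mut parts = digest.splitn(2, SEPARATOR);
    let algorithm = parts.next().unwrap();
    let encoded = parts.next().unwrap();

    assert_eq!(algorithm, "sha256");
    assert_eq!(encoded, "0123abcd");
}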
// calculate_file_hash calculates the hash of a file. /// calculate_file_hash calculates the hash of a file.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn calculate_file_hash(algorithm: Algorithm, path: &Path) -> ClientResult<Digest> { pub fn calculate_file_hash(algorithm: Algorithm, path: &Path) -> ClientResult<Digest> {
let f = std::fs::File::open(path)?; let f = std::fs::File::open(path)?;

View File

@ -23,7 +23,7 @@ use reqwest::header::{HeaderMap, HeaderValue};
use std::collections::HashMap; use std::collections::HashMap;
use tracing::{error, instrument}; use tracing::{error, instrument};
// reqwest_headermap_to_hashmap converts a reqwest headermap to a hashmap. /// reqwest_headermap_to_hashmap converts a reqwest headermap to a hashmap.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn reqwest_headermap_to_hashmap(header: &HeaderMap<HeaderValue>) -> HashMap<String, String> { pub fn reqwest_headermap_to_hashmap(header: &HeaderMap<HeaderValue>) -> HashMap<String, String> {
let mut hashmap: HashMap<String, String> = HashMap::new(); let mut hashmap: HashMap<String, String> = HashMap::new();
@ -38,7 +38,7 @@ pub fn reqwest_headermap_to_hashmap(header: &HeaderMap<HeaderValue>) -> HashMap<
hashmap hashmap
} }
// hashmap_to_reqwest_headermap converts a hashmap to a reqwest headermap. /// hashmap_to_reqwest_headermap converts a hashmap to a reqwest headermap.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn hashmap_to_reqwest_headermap( pub fn hashmap_to_reqwest_headermap(
header: &HashMap<String, String>, header: &HashMap<String, String>,
@ -47,7 +47,7 @@ pub fn hashmap_to_reqwest_headermap(
Ok(header) Ok(header)
} }
// hashmap_to_hyper_header_map converts a hashmap to a hyper header map. /// hashmap_to_hyper_header_map converts a hashmap to a hyper header map.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn hashmap_to_hyper_header_map( pub fn hashmap_to_hyper_header_map(
header: &HashMap<String, String>, header: &HashMap<String, String>,
@ -56,10 +56,10 @@ pub fn hashmap_to_hyper_header_map(
Ok(header) Ok(header)
} }
// TODO: Remove the conversion after the http crate version is the same. /// TODO: Remove the conversion once both crates use the same http crate version.
// Convert the Reqwest header to the Hyper header, because of the http crate /// Convert the Reqwest header to the Hyper header, because the http crate
// version is different. Reqwest header depends on the http crate /// versions differ: the Reqwest header depends on the http crate
// version 0.2, but the Hyper header depends on the http crate version 0.1. /// version 0.2, but the Hyper header depends on the http crate version 0.1.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn hyper_headermap_to_reqwest_headermap( pub fn hyper_headermap_to_reqwest_headermap(
hyper_header: &hyper::header::HeaderMap, hyper_header: &hyper::header::HeaderMap,
@ -95,7 +95,7 @@ pub fn hyper_headermap_to_reqwest_headermap(
reqwest_header reqwest_header
} }
// header_vec_to_hashmap converts a vector of header string to a hashmap. /// header_vec_to_hashmap converts a vector of header string to a hashmap.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn header_vec_to_hashmap(raw_header: Vec<String>) -> Result<HashMap<String, String>> { pub fn header_vec_to_hashmap(raw_header: Vec<String>) -> Result<HashMap<String, String>> {
let mut header = HashMap::new(); let mut header = HashMap::new();
@ -109,7 +109,7 @@ pub fn header_vec_to_hashmap(raw_header: Vec<String>) -> Result<HashMap<String,
Ok(header) Ok(header)
} }
// header_vec_to_reqwest_headermap converts a vector of header string to a reqwest headermap. /// header_vec_to_reqwest_headermap converts a vector of header string to a reqwest headermap.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn header_vec_to_reqwest_headermap( pub fn header_vec_to_reqwest_headermap(
raw_header: Vec<String>, raw_header: Vec<String>,
@ -117,7 +117,7 @@ pub fn header_vec_to_reqwest_headermap(
hashmap_to_reqwest_headermap(&header_vec_to_hashmap(raw_header)?) hashmap_to_reqwest_headermap(&header_vec_to_hashmap(raw_header)?)
} }
// get_range gets the range from http header. /// get_range gets the range from http header.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_range(header: &HeaderMap, content_length: u64) -> Result<Option<Range>> { pub fn get_range(header: &HeaderMap, content_length: u64) -> Result<Option<Range>> {
match header.get(reqwest::header::RANGE) { match header.get(reqwest::header::RANGE) {
@ -129,9 +129,9 @@ pub fn get_range(header: &HeaderMap, content_length: u64) -> Result<Option<Range
} }
} }
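Before the full parser below, which also covers multi-range forms such as "bytes=0-0,-1" and likely delegates to a dedicated range-parsing crate, the single-range arithmetic can be sketched with std alone. This is illustrative only; it returns an (offset, length) pair for the three single-range forms:

fn parse_single_range(spec: &str, content_length: u64) -> Option<(u64, u64)> {
    let spec = spec.strip_prefix("bytes=")?;
    let (start, end) = spec.split_once('-')?;
    match (start, end) {
        // "bytes=-50": the last 50 bytes of the object.
        ("", suffix) => {
            let suffix_len: u64 = suffix.parse().ok()?;
            Some((content_length.saturating_sub(suffix_len), suffix_len.min(content_length)))
        }
        // "bytes=150-": from offset 150 to the end.
        (start, "") => {
            let start: u64 = start.parse().ok()?;
            Some((start, content_length.checked_sub(start)?))
        }
        // "bytes=100-200": a closed, inclusive interval.
        (start, end) => {
            let start: u64 = start.parse().ok()?;
            let end: u64 = end.parse().ok()?;
            Some((start, end.checked_sub(start)? + 1))
        }
    }
}

fn main() {
    assert_eq!(parse_single_range("bytes=100-200", 1000), Some((100, 101)));
    assert_eq!(parse_single_range("bytes=-50", 1000), Some((950, 50)));
    assert_eq!(parse_single_range("bytes=150-", 1000), Some((150, 850)));
}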
// parse_range_header parses a Range header string as per RFC 7233, /// parse_range_header parses a Range header string as per RFC 7233.
// supported Range Header: "Range": "bytes=100-200", "Range": "bytes=-50", /// Supported Range headers: "Range": "bytes=100-200", "Range": "bytes=-50",
// "Range": "bytes=150-", "Range": "bytes=0-0,-1". /// "Range": "bytes=150-", "Range": "bytes=0-0,-1".
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn parse_range_header(range_header_value: &str, content_length: u64) -> Result<Range> { pub fn parse_range_header(range_header_value: &str, content_length: u64) -> Result<Range> {
let parsed_ranges = let parsed_ranges =

View File

@ -25,31 +25,31 @@ use tracing::instrument;
use url::Url; use url::Url;
use uuid::Uuid; use uuid::Uuid;
// SEED_PEER_KEY is the key of the seed peer. /// SEED_PEER_KEY is the key of the seed peer.
const SEED_PEER_KEY: &str = "seed"; const SEED_PEER_KEY: &str = "seed";
// CACHE_KEY is the key of the cache. /// CACHE_KEY is the key of the cache.
const CACHE_KEY: &str = "cache"; const CACHE_KEY: &str = "cache";
// PERSISTENT_CACHE_KEY is the key of the persistent cache. /// PERSISTENT_CACHE_KEY is the key of the persistent cache.
const PERSISTENT_CACHE_KEY: &str = "persistent"; const PERSISTENT_CACHE_KEY: &str = "persistent";
// IDGenerator is used to generate the id for the resources. /// IDGenerator is used to generate the id for the resources.
#[derive(Debug)] #[derive(Debug)]
pub struct IDGenerator { pub struct IDGenerator {
// ip is the ip of the host. /// ip is the ip of the host.
ip: String, ip: String,
// hostname is the hostname of the host. /// hostname is the hostname of the host.
hostname: String, hostname: String,
// is_seed_peer indicates whether the host is a seed peer. /// is_seed_peer indicates whether the host is a seed peer.
is_seed_peer: bool, is_seed_peer: bool,
} }
// IDGenerator implements the IDGenerator. /// IDGenerator implements the IDGenerator.
impl IDGenerator { impl IDGenerator {
// new creates a new IDGenerator. /// new creates a new IDGenerator.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new(ip: String, hostname: String, is_seed_peer: bool) -> Self { pub fn new(ip: String, hostname: String, is_seed_peer: bool) -> Self {
IDGenerator { IDGenerator {
@ -59,7 +59,7 @@ impl IDGenerator {
} }
} }
// host_id generates the host id. /// host_id generates the host id.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn host_id(&self) -> String { pub fn host_id(&self) -> String {
if self.is_seed_peer { if self.is_seed_peer {
@ -69,7 +69,7 @@ impl IDGenerator {
format!("{}-{}", self.ip, self.hostname) format!("{}-{}", self.ip, self.hostname)
} }
// task_id generates the task id. /// task_id generates the task id.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn task_id( pub fn task_id(
&self, &self,
@ -113,7 +113,7 @@ impl IDGenerator {
Ok(hex::encode(hasher.finalize())) Ok(hex::encode(hasher.finalize()))
} }
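The hex::encode(hasher.finalize()) above suggests task ids are content addressed: a SHA-256 over the URL and the relevant download options. The sketch below assumes the sha2 and hex crates; the exact fields that are hashed, and their order, are illustrative rather than the crate's definition.

use sha2::{Digest, Sha256};

// Hypothetical inputs: hash the URL plus whichever optional attributes distinguish tasks.
fn example_task_id(url: &str, tag: Option<&str>, application: Option<&str>) -> String {
    let mut hasher = Sha256::new();
    hasher.update(url.as_bytes());
    if let Some(tag) = tag {
        hasher.update(tag.as_bytes());
    }
    if let Some(application) = application {
        hasher.update(application.as_bytes());
    }
    hex::encode(hasher.finalize())
}

fn main() {
    let id = example_task_id("https://example.com/artifact.tar.gz", Some("v1"), None);
    println!("task id: {}", id);
}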
// cache_task_id generates the cache task id. /// cache_task_id generates the cache task id.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn cache_task_id( pub fn cache_task_id(
&self, &self,
@ -142,7 +142,7 @@ impl IDGenerator {
Ok(hasher.finalize().to_hex().to_string()) Ok(hasher.finalize().to_hex().to_string())
} }
// peer_id generates the peer id. /// peer_id generates the peer id.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn peer_id(&self) -> String { pub fn peer_id(&self) -> String {
if self.is_seed_peer { if self.is_seed_peer {
@ -158,7 +158,7 @@ impl IDGenerator {
format!("{}-{}-{}", self.ip, self.hostname, Uuid::new_v4()) format!("{}-{}-{}", self.ip, self.hostname, Uuid::new_v4())
} }
// cache_peer_id generates the cache peer id. /// cache_peer_id generates the cache peer id.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn cache_peer_id(&self, persistent: bool) -> String { pub fn cache_peer_id(&self, persistent: bool) -> String {
if persistent { if persistent {
@ -181,7 +181,7 @@ impl IDGenerator {
) )
} }
// task_type generates the task type by the task id. /// task_type generates the task type by the task id.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn task_type(&self, id: &str) -> TaskType { pub fn task_type(&self, id: &str) -> TaskType {
if id.contains(CACHE_KEY) { if id.contains(CACHE_KEY) {

View File

@ -24,22 +24,22 @@ use std::vec::Vec;
use std::{fs, io}; use std::{fs, io};
use tracing::instrument; use tracing::instrument;
// NoVerifier is a verifier that does not verify the server certificate. /// NoVerifier is a verifier that does not verify the server certificate.
// It is used for testing and should not be used in production. /// It is used for testing and should not be used in production.
#[derive(Debug)] #[derive(Debug)]
pub struct NoVerifier(Arc<rustls::crypto::CryptoProvider>); pub struct NoVerifier(Arc<rustls::crypto::CryptoProvider>);
// Implement the NoVerifier. /// Implement the NoVerifier.
impl NoVerifier { impl NoVerifier {
// new creates a new NoVerifier. /// new creates a new NoVerifier.
pub fn new() -> Arc<Self> { pub fn new() -> Arc<Self> {
Arc::new(Self(Arc::new(rustls::crypto::ring::default_provider()))) Arc::new(Self(Arc::new(rustls::crypto::ring::default_provider())))
} }
} }
// Implement the ServerCertVerifier trait for NoVerifier. /// Implement the ServerCertVerifier trait for NoVerifier.
impl rustls::client::danger::ServerCertVerifier for NoVerifier { impl rustls::client::danger::ServerCertVerifier for NoVerifier {
// verify_server_cert verifies the server certificate. /// verify_server_cert verifies the server certificate.
fn verify_server_cert( fn verify_server_cert(
&self, &self,
_end_entity: &CertificateDer<'_>, _end_entity: &CertificateDer<'_>,
@ -51,7 +51,7 @@ impl rustls::client::danger::ServerCertVerifier for NoVerifier {
Ok(rustls::client::danger::ServerCertVerified::assertion()) Ok(rustls::client::danger::ServerCertVerified::assertion())
} }
// verify_tls12_signature verifies the TLS 1.2 signature. /// verify_tls12_signature verifies the TLS 1.2 signature.
fn verify_tls12_signature( fn verify_tls12_signature(
&self, &self,
message: &[u8], message: &[u8],
@ -66,7 +66,7 @@ impl rustls::client::danger::ServerCertVerifier for NoVerifier {
) )
} }
// verify_tls13_signature verifies the TLS 1.3 signature. /// verify_tls13_signature verifies the TLS 1.3 signature.
fn verify_tls13_signature( fn verify_tls13_signature(
&self, &self,
message: &[u8], message: &[u8],
@ -81,15 +81,15 @@ impl rustls::client::danger::ServerCertVerifier for NoVerifier {
) )
} }
// supported_verify_schemes returns the supported signature schemes. /// supported_verify_schemes returns the supported signature schemes.
fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> { fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
self.0.signature_verification_algorithms.supported_schemes() self.0.signature_verification_algorithms.supported_schemes()
} }
} }
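A minimal sketch, assuming the rustls 0.22+ builder API, of how a verifier such as NoVerifier is typically plugged into a client configuration for tests; the function name is illustrative.

use std::sync::Arc;

use rustls::client::danger::ServerCertVerifier;

fn insecure_client_config(verifier: Arc<dyn ServerCertVerifier>) -> rustls::ClientConfig {
    // Skip server certificate verification entirely; only ever for test clients.
    rustls::ClientConfig::builder()
        .dangerous()
        .with_custom_certificate_verifier(verifier)
        .with_no_client_auth()
}

Called as insecure_client_config(NoVerifier::new()), this should back test traffic only, never production connections.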
// Generate a CA certificate from PEM format files. /// Generate a CA certificate from PEM format files.
// Generate CA by openssl with PEM format files: /// Generate CA by openssl with PEM format files:
// openssl req -x509 -sha256 -days 36500 -nodes -newkey rsa:4096 -keyout ca.key -out ca.crt /// openssl req -x509 -sha256 -days 36500 -nodes -newkey rsa:4096 -keyout ca.key -out ca.crt
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn generate_ca_cert_from_pem( pub fn generate_ca_cert_from_pem(
ca_cert_path: &PathBuf, ca_cert_path: &PathBuf,
@ -110,7 +110,7 @@ pub fn generate_ca_cert_from_pem(
Ok(ca_cert) Ok(ca_cert)
} }
// Generate certificates from PEM format files. /// Generate certificates from PEM format files.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn generate_certs_from_pem(cert_path: &PathBuf) -> ClientResult<Vec<CertificateDer<'static>>> { pub fn generate_certs_from_pem(cert_path: &PathBuf) -> ClientResult<Vec<CertificateDer<'static>>> {
let f = fs::File::open(cert_path)?; let f = fs::File::open(cert_path)?;
@ -119,8 +119,8 @@ pub fn generate_certs_from_pem(cert_path: &PathBuf) -> ClientResult<Vec<Certific
Ok(certs) Ok(certs)
} }
// generate_self_signed_certs_by_ca_cert generates a self-signed certificates /// generate_self_signed_certs_by_ca_cert generates self-signed certificates
// by given subject alternative names with CA certificate. /// for the given subject alternative names, signed with the CA certificate.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn generate_self_signed_certs_by_ca_cert( pub fn generate_self_signed_certs_by_ca_cert(
ca_cert: &Certificate, ca_cert: &Certificate,
@ -146,7 +146,7 @@ pub fn generate_self_signed_certs_by_ca_cert(
Ok((certs, key)) Ok((certs, key))
} }
// generate_simple_self_signed_certs generates a simple self-signed certificates /// generate_simple_self_signed_certs generates simple self-signed certificates.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn generate_simple_self_signed_certs( pub fn generate_simple_self_signed_certs(
subject_alt_names: impl Into<Vec<String>>, subject_alt_names: impl Into<Vec<String>>,
@ -162,7 +162,7 @@ pub fn generate_simple_self_signed_certs(
Ok((certs, key)) Ok((certs, key))
} }
// certs_to_raw_certs converts DER format of the certificates to raw certificates. /// certs_to_raw_certs converts DER format of the certificates to raw certificates.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn certs_to_raw_certs(certs: Vec<CertificateDer<'static>>) -> Vec<Vec<u8>> { pub fn certs_to_raw_certs(certs: Vec<CertificateDer<'static>>) -> Vec<Vec<u8>> {
certs certs
@ -171,7 +171,7 @@ pub fn certs_to_raw_certs(certs: Vec<CertificateDer<'static>>) -> Vec<Vec<u8>> {
.collect() .collect()
} }
// raw_certs_to_certs converts raw certificates to DER format of certificates. /// raw_certs_to_certs converts raw certificates to DER format of certificates.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn raw_certs_to_certs(raw_certs: Vec<Vec<u8>>) -> Vec<CertificateDer<'static>> { pub fn raw_certs_to_certs(raw_certs: Vec<Vec<u8>>) -> Vec<CertificateDer<'static>> {
raw_certs.into_iter().map(|cert| cert.into()).collect() raw_certs.into_iter().map(|cert| cert.into()).collect()

View File

@ -18,7 +18,7 @@ use std::env;
use std::process::Command; use std::process::Command;
use std::time::{SystemTime, UNIX_EPOCH}; use std::time::{SystemTime, UNIX_EPOCH};
// git_commit_hash returns the short hash of the current git commit. /// git_commit_hash returns the short hash of the current git commit.
fn git_commit_hash() -> String { fn git_commit_hash() -> String {
if let Ok(output) = Command::new("git") if let Ok(output) = Command::new("git")
.args(["rev-parse", "--short", "HEAD"]) .args(["rev-parse", "--short", "HEAD"])

View File

@ -31,24 +31,24 @@ use sysinfo::System;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use tracing::{error, info, instrument}; use tracing::{error, info, instrument};
// ManagerAnnouncer is used to announce the dfdaemon information to the manager. /// ManagerAnnouncer is used to announce the dfdaemon information to the manager.
pub struct ManagerAnnouncer { pub struct ManagerAnnouncer {
// config is the configuration of the dfdaemon. /// config is the configuration of the dfdaemon.
config: Arc<Config>, config: Arc<Config>,
// manager_client is the grpc client of the manager. /// manager_client is the grpc client of the manager.
manager_client: Arc<ManagerClient>, manager_client: Arc<ManagerClient>,
// shutdown is used to shutdown the announcer. /// shutdown is used to shutdown the announcer.
shutdown: shutdown::Shutdown, shutdown: shutdown::Shutdown,
// _shutdown_complete is used to notify the announcer is shutdown. /// _shutdown_complete is used to notify the announcer is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>, _shutdown_complete: mpsc::UnboundedSender<()>,
} }
// ManagerAnnouncer implements the manager announcer of the dfdaemon. /// ManagerAnnouncer implements the manager announcer of the dfdaemon.
impl ManagerAnnouncer { impl ManagerAnnouncer {
// new creates a new manager announcer. /// new creates a new manager announcer.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new( pub fn new(
config: Arc<Config>, config: Arc<Config>,
@ -64,7 +64,7 @@ impl ManagerAnnouncer {
} }
} }
// run announces the dfdaemon information to the manager. /// run announces the dfdaemon information to the manager.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn run(&self) -> Result<()> { pub async fn run(&self) -> Result<()> {
// Clone the shutdown channel. // Clone the shutdown channel.
@ -110,27 +110,27 @@ impl ManagerAnnouncer {
} }
} }
// Announcer is used to announce the dfdaemon information to the manager and scheduler. /// SchedulerAnnouncer is used to announce the dfdaemon information to the scheduler.
pub struct SchedulerAnnouncer { pub struct SchedulerAnnouncer {
// config is the configuration of the dfdaemon. /// config is the configuration of the dfdaemon.
config: Arc<Config>, config: Arc<Config>,
// host_id is the id of the host. /// host_id is the id of the host.
host_id: String, host_id: String,
// scheduler_client is the grpc client of the scheduler. /// scheduler_client is the grpc client of the scheduler.
scheduler_client: Arc<SchedulerClient>, scheduler_client: Arc<SchedulerClient>,
// shutdown is used to shutdown the announcer. /// shutdown is used to shutdown the announcer.
shutdown: shutdown::Shutdown, shutdown: shutdown::Shutdown,
// _shutdown_complete is used to notify the announcer is shutdown. /// _shutdown_complete is used to notify the announcer is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>, _shutdown_complete: mpsc::UnboundedSender<()>,
} }
// SchedulerAnnouncer implements the scheduler announcer of the dfdaemon. /// SchedulerAnnouncer implements the scheduler announcer of the dfdaemon.
impl SchedulerAnnouncer { impl SchedulerAnnouncer {
// new creates a new scheduler announcer. /// new creates a new scheduler announcer.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn new( pub async fn new(
config: Arc<Config>, config: Arc<Config>,
@ -155,7 +155,7 @@ impl SchedulerAnnouncer {
Ok(announcer) Ok(announcer)
} }
// run announces the dfdaemon information to the scheduler. /// run announces the dfdaemon information to the scheduler.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn run(&self) { pub async fn run(&self) {
// Clone the shutdown channel. // Clone the shutdown channel.
@ -193,7 +193,7 @@ impl SchedulerAnnouncer {
} }
} }
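Both announcers' run loops share the same shape: keep announcing on a timer until the shutdown channel fires. A self-contained sketch of that pattern follows; the 30-second interval, channel type and log lines are illustrative and not the daemon's actual configuration.

use tokio::sync::mpsc;
use tokio::time::{interval, Duration};

// Tick on an interval and bail out as soon as shutdown fires.
async fn announce_loop(mut shutdown: mpsc::Receiver<()>) {
    let mut ticker = interval(Duration::from_secs(30));
    loop {
        tokio::select! {
            _ = ticker.tick() => {
                // The real loop would build and send an announce request here.
                println!("announce host");
            }
            _ = shutdown.recv() => {
                println!("shutting down announcer");
                break;
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(1);
    tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(100)).await;
        let _ = tx.send(()).await;
    });
    announce_loop(rx).await;
}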
// make_announce_host_request makes the announce host request. /// make_announce_host_request makes the announce host request.
#[instrument(skip_all)] #[instrument(skip_all)]
fn make_announce_host_request(&self) -> Result<AnnounceHostRequest> { fn make_announce_host_request(&self) -> Result<AnnounceHostRequest> {
// If the seed peer is enabled, we should announce the seed peer to the scheduler. // If the seed peer is enabled, we should announce the seed peer to the scheduler.

View File

@ -31,7 +31,7 @@ use tracing::{error, info};
use super::*; use super::*;
// ExportCommand is the subcommand of export. /// ExportCommand is the subcommand of export.
#[derive(Debug, Clone, Parser)] #[derive(Debug, Clone, Parser)]
pub struct ExportCommand { pub struct ExportCommand {
#[arg(help = "Specify the cache task ID to export")] #[arg(help = "Specify the cache task ID to export")]
@ -67,9 +67,9 @@ pub struct ExportCommand {
timeout: Duration, timeout: Duration,
} }
// Implement the execute for ExportCommand. /// Implement the execute for ExportCommand.
impl ExportCommand { impl ExportCommand {
// execute executes the export command. /// execute executes the export command.
pub async fn execute(&self, endpoint: &Path) -> Result<()> { pub async fn execute(&self, endpoint: &Path) -> Result<()> {
// Validate the command line arguments. // Validate the command line arguments.
if let Err(err) = self.validate_args() { if let Err(err) = self.validate_args() {
@ -358,7 +358,7 @@ impl ExportCommand {
Ok(()) Ok(())
} }
// run runs the export command. /// run runs the export command.
async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> { async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
// Get the absolute path of the output file. // Get the absolute path of the output file.
let absolute_path = Path::new(&self.output).absolutize()?; let absolute_path = Path::new(&self.output).absolutize()?;
@ -428,7 +428,7 @@ impl ExportCommand {
Ok(()) Ok(())
} }
// validate_args validates the command line arguments. /// validate_args validates the command line arguments.
fn validate_args(&self) -> Result<()> { fn validate_args(&self) -> Result<()> {
let absolute_path = Path::new(&self.output).absolutize()?; let absolute_path = Path::new(&self.output).absolutize()?;
match absolute_path.parent() { match absolute_path.parent() {

View File

@ -28,10 +28,10 @@ use termion::{color, style};
use super::*; use super::*;
// DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL is the default steady tick interval of progress bar. /// DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL is the default steady tick interval of progress bar.
const DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL: Duration = Duration::from_millis(80); const DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL: Duration = Duration::from_millis(80);
// ImportCommand is the subcommand of import. /// ImportCommand is the subcommand of import.
#[derive(Debug, Clone, Parser)] #[derive(Debug, Clone, Parser)]
pub struct ImportCommand { pub struct ImportCommand {
#[arg(help = "Specify the path of the file to import")] #[arg(help = "Specify the path of the file to import")]
@ -75,9 +75,9 @@ pub struct ImportCommand {
timeout: Duration, timeout: Duration,
} }
// Implement the execute for ImportCommand. /// Implement the execute for ImportCommand.
impl ImportCommand { impl ImportCommand {
// execute executes the import sub command. /// execute executes the import sub command.
pub async fn execute(&self, endpoint: &Path) -> Result<()> { pub async fn execute(&self, endpoint: &Path) -> Result<()> {
// Validate the command line arguments. // Validate the command line arguments.
if let Err(err) = self.validate_args() { if let Err(err) = self.validate_args() {
@ -257,7 +257,7 @@ impl ImportCommand {
Ok(()) Ok(())
} }
// run runs the import sub command. /// run runs the import sub command.
async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> { async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
let pb = ProgressBar::new_spinner(); let pb = ProgressBar::new_spinner();
pb.enable_steady_tick(DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL); pb.enable_steady_tick(DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL);
@ -288,7 +288,7 @@ impl ImportCommand {
Ok(()) Ok(())
} }
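The spinner setup shared by these subcommands is small enough to show in full. A sketch assuming indicatif 0.17, where enable_steady_tick takes a Duration, with the same 80ms interval as the constant above:

use indicatif::ProgressBar;
use std::time::Duration;

fn main() {
    // Spinner that redraws itself every 80ms while the command runs.
    let pb = ProgressBar::new_spinner();
    pb.enable_steady_tick(Duration::from_millis(80));
    pb.set_message("importing file into the persistent cache...");
    std::thread::sleep(Duration::from_secs(1));
    pb.finish_with_message("done");
}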
// validate_args validates the command line arguments. /// validate_args validates the command line arguments.
fn validate_args(&self) -> Result<()> { fn validate_args(&self) -> Result<()> {
if self.path.is_dir() { if self.path.is_dir() {
return Err(Error::ValidationError(format!( return Err(Error::ValidationError(format!(

View File

@ -119,7 +119,7 @@ pub enum Command {
Remove(remove::RemoveCommand), Remove(remove::RemoveCommand),
} }
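For readers unfamiliar with the CLI layout, this is the usual clap 4 shape behind such a Command enum: a top-level parser that dispatches to one variant per subcommand. The variants and fields below are illustrative; the real CLI nests dedicated *Command structs as shown above.

use clap::{Parser, Subcommand};

#[derive(Parser)]
struct Args {
    #[command(subcommand)]
    command: Command,
}

#[derive(Subcommand)]
enum Command {
    /// Import a file into the persistent cache.
    Import {
        #[arg(help = "Specify the path of the file to import")]
        path: std::path::PathBuf,
    },
    /// Export a cache task to a local file.
    Export {
        #[arg(help = "Specify the cache task ID to export")]
        id: String,
    },
}

fn main() {
    match Args::parse().command {
        Command::Import { path } => println!("import {}", path.display()),
        Command::Export { id } => println!("export {}", id),
    }
}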
// Implement the execute for Command. /// Implement the execute for Command.
impl Command { impl Command {
#[allow(unused)] #[allow(unused)]
pub async fn execute(self, endpoint: &Path) -> Result<()> { pub async fn execute(self, endpoint: &Path) -> Result<()> {
@ -154,7 +154,7 @@ async fn main() -> anyhow::Result<()> {
Ok(()) Ok(())
} }
// get_and_check_dfdaemon_download_client gets a dfdaemon download client and checks its health. /// get_dfdaemon_download_client gets a dfdaemon download client and checks its health.
pub async fn get_dfdaemon_download_client(endpoint: PathBuf) -> Result<DfdaemonDownloadClient> { pub async fn get_dfdaemon_download_client(endpoint: PathBuf) -> Result<DfdaemonDownloadClient> {
// Check dfdaemon's health. // Check dfdaemon's health.
let health_client = HealthClient::new_unix(endpoint.clone()).await?; let health_client = HealthClient::new_unix(endpoint.clone()).await?;

View File

@ -24,19 +24,19 @@ use termion::{color, style};
use super::*; use super::*;
// DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL is the default steady tick interval of progress bar. /// DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL is the default steady tick interval of progress bar.
const DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL: Duration = Duration::from_millis(80); const DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL: Duration = Duration::from_millis(80);
// RemoveCommand is the subcommand of remove. /// RemoveCommand is the subcommand of remove.
#[derive(Debug, Clone, Parser)] #[derive(Debug, Clone, Parser)]
pub struct RemoveCommand { pub struct RemoveCommand {
#[arg(help = "Specify the cache task ID to remove")] #[arg(help = "Specify the cache task ID to remove")]
id: String, id: String,
} }
// Implement the execute for RemoveCommand. /// Implement the execute for RemoveCommand.
impl RemoveCommand { impl RemoveCommand {
// execute executes the delete command. /// execute executes the remove command.
pub async fn execute(&self, endpoint: &Path) -> Result<()> { pub async fn execute(&self, endpoint: &Path) -> Result<()> {
// Get dfdaemon download client. // Get dfdaemon download client.
let dfdaemon_download_client = let dfdaemon_download_client =
@ -178,7 +178,7 @@ impl RemoveCommand {
Ok(()) Ok(())
} }
// run runs the delete command. /// run runs the remove command.
async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> { async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
let pb = ProgressBar::new_spinner(); let pb = ProgressBar::new_spinner();
pb.enable_steady_tick(DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL); pb.enable_steady_tick(DEFAULT_PROGRESS_BAR_STEADY_TICK_INTERVAL);

View File

@ -32,16 +32,16 @@ use termion::{color, style};
use super::*; use super::*;
// StatCommand is the subcommand of stat. /// StatCommand is the subcommand of stat.
#[derive(Debug, Clone, Parser)] #[derive(Debug, Clone, Parser)]
pub struct StatCommand { pub struct StatCommand {
#[arg(help = "Specify the cache task ID to stat")] #[arg(help = "Specify the cache task ID to stat")]
id: String, id: String,
} }
// Implement the execute for StatCommand. /// Implement the execute for StatCommand.
impl StatCommand { impl StatCommand {
// execute executes the stat command. /// execute executes the stat command.
pub async fn execute(&self, endpoint: &Path) -> Result<()> { pub async fn execute(&self, endpoint: &Path) -> Result<()> {
// Get dfdaemon download client. // Get dfdaemon download client.
let dfdaemon_download_client = let dfdaemon_download_client =
@ -183,7 +183,7 @@ impl StatCommand {
Ok(()) Ok(())
} }
// run runs the stat command. /// run runs the stat command.
async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> { async fn run(&self, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
let task = dfdaemon_download_client let task = dfdaemon_download_client
.stat_cache_task(StatCacheTaskRequest { .stat_cache_task(StatCacheTaskRequest {

View File

@ -546,7 +546,7 @@ async fn main() -> anyhow::Result<()> {
Ok(()) Ok(())
} }
// run runs the dfget command. /// run runs the dfget command.
async fn run(mut args: Args, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> { async fn run(mut args: Args, dfdaemon_download_client: DfdaemonDownloadClient) -> Result<()> {
// Get the absolute path of the output file. // Get the absolute path of the output file.
args.output = Path::new(&args.output).absolutize()?.into(); args.output = Path::new(&args.output).absolutize()?.into();
@ -567,7 +567,7 @@ async fn run(mut args: Args, dfdaemon_download_client: DfdaemonDownloadClient) -
download(args, ProgressBar::new(0), dfdaemon_download_client).await download(args, ProgressBar::new(0), dfdaemon_download_client).await
} }
// download_dir downloads all files in the directory. /// download_dir downloads all files in the directory.
async fn download_dir(args: Args, download_client: DfdaemonDownloadClient) -> Result<()> { async fn download_dir(args: Args, download_client: DfdaemonDownloadClient) -> Result<()> {
// Initialize the object storage. // Initialize the object storage.
let object_storage = Some(ObjectStorage { let object_storage = Some(ObjectStorage {
@ -657,7 +657,7 @@ async fn download_dir(args: Args, download_client: DfdaemonDownloadClient) -> Re
Ok(()) Ok(())
} }
// download downloads the single file. /// download downloads the single file.
async fn download( async fn download(
args: Args, args: Args,
progress_bar: ProgressBar, progress_bar: ProgressBar,
@ -759,7 +759,7 @@ async fn download(
Ok(()) Ok(())
} }
// get_entries gets all entries in the directory. /// get_entries gets all entries in the directory.
async fn get_entries(args: Args, object_storage: Option<ObjectStorage>) -> Result<Vec<DirEntry>> { async fn get_entries(args: Args, object_storage: Option<ObjectStorage>) -> Result<Vec<DirEntry>> {
// Initialize backend factory and build backend. // Initialize backend factory and build backend.
let backend_factory = BackendFactory::new(None)?; let backend_factory = BackendFactory::new(None)?;
@ -818,7 +818,7 @@ async fn get_entries(args: Args, object_storage: Option<ObjectStorage>) -> Resul
Ok(response.entries) Ok(response.entries)
} }
// make_output_by_entry makes the output path by the entry information. /// make_output_by_entry makes the output path by the entry information.
fn make_output_by_entry(url: Url, output: &Path, entry: DirEntry) -> Result<PathBuf> { fn make_output_by_entry(url: Url, output: &Path, entry: DirEntry) -> Result<PathBuf> {
// Get the root directory of the download directory and the output root directory. // Get the root directory of the download directory and the output root directory.
let root_dir = url.path().to_string(); let root_dir = url.path().to_string();
@ -836,7 +836,7 @@ fn make_output_by_entry(url: Url, output: &Path, entry: DirEntry) -> Result<Path
.into()) .into())
} }
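The mapping performed by make_output_by_entry can be pictured as stripping the download URL's directory from the entry path and grafting the remainder onto the local output root. A std-only sketch with hypothetical inputs, not the function's exact implementation:

use std::path::{Path, PathBuf};

fn output_path(url_root: &str, entry_path: &str, output_root: &Path) -> Option<PathBuf> {
    // Strip the remote root directory, then re-root the remainder locally.
    let relative = entry_path.strip_prefix(url_root)?;
    Some(output_root.join(relative.trim_start_matches('/')))
}

fn main() {
    let out = output_path("/bucket/dir/", "/bucket/dir/sub/file.txt", Path::new("/tmp/out"));
    assert_eq!(out, Some(PathBuf::from("/tmp/out/sub/file.txt")));
}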
// get_and_check_dfdaemon_download_client gets a dfdaemon download client and checks its health. /// get_dfdaemon_download_client gets a dfdaemon download client and checks its health.
async fn get_dfdaemon_download_client(endpoint: PathBuf) -> Result<DfdaemonDownloadClient> { async fn get_dfdaemon_download_client(endpoint: PathBuf) -> Result<DfdaemonDownloadClient> {
// Check dfdaemon's health. // Check dfdaemon's health.
let health_client = HealthClient::new_unix(endpoint.clone()).await?; let health_client = HealthClient::new_unix(endpoint.clone()).await?;
@ -847,7 +847,7 @@ async fn get_dfdaemon_download_client(endpoint: PathBuf) -> Result<DfdaemonDownl
Ok(dfdaemon_download_client) Ok(dfdaemon_download_client)
} }
// validate_args validates the command line arguments. /// validate_args validates the command line arguments.
fn validate_args(args: &Args) -> Result<()> { fn validate_args(args: &Args) -> Result<()> {
// If the URL is a directory, the output path should be a directory. // If the URL is a directory, the output path should be a directory.
if args.url.path().ends_with('/') && !args.output.is_dir() { if args.url.path().ends_with('/') && !args.output.is_dir() {

View File

@ -95,11 +95,11 @@ pub enum Command {
Remove(RemoveCommand), Remove(RemoveCommand),
} }
// Download or upload files using object storage in Dragonfly. /// Download or upload files using object storage in Dragonfly.
#[derive(Debug, Clone, Parser)] #[derive(Debug, Clone, Parser)]
pub struct CopyCommand {} pub struct CopyCommand {}
// Remove a file from Dragonfly object storage. /// Remove a file from Dragonfly object storage.
#[derive(Debug, Clone, Parser)] #[derive(Debug, Clone, Parser)]
pub struct RemoveCommand {} pub struct RemoveCommand {}
@ -27,43 +27,43 @@ use tokio::sync::{mpsc, Mutex, RwLock};
use tonic_health::pb::health_check_response::ServingStatus; use tonic_health::pb::health_check_response::ServingStatus;
use tracing::{error, info, instrument}; use tracing::{error, info, instrument};
// Data is the dynamic configuration of the dfdaemon. /// Data is the dynamic configuration of the dfdaemon.
#[derive(Default)] #[derive(Default)]
pub struct Data { pub struct Data {
// schedulers is the schedulers of the dfdaemon. /// schedulers is the schedulers of the dfdaemon.
pub schedulers: ListSchedulersResponse, pub schedulers: ListSchedulersResponse,
// available_schedulers is the available schedulers of the dfdaemon. /// available_schedulers is the available schedulers of the dfdaemon.
pub available_schedulers: Vec<Scheduler>, pub available_schedulers: Vec<Scheduler>,
// available_scheduler_cluster_id is the id of the available scheduler cluster of the dfdaemon. /// available_scheduler_cluster_id is the id of the available scheduler cluster of the dfdaemon.
pub available_scheduler_cluster_id: Option<u64>, pub available_scheduler_cluster_id: Option<u64>,
} }
// Dynconfig supports dynamic configuration of the client. /// Dynconfig supports dynamic configuration of the client.
pub struct Dynconfig { pub struct Dynconfig {
// data is the dynamic configuration of the dfdaemon. /// data is the dynamic configuration of the dfdaemon.
pub data: RwLock<Data>, pub data: RwLock<Data>,
// config is the configuration of the dfdaemon. /// config is the configuration of the dfdaemon.
config: Arc<Config>, config: Arc<Config>,
// manager_client is the grpc client of the manager. /// manager_client is the grpc client of the manager.
manager_client: Arc<ManagerClient>, manager_client: Arc<ManagerClient>,
// mutex is used to protect refresh. /// mutex is used to protect refresh.
mutex: Mutex<()>, mutex: Mutex<()>,
// shutdown is used to shutdown the dynconfig. /// shutdown is used to shutdown the dynconfig.
shutdown: shutdown::Shutdown, shutdown: shutdown::Shutdown,
// _shutdown_complete is used to notify the dynconfig is shutdown. /// _shutdown_complete is used to notify the dynconfig is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>, _shutdown_complete: mpsc::UnboundedSender<()>,
} }
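Because the dfdaemon's dynamic data sits behind a tokio RwLock, consumers would typically take a short-lived read guard to inspect it. A minimal hedged sketch (not taken from this commit; the logging is illustrative):

    use std::sync::Arc;

    /// Sketch: read the current schedulers without blocking refresh for long.
    async fn log_available_schedulers(dynconfig: Arc<Dynconfig>) {
        // The read guard is dropped at the end of this scope, letting
        // refresh() take the write lock again.
        let data = dynconfig.data.read().await;
        tracing::info!(
            "{} available schedulers in cluster {:?}",
            data.available_schedulers.len(),
            data.available_scheduler_cluster_id,
        );
    }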
// Dynconfig is the implementation of Dynconfig. /// Dynconfig is the implementation of Dynconfig.
impl Dynconfig { impl Dynconfig {
// new creates a new Dynconfig. /// new creates a new Dynconfig.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn new( pub async fn new(
config: Arc<Config>, config: Arc<Config>,
@ -86,7 +86,7 @@ impl Dynconfig {
Ok(dc) Ok(dc)
} }
// run starts the dynconfig server. /// run starts the dynconfig server.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn run(&self) { pub async fn run(&self) {
// Clone the shutdown channel. // Clone the shutdown channel.
@ -110,7 +110,7 @@ impl Dynconfig {
} }
} }
// refresh refreshes the dynamic configuration of the dfdaemon. /// refresh refreshes the dynamic configuration of the dfdaemon.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn refresh(&self) -> Result<()> { pub async fn refresh(&self) -> Result<()> {
// Only one refresh can be running at a time. // Only one refresh can be running at a time.
@ -142,7 +142,7 @@ impl Dynconfig {
Ok(()) Ok(())
} }
// list_schedulers lists the schedulers from the manager. /// list_schedulers lists the schedulers from the manager.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn list_schedulers(&self) -> Result<ListSchedulersResponse> { async fn list_schedulers(&self) -> Result<ListSchedulersResponse> {
// Get the source type. // Get the source type.
@ -166,7 +166,7 @@ impl Dynconfig {
.await .await
} }
// get_available_schedulers gets the available schedulers. /// get_available_schedulers gets the available schedulers.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn get_available_schedulers(&self, schedulers: &[Scheduler]) -> Result<Vec<Scheduler>> { async fn get_available_schedulers(&self, schedulers: &[Scheduler]) -> Result<Vec<Scheduler>> {
let mut available_schedulers: Vec<Scheduler> = Vec::new(); let mut available_schedulers: Vec<Scheduler> = Vec::new();
@ -24,29 +24,29 @@ use std::sync::Arc;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use tracing::{error, info, instrument}; use tracing::{error, info, instrument};
// GC is the garbage collector of dfdaemon. /// GC is the garbage collector of dfdaemon.
pub struct GC { pub struct GC {
// config is the configuration of the dfdaemon. /// config is the configuration of the dfdaemon.
config: Arc<Config>, config: Arc<Config>,
// host_id is the id of the host. /// host_id is the id of the host.
host_id: String, host_id: String,
// storage is the local storage. /// storage is the local storage.
storage: Arc<Storage>, storage: Arc<Storage>,
// scheduler_client is the grpc client of the scheduler. /// scheduler_client is the grpc client of the scheduler.
scheduler_client: Arc<SchedulerClient>, scheduler_client: Arc<SchedulerClient>,
// shutdown is used to shutdown the garbage collector. /// shutdown is used to shutdown the garbage collector.
shutdown: shutdown::Shutdown, shutdown: shutdown::Shutdown,
// _shutdown_complete is used to notify the garbage collector is shutdown. /// _shutdown_complete is used to notify the garbage collector is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>, _shutdown_complete: mpsc::UnboundedSender<()>,
} }
impl GC { impl GC {
// new creates a new GC. /// new creates a new GC.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new( pub fn new(
config: Arc<Config>, config: Arc<Config>,
@ -66,7 +66,7 @@ impl GC {
} }
} }
// run runs the garbage collector. /// run runs the garbage collector.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn run(&self) { pub async fn run(&self) {
// Clone the shutdown channel. // Clone the shutdown channel.
@ -106,7 +106,7 @@ impl GC {
} }
} }
// evict_task_by_ttl evicts the task by ttl. /// evict_task_by_ttl evicts the task by ttl.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn evict_task_by_ttl(&self) -> Result<()> { async fn evict_task_by_ttl(&self) -> Result<()> {
info!("start to evict by task ttl"); info!("start to evict by task ttl");
@ -124,7 +124,7 @@ impl GC {
Ok(()) Ok(())
} }
// evict_task_by_disk_usage evicts the task by disk usage. /// evict_task_by_disk_usage evicts the task by disk usage.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn evict_task_by_disk_usage(&self) -> Result<()> { async fn evict_task_by_disk_usage(&self) -> Result<()> {
let stats = fs2::statvfs(self.config.storage.dir.as_path())?; let stats = fs2::statvfs(self.config.storage.dir.as_path())?;
@ -153,7 +153,7 @@ impl GC {
Ok(()) Ok(())
} }
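The disk-usage eviction path starts from fs2::statvfs, as shown above. A rough, hypothetical sketch of how the number of bytes to evict could be derived from those stats follows; the watermark parameters are illustrative assumptions, not the dfdaemon defaults.

    use std::io;
    use std::path::Path;

    /// Returns how many bytes should be evicted to bring disk usage back under
    /// `low_percent`, or 0 if usage has not yet crossed `high_percent`.
    fn need_evict_space(dir: &Path, high_percent: f64, low_percent: f64) -> io::Result<u64> {
        let stats = fs2::statvfs(dir)?;
        let total = stats.total_space() as f64;
        let available = stats.available_space() as f64;
        let used_percent = (total - available) / total * 100.0;

        if used_percent <= high_percent {
            return Ok(0);
        }

        // Evict enough bytes to fall back to the low watermark.
        Ok(((used_percent - low_percent) / 100.0 * total) as u64)
    }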
// evict_task_space evicts the task by the given space. /// evict_task_space evicts the task by the given space.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn evict_task_space(&self, need_evict_space: u64) -> Result<()> { async fn evict_task_space(&self, need_evict_space: u64) -> Result<()> {
let mut tasks = self.storage.get_tasks()?; let mut tasks = self.storage.get_tasks()?;
@ -190,7 +190,7 @@ impl GC {
Ok(()) Ok(())
} }
// delete_task_from_scheduler deletes the task from the scheduler. /// delete_task_from_scheduler deletes the task from the scheduler.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn delete_task_from_scheduler(&self, task: metadata::Task) { async fn delete_task_from_scheduler(&self, task: metadata::Task) {
self.scheduler_client self.scheduler_client
@ -204,7 +204,7 @@ impl GC {
}); });
} }
// evict_cache_task_by_ttl evicts the cache task by ttl. /// evict_cache_task_by_ttl evicts the cache task by ttl.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn evict_cache_task_by_ttl(&self) -> Result<()> { async fn evict_cache_task_by_ttl(&self) -> Result<()> {
info!("start to evict by cache task ttl * 2"); info!("start to evict by cache task ttl * 2");
@ -222,7 +222,7 @@ impl GC {
Ok(()) Ok(())
} }
// evict_cache_task_by_disk_usage evicts the cache task by disk usage. /// evict_cache_task_by_disk_usage evicts the cache task by disk usage.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn evict_cache_task_by_disk_usage(&self) -> Result<()> { async fn evict_cache_task_by_disk_usage(&self) -> Result<()> {
let stats = fs2::statvfs(self.config.storage.dir.as_path())?; let stats = fs2::statvfs(self.config.storage.dir.as_path())?;
@ -251,7 +251,7 @@ impl GC {
Ok(()) Ok(())
} }
// evict_cache_task_space evicts the cache task by the given space. /// evict_cache_task_space evicts the cache task by the given space.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn evict_cache_task_space(&self, need_evict_space: u64) -> Result<()> { async fn evict_cache_task_space(&self, need_evict_space: u64) -> Result<()> {
let mut tasks = self.storage.get_cache_tasks()?; let mut tasks = self.storage.get_cache_tasks()?;
@ -286,7 +286,7 @@ impl GC {
Ok(()) Ok(())
} }
// delete_cache_task_from_scheduler deletes the cache task from the scheduler. /// delete_cache_task_from_scheduler deletes the cache task from the scheduler.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn delete_cache_task_from_scheduler(&self, task: metadata::CacheTask) { async fn delete_cache_task_from_scheduler(&self, task: metadata::CacheTask) {
self.scheduler_client self.scheduler_client
@ -60,24 +60,24 @@ use tonic::{
use tower::service_fn; use tower::service_fn;
use tracing::{error, info, instrument, Instrument, Span}; use tracing::{error, info, instrument, Instrument, Span};
// DfdaemonDownloadServer is the grpc unix server of the download. /// DfdaemonDownloadServer is the grpc unix server of the download.
pub struct DfdaemonDownloadServer { pub struct DfdaemonDownloadServer {
// socket_path is the path of the unix domain socket. /// socket_path is the path of the unix domain socket.
socket_path: PathBuf, socket_path: PathBuf,
// service is the grpc service of the dfdaemon. /// service is the grpc service of the dfdaemon.
service: DfdaemonDownloadGRPCServer<DfdaemonDownloadServerHandler>, service: DfdaemonDownloadGRPCServer<DfdaemonDownloadServerHandler>,
// shutdown is used to shutdown the grpc server. /// shutdown is used to shutdown the grpc server.
shutdown: shutdown::Shutdown, shutdown: shutdown::Shutdown,
// _shutdown_complete is used to notify the grpc server is shutdown. /// _shutdown_complete is used to notify the grpc server is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>, _shutdown_complete: mpsc::UnboundedSender<()>,
} }
// DfdaemonDownloadServer implements the grpc server of the download. /// DfdaemonDownloadServer implements the grpc server of the download.
impl DfdaemonDownloadServer { impl DfdaemonDownloadServer {
// new creates a new DfdaemonDownloadServer. /// new creates a new DfdaemonDownloadServer.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new( pub fn new(
socket_path: PathBuf, socket_path: PathBuf,
@ -105,7 +105,7 @@ impl DfdaemonDownloadServer {
} }
} }
// run starts the download server with unix domain socket. /// run starts the download server with unix domain socket.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn run(&mut self) { pub async fn run(&mut self) {
// Register the reflection service. // Register the reflection service.
@ -156,25 +156,25 @@ impl DfdaemonDownloadServer {
} }
} }
// DfdaemonDownloadServerHandler is the handler of the dfdaemon download grpc service. /// DfdaemonDownloadServerHandler is the handler of the dfdaemon download grpc service.
pub struct DfdaemonDownloadServerHandler { pub struct DfdaemonDownloadServerHandler {
// socket_path is the path of the unix domain socket. /// socket_path is the path of the unix domain socket.
socket_path: PathBuf, socket_path: PathBuf,
// task is the task manager. /// task is the task manager.
task: Arc<task::Task>, task: Arc<task::Task>,
// cache_task is the cache task manager. /// cache_task is the cache task manager.
cache_task: Arc<cache_task::CacheTask>, cache_task: Arc<cache_task::CacheTask>,
} }
// DfdaemonDownloadServerHandler implements the dfdaemon download grpc service. /// DfdaemonDownloadServerHandler implements the dfdaemon download grpc service.
#[tonic::async_trait] #[tonic::async_trait]
impl DfdaemonDownload for DfdaemonDownloadServerHandler { impl DfdaemonDownload for DfdaemonDownloadServerHandler {
// DownloadTaskStream is the stream of the download task response. /// DownloadTaskStream is the stream of the download task response.
type DownloadTaskStream = ReceiverStream<Result<DownloadTaskResponse, Status>>; type DownloadTaskStream = ReceiverStream<Result<DownloadTaskResponse, Status>>;
// download_task tells the dfdaemon to download the task. /// download_task tells the dfdaemon to download the task.
#[instrument(skip_all, fields(host_id, task_id, peer_id))] #[instrument(skip_all, fields(host_id, task_id, peer_id))]
async fn download_task( async fn download_task(
&self, &self,
@ -544,7 +544,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
Ok(Response::new(ReceiverStream::new(out_stream_rx))) Ok(Response::new(ReceiverStream::new(out_stream_rx)))
} }
// stat_task gets the status of the task. /// stat_task gets the status of the task.
#[instrument(skip_all, fields(host_id, task_id))] #[instrument(skip_all, fields(host_id, task_id))]
async fn stat_task( async fn stat_task(
&self, &self,
@ -582,7 +582,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
Ok(Response::new(task)) Ok(Response::new(task))
} }
// delete_task calls the dfdaemon to delete the task. /// delete_task calls the dfdaemon to delete the task.
#[instrument(skip_all, fields(host_id, task_id))] #[instrument(skip_all, fields(host_id, task_id))]
async fn delete_task( async fn delete_task(
&self, &self,
@ -619,7 +619,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
Ok(Response::new(())) Ok(Response::new(()))
} }
// delete_host calls the scheduler to delete the host. /// delete_host calls the scheduler to delete the host.
#[instrument(skip_all, fields(host_id))] #[instrument(skip_all, fields(host_id))]
async fn delete_host(&self, _: Request<()>) -> Result<Response<()>, Status> { async fn delete_host(&self, _: Request<()>) -> Result<Response<()>, Status> {
// Generate the host id. // Generate the host id.
@ -646,10 +646,10 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
Ok(Response::new(())) Ok(Response::new(()))
} }
// DownloadCacheTaskStream is the stream of the download cache task response. /// DownloadCacheTaskStream is the stream of the download cache task response.
type DownloadCacheTaskStream = ReceiverStream<Result<DownloadCacheTaskResponse, Status>>; type DownloadCacheTaskStream = ReceiverStream<Result<DownloadCacheTaskResponse, Status>>;
// download_cache_task downloads the cache task. /// download_cache_task downloads the cache task.
#[instrument(skip_all, fields(host_id, task_id, peer_id))] #[instrument(skip_all, fields(host_id, task_id, peer_id))]
async fn download_cache_task( async fn download_cache_task(
&self, &self,
@ -818,7 +818,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
Ok(Response::new(ReceiverStream::new(out_stream_rx))) Ok(Response::new(ReceiverStream::new(out_stream_rx)))
} }
// upload_cache_task uploads the cache task. /// upload_cache_task uploads the cache task.
#[instrument(skip_all, fields(host_id, task_id, peer_id))] #[instrument(skip_all, fields(host_id, task_id, peer_id))]
async fn upload_cache_task( async fn upload_cache_task(
&self, &self,
@ -912,7 +912,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
Ok(Response::new(task)) Ok(Response::new(task))
} }
// stat_cache_task stats the cache task. /// stat_cache_task stats the cache task.
#[instrument(skip_all, fields(host_id, task_id))] #[instrument(skip_all, fields(host_id, task_id))]
async fn stat_cache_task( async fn stat_cache_task(
&self, &self,
@ -949,7 +949,7 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
Ok(Response::new(task)) Ok(Response::new(task))
} }
// delete_cache_task deletes the cache task. /// delete_cache_task deletes the cache task.
#[instrument(skip_all, fields(host_id, task_id))] #[instrument(skip_all, fields(host_id, task_id))]
async fn delete_cache_task( async fn delete_cache_task(
&self, &self,
@ -986,16 +986,16 @@ impl DfdaemonDownload for DfdaemonDownloadServerHandler {
} }
} }
// DfdaemonDownloadClient is a wrapper of DfdaemonDownloadGRPCClient. /// DfdaemonDownloadClient is a wrapper of DfdaemonDownloadGRPCClient.
#[derive(Clone)] #[derive(Clone)]
pub struct DfdaemonDownloadClient { pub struct DfdaemonDownloadClient {
// client is the grpc client of the dfdaemon. /// client is the grpc client of the dfdaemon.
pub client: DfdaemonDownloadGRPCClient<Channel>, pub client: DfdaemonDownloadGRPCClient<Channel>,
} }
// DfdaemonDownloadClient implements the grpc client of the dfdaemon download. /// DfdaemonDownloadClient implements the grpc client of the dfdaemon download.
impl DfdaemonDownloadClient { impl DfdaemonDownloadClient {
// new_unix creates a new DfdaemonDownloadClient with unix domain socket. /// new_unix creates a new DfdaemonDownloadClient with unix domain socket.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn new_unix(socket_path: PathBuf) -> ClientResult<Self> { pub async fn new_unix(socket_path: PathBuf) -> ClientResult<Self> {
// Ignore the uri because it is not used. // Ignore the uri because it is not used.
@ -1024,7 +1024,7 @@ impl DfdaemonDownloadClient {
Ok(Self { client }) Ok(Self { client })
} }
// download_task tells the dfdaemon to download the task. /// download_task tells the dfdaemon to download the task.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download_task( pub async fn download_task(
&self, &self,
@ -1050,7 +1050,7 @@ impl DfdaemonDownloadClient {
Ok(response) Ok(response)
} }
// stat_task gets the status of the task. /// stat_task gets the status of the task.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn stat_task(&self, request: DfdaemonStatTaskRequest) -> ClientResult<Task> { pub async fn stat_task(&self, request: DfdaemonStatTaskRequest) -> ClientResult<Task> {
let request = Self::make_request(request); let request = Self::make_request(request);
@ -1058,7 +1058,7 @@ impl DfdaemonDownloadClient {
Ok(response.into_inner()) Ok(response.into_inner())
} }
// delete_task tells the dfdaemon to delete the task. /// delete_task tells the dfdaemon to delete the task.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn delete_task(&self, request: DeleteTaskRequest) -> ClientResult<()> { pub async fn delete_task(&self, request: DeleteTaskRequest) -> ClientResult<()> {
let request = Self::make_request(request); let request = Self::make_request(request);
@ -1066,7 +1066,7 @@ impl DfdaemonDownloadClient {
Ok(()) Ok(())
} }
// download_cache_task downloads the cache task. /// download_cache_task downloads the cache task.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download_cache_task( pub async fn download_cache_task(
&self, &self,
@ -1090,7 +1090,7 @@ impl DfdaemonDownloadClient {
Ok(response) Ok(response)
} }
// upload_cache_task uploads the cache task. /// upload_cache_task uploads the cache task.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn upload_cache_task( pub async fn upload_cache_task(
&self, &self,
@ -1114,7 +1114,7 @@ impl DfdaemonDownloadClient {
Ok(response.into_inner()) Ok(response.into_inner())
} }
// stat_cache_task stats the cache task. /// stat_cache_task stats the cache task.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn stat_cache_task(&self, request: StatCacheTaskRequest) -> ClientResult<CacheTask> { pub async fn stat_cache_task(&self, request: StatCacheTaskRequest) -> ClientResult<CacheTask> {
let mut request = tonic::Request::new(request); let mut request = tonic::Request::new(request);
@ -1124,7 +1124,7 @@ impl DfdaemonDownloadClient {
Ok(response.into_inner()) Ok(response.into_inner())
} }
// delete_cache_task deletes the cache task. /// delete_cache_task deletes the cache task.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn delete_cache_task(&self, request: DeleteCacheTaskRequest) -> ClientResult<()> { pub async fn delete_cache_task(&self, request: DeleteCacheTaskRequest) -> ClientResult<()> {
let request = Self::make_request(request); let request = Self::make_request(request);
@ -1132,7 +1132,7 @@ impl DfdaemonDownloadClient {
Ok(()) Ok(())
} }
// make_request creates a new request with timeout. /// make_request creates a new request with timeout.
#[instrument(skip_all)] #[instrument(skip_all)]
fn make_request<T>(request: T) -> tonic::Request<T> { fn make_request<T>(request: T) -> tonic::Request<T> {
let mut request = tonic::Request::new(request); let mut request = tonic::Request::new(request);
@ -54,24 +54,24 @@ use tonic::{
}; };
use tracing::{error, info, instrument, Instrument, Span}; use tracing::{error, info, instrument, Instrument, Span};
// DfdaemonUploadServer is the grpc server of the upload. /// DfdaemonUploadServer is the grpc server of the upload.
pub struct DfdaemonUploadServer { pub struct DfdaemonUploadServer {
// addr is the address of the grpc server. /// addr is the address of the grpc server.
addr: SocketAddr, addr: SocketAddr,
// service is the grpc service of the dfdaemon upload. /// service is the grpc service of the dfdaemon upload.
service: DfdaemonUploadGRPCServer<DfdaemonUploadServerHandler>, service: DfdaemonUploadGRPCServer<DfdaemonUploadServerHandler>,
// shutdown is used to shutdown the grpc server. /// shutdown is used to shutdown the grpc server.
shutdown: shutdown::Shutdown, shutdown: shutdown::Shutdown,
// _shutdown_complete is used to notify the grpc server is shutdown. /// _shutdown_complete is used to notify the grpc server is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>, _shutdown_complete: mpsc::UnboundedSender<()>,
} }
// DfdaemonUploadServer implements the grpc server of the upload. /// DfdaemonUploadServer implements the grpc server of the upload.
impl DfdaemonUploadServer { impl DfdaemonUploadServer {
// new creates a new DfdaemonUploadServer. /// new creates a new DfdaemonUploadServer.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new( pub fn new(
config: Arc<Config>, config: Arc<Config>,
@ -100,7 +100,7 @@ impl DfdaemonUploadServer {
} }
} }
// run starts the upload server. /// run starts the upload server.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn run(&mut self) { pub async fn run(&mut self) {
// Register the reflection service. // Register the reflection service.
@ -139,25 +139,25 @@ impl DfdaemonUploadServer {
} }
} }
// DfdaemonUploadServerHandler is the handler of the dfdaemon upload grpc service. /// DfdaemonUploadServerHandler is the handler of the dfdaemon upload grpc service.
pub struct DfdaemonUploadServerHandler { pub struct DfdaemonUploadServerHandler {
// socket_path is the path of the unix domain socket. /// socket_path is the path of the unix domain socket.
socket_path: PathBuf, socket_path: PathBuf,
// task is the task manager. /// task is the task manager.
task: Arc<task::Task>, task: Arc<task::Task>,
// cache_task is the cache task manager. /// cache_task is the cache task manager.
cache_task: Arc<cache_task::CacheTask>, cache_task: Arc<cache_task::CacheTask>,
} }
// DfdaemonUploadServerHandler implements the dfdaemon upload grpc service. /// DfdaemonUploadServerHandler implements the dfdaemon upload grpc service.
#[tonic::async_trait] #[tonic::async_trait]
impl DfdaemonUpload for DfdaemonUploadServerHandler { impl DfdaemonUpload for DfdaemonUploadServerHandler {
// DownloadTaskStream is the stream of the download task response. /// DownloadTaskStream is the stream of the download task response.
type DownloadTaskStream = ReceiverStream<Result<DownloadTaskResponse, Status>>; type DownloadTaskStream = ReceiverStream<Result<DownloadTaskResponse, Status>>;
// download_task downloads the task. /// download_task downloads the task.
#[instrument(skip_all, fields(host_id, task_id, peer_id))] #[instrument(skip_all, fields(host_id, task_id, peer_id))]
async fn download_task( async fn download_task(
&self, &self,
@ -530,7 +530,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
Ok(Response::new(ReceiverStream::new(out_stream_rx))) Ok(Response::new(ReceiverStream::new(out_stream_rx)))
} }
// stat_task stats the task. /// stat_task stats the task.
#[instrument(skip_all, fields(host_id, task_id))] #[instrument(skip_all, fields(host_id, task_id))]
async fn stat_task(&self, request: Request<StatTaskRequest>) -> Result<Response<Task>, Status> { async fn stat_task(&self, request: Request<StatTaskRequest>) -> Result<Response<Task>, Status> {
// Clone the request. // Clone the request.
@ -565,7 +565,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
Ok(Response::new(task)) Ok(Response::new(task))
} }
// delete_task deletes the task. /// delete_task deletes the task.
#[instrument(skip_all, fields(host_id, task_id))] #[instrument(skip_all, fields(host_id, task_id))]
async fn delete_task( async fn delete_task(
&self, &self,
@ -602,10 +602,10 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
Ok(Response::new(())) Ok(Response::new(()))
} }
// SyncPiecesStream is the stream of the sync pieces response. /// SyncPiecesStream is the stream of the sync pieces response.
type SyncPiecesStream = ReceiverStream<Result<SyncPiecesResponse, Status>>; type SyncPiecesStream = ReceiverStream<Result<SyncPiecesResponse, Status>>;
// sync_pieces provides the piece metadata for remote peer. /// sync_pieces provides the piece metadata for remote peer.
#[instrument(skip_all, fields(host_id, remote_host_id, task_id))] #[instrument(skip_all, fields(host_id, remote_host_id, task_id))]
async fn sync_pieces( async fn sync_pieces(
&self, &self,
@ -734,7 +734,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
Ok(Response::new(ReceiverStream::new(out_stream_rx))) Ok(Response::new(ReceiverStream::new(out_stream_rx)))
} }
// download_piece provides the piece content for remote peer. /// download_piece provides the piece content for remote peer.
#[instrument(skip_all, fields(host_id, remote_host_id, task_id, piece_id))] #[instrument(skip_all, fields(host_id, remote_host_id, task_id, piece_id))]
async fn download_piece( async fn download_piece(
&self, &self,
@ -829,10 +829,10 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
})) }))
} }
// DownloadCacheTaskStream is the stream of the download cache task response. /// DownloadCacheTaskStream is the stream of the download cache task response.
type DownloadCacheTaskStream = ReceiverStream<Result<DownloadCacheTaskResponse, Status>>; type DownloadCacheTaskStream = ReceiverStream<Result<DownloadCacheTaskResponse, Status>>;
// download_cache_task downloads the cache task. /// download_cache_task downloads the cache task.
#[instrument(skip_all, fields(host_id, task_id, peer_id))] #[instrument(skip_all, fields(host_id, task_id, peer_id))]
async fn download_cache_task( async fn download_cache_task(
&self, &self,
@ -1001,7 +1001,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
Ok(Response::new(ReceiverStream::new(out_stream_rx))) Ok(Response::new(ReceiverStream::new(out_stream_rx)))
} }
// stat_cache_task stats the cache task. /// stat_cache_task stats the cache task.
#[instrument(skip_all, fields(host_id, task_id))] #[instrument(skip_all, fields(host_id, task_id))]
async fn stat_cache_task( async fn stat_cache_task(
&self, &self,
@ -1038,7 +1038,7 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
Ok(Response::new(task)) Ok(Response::new(task))
} }
// delete_cache_task deletes the cache task. /// delete_cache_task deletes the cache task.
#[instrument(skip_all, fields(host_id, task_id))] #[instrument(skip_all, fields(host_id, task_id))]
async fn delete_cache_task( async fn delete_cache_task(
&self, &self,
@ -1075,16 +1075,16 @@ impl DfdaemonUpload for DfdaemonUploadServerHandler {
} }
} }
// DfdaemonUploadClient is a wrapper of DfdaemonUploadGRPCClient. /// DfdaemonUploadClient is a wrapper of DfdaemonUploadGRPCClient.
#[derive(Clone)] #[derive(Clone)]
pub struct DfdaemonUploadClient { pub struct DfdaemonUploadClient {
// client is the grpc client of the dfdaemon upload. /// client is the grpc client of the dfdaemon upload.
pub client: DfdaemonUploadGRPCClient<Channel>, pub client: DfdaemonUploadGRPCClient<Channel>,
} }
// DfdaemonUploadClient implements the dfdaemon upload grpc client. /// DfdaemonUploadClient implements the dfdaemon upload grpc client.
impl DfdaemonUploadClient { impl DfdaemonUploadClient {
// new creates a new DfdaemonUploadClient. /// new creates a new DfdaemonUploadClient.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn new(addr: String) -> ClientResult<Self> { pub async fn new(addr: String) -> ClientResult<Self> {
let channel = Channel::from_static(Box::leak(addr.clone().into_boxed_str())) let channel = Channel::from_static(Box::leak(addr.clone().into_boxed_str()))
@ -1106,7 +1106,7 @@ impl DfdaemonUploadClient {
Ok(Self { client }) Ok(Self { client })
} }
// download_task downloads the task. /// download_task downloads the task.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download_task( pub async fn download_task(
&self, &self,
@ -1132,7 +1132,7 @@ impl DfdaemonUploadClient {
Ok(response) Ok(response)
} }
// sync_pieces provides the piece metadata for remote peer. /// sync_pieces provides the piece metadata for remote peer.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn sync_pieces( pub async fn sync_pieces(
&self, &self,
@ -1143,7 +1143,7 @@ impl DfdaemonUploadClient {
Ok(response) Ok(response)
} }
// download_piece provides the piece content for remote peer. /// download_piece provides the piece content for remote peer.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download_piece( pub async fn download_piece(
&self, &self,
@ -1157,7 +1157,7 @@ impl DfdaemonUploadClient {
Ok(response.into_inner()) Ok(response.into_inner())
} }
// download_cache_task downloads the cache task. /// download_cache_task downloads the cache task.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download_cache_task( pub async fn download_cache_task(
&self, &self,
@ -1181,7 +1181,7 @@ impl DfdaemonUploadClient {
Ok(response) Ok(response)
} }
// stat_cache_task stats the cache task. /// stat_cache_task stats the cache task.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn stat_cache_task(&self, request: StatCacheTaskRequest) -> ClientResult<CacheTask> { pub async fn stat_cache_task(&self, request: StatCacheTaskRequest) -> ClientResult<CacheTask> {
let request = Self::make_request(request); let request = Self::make_request(request);
@ -1189,7 +1189,7 @@ impl DfdaemonUploadClient {
Ok(response.into_inner()) Ok(response.into_inner())
} }
// delete_cache_task deletes the cache task. /// delete_cache_task deletes the cache task.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn delete_cache_task(&self, request: DeleteCacheTaskRequest) -> ClientResult<()> { pub async fn delete_cache_task(&self, request: DeleteCacheTaskRequest) -> ClientResult<()> {
let request = Self::make_request(request); let request = Self::make_request(request);
@ -1197,7 +1197,7 @@ impl DfdaemonUploadClient {
Ok(()) Ok(())
} }
// make_request creates a new request with timeout. /// make_request creates a new request with timeout.
#[instrument(skip_all)] #[instrument(skip_all)]
fn make_request<T>(request: T) -> tonic::Request<T> { fn make_request<T>(request: T) -> tonic::Request<T> {
let mut request = tonic::Request::new(request); let mut request = tonic::Request::new(request);
@ -28,16 +28,16 @@ use tonic_health::pb::{
use tower::service_fn; use tower::service_fn;
use tracing::{error, instrument}; use tracing::{error, instrument};
// HealthClient is a wrapper of HealthGRPCClient. /// HealthClient is a wrapper of HealthGRPCClient.
#[derive(Clone)] #[derive(Clone)]
pub struct HealthClient { pub struct HealthClient {
// client is the grpc client of the health service. /// client is the grpc client of the health service.
client: HealthGRPCClient<Channel>, client: HealthGRPCClient<Channel>,
} }
// HealthClient implements the grpc client of the health. /// HealthClient implements the grpc client of the health.
impl HealthClient { impl HealthClient {
// new creates a new HealthClient. /// new creates a new HealthClient.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn new(addr: &str) -> Result<Self> { pub async fn new(addr: &str) -> Result<Self> {
let channel = Channel::from_shared(addr.to_string()) let channel = Channel::from_shared(addr.to_string())
@ -60,7 +60,7 @@ impl HealthClient {
Ok(Self { client }) Ok(Self { client })
} }
// new_unix creates a new HealthClient with unix domain socket. /// new_unix creates a new HealthClient with unix domain socket.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn new_unix(socket_path: PathBuf) -> Result<Self> { pub async fn new_unix(socket_path: PathBuf) -> Result<Self> {
// Ignore the uri because it is not used. // Ignore the uri because it is not used.
@ -86,7 +86,7 @@ impl HealthClient {
Ok(Self { client }) Ok(Self { client })
} }
// check checks the health of the grpc service without service name. /// check checks the health of the grpc service without service name.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn check(&self) -> Result<HealthCheckResponse> { pub async fn check(&self) -> Result<HealthCheckResponse> {
let request = Self::make_request(HealthCheckRequest { let request = Self::make_request(HealthCheckRequest {
@ -96,7 +96,7 @@ impl HealthClient {
Ok(response.into_inner()) Ok(response.into_inner())
} }
// check_service checks the health of the grpc service with service name. /// check_service checks the health of the grpc service with service name.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn check_service(&self, service: String) -> Result<HealthCheckResponse> { pub async fn check_service(&self, service: String) -> Result<HealthCheckResponse> {
let request = Self::make_request(HealthCheckRequest { service }); let request = Self::make_request(HealthCheckRequest { service });
@ -104,21 +104,21 @@ impl HealthClient {
Ok(response.into_inner()) Ok(response.into_inner())
} }
// check_dfdaemon_download checks the health of the dfdaemon download service. /// check_dfdaemon_download checks the health of the dfdaemon download service.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn check_dfdaemon_download(&self) -> Result<HealthCheckResponse> { pub async fn check_dfdaemon_download(&self) -> Result<HealthCheckResponse> {
self.check_service("dfdaemon.v2.DfdaemonDownload".to_string()) self.check_service("dfdaemon.v2.DfdaemonDownload".to_string())
.await .await
} }
// check_dfdaemon_upload checks the health of the dfdaemon upload service. /// check_dfdaemon_upload checks the health of the dfdaemon upload service.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn check_dfdaemon_upload(&self) -> Result<HealthCheckResponse> { pub async fn check_dfdaemon_upload(&self) -> Result<HealthCheckResponse> {
self.check_service("dfdaemon.v2.DfdaemonUpload".to_string()) self.check_service("dfdaemon.v2.DfdaemonUpload".to_string())
.await .await
} }
// make_request creates a new request with timeout. /// make_request creates a new request with timeout.
#[instrument(skip_all)] #[instrument(skip_all)]
fn make_request<T>(request: T) -> tonic::Request<T> { fn make_request<T>(request: T) -> tonic::Request<T> {
let mut request = tonic::Request::new(request); let mut request = tonic::Request::new(request);
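The make_request hunk above is truncated in this view. Judging from its doc comment ("creates a new request with timeout") and the REQUEST_TIMEOUT constant introduced elsewhere in this commit, the helper presumably looks roughly like the sketch below; the constant's import path is an assumption.

    /// Sketch: wrap a message in a tonic::Request and attach the gRPC deadline.
    fn make_request<T>(request: T) -> tonic::Request<T> {
        let mut request = tonic::Request::new(request);
        request.set_timeout(super::REQUEST_TIMEOUT);
        request
    }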
@ -27,16 +27,16 @@ use tonic::transport::Channel;
use tonic_health::pb::health_check_response::ServingStatus; use tonic_health::pb::health_check_response::ServingStatus;
use tracing::{error, info, instrument, warn}; use tracing::{error, info, instrument, warn};
// ManagerClient is a wrapper of ManagerGRPCClient. /// ManagerClient is a wrapper of ManagerGRPCClient.
#[derive(Clone)] #[derive(Clone)]
pub struct ManagerClient { pub struct ManagerClient {
// client is the grpc client of the manager. /// client is the grpc client of the manager.
pub client: ManagerGRPCClient<Channel>, pub client: ManagerGRPCClient<Channel>,
} }
// ManagerClient implements the grpc client of the manager. /// ManagerClient implements the grpc client of the manager.
impl ManagerClient { impl ManagerClient {
// new creates a new ManagerClient. /// new creates a new ManagerClient.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn new(addrs: Vec<String>) -> Result<Self> { pub async fn new(addrs: Vec<String>) -> Result<Self> {
// Find the available manager address. // Find the available manager address.
@ -91,7 +91,7 @@ impl ManagerClient {
Ok(Self { client }) Ok(Self { client })
} }
// list_schedulers lists all schedulers that best match the client. /// list_schedulers lists all schedulers that best match the client.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn list_schedulers( pub async fn list_schedulers(
&self, &self,
@ -102,7 +102,7 @@ impl ManagerClient {
Ok(response.into_inner()) Ok(response.into_inner())
} }
// update_seed_peer updates the seed peer information. /// update_seed_peer updates the seed peer information.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn update_seed_peer(&self, request: UpdateSeedPeerRequest) -> Result<SeedPeer> { pub async fn update_seed_peer(&self, request: UpdateSeedPeerRequest) -> Result<SeedPeer> {
let request = Self::make_request(request); let request = Self::make_request(request);
@ -110,7 +110,7 @@ impl ManagerClient {
Ok(response.into_inner()) Ok(response.into_inner())
} }
// delete_seed_peer deletes the seed peer information. /// delete_seed_peer deletes the seed peer information.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn delete_seed_peer(&self, request: DeleteSeedPeerRequest) -> Result<()> { pub async fn delete_seed_peer(&self, request: DeleteSeedPeerRequest) -> Result<()> {
let request = Self::make_request(request); let request = Self::make_request(request);
@ -118,7 +118,7 @@ impl ManagerClient {
Ok(()) Ok(())
} }
// make_request creates a new request with timeout. /// make_request creates a new request with timeout.
#[instrument(skip_all)] #[instrument(skip_all)]
fn make_request<T>(request: T) -> tonic::Request<T> { fn make_request<T>(request: T) -> tonic::Request<T> {
let mut request = tonic::Request::new(request); let mut request = tonic::Request::new(request);
@ -31,31 +31,31 @@ pub mod manager;
pub mod scheduler; pub mod scheduler;
pub mod security; pub mod security;
// CONNECT_TIMEOUT is the timeout for GRPC connection. /// CONNECT_TIMEOUT is the timeout for GRPC connection.
pub const CONNECT_TIMEOUT: Duration = Duration::from_secs(2); pub const CONNECT_TIMEOUT: Duration = Duration::from_secs(2);
// REQUEST_TIMEOUT is the timeout for GRPC requests. /// REQUEST_TIMEOUT is the timeout for GRPC requests.
pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(10); pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
// TCP_KEEPALIVE is the keepalive duration for TCP connection. /// TCP_KEEPALIVE is the keepalive duration for TCP connection.
pub const TCP_KEEPALIVE: Duration = Duration::from_secs(3600); pub const TCP_KEEPALIVE: Duration = Duration::from_secs(3600);
// HTTP2_KEEP_ALIVE_INTERVAL is the interval for HTTP2 keep alive. /// HTTP2_KEEP_ALIVE_INTERVAL is the interval for HTTP2 keep alive.
pub const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(300); pub const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(300);
// HTTP2_KEEP_ALIVE_TIMEOUT is the timeout for HTTP2 keep alive. /// HTTP2_KEEP_ALIVE_TIMEOUT is the timeout for HTTP2 keep alive.
pub const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(20); pub const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(20);
// MAX_FRAME_SIZE is the max frame size for GRPC, default is 12MB. /// MAX_FRAME_SIZE is the max frame size for GRPC, default is 12MB.
pub const MAX_FRAME_SIZE: u32 = 12 * 1024 * 1024; pub const MAX_FRAME_SIZE: u32 = 12 * 1024 * 1024;
// INITIAL_WINDOW_SIZE is the initial window size for GRPC, default is 12MB. /// INITIAL_WINDOW_SIZE is the initial window size for GRPC, default is 12MB.
pub const INITIAL_WINDOW_SIZE: u32 = 12 * 1024 * 1024; pub const INITIAL_WINDOW_SIZE: u32 = 12 * 1024 * 1024;
// BUFFER_SIZE is the buffer size for GRPC, default is 64KB. /// BUFFER_SIZE is the buffer size for GRPC, default is 64KB.
pub const BUFFER_SIZE: usize = 64 * 1024; pub const BUFFER_SIZE: usize = 64 * 1024;
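These constants are the usual knobs on a tonic Endpoint. A hedged sketch of how they would typically be applied when dialing a gRPC peer (the address is a placeholder, and this is not code from the commit):

    use tonic::transport::{Channel, Endpoint};

    /// Sketch: dial a gRPC endpoint using the tunables defined above.
    async fn dial(addr: &'static str) -> Result<Channel, tonic::transport::Error> {
        Endpoint::from_static(addr)
            .connect_timeout(CONNECT_TIMEOUT)
            .timeout(REQUEST_TIMEOUT)
            .tcp_keepalive(Some(TCP_KEEPALIVE))
            .http2_keep_alive_interval(HTTP2_KEEP_ALIVE_INTERVAL)
            .keep_alive_timeout(HTTP2_KEEP_ALIVE_TIMEOUT)
            .buffer_size(BUFFER_SIZE)
            .initial_stream_window_size(INITIAL_WINDOW_SIZE)
            .connect()
            .await
    }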
// prefetch_task prefetches the task if prefetch flag is true. /// prefetch_task prefetches the task if prefetch flag is true.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn prefetch_task( pub async fn prefetch_task(
socket_path: PathBuf, socket_path: PathBuf,
@ -37,40 +37,40 @@ use tokio::task::JoinSet;
use tonic::transport::Channel; use tonic::transport::Channel;
use tracing::{error, info, instrument, Instrument}; use tracing::{error, info, instrument, Instrument};
// VNode is the virtual node of the hashring. /// VNode is the virtual node of the hashring.
#[derive(Debug, Copy, Clone, Hash, PartialEq)] #[derive(Debug, Copy, Clone, Hash, PartialEq)]
struct VNode { struct VNode {
// addr is the address of the virtual node. /// addr is the address of the virtual node.
addr: SocketAddr, addr: SocketAddr,
} }
// VNode implements the Display trait. /// VNode implements the Display trait.
impl std::fmt::Display for VNode { impl std::fmt::Display for VNode {
// fmt formats the virtual node. /// fmt formats the virtual node.
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.addr) write!(f, "{}", self.addr)
} }
} }
// SchedulerClient is a wrapper of SchedulerGRPCClient. /// SchedulerClient is a wrapper of SchedulerGRPCClient.
#[derive(Clone)] #[derive(Clone)]
pub struct SchedulerClient { pub struct SchedulerClient {
// dynconfig is the dynamic configuration of the dfdaemon. /// dynconfig is the dynamic configuration of the dfdaemon.
dynconfig: Arc<Dynconfig>, dynconfig: Arc<Dynconfig>,
// available_schedulers is the available schedulers. /// available_schedulers is the available schedulers.
available_schedulers: Arc<RwLock<Vec<Scheduler>>>, available_schedulers: Arc<RwLock<Vec<Scheduler>>>,
// available_scheduler_addrs is the addresses of available schedulers. /// available_scheduler_addrs is the addresses of available schedulers.
available_scheduler_addrs: Arc<RwLock<Vec<SocketAddr>>>, available_scheduler_addrs: Arc<RwLock<Vec<SocketAddr>>>,
// hashring is the hashring of the scheduler. /// hashring is the hashring of the scheduler.
hashring: Arc<RwLock<HashRing<VNode>>>, hashring: Arc<RwLock<HashRing<VNode>>>,
} }
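The scheduler addresses are spread over a consistent-hash ring of VNode entries. Below is a hedged sketch of mapping a task id to a scheduler address with the hashring crate; using the task id as the hash key is an assumption for illustration.

    use std::net::SocketAddr;
    use hashring::HashRing;

    /// Sketch: pick the scheduler responsible for a given task id.
    fn pick_scheduler(ring: &HashRing<VNode>, task_id: &str) -> Option<SocketAddr> {
        // Consistent hashing keeps most task ids pinned to the same scheduler
        // when the set of available schedulers changes.
        ring.get(&task_id).map(|vnode| vnode.addr)
    }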
// SchedulerClient implements the grpc client of the scheduler. /// SchedulerClient implements the grpc client of the scheduler.
impl SchedulerClient { impl SchedulerClient {
// new creates a new SchedulerClient. /// new creates a new SchedulerClient.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn new(dynconfig: Arc<Dynconfig>) -> Result<Self> { pub async fn new(dynconfig: Arc<Dynconfig>) -> Result<Self> {
let client = Self { let client = Self {
@ -84,7 +84,7 @@ impl SchedulerClient {
Ok(client) Ok(client)
} }
// announce_peer announces the peer to the scheduler. /// announce_peer announces the peer to the scheduler.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn announce_peer( pub async fn announce_peer(
&self, &self,
@ -100,7 +100,7 @@ impl SchedulerClient {
Ok(response) Ok(response)
} }
// stat_peer gets the status of the peer. /// stat_peer gets the status of the peer.
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn stat_peer(&self, request: StatPeerRequest) -> Result<Peer> { pub async fn stat_peer(&self, request: StatPeerRequest) -> Result<Peer> {
let task_id = request.task_id.clone(); let task_id = request.task_id.clone();
@ -113,7 +113,7 @@ impl SchedulerClient {
Ok(response.into_inner()) Ok(response.into_inner())
} }
// delete_peer tells the scheduler that the peer is deleting. /// delete_peer tells the scheduler that the peer is deleting.
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn delete_peer(&self, request: DeletePeerRequest) -> Result<()> { pub async fn delete_peer(&self, request: DeletePeerRequest) -> Result<()> {
let task_id = request.task_id.clone(); let task_id = request.task_id.clone();
@ -125,7 +125,7 @@ impl SchedulerClient {
Ok(()) Ok(())
} }
// stat_task gets the status of the task. /// stat_task gets the status of the task.
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn stat_task(&self, request: StatTaskRequest) -> Result<Task> { pub async fn stat_task(&self, request: StatTaskRequest) -> Result<Task> {
let task_id = request.task_id.clone(); let task_id = request.task_id.clone();
@ -138,7 +138,7 @@ impl SchedulerClient {
Ok(response.into_inner()) Ok(response.into_inner())
} }
// delete_task tells the scheduler that the task is deleting. /// delete_task tells the scheduler that the task is deleting.
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn delete_task(&self, request: DeleteTaskRequest) -> Result<()> { pub async fn delete_task(&self, request: DeleteTaskRequest) -> Result<()> {
let task_id = request.task_id.clone(); let task_id = request.task_id.clone();
@ -150,7 +150,7 @@ impl SchedulerClient {
Ok(()) Ok(())
} }
// announce_host announces the host to the scheduler. /// announce_host announces the host to the scheduler.
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn announce_host(&self, request: AnnounceHostRequest) -> Result<()> { pub async fn announce_host(&self, request: AnnounceHostRequest) -> Result<()> {
// Update scheduler addresses of the client. // Update scheduler addresses of the client.
@ -208,7 +208,7 @@ impl SchedulerClient {
Ok(()) Ok(())
} }
// init_announce_host announces the host to the scheduler. /// init_announce_host announces the host to the scheduler.
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn init_announce_host(&self, request: AnnounceHostRequest) -> Result<()> { pub async fn init_announce_host(&self, request: AnnounceHostRequest) -> Result<()> {
let mut join_set = JoinSet::new(); let mut join_set = JoinSet::new();
@ -263,7 +263,7 @@ impl SchedulerClient {
Ok(()) Ok(())
} }
// delete_host tells the scheduler that the host is deleting. /// delete_host tells the scheduler that the host is deleting.
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn delete_host(&self, request: DeleteHostRequest) -> Result<()> { pub async fn delete_host(&self, request: DeleteHostRequest) -> Result<()> {
// Update scheduler addresses of the client. // Update scheduler addresses of the client.
@ -321,7 +321,7 @@ impl SchedulerClient {
Ok(()) Ok(())
} }
// announce_cache_peer announces the cache peer to the scheduler. /// announce_cache_peer announces the cache peer to the scheduler.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn announce_cache_peer( pub async fn announce_cache_peer(
&self, &self,
@ -337,7 +337,7 @@ impl SchedulerClient {
Ok(response) Ok(response)
} }
// stat_cache_peer gets the status of the cache peer. /// stat_cache_peer gets the status of the cache peer.
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn stat_cache_peer(&self, request: StatCachePeerRequest) -> Result<CachePeer> { pub async fn stat_cache_peer(&self, request: StatCachePeerRequest) -> Result<CachePeer> {
let task_id = request.task_id.clone(); let task_id = request.task_id.clone();
@ -350,7 +350,7 @@ impl SchedulerClient {
Ok(response.into_inner()) Ok(response.into_inner())
} }
// delete_cache_peer tells the scheduler that the cache peer is deleting. /// delete_cache_peer tells the scheduler that the cache peer is deleting.
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn delete_cache_peer(&self, request: DeleteCachePeerRequest) -> Result<()> { pub async fn delete_cache_peer(&self, request: DeleteCachePeerRequest) -> Result<()> {
let task_id = request.task_id.clone(); let task_id = request.task_id.clone();
@ -362,7 +362,7 @@ impl SchedulerClient {
Ok(()) Ok(())
} }
// upload_cache_task_started uploads the metadata of the cache task started. /// upload_cache_task_started uploads the metadata of the cache task started.
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn upload_cache_task_started( pub async fn upload_cache_task_started(
&self, &self,
@ -377,7 +377,7 @@ impl SchedulerClient {
Ok(()) Ok(())
} }
// upload_cache_task_finished uploads the metadata of the cache task finished. /// upload_cache_task_finished uploads the metadata of the cache task finished.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn upload_cache_task_finished( pub async fn upload_cache_task_finished(
&self, &self,
@ -393,7 +393,7 @@ impl SchedulerClient {
Ok(response.into_inner()) Ok(response.into_inner())
} }
// upload_cache_task_failed uploads the metadata of the cache task failed. /// upload_cache_task_failed uploads the metadata of the cache task failed.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn upload_cache_task_failed( pub async fn upload_cache_task_failed(
&self, &self,
@ -408,7 +408,7 @@ impl SchedulerClient {
Ok(()) Ok(())
} }
// stat_cache_task gets the status of the cache task. /// stat_cache_task gets the status of the cache task.
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn stat_cache_task(&self, request: StatCacheTaskRequest) -> Result<CacheTask> { pub async fn stat_cache_task(&self, request: StatCacheTaskRequest) -> Result<CacheTask> {
let task_id = request.task_id.clone(); let task_id = request.task_id.clone();
@ -421,7 +421,7 @@ impl SchedulerClient {
Ok(response.into_inner()) Ok(response.into_inner())
} }
// delete_cache_task tells the scheduler that the cache task is deleting. /// delete_cache_task tells the scheduler that the cache task is deleting.
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn delete_cache_task(&self, request: DeleteCacheTaskRequest) -> Result<()> { pub async fn delete_cache_task(&self, request: DeleteCacheTaskRequest) -> Result<()> {
let task_id = request.task_id.clone(); let task_id = request.task_id.clone();
@ -433,7 +433,7 @@ impl SchedulerClient {
Ok(()) Ok(())
} }
// client gets the grpc client of the scheduler. /// client gets the grpc client of the scheduler.
#[instrument(skip(self))] #[instrument(skip(self))]
async fn client( async fn client(
&self, &self,
@ -480,7 +480,7 @@ impl SchedulerClient {
.max_encoding_message_size(usize::MAX)) .max_encoding_message_size(usize::MAX))
} }
// update_available_scheduler_addrs updates the addresses of available schedulers. /// update_available_scheduler_addrs updates the addresses of available schedulers.
#[instrument(skip(self))] #[instrument(skip(self))]
async fn update_available_scheduler_addrs(&self) -> Result<()> { async fn update_available_scheduler_addrs(&self) -> Result<()> {
// Get the endpoints of available schedulers. // Get the endpoints of available schedulers.
@ -566,7 +566,7 @@ impl SchedulerClient {
Ok(()) Ok(())
} }
// refresh_available_scheduler_addrs refreshes addresses of available schedulers. /// refresh_available_scheduler_addrs refreshes addresses of available schedulers.
#[instrument(skip(self))] #[instrument(skip(self))]
async fn refresh_available_scheduler_addrs(&self) -> Result<()> { async fn refresh_available_scheduler_addrs(&self) -> Result<()> {
// Refresh the dynamic configuration. // Refresh the dynamic configuration.
@ -576,7 +576,7 @@ impl SchedulerClient {
self.update_available_scheduler_addrs().await self.update_available_scheduler_addrs().await
} }
// make_request creates a new request with timeout. /// make_request creates a new request with timeout.
#[instrument(skip_all)] #[instrument(skip_all)]
fn make_request<T>(request: T) -> tonic::Request<T> { fn make_request<T>(request: T) -> tonic::Request<T> {
let mut request = tonic::Request::new(request); let mut request = tonic::Request::new(request);
@ -25,16 +25,16 @@ use dragonfly_client_core::{
use tonic::transport::Channel; use tonic::transport::Channel;
use tracing::instrument; use tracing::instrument;
// CertificateClient is a wrapper of CertificateGRPCClient. /// CertificateClient is a wrapper of CertificateGRPCClient.
#[derive(Clone)] #[derive(Clone)]
pub struct CertificateClient { pub struct CertificateClient {
// client is the grpc client of the certificate. /// client is the grpc client of the certificate.
pub client: CertificateGRPCClient<Channel>, pub client: CertificateGRPCClient<Channel>,
} }
// CertificateClient implements the grpc client of the certificate. /// CertificateClient implements the grpc client of the certificate.
impl CertificateClient { impl CertificateClient {
// new creates a new CertificateClient. /// new creates a new CertificateClient.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn new(addr: String) -> Result<Self> { pub async fn new(addr: String) -> Result<Self> {
let channel = Channel::from_static(Box::leak(addr.into_boxed_str())) let channel = Channel::from_static(Box::leak(addr.into_boxed_str()))
@ -49,7 +49,7 @@ impl CertificateClient {
Ok(Self { client }) Ok(Self { client })
} }
// issue_certificate issues a certificate for the peer. /// issue_certificate issues a certificate for the peer.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn issue_certificate( pub async fn issue_certificate(
&self, &self,
@ -60,7 +60,7 @@ impl CertificateClient {
Ok(response.into_inner()) Ok(response.into_inner())
} }
// make_request creates a new request with timeout. /// make_request creates a new request with timeout.
#[instrument(skip_all)] #[instrument(skip_all)]
fn make_request<T>(request: T) -> tonic::Request<T> { fn make_request<T>(request: T) -> tonic::Request<T> {
let mut request = tonic::Request::new(request); let mut request = tonic::Request::new(request);
@ -20,22 +20,22 @@ use tokio::sync::mpsc;
use tracing::{info, instrument}; use tracing::{info, instrument};
use warp::{Filter, Rejection, Reply}; use warp::{Filter, Rejection, Reply};
// Health is the health server. /// Health is the health server.
#[derive(Debug)] #[derive(Debug)]
pub struct Health { pub struct Health {
// addr is the address of the health server. /// addr is the address of the health server.
addr: SocketAddr, addr: SocketAddr,
// shutdown is used to shutdown the health server. /// shutdown is used to shutdown the health server.
shutdown: shutdown::Shutdown, shutdown: shutdown::Shutdown,
// _shutdown_complete is used to notify the health server is shutdown. /// _shutdown_complete is used to notify the health server is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>, _shutdown_complete: mpsc::UnboundedSender<()>,
} }
// Health implements the health server. /// Health implements the health server.
impl Health { impl Health {
// new creates a new Health. /// new creates a new Health.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new( pub fn new(
addr: SocketAddr, addr: SocketAddr,
@ -49,7 +49,7 @@ impl Health {
} }
} }
// run starts the health server. /// run starts the health server.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn run(&self) { pub async fn run(&self) {
// Clone the shutdown channel. // Clone the shutdown channel.
@ -75,7 +75,7 @@ impl Health {
} }
} }
// health_handler handles the health check request. /// health_handler handles the health check request.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn health_handler() -> Result<impl Reply, Rejection> { async fn health_handler() -> Result<impl Reply, Rejection> {
Ok(warp::reply()) Ok(warp::reply())
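health_handler is a plain warp handler; wiring it into a served filter presumably looks something like the sketch below. The /healthy route path is an assumption, not taken from this commit.

    use std::net::SocketAddr;

    /// Sketch: serve the health handler on the configured address.
    async fn serve_health(addr: SocketAddr) {
        let route = warp::path("healthy")
            .and(warp::get())
            .and_then(health_handler);
        warp::serve(route).run(addr).await;
    }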
@ -31,201 +31,201 @@ use tokio::sync::mpsc;
use tracing::{error, info, instrument, warn}; use tracing::{error, info, instrument, warn};
use warp::{Filter, Rejection, Reply}; use warp::{Filter, Rejection, Reply};
// DOWNLOAD_TASK_LEVEL1_DURATION_THRESHOLD is the threshold of download task level1 duration for /// DOWNLOAD_TASK_LEVEL1_DURATION_THRESHOLD is the threshold of download task level1 duration for
// recording slow download task. /// recording slow download task.
const DOWNLOAD_TASK_LEVEL1_DURATION_THRESHOLD: Duration = Duration::from_millis(500); const DOWNLOAD_TASK_LEVEL1_DURATION_THRESHOLD: Duration = Duration::from_millis(500);
// UPLOAD_TASK_LEVEL1_DURATION_THRESHOLD is the threshold of upload task level1 duration for /// UPLOAD_TASK_LEVEL1_DURATION_THRESHOLD is the threshold of upload task level1 duration for
// recording slow upload tasks. /// recording slow upload tasks.
const UPLOAD_TASK_LEVEL1_DURATION_THRESHOLD: Duration = Duration::from_millis(500); const UPLOAD_TASK_LEVEL1_DURATION_THRESHOLD: Duration = Duration::from_millis(500);
lazy_static! { lazy_static! {
// REGISTRY is used to register all metrics. /// REGISTRY is used to register all metrics.
pub static ref REGISTRY: Registry = Registry::new(); pub static ref REGISTRY: Registry = Registry::new();
// VERSION_GAUGE is used to record the version info of the service. /// VERSION_GAUGE is used to record the version info of the service.
pub static ref VERSION_GAUGE: IntGaugeVec = pub static ref VERSION_GAUGE: IntGaugeVec =
IntGaugeVec::new( IntGaugeVec::new(
Opts::new("version", "Version info of the service.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("version", "Version info of the service.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["git_version", "git_commit", "platform", "build_time"] &["git_version", "git_commit", "platform", "build_time"]
).expect("metric can be created"); ).expect("metric can be created");
// UPLOAD_TASK_COUNT is used to count the number of upload tasks. /// UPLOAD_TASK_COUNT is used to count the number of upload tasks.
pub static ref UPLOAD_TASK_COUNT: IntCounterVec = pub static ref UPLOAD_TASK_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("upload_task_total", "Counter of the number of the upload task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("upload_task_total", "Counter of the number of the upload task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type", "tag", "app"] &["type", "tag", "app"]
).expect("metric can be created"); ).expect("metric can be created");
// UPLOAD_TASK_FAILURE_COUNT is used to count the number of failed upload tasks. /// UPLOAD_TASK_FAILURE_COUNT is used to count the number of failed upload tasks.
pub static ref UPLOAD_TASK_FAILURE_COUNT: IntCounterVec = pub static ref UPLOAD_TASK_FAILURE_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("upload_task_failure_total", "Counter of the number of failed of the upload task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("upload_task_failure_total", "Counter of the number of failed of the upload task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type", "tag", "app"] &["type", "tag", "app"]
).expect("metric can be created"); ).expect("metric can be created");
// CONCURRENT_UPLOAD_TASK_GAUGE is used to gauge the number of concurrent upload tasks. /// CONCURRENT_UPLOAD_TASK_GAUGE is used to gauge the number of concurrent upload tasks.
pub static ref CONCURRENT_UPLOAD_TASK_GAUGE: IntGaugeVec = pub static ref CONCURRENT_UPLOAD_TASK_GAUGE: IntGaugeVec =
IntGaugeVec::new( IntGaugeVec::new(
Opts::new("concurrent_upload_task_total", "Gauge of the number of concurrent of the upload task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("concurrent_upload_task_total", "Gauge of the number of concurrent of the upload task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type", "tag", "app"] &["type", "tag", "app"]
).expect("metric can be created"); ).expect("metric can be created");
// UPLOAD_TASK_DURATION is used to record the upload task duration. /// UPLOAD_TASK_DURATION is used to record the upload task duration.
pub static ref UPLOAD_TASK_DURATION: HistogramVec = pub static ref UPLOAD_TASK_DURATION: HistogramVec =
HistogramVec::new( HistogramVec::new(
HistogramOpts::new("upload_task_duration_milliseconds", "Histogram of the upload task duration.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME).buckets(exponential_buckets(1.0, 2.0, 24).unwrap()), HistogramOpts::new("upload_task_duration_milliseconds", "Histogram of the upload task duration.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME).buckets(exponential_buckets(1.0, 2.0, 24).unwrap()),
&["task_type", "task_size_level"] &["task_type", "task_size_level"]
).expect("metric can be created"); ).expect("metric can be created");
// DOWNLOAD_TASK_COUNT is used to count the number of download tasks. /// DOWNLOAD_TASK_COUNT is used to count the number of download tasks.
pub static ref DOWNLOAD_TASK_COUNT: IntCounterVec = pub static ref DOWNLOAD_TASK_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("download_task_total", "Counter of the number of the download task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("download_task_total", "Counter of the number of the download task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type", "tag", "app", "priority"] &["type", "tag", "app", "priority"]
).expect("metric can be created"); ).expect("metric can be created");
// DOWNLOAD_TASK_FAILURE_COUNT is used to count the number of failed download tasks. /// DOWNLOAD_TASK_FAILURE_COUNT is used to count the number of failed download tasks.
pub static ref DOWNLOAD_TASK_FAILURE_COUNT: IntCounterVec = pub static ref DOWNLOAD_TASK_FAILURE_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("download_task_failure_total", "Counter of the number of failed of the download task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("download_task_failure_total", "Counter of the number of failed of the download task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type", "tag", "app", "priority"] &["type", "tag", "app", "priority"]
).expect("metric can be created"); ).expect("metric can be created");
// PREFETCH_TASK_COUNT is used to count the number of prefetch tasks. /// PREFETCH_TASK_COUNT is used to count the number of prefetch tasks.
pub static ref PREFETCH_TASK_COUNT: IntCounterVec = pub static ref PREFETCH_TASK_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("prefetch_task_total", "Counter of the number of the prefetch task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("prefetch_task_total", "Counter of the number of the prefetch task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type", "tag", "app", "priority"] &["type", "tag", "app", "priority"]
).expect("metric can be created"); ).expect("metric can be created");
// PREFETCH_TASK_FAILURE_COUNT is used to count the number of failed prefetch tasks. /// PREFETCH_TASK_FAILURE_COUNT is used to count the number of failed prefetch tasks.
pub static ref PREFETCH_TASK_FAILURE_COUNT: IntCounterVec = pub static ref PREFETCH_TASK_FAILURE_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("prefetch_task_failure_total", "Counter of the number of failed of the prefetch task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("prefetch_task_failure_total", "Counter of the number of failed of the prefetch task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type", "tag", "app", "priority"] &["type", "tag", "app", "priority"]
).expect("metric can be created"); ).expect("metric can be created");
// CONCURRENT_DOWNLOAD_TASK_GAUGE is used to gauge the number of concurrent download tasks. /// CONCURRENT_DOWNLOAD_TASK_GAUGE is used to gauge the number of concurrent download tasks.
pub static ref CONCURRENT_DOWNLOAD_TASK_GAUGE: IntGaugeVec = pub static ref CONCURRENT_DOWNLOAD_TASK_GAUGE: IntGaugeVec =
IntGaugeVec::new( IntGaugeVec::new(
Opts::new("concurrent_download_task_total", "Gauge of the number of concurrent of the download task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("concurrent_download_task_total", "Gauge of the number of concurrent of the download task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type", "tag", "app", "priority"] &["type", "tag", "app", "priority"]
).expect("metric can be created"); ).expect("metric can be created");
// CONCURRENT_UPLOAD_PIECE_GAUGE is used to gauge the number of concurrent upload pieces. /// CONCURRENT_UPLOAD_PIECE_GAUGE is used to gauge the number of concurrent upload pieces.
pub static ref CONCURRENT_UPLOAD_PIECE_GAUGE: IntGaugeVec = pub static ref CONCURRENT_UPLOAD_PIECE_GAUGE: IntGaugeVec =
IntGaugeVec::new( IntGaugeVec::new(
Opts::new("concurrent_upload_piece_total", "Gauge of the number of concurrent of the upload piece.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("concurrent_upload_piece_total", "Gauge of the number of concurrent of the upload piece.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&[] &[]
).expect("metric can be created"); ).expect("metric can be created");
// DOWNLOAD_TRAFFIC is used to count the download traffic. /// DOWNLOAD_TRAFFIC is used to count the download traffic.
pub static ref DOWNLOAD_TRAFFIC: IntCounterVec = pub static ref DOWNLOAD_TRAFFIC: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("download_traffic", "Counter of the number of the download traffic.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("download_traffic", "Counter of the number of the download traffic.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type", "task_type"] &["type", "task_type"]
).expect("metric can be created"); ).expect("metric can be created");
// UPLOAD_TRAFFIC is used to count the upload traffic. /// UPLOAD_TRAFFIC is used to count the upload traffic.
pub static ref UPLOAD_TRAFFIC: IntCounterVec = pub static ref UPLOAD_TRAFFIC: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("upload_traffic", "Counter of the number of the upload traffic.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("upload_traffic", "Counter of the number of the upload traffic.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["task_type"] &["task_type"]
).expect("metric can be created"); ).expect("metric can be created");
// DOWNLOAD_TASK_DURATION is used to record the download task duration. /// DOWNLOAD_TASK_DURATION is used to record the download task duration.
pub static ref DOWNLOAD_TASK_DURATION: HistogramVec = pub static ref DOWNLOAD_TASK_DURATION: HistogramVec =
HistogramVec::new( HistogramVec::new(
HistogramOpts::new("download_task_duration_milliseconds", "Histogram of the download task duration.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME).buckets(exponential_buckets(1.0, 2.0, 24).unwrap()), HistogramOpts::new("download_task_duration_milliseconds", "Histogram of the download task duration.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME).buckets(exponential_buckets(1.0, 2.0, 24).unwrap()),
&["task_type", "task_size_level"] &["task_type", "task_size_level"]
).expect("metric can be created"); ).expect("metric can be created");
// BACKEND_REQUEST_COUNT is used to count the number of backend requests. /// BACKEND_REQUEST_COUNT is used to count the number of backend requests.
pub static ref BACKEND_REQUEST_COUNT: IntCounterVec = pub static ref BACKEND_REQUEST_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("backend_request_total", "Counter of the number of the backend request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("backend_request_total", "Counter of the number of the backend request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["scheme", "method"] &["scheme", "method"]
).expect("metric can be created"); ).expect("metric can be created");
// BACKEND_REQUEST_FAILURE_COUNT is used to count the number of failed backend requests. /// BACKEND_REQUEST_FAILURE_COUNT is used to count the number of failed backend requests.
pub static ref BACKEND_REQUEST_FAILURE_COUNT: IntCounterVec = pub static ref BACKEND_REQUEST_FAILURE_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("backend_request_failure_total", "Counter of the number of failed of the backend request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("backend_request_failure_total", "Counter of the number of failed of the backend request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["scheme", "method"] &["scheme", "method"]
).expect("metric can be created"); ).expect("metric can be created");
// BACKEND_REQUEST_DURATION is used to record the backend request duration. /// BACKEND_REQUEST_DURATION is used to record the backend request duration.
pub static ref BACKEND_REQUEST_DURATION: HistogramVec = pub static ref BACKEND_REQUEST_DURATION: HistogramVec =
HistogramVec::new( HistogramVec::new(
HistogramOpts::new("backend_request_duration_milliseconds", "Histogram of the backend request duration.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME).buckets(exponential_buckets(1.0, 2.0, 24).unwrap()), HistogramOpts::new("backend_request_duration_milliseconds", "Histogram of the backend request duration.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME).buckets(exponential_buckets(1.0, 2.0, 24).unwrap()),
&["scheme", "method"] &["scheme", "method"]
).expect("metric can be created"); ).expect("metric can be created");
// PROXY_REQUEST_COUNT is used to count the number of proxy requests. /// PROXY_REQUEST_COUNT is used to count the number of proxy requests.
pub static ref PROXY_REQUEST_COUNT: IntCounterVec = pub static ref PROXY_REQUEST_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("proxy_request_total", "Counter of the number of the proxy request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("proxy_request_total", "Counter of the number of the proxy request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&[] &[]
).expect("metric can be created"); ).expect("metric can be created");
// PROXY_REQUEST_FAILURE_COUNT is used to count the number of failed proxy requests. /// PROXY_REQUEST_FAILURE_COUNT is used to count the number of failed proxy requests.
pub static ref PROXY_REQUEST_FAILURE_COUNT: IntCounterVec = pub static ref PROXY_REQUEST_FAILURE_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("proxy_request_failure_total", "Counter of the number of failed of the proxy request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("proxy_request_failure_total", "Counter of the number of failed of the proxy request.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&[] &[]
).expect("metric can be created"); ).expect("metric can be created");
// STAT_TASK_COUNT is used to count the number of stat tasks. /// STAT_TASK_COUNT is used to count the number of stat tasks.
pub static ref STAT_TASK_COUNT: IntCounterVec = pub static ref STAT_TASK_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("stat_task_total", "Counter of the number of the stat task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("stat_task_total", "Counter of the number of the stat task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type"] &["type"]
).expect("metric can be created"); ).expect("metric can be created");
// STAT_TASK_FAILURE_COUNT is used to count the number of failed stat tasks. /// STAT_TASK_FAILURE_COUNT is used to count the number of failed stat tasks.
pub static ref STAT_TASK_FAILURE_COUNT: IntCounterVec = pub static ref STAT_TASK_FAILURE_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("stat_task_failure_total", "Counter of the number of failed of the stat task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("stat_task_failure_total", "Counter of the number of failed of the stat task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type"] &["type"]
).expect("metric can be created"); ).expect("metric can be created");
// DELETE_TASK_COUNT is used to count the number of delete tasks. /// DELETE_TASK_COUNT is used to count the number of delete tasks.
pub static ref DELETE_TASK_COUNT: IntCounterVec = pub static ref DELETE_TASK_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("delete_task_total", "Counter of the number of the delete task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("delete_task_total", "Counter of the number of the delete task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type"] &["type"]
).expect("metric can be created"); ).expect("metric can be created");
// DELETE_TASK_FAILURE_COUNT is used to count the number of failed delete tasks. /// DELETE_TASK_FAILURE_COUNT is used to count the number of failed delete tasks.
pub static ref DELETE_TASK_FAILURE_COUNT: IntCounterVec = pub static ref DELETE_TASK_FAILURE_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("delete_task_failure_total", "Counter of the number of failed of the delete task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("delete_task_failure_total", "Counter of the number of failed of the delete task.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&["type"] &["type"]
).expect("metric can be created"); ).expect("metric can be created");
// DELETE_HOST_COUNT is used to count the number of delete host operations. /// DELETE_HOST_COUNT is used to count the number of delete host operations.
pub static ref DELETE_HOST_COUNT: IntCounterVec = pub static ref DELETE_HOST_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("delete_host_total", "Counter of the number of the delete host.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("delete_host_total", "Counter of the number of the delete host.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&[] &[]
).expect("metric can be created"); ).expect("metric can be created");
// DELETE_HOST_FAILURE_COUNT is used to count the number of failed delete host operations. /// DELETE_HOST_FAILURE_COUNT is used to count the number of failed delete host operations.
pub static ref DELETE_HOST_FAILURE_COUNT: IntCounterVec = pub static ref DELETE_HOST_FAILURE_COUNT: IntCounterVec =
IntCounterVec::new( IntCounterVec::new(
Opts::new("delete_host_failure_total", "Counter of the number of failed of the delete host.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("delete_host_failure_total", "Counter of the number of failed of the delete host.").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&[] &[]
).expect("metric can be created"); ).expect("metric can be created");
// DISK_SPACE is used to gauge the disk space. /// DISK_SPACE is used to gauge the disk space.
pub static ref DISK_SPACE: IntGaugeVec = pub static ref DISK_SPACE: IntGaugeVec =
IntGaugeVec::new( IntGaugeVec::new(
Opts::new("disk_space_total", "Gauge of the disk space in bytes").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("disk_space_total", "Gauge of the disk space in bytes").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
&[] &[]
).expect("metric can be created"); ).expect("metric can be created");
// DISK_USAGE_SPACE is used to gauge the disk usage space. /// DISK_USAGE_SPACE is used to gauge the disk usage space.
pub static ref DISK_USAGE_SPACE: IntGaugeVec = pub static ref DISK_USAGE_SPACE: IntGaugeVec =
IntGaugeVec::new( IntGaugeVec::new(
Opts::new("disk_usage_space_total", "Gauge of the disk usage space in bytes").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME), Opts::new("disk_usage_space_total", "Gauge of the disk usage space in bytes").namespace(dragonfly_client_config::SERVICE_NAME).subsystem(dragonfly_client_config::NAME),
@ -233,76 +233,76 @@ lazy_static! {
).expect("metric can be created"); ).expect("metric can be created");
} }
// TaskSize represents the size of the task. /// TaskSize represents the size of the task.
#[derive(Debug, Clone, Copy, PartialEq, Eq)] #[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TaskSize { pub enum TaskSize {
// Level0 represents unknown size. /// Level0 represents unknown size.
Level0, Level0,
// Level1 represents the size range from 0 to 1M. /// Level1 represents the size range from 0 to 1M.
Level1, Level1,
// Level2 represents the size range from 1M to 4M. /// Level2 represents the size range from 1M to 4M.
Level2, Level2,
// Level3 represents the size range from 4M to 8M. /// Level3 represents the size range from 4M to 8M.
Level3, Level3,
// Level4 represents the size range from 8M to 16M. /// Level4 represents the size range from 8M to 16M.
Level4, Level4,
// Level5 represents the size range from 16M to 32M. /// Level5 represents the size range from 16M to 32M.
Level5, Level5,
// Level6 represents the size range from 32M to 64M. /// Level6 represents the size range from 32M to 64M.
Level6, Level6,
// Level7 represents the size range from 64M to 128M. /// Level7 represents the size range from 64M to 128M.
Level7, Level7,
// Level8 represents the size range from 128M to 256M. /// Level8 represents the size range from 128M to 256M.
Level8, Level8,
// Level9 represents the size range from 256M to 512M. /// Level9 represents the size range from 256M to 512M.
Level9, Level9,
// Level10 represents the size range from 512M to 1G. /// Level10 represents the size range from 512M to 1G.
Level10, Level10,
// Level11 represents the size range from 1G to 4G. /// Level11 represents the size range from 1G to 4G.
Level11, Level11,
// Level12 represents the size range from 4G to 8G. /// Level12 represents the size range from 4G to 8G.
Level12, Level12,
// Level13 represents the size range from 8G to 16G. /// Level13 represents the size range from 8G to 16G.
Level13, Level13,
// Level14 represents the size range from 16G to 32G. /// Level14 represents the size range from 16G to 32G.
Level14, Level14,
// Level15 represents the size range from 32G to 64G. /// Level15 represents the size range from 32G to 64G.
Level15, Level15,
// Level16 represents the size range from 64G to 128G. /// Level16 represents the size range from 64G to 128G.
Level16, Level16,
// Level17 represents the size range from 128G to 256G. /// Level17 represents the size range from 128G to 256G.
Level17, Level17,
// Level18 represents the size range from 256G to 512G. /// Level18 represents the size range from 256G to 512G.
Level18, Level18,
// Level19 represents the size range from 512G to 1T. /// Level19 represents the size range from 512G to 1T.
Level19, Level19,
// Level20 represents a size greater than 1T. /// Level20 represents a size greater than 1T.
Level20, Level20,
} }
// TaskSize implements the Display trait. /// TaskSize implements the Display trait.
impl std::fmt::Display for TaskSize { impl std::fmt::Display for TaskSize {
// fmt formats the TaskSize. /// fmt formats the TaskSize.
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self { match self {
TaskSize::Level0 => write!(f, "0"), TaskSize::Level0 => write!(f, "0"),
@ -330,9 +330,9 @@ impl std::fmt::Display for TaskSize {
} }
} }
// TaskSize implements the size level calculation. /// TaskSize implements the size level calculation.
impl TaskSize { impl TaskSize {
// calculate_size_level calculates the size level according to the size. /// calculate_size_level calculates the size level according to the size.
pub fn calculate_size_level(size: u64) -> Self { pub fn calculate_size_level(size: u64) -> Self {
match size { match size {
0 => TaskSize::Level0, 0 => TaskSize::Level0,
@ -360,7 +360,7 @@ impl TaskSize {
} }
} }
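As a hedged usage sketch of how the size levels pair with the duration histograms above (the helper name record_download_duration is an assumption, not a function in this file):
use std::time::Duration;

// Records a finished download in DOWNLOAD_TASK_DURATION, labeled by task type and size level,
// mirroring the with_label_values/observe pattern used by the collect_* helpers below.
fn record_download_duration(task_type: i32, size: u64, cost: Duration) {
    let size_level = TaskSize::calculate_size_level(size);
    DOWNLOAD_TASK_DURATION
        .with_label_values(&[
            task_type.to_string().as_str(),
            size_level.to_string().as_str(),
        ])
        .observe(cost.as_millis() as f64);
}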
// collect_upload_task_started_metrics collects the upload task started metrics. /// collect_upload_task_started_metrics collects the upload task started metrics.
pub fn collect_upload_task_started_metrics(typ: i32, tag: &str, app: &str) { pub fn collect_upload_task_started_metrics(typ: i32, tag: &str, app: &str) {
UPLOAD_TASK_COUNT UPLOAD_TASK_COUNT
.with_label_values(&[typ.to_string().as_str(), tag, app]) .with_label_values(&[typ.to_string().as_str(), tag, app])
@ -371,7 +371,7 @@ pub fn collect_upload_task_started_metrics(typ: i32, tag: &str, app: &str) {
.inc(); .inc();
} }
// collect_upload_task_finished_metrics collects the upload task finished metrics. /// collect_upload_task_finished_metrics collects the upload task finished metrics.
pub fn collect_upload_task_finished_metrics( pub fn collect_upload_task_finished_metrics(
typ: i32, typ: i32,
tag: &str, tag: &str,
@ -399,7 +399,7 @@ pub fn collect_upload_task_finished_metrics(
.dec(); .dec();
} }
// collect_upload_task_failure_metrics collects the upload task failure metrics. /// collect_upload_task_failure_metrics collects the upload task failure metrics.
pub fn collect_upload_task_failure_metrics(typ: i32, tag: &str, app: &str) { pub fn collect_upload_task_failure_metrics(typ: i32, tag: &str, app: &str) {
UPLOAD_TASK_FAILURE_COUNT UPLOAD_TASK_FAILURE_COUNT
.with_label_values(&[typ.to_string().as_str(), tag, app]) .with_label_values(&[typ.to_string().as_str(), tag, app])
@ -410,7 +410,7 @@ pub fn collect_upload_task_failure_metrics(typ: i32, tag: &str, app: &str) {
.dec(); .dec();
} }
// collect_download_task_started_metrics collects the download task started metrics. /// collect_download_task_started_metrics collects the download task started metrics.
pub fn collect_download_task_started_metrics(typ: i32, tag: &str, app: &str, priority: &str) { pub fn collect_download_task_started_metrics(typ: i32, tag: &str, app: &str, priority: &str) {
DOWNLOAD_TASK_COUNT DOWNLOAD_TASK_COUNT
.with_label_values(&[typ.to_string().as_str(), tag, app, priority]) .with_label_values(&[typ.to_string().as_str(), tag, app, priority])
@ -421,7 +421,7 @@ pub fn collect_download_task_started_metrics(typ: i32, tag: &str, app: &str, pri
.inc(); .inc();
} }
// collect_download_task_finished_metrics collects the download task finished metrics. /// collect_download_task_finished_metrics collects the download task finished metrics.
pub fn collect_download_task_finished_metrics( pub fn collect_download_task_finished_metrics(
typ: i32, typ: i32,
tag: &str, tag: &str,
@ -457,7 +457,7 @@ pub fn collect_download_task_finished_metrics(
.dec(); .dec();
} }
// collect_download_task_failure_metrics collects the download task failure metrics. /// collect_download_task_failure_metrics collects the download task failure metrics.
pub fn collect_download_task_failure_metrics(typ: i32, tag: &str, app: &str, priority: &str) { pub fn collect_download_task_failure_metrics(typ: i32, tag: &str, app: &str, priority: &str) {
DOWNLOAD_TASK_FAILURE_COUNT DOWNLOAD_TASK_FAILURE_COUNT
.with_label_values(&[typ.to_string().as_str(), tag, app, priority]) .with_label_values(&[typ.to_string().as_str(), tag, app, priority])
@ -468,119 +468,119 @@ pub fn collect_download_task_failure_metrics(typ: i32, tag: &str, app: &str, pri
.dec(); .dec();
} }
// collect_prefetch_task_started_metrics collects the prefetch task started metrics. /// collect_prefetch_task_started_metrics collects the prefetch task started metrics.
pub fn collect_prefetch_task_started_metrics(typ: i32, tag: &str, app: &str, priority: &str) { pub fn collect_prefetch_task_started_metrics(typ: i32, tag: &str, app: &str, priority: &str) {
PREFETCH_TASK_COUNT PREFETCH_TASK_COUNT
.with_label_values(&[typ.to_string().as_str(), tag, app, priority]) .with_label_values(&[typ.to_string().as_str(), tag, app, priority])
.inc(); .inc();
} }
// collect_prefetch_task_failure_metrics collects the prefetch task failure metrics. /// collect_prefetch_task_failure_metrics collects the prefetch task failure metrics.
pub fn collect_prefetch_task_failure_metrics(typ: i32, tag: &str, app: &str, priority: &str) { pub fn collect_prefetch_task_failure_metrics(typ: i32, tag: &str, app: &str, priority: &str) {
PREFETCH_TASK_FAILURE_COUNT PREFETCH_TASK_FAILURE_COUNT
.with_label_values(&[typ.to_string().as_str(), tag, app, priority]) .with_label_values(&[typ.to_string().as_str(), tag, app, priority])
.inc(); .inc();
} }
// collect_download_piece_traffic_metrics collects the download piece traffic metrics. /// collect_download_piece_traffic_metrics collects the download piece traffic metrics.
pub fn collect_download_piece_traffic_metrics(typ: &TrafficType, task_type: i32, length: u64) { pub fn collect_download_piece_traffic_metrics(typ: &TrafficType, task_type: i32, length: u64) {
DOWNLOAD_TRAFFIC DOWNLOAD_TRAFFIC
.with_label_values(&[typ.as_str_name(), task_type.to_string().as_str()]) .with_label_values(&[typ.as_str_name(), task_type.to_string().as_str()])
.inc_by(length); .inc_by(length);
} }
// collect_upload_piece_started_metrics collects the upload piece started metrics. /// collect_upload_piece_started_metrics collects the upload piece started metrics.
pub fn collect_upload_piece_started_metrics() { pub fn collect_upload_piece_started_metrics() {
CONCURRENT_UPLOAD_PIECE_GAUGE.with_label_values(&[]).inc(); CONCURRENT_UPLOAD_PIECE_GAUGE.with_label_values(&[]).inc();
} }
// collect_upload_piece_finished_metrics collects the upload piece finished metrics. /// collect_upload_piece_finished_metrics collects the upload piece finished metrics.
pub fn collect_upload_piece_finished_metrics() { pub fn collect_upload_piece_finished_metrics() {
CONCURRENT_UPLOAD_PIECE_GAUGE.with_label_values(&[]).dec(); CONCURRENT_UPLOAD_PIECE_GAUGE.with_label_values(&[]).dec();
} }
// collect_upload_piece_traffic_metrics collects the upload piece traffic metrics. /// collect_upload_piece_traffic_metrics collects the upload piece traffic metrics.
pub fn collect_upload_piece_traffic_metrics(task_type: i32, length: u64) { pub fn collect_upload_piece_traffic_metrics(task_type: i32, length: u64) {
UPLOAD_TRAFFIC UPLOAD_TRAFFIC
.with_label_values(&[task_type.to_string().as_str()]) .with_label_values(&[task_type.to_string().as_str()])
.inc_by(length); .inc_by(length);
} }
// collect_upload_piece_failure_metrics collects the upload piece failure metrics. /// collect_upload_piece_failure_metrics collects the upload piece failure metrics.
pub fn collect_upload_piece_failure_metrics() { pub fn collect_upload_piece_failure_metrics() {
CONCURRENT_UPLOAD_PIECE_GAUGE.with_label_values(&[]).dec(); CONCURRENT_UPLOAD_PIECE_GAUGE.with_label_values(&[]).dec();
} }
// collect_backend_request_started_metrics collects the backend request started metrics. /// collect_backend_request_started_metrics collects the backend request started metrics.
pub fn collect_backend_request_started_metrics(scheme: &str, method: &str) { pub fn collect_backend_request_started_metrics(scheme: &str, method: &str) {
BACKEND_REQUEST_COUNT BACKEND_REQUEST_COUNT
.with_label_values(&[scheme, method]) .with_label_values(&[scheme, method])
.inc(); .inc();
} }
// collect_backend_request_failure_metrics collects the backend request failure metrics. /// collect_backend_request_failure_metrics collects the backend request failure metrics.
pub fn collect_backend_request_failure_metrics(scheme: &str, method: &str) { pub fn collect_backend_request_failure_metrics(scheme: &str, method: &str) {
BACKEND_REQUEST_FAILURE_COUNT BACKEND_REQUEST_FAILURE_COUNT
.with_label_values(&[scheme, method]) .with_label_values(&[scheme, method])
.inc(); .inc();
} }
// collect_backend_request_finished_metrics collects the backend request finished metrics. /// collect_backend_request_finished_metrics collects the backend request finished metrics.
pub fn collect_backend_request_finished_metrics(scheme: &str, method: &str, cost: Duration) { pub fn collect_backend_request_finished_metrics(scheme: &str, method: &str, cost: Duration) {
BACKEND_REQUEST_DURATION BACKEND_REQUEST_DURATION
.with_label_values(&[scheme, method]) .with_label_values(&[scheme, method])
.observe(cost.as_millis() as f64); .observe(cost.as_millis() as f64);
} }
// collect_proxy_request_started_metrics collects the proxy request started metrics. /// collect_proxy_request_started_metrics collects the proxy request started metrics.
pub fn collect_proxy_request_started_metrics() { pub fn collect_proxy_request_started_metrics() {
PROXY_REQUEST_COUNT.with_label_values(&[]).inc(); PROXY_REQUEST_COUNT.with_label_values(&[]).inc();
} }
// collect_proxy_request_failure_metrics collects the proxy request failure metrics. /// collect_proxy_request_failure_metrics collects the proxy request failure metrics.
pub fn collect_proxy_request_failure_metrics() { pub fn collect_proxy_request_failure_metrics() {
PROXY_REQUEST_FAILURE_COUNT.with_label_values(&[]).inc(); PROXY_REQUEST_FAILURE_COUNT.with_label_values(&[]).inc();
} }
// collect_stat_task_started_metrics collects the stat task started metrics. /// collect_stat_task_started_metrics collects the stat task started metrics.
pub fn collect_stat_task_started_metrics(typ: i32) { pub fn collect_stat_task_started_metrics(typ: i32) {
STAT_TASK_COUNT STAT_TASK_COUNT
.with_label_values(&[typ.to_string().as_str()]) .with_label_values(&[typ.to_string().as_str()])
.inc(); .inc();
} }
// collect_stat_task_failure_metrics collects the stat task failure metrics. /// collect_stat_task_failure_metrics collects the stat task failure metrics.
pub fn collect_stat_task_failure_metrics(typ: i32) { pub fn collect_stat_task_failure_metrics(typ: i32) {
STAT_TASK_FAILURE_COUNT STAT_TASK_FAILURE_COUNT
.with_label_values(&[typ.to_string().as_str()]) .with_label_values(&[typ.to_string().as_str()])
.inc(); .inc();
} }
// collect_delete_task_started_metrics collects the delete task started metrics. /// collect_delete_task_started_metrics collects the delete task started metrics.
pub fn collect_delete_task_started_metrics(typ: i32) { pub fn collect_delete_task_started_metrics(typ: i32) {
DELETE_TASK_COUNT DELETE_TASK_COUNT
.with_label_values(&[typ.to_string().as_str()]) .with_label_values(&[typ.to_string().as_str()])
.inc(); .inc();
} }
// collect_delete_task_failure_metrics collects the delete task failure metrics. /// collect_delete_task_failure_metrics collects the delete task failure metrics.
pub fn collect_delete_task_failure_metrics(typ: i32) { pub fn collect_delete_task_failure_metrics(typ: i32) {
DELETE_TASK_FAILURE_COUNT DELETE_TASK_FAILURE_COUNT
.with_label_values(&[typ.to_string().as_str()]) .with_label_values(&[typ.to_string().as_str()])
.inc(); .inc();
} }
// collect_delete_host_started_metrics collects the delete host started metrics. /// collect_delete_host_started_metrics collects the delete host started metrics.
pub fn collect_delete_host_started_metrics() { pub fn collect_delete_host_started_metrics() {
DELETE_HOST_COUNT.with_label_values(&[]).inc(); DELETE_HOST_COUNT.with_label_values(&[]).inc();
} }
// collect_delete_host_failure_metrics collects the delete host failure metrics. /// collect_delete_host_failure_metrics collects the delete host failure metrics.
pub fn collect_delete_host_failure_metrics() { pub fn collect_delete_host_failure_metrics() {
DELETE_HOST_FAILURE_COUNT.with_label_values(&[]).inc(); DELETE_HOST_FAILURE_COUNT.with_label_values(&[]).inc();
} }
// collect_disk_space_metrics collects the disk space metrics. /// collect_disk_space_metrics collects the disk space metrics.
pub fn collect_disk_space_metrics(path: &Path) { pub fn collect_disk_space_metrics(path: &Path) {
let stats = match fs2::statvfs(path) { let stats = match fs2::statvfs(path) {
Ok(stats) => stats, Ok(stats) => stats,
@ -599,22 +599,22 @@ pub fn collect_disk_space_metrics(path: &Path) {
.set(usage_space as i64); .set(usage_space as i64);
} }
// Metrics is the metrics server. /// Metrics is the metrics server.
#[derive(Debug)] #[derive(Debug)]
pub struct Metrics { pub struct Metrics {
// config is the configuration of the dfdaemon. /// config is the configuration of the dfdaemon.
config: Arc<Config>, config: Arc<Config>,
// shutdown is used to shutdown the metrics server. /// shutdown is used to shutdown the metrics server.
shutdown: shutdown::Shutdown, shutdown: shutdown::Shutdown,
// _shutdown_complete is used to notify the metrics server is shutdown. /// _shutdown_complete is used to notify the metrics server is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>, _shutdown_complete: mpsc::UnboundedSender<()>,
} }
// Metrics implements the metrics server. /// Metrics implements the metrics server.
impl Metrics { impl Metrics {
// new creates a new Metrics. /// new creates a new Metrics.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new( pub fn new(
config: Arc<Config>, config: Arc<Config>,
@ -628,7 +628,7 @@ impl Metrics {
} }
} }
// run starts the metrics server. /// run starts the metrics server.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn run(&self) { pub async fn run(&self) {
// Clone the shutdown channel. // Clone the shutdown channel.
@ -680,7 +680,7 @@ impl Metrics {
} }
} }
// register_custom_metrics registers all custom metrics. /// register_custom_metrics registers all custom metrics.
#[instrument(skip_all)] #[instrument(skip_all)]
fn register_custom_metrics(&self) { fn register_custom_metrics(&self) {
REGISTRY REGISTRY
@ -776,7 +776,7 @@ impl Metrics {
.expect("metric can be registered"); .expect("metric can be registered");
} }
// metrics_handler handles the metrics request. /// metrics_handler handles the metrics request.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn metrics_handler(config: Arc<Config>) -> Result<impl Reply, Rejection> { async fn metrics_handler(config: Arc<Config>) -> Result<impl Reply, Rejection> {
// Collect the disk space metrics. // Collect the disk space metrics.
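The metrics_handler body is truncated above; as a hedged sketch of the usual pattern with the custom REGISTRY (the helper name and error handling here are assumptions), gathering and encoding in the Prometheus text format looks like this:
use prometheus::{Encoder, TextEncoder};

// Gathers every metric registered in REGISTRY and encodes it in the Prometheus text format.
fn encode_registry() -> Result<String, prometheus::Error> {
    let encoder = TextEncoder::new();
    let mut buffer = Vec::new();
    encoder.encode(&REGISTRY.gather(), &mut buffer)?;
    Ok(String::from_utf8(buffer).unwrap_or_default())
}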

View File

@ -18,34 +18,34 @@ use dragonfly_api::common::v2::Priority;
use reqwest::header::HeaderMap; use reqwest::header::HeaderMap;
use tracing::{error, instrument}; use tracing::{error, instrument};
// DRAGONFLY_TAG_HEADER is the header key of tag in http request. /// DRAGONFLY_TAG_HEADER is the header key of tag in http request.
pub const DRAGONFLY_TAG_HEADER: &str = "X-Dragonfly-Tag"; pub const DRAGONFLY_TAG_HEADER: &str = "X-Dragonfly-Tag";
// DRAGONFLY_APPLICATION_HEADER is the header key of application in http request. /// DRAGONFLY_APPLICATION_HEADER is the header key of application in http request.
pub const DRAGONFLY_APPLICATION_HEADER: &str = "X-Dragonfly-Application"; pub const DRAGONFLY_APPLICATION_HEADER: &str = "X-Dragonfly-Application";
// DRAGONFLY_PRIORITY_HEADER is the header key of priority in http request, /// DRAGONFLY_PRIORITY_HEADER is the header key of priority in http request,
// refer to https://github.com/dragonflyoss/api/blob/main/proto/common.proto#L67. /// refer to https://github.com/dragonflyoss/api/blob/main/proto/common.proto#L67.
pub const DRAGONFLY_PRIORITY_HEADER: &str = "X-Dragonfly-Priority"; pub const DRAGONFLY_PRIORITY_HEADER: &str = "X-Dragonfly-Priority";
// DRAGONFLY_REGISTRY_HEADER is the header key of custom address of container registry. /// DRAGONFLY_REGISTRY_HEADER is the header key of custom address of container registry.
pub const DRAGONFLY_REGISTRY_HEADER: &str = "X-Dragonfly-Registry"; pub const DRAGONFLY_REGISTRY_HEADER: &str = "X-Dragonfly-Registry";
// DRAGONFLY_FILTERED_QUERY_PARAMS_HEADER is the header key of the filtered query params in http request, /// DRAGONFLY_FILTERED_QUERY_PARAMS_HEADER is the header key of the filtered query params in http request,
// it lists the query params that are filtered out when generating the task id. /// it lists the query params that are filtered out when generating the task id.
// For example, when the filter is "X-Dragonfly-Filtered-Query-Params: Signature,Expires,ns", /// For example, when the filter is "X-Dragonfly-Filtered-Query-Params: Signature,Expires,ns",
// http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io /// http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
// will generate the same task id. /// will generate the same task id.
// Default value includes the filtered query params of s3, gcs, oss, obs, cos. /// Default value includes the filtered query params of s3, gcs, oss, obs, cos.
pub const DRAGONFLY_FILTERED_QUERY_PARAMS_HEADER: &str = "X-Dragonfly-Filtered-Query-Params"; pub const DRAGONFLY_FILTERED_QUERY_PARAMS_HEADER: &str = "X-Dragonfly-Filtered-Query-Params";
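To illustrate the comment above, here is a hedged sketch of stripping the filtered query params before computing a task id; the helper name and the use of the url crate are assumptions for illustration, not this crate's actual implementation:
use url::Url;

// Removes the filtered query params so that presigned variants of the same object
// (for example, differing only in Expires and Signature) map to one task id.
fn strip_filtered_query_params(raw: &str, filters: &[&str]) -> Result<String, url::ParseError> {
    let mut url = Url::parse(raw)?;
    let kept: Vec<(String, String)> = url
        .query_pairs()
        .filter(|(k, _)| !filters.iter().any(|f| f.eq_ignore_ascii_case(k)))
        .map(|(k, v)| (k.into_owned(), v.into_owned()))
        .collect();
    url.set_query(None);
    if !kept.is_empty() {
        url.query_pairs_mut().extend_pairs(kept);
    }
    Ok(url.to_string())
}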
// DRAGONFLY_USE_P2P_HEADER is the header key of use p2p in http request. /// DRAGONFLY_USE_P2P_HEADER is the header key of use p2p in http request.
// If the value is "true", the request will use P2P technology to distribute /// If the value is "true", the request will use P2P technology to distribute
// the content. If the value is "false" but the url matches the regular expression in the proxy config, /// the content. If the value is "false" but the url matches the regular expression in the proxy config,
// the request will also use P2P technology to distribute the content. /// the request will also use P2P technology to distribute the content.
pub const DRAGONFLY_USE_P2P_HEADER: &str = "X-Dragonfly-Use-P2P"; pub const DRAGONFLY_USE_P2P_HEADER: &str = "X-Dragonfly-Use-P2P";
// get_tag gets the tag from http header. /// get_tag gets the tag from http header.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_tag(header: &HeaderMap) -> Option<String> { pub fn get_tag(header: &HeaderMap) -> Option<String> {
match header.get(DRAGONFLY_TAG_HEADER) { match header.get(DRAGONFLY_TAG_HEADER) {
@ -60,7 +60,7 @@ pub fn get_tag(header: &HeaderMap) -> Option<String> {
} }
} }
// get_application gets the application from http header. /// get_application gets the application from http header.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_application(header: &HeaderMap) -> Option<String> { pub fn get_application(header: &HeaderMap) -> Option<String> {
match header.get(DRAGONFLY_APPLICATION_HEADER) { match header.get(DRAGONFLY_APPLICATION_HEADER) {
@ -75,7 +75,7 @@ pub fn get_application(header: &HeaderMap) -> Option<String> {
} }
} }
// get_priority gets the priority from http header. /// get_priority gets the priority from http header.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_priority(header: &HeaderMap) -> i32 { pub fn get_priority(header: &HeaderMap) -> i32 {
let default_priority = Priority::Level6 as i32; let default_priority = Priority::Level6 as i32;
@ -97,7 +97,7 @@ pub fn get_priority(header: &HeaderMap) -> i32 {
} }
} }
// get_registry gets the custom address of container registry from http header. /// get_registry gets the custom address of container registry from http header.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_registry(header: &HeaderMap) -> Option<String> { pub fn get_registry(header: &HeaderMap) -> Option<String> {
match header.get(DRAGONFLY_REGISTRY_HEADER) { match header.get(DRAGONFLY_REGISTRY_HEADER) {
@ -112,7 +112,7 @@ pub fn get_registry(header: &HeaderMap) -> Option<String> {
} }
} }
// get_filtered_query_params gets the filtered query params from http header. /// get_filtered_query_params gets the filtered query params from http header.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_filtered_query_params( pub fn get_filtered_query_params(
header: &HeaderMap, header: &HeaderMap,
@ -130,7 +130,7 @@ pub fn get_filtered_query_params(
} }
} }
// get_use_p2p gets the use p2p from http header. /// get_use_p2p gets the use p2p from http header.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get_use_p2p(header: &HeaderMap) -> bool { pub fn get_use_p2p(header: &HeaderMap) -> bool {
match header.get(DRAGONFLY_USE_P2P_HEADER) { match header.get(DRAGONFLY_USE_P2P_HEADER) {
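The body of get_use_p2p is cut off above; a hedged sketch of the described behavior (the fallback to false for missing or unparsable values is an assumption) could look like:
use reqwest::header::HeaderMap;

// Reads X-Dragonfly-Use-P2P and treats a parsable boolean "true" as opting into P2P.
pub fn get_use_p2p(header: &HeaderMap) -> bool {
    match header.get(DRAGONFLY_USE_P2P_HEADER) {
        Some(value) => value
            .to_str()
            .ok()
            .and_then(|v| v.parse::<bool>().ok())
            .unwrap_or(false),
        None => false,
    }
}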

View File

@ -67,37 +67,37 @@ use tracing::{error, info, instrument, Span};
pub mod header; pub mod header;
// Response is the response of the proxy server. /// Response is the response of the proxy server.
pub type Response = hyper::Response<BoxBody<Bytes, ClientError>>; pub type Response = hyper::Response<BoxBody<Bytes, ClientError>>;
// Proxy is the proxy server. /// Proxy is the proxy server.
pub struct Proxy { pub struct Proxy {
// config is the configuration of the dfdaemon. /// config is the configuration of the dfdaemon.
config: Arc<Config>, config: Arc<Config>,
// task is the task manager. /// task is the task manager.
task: Arc<Task>, task: Arc<Task>,
// addr is the address of the proxy server. /// addr is the address of the proxy server.
addr: SocketAddr, addr: SocketAddr,
// registry_certs is the certificate of the client for the registry. /// registry_certs is the certificate of the client for the registry.
registry_certs: Arc<Option<Vec<CertificateDer<'static>>>>, registry_certs: Arc<Option<Vec<CertificateDer<'static>>>>,
// server_ca_cert is the CA certificate of the proxy server to /// server_ca_cert is the CA certificate of the proxy server to
// sign the self-signed certificate. /// sign the self-signed certificate.
server_ca_cert: Arc<Option<Certificate>>, server_ca_cert: Arc<Option<Certificate>>,
// shutdown is used to shutdown the proxy server. /// shutdown is used to shutdown the proxy server.
shutdown: shutdown::Shutdown, shutdown: shutdown::Shutdown,
// _shutdown_complete is used to notify the proxy server is shutdown. /// _shutdown_complete is used to notify the proxy server is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>, _shutdown_complete: mpsc::UnboundedSender<()>,
} }
// Proxy implements the proxy server. /// Proxy implements the proxy server.
impl Proxy { impl Proxy {
// new creates a new Proxy. /// new creates a new Proxy.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new( pub fn new(
config: Arc<Config>, config: Arc<Config>,
@ -168,7 +168,7 @@ impl Proxy {
proxy proxy
} }
// run starts the proxy server. /// run starts the proxy server.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn run(&self) -> ClientResult<()> { pub async fn run(&self) -> ClientResult<()> {
let listener = TcpListener::bind(self.addr).await?; let listener = TcpListener::bind(self.addr).await?;
@ -223,7 +223,7 @@ impl Proxy {
} }
} }
// handler handles the request from the client. /// handler handles the request from the client.
#[instrument(skip_all, fields(uri, method))] #[instrument(skip_all, fields(uri, method))]
pub async fn handler( pub async fn handler(
config: Arc<Config>, config: Arc<Config>,
@ -289,7 +289,7 @@ pub async fn handler(
.await .await
} }
// registry_mirror_http_handler handles the http request for the registry mirror by client. /// registry_mirror_http_handler handles the http request for the registry mirror by client.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn registry_mirror_http_handler( pub async fn registry_mirror_http_handler(
config: Arc<Config>, config: Arc<Config>,
@ -309,7 +309,7 @@ pub async fn registry_mirror_http_handler(
.await; .await;
} }
// registry_mirror_https_handler handles the https request for the registry mirror by client. /// registry_mirror_https_handler handles the https request for the registry mirror by client.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn registry_mirror_https_handler( pub async fn registry_mirror_https_handler(
config: Arc<Config>, config: Arc<Config>,
@ -331,7 +331,7 @@ pub async fn registry_mirror_https_handler(
.await; .await;
} }
// http_handler handles the http request by client. /// http_handler handles the http request by client.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn http_handler( pub async fn http_handler(
config: Arc<Config>, config: Arc<Config>,
@ -397,7 +397,7 @@ pub async fn http_handler(
return proxy_http(request).await; return proxy_http(request).await;
} }
// https_handler handles the https request by client. /// https_handler handles the https request by client.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn https_handler( pub async fn https_handler(
config: Arc<Config>, config: Arc<Config>,
@ -439,9 +439,9 @@ pub async fn https_handler(
} }
} }
// upgraded_tunnel handles the upgraded connection. If the ca_cert is not set, use the /// upgraded_tunnel handles the upgraded connection. If the ca_cert is not set, use the
// self-signed certificate. Otherwise, use the CA certificate to sign the /// self-signed certificate. Otherwise, use the CA certificate to sign the
// self-signed certificate. /// self-signed certificate.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn upgraded_tunnel( async fn upgraded_tunnel(
config: Arc<Config>, config: Arc<Config>,
@ -503,7 +503,7 @@ async fn upgraded_tunnel(
Ok(()) Ok(())
} }
// upgraded_handler handles the upgraded https request from the client. /// upgraded_handler handles the upgraded https request from the client.
#[instrument(skip_all, fields(uri, method))] #[instrument(skip_all, fields(uri, method))]
pub async fn upgraded_handler( pub async fn upgraded_handler(
config: Arc<Config>, config: Arc<Config>,
@ -579,7 +579,7 @@ pub async fn upgraded_handler(
return proxy_http(request).await; return proxy_http(request).await;
} }
// proxy_by_dfdaemon proxies the request via the dfdaemon. /// proxy_by_dfdaemon proxies the request via the dfdaemon.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn proxy_by_dfdaemon( async fn proxy_by_dfdaemon(
config: Arc<Config>, config: Arc<Config>,
@ -839,7 +839,7 @@ async fn proxy_by_dfdaemon(
} }
} }
// proxy_http proxies the HTTP request directly to the remote server. /// proxy_http proxies the HTTP request directly to the remote server.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn proxy_http(request: Request<hyper::body::Incoming>) -> ClientResult<Response> { async fn proxy_http(request: Request<hyper::body::Incoming>) -> ClientResult<Response> {
let Some(host) = request.uri().host() else { let Some(host) = request.uri().host() else {
@ -866,7 +866,7 @@ async fn proxy_http(request: Request<hyper::body::Incoming>) -> ClientResult<Res
Ok(response.map(|b| b.map_err(ClientError::from).boxed())) Ok(response.map(|b| b.map_err(ClientError::from).boxed()))
} }
// proxy_https proxies the HTTPS request directly to the remote server. /// proxy_https proxies the HTTPS request directly to the remote server.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn proxy_https( async fn proxy_https(
request: Request<hyper::body::Incoming>, request: Request<hyper::body::Incoming>,
@ -904,7 +904,7 @@ async fn proxy_https(
Ok(response.map(|b| b.map_err(ClientError::from).boxed())) Ok(response.map(|b| b.map_err(ClientError::from).boxed()))
} }
// make_registry_mirror_request makes a registry mirror request by the request. /// make_registry_mirror_request makes a registry mirror request by the request.
#[instrument(skip_all)] #[instrument(skip_all)]
fn make_registry_mirror_request( fn make_registry_mirror_request(
config: Arc<Config>, config: Arc<Config>,
@ -940,7 +940,7 @@ fn make_registry_mirror_request(
Ok(request) Ok(request)
} }
// make_download_task_request makes a download task request by the request. /// make_download_task_request makes a download task request by the request.
#[instrument(skip_all)] #[instrument(skip_all)]
fn make_download_task_request( fn make_download_task_request(
config: Arc<Config>, config: Arc<Config>,
@ -983,7 +983,7 @@ fn make_download_task_request(
}) })
} }
// make_download_url makes a download url by the given uri. /// make_download_url makes a download url by the given uri.
#[instrument(skip_all)] #[instrument(skip_all)]
fn make_download_url( fn make_download_url(
uri: &hyper::Uri, uri: &hyper::Uri,
@ -1009,7 +1009,7 @@ fn make_download_url(
.to_string()) .to_string())
} }
// make_response_headers makes the response headers. /// make_response_headers makes the response headers.
#[instrument(skip_all)] #[instrument(skip_all)]
fn make_response_headers( fn make_response_headers(
mut download_task_started_response: DownloadTaskStartedResponse, mut download_task_started_response: DownloadTaskStartedResponse,
@ -1035,14 +1035,14 @@ fn make_response_headers(
hashmap_to_hyper_header_map(&download_task_started_response.response_header) hashmap_to_hyper_header_map(&download_task_started_response.response_header)
} }
// find_matching_rule checks whether the dfdaemon should be used to download the task. /// find_matching_rule checks whether the dfdaemon should be used to download the task.
// If the dfdaemon should be used, it returns the matched rule. /// If the dfdaemon should be used, it returns the matched rule.
#[instrument(skip_all)] #[instrument(skip_all)]
fn find_matching_rule(rules: Option<Vec<Rule>>, url: &str) -> Option<Rule> { fn find_matching_rule(rules: Option<Vec<Rule>>, url: &str) -> Option<Rule> {
rules?.iter().find(|rule| rule.regex.is_match(url)).cloned() rules?.iter().find(|rule| rule.regex.is_match(url)).cloned()
} }
// make_error_response makes an error response with the given status and message. /// make_error_response makes an error response with the given status and message.
#[instrument(skip_all)] #[instrument(skip_all)]
fn make_error_response(status: http::StatusCode, header: Option<http::HeaderMap>) -> Response { fn make_error_response(status: http::StatusCode, header: Option<http::HeaderMap>) -> Response {
let mut response = Response::new(empty()); let mut response = Response::new(empty());
@ -1056,7 +1056,7 @@ fn make_error_response(status: http::StatusCode, header: Option<http::HeaderMap>
response response
} }
// empty returns an empty body. /// empty returns an empty body.
#[instrument(skip_all)] #[instrument(skip_all)]
fn empty() -> BoxBody<Bytes, ClientError> { fn empty() -> BoxBody<Bytes, ClientError> {
Empty::<Bytes>::new() Empty::<Bytes>::new()
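The empty() helper is truncated above; as a hedged sketch of the standard hyper 1.x pattern (the exact mapping into ClientError is assumed), the full helper typically reads:
use bytes::Bytes;
use http_body_util::{combinators::BoxBody, BodyExt, Empty};

// Builds an empty response body and boxes it into the BoxBody type used by Response.
fn empty() -> BoxBody<Bytes, ClientError> {
    Empty::<Bytes>::new()
        .map_err(|never| match never {})
        .boxed()
}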

View File

@ -54,27 +54,27 @@ use tracing::{error, info, instrument, Instrument};
use super::*; use super::*;
// CacheTask represents a cache task manager. /// CacheTask represents a cache task manager.
pub struct CacheTask { pub struct CacheTask {
// config is the configuration of the dfdaemon. /// config is the configuration of the dfdaemon.
config: Arc<Config>, config: Arc<Config>,
// id_generator is the id generator. /// id_generator is the id generator.
pub id_generator: Arc<IDGenerator>, pub id_generator: Arc<IDGenerator>,
// storage is the local storage. /// storage is the local storage.
storage: Arc<Storage>, storage: Arc<Storage>,
// scheduler_client is the grpc client of the scheduler. /// scheduler_client is the grpc client of the scheduler.
pub scheduler_client: Arc<SchedulerClient>, pub scheduler_client: Arc<SchedulerClient>,
// piece is the piece manager. /// piece is the piece manager.
pub piece: Arc<piece::Piece>, pub piece: Arc<piece::Piece>,
} }
// CacheTask implements the cache task manager. /// CacheTask implements the cache task manager.
impl CacheTask { impl CacheTask {
// new creates a new CacheTask. /// new creates a new CacheTask.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new( pub fn new(
config: Arc<Config>, config: Arc<Config>,
@ -100,7 +100,7 @@ impl CacheTask {
} }
} }
// create_persistent creates a persistent cache task from local. /// create_persistent creates a persistent cache task from local.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn create_persistent( pub async fn create_persistent(
&self, &self,
@ -224,7 +224,7 @@ impl CacheTask {
} }
} }
// download_started updates the metadata of the cache task when the cache task downloads started. /// download_started updates the metadata of the cache task when the cache task downloads started.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download_started( pub async fn download_started(
&self, &self,
@ -253,20 +253,20 @@ impl CacheTask {
) )
} }
// download_finished updates the metadata of the cache task when the task downloads finished. /// download_finished updates the metadata of the cache task when the task downloads finished.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_finished(&self, id: &str) -> ClientResult<metadata::CacheTask> { pub fn download_finished(&self, id: &str) -> ClientResult<metadata::CacheTask> {
self.storage.download_cache_task_finished(id) self.storage.download_cache_task_finished(id)
} }
// download_failed updates the metadata of the cache task when the task download fails. /// download_failed updates the metadata of the cache task when the task download fails.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download_failed(&self, id: &str) -> ClientResult<()> { pub async fn download_failed(&self, id: &str) -> ClientResult<()> {
let _ = self.storage.download_cache_task_failed(id).await?; let _ = self.storage.download_cache_task_failed(id).await?;
Ok(()) Ok(())
} }
// hard_link_or_copy hard links or copies the cache task content to the destination. /// hard_link_or_copy hard links or copies the cache task content to the destination.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn hard_link_or_copy( pub async fn hard_link_or_copy(
&self, &self,
@ -276,7 +276,7 @@ impl CacheTask {
self.storage.hard_link_or_copy_cache_task(task, to).await self.storage.hard_link_or_copy_cache_task(task, to).await
} }
// download downloads a cache task. /// download downloads a cache task.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download( pub async fn download(
@ -455,7 +455,7 @@ impl CacheTask {
Ok(()) Ok(())
} }
// download_partial_with_scheduler downloads a partial cache task with scheduler. /// download_partial_with_scheduler downloads a partial cache task with scheduler.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
#[instrument(skip_all)] #[instrument(skip_all)]
async fn download_partial_with_scheduler( async fn download_partial_with_scheduler(
@ -758,7 +758,7 @@ impl CacheTask {
Ok(finished_pieces) Ok(finished_pieces)
} }
// download_partial_with_scheduler_from_remote_peer downloads a partial cache task with scheduler from a remote peer. /// download_partial_with_scheduler_from_remote_peer downloads a partial cache task with scheduler from a remote peer.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
#[instrument(skip_all)] #[instrument(skip_all)]
async fn download_partial_with_scheduler_from_remote_peer( async fn download_partial_with_scheduler_from_remote_peer(
@ -984,7 +984,7 @@ impl CacheTask {
Ok(finished_pieces) Ok(finished_pieces)
} }
// download_partial_from_local_peer downloads a partial cache task from a local peer. /// download_partial_from_local_peer downloads a partial cache task from a local peer.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
#[instrument(skip_all)] #[instrument(skip_all)]
async fn download_partial_from_local_peer( async fn download_partial_from_local_peer(
@ -1073,7 +1073,7 @@ impl CacheTask {
Ok(finished_pieces) Ok(finished_pieces)
} }
// stat stats the cache task from the scheduler. /// stat stats the cache task from the scheduler.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn stat(&self, task_id: &str, host_id: &str) -> ClientResult<CommonCacheTask> { pub async fn stat(&self, task_id: &str, host_id: &str) -> ClientResult<CommonCacheTask> {
self.scheduler_client self.scheduler_client
@ -1084,7 +1084,7 @@ impl CacheTask {
.await .await
} }
// delete deletes a cache task. /// delete deletes a cache task.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn delete(&self, task_id: &str, host_id: &str) -> ClientResult<()> { pub async fn delete(&self, task_id: &str, host_id: &str) -> ClientResult<()> {
self.scheduler_client self.scheduler_client

View File

@ -38,48 +38,48 @@ use tracing::{error, info, instrument, Span};
use super::*; use super::*;
// MAX_PIECE_COUNT is the maximum piece count. If the piece count is larger /// MAX_PIECE_COUNT is the maximum piece count. If the piece count is larger
// than MAX_PIECE_COUNT, the piece length will be optimized by the file length. /// than MAX_PIECE_COUNT, the piece length will be optimized by the file length.
// When the piece length reaches MAX_PIECE_LENGTH, the piece count may /// When the piece length reaches MAX_PIECE_LENGTH, the piece count may
// still be larger than MAX_PIECE_COUNT. /// still be larger than MAX_PIECE_COUNT.
const MAX_PIECE_COUNT: u64 = 500; const MAX_PIECE_COUNT: u64 = 500;
// MIN_PIECE_LENGTH is the minimum piece length. /// MIN_PIECE_LENGTH is the minimum piece length.
const MIN_PIECE_LENGTH: u64 = 4 * 1024 * 1024; const MIN_PIECE_LENGTH: u64 = 4 * 1024 * 1024;
// MAX_PIECE_LENGTH is the maximum piece length. /// MAX_PIECE_LENGTH is the maximum piece length.
const MAX_PIECE_LENGTH: u64 = 16 * 1024 * 1024; const MAX_PIECE_LENGTH: u64 = 16 * 1024 * 1024;
// PieceLengthStrategy sets the optimization strategy of piece length. /// PieceLengthStrategy sets the optimization strategy of piece length.
pub enum PieceLengthStrategy { pub enum PieceLengthStrategy {
// OptimizeByFileLength optimizes the piece length by the file length. /// OptimizeByFileLength optimizes the piece length by the file length.
OptimizeByFileLength, OptimizeByFileLength,
} }
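
To make the interplay of these bounds concrete, here is an assumed sketch of the OptimizeByFileLength idea: target at most MAX_PIECE_COUNT pieces, then clamp the result into the allowed piece length range. It illustrates the constants above, not the client's exact formula.

const MAX_PIECE_COUNT: u64 = 500;
const MIN_PIECE_LENGTH: u64 = 4 * 1024 * 1024;
const MAX_PIECE_LENGTH: u64 = 16 * 1024 * 1024;

// Derive a piece length from the content length: aim for at most MAX_PIECE_COUNT
// pieces, then clamp to [MIN_PIECE_LENGTH, MAX_PIECE_LENGTH].
fn optimize_piece_length_by_file_length(content_length: u64) -> u64 {
    (content_length / MAX_PIECE_COUNT).clamp(MIN_PIECE_LENGTH, MAX_PIECE_LENGTH)
}

fn main() {
    // A 10 GiB file is capped at 16 MiB pieces, which yields roughly 640 pieces,
    // i.e. more than MAX_PIECE_COUNT, exactly the case the comment above describes.
    assert_eq!(
        optimize_piece_length_by_file_length(10 * 1024 * 1024 * 1024),
        MAX_PIECE_LENGTH
    );

    // A tiny file is clamped up to the minimum piece length.
    assert_eq!(optimize_piece_length_by_file_length(1024), MIN_PIECE_LENGTH);
}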
// Piece represents a piece manager. /// Piece represents a piece manager.
pub struct Piece { pub struct Piece {
// config is the configuration of the dfdaemon. /// config is the configuration of the dfdaemon.
config: Arc<Config>, config: Arc<Config>,
// id_generator is the id generator. /// id_generator is the id generator.
id_generator: Arc<IDGenerator>, id_generator: Arc<IDGenerator>,
// storage is the local storage. /// storage is the local storage.
storage: Arc<Storage>, storage: Arc<Storage>,
// backend_factory is the backend factory. /// backend_factory is the backend factory.
backend_factory: Arc<BackendFactory>, backend_factory: Arc<BackendFactory>,
// download_rate_limiter is the rate limiter of the download speed in bps(bytes per second). /// download_rate_limiter is the rate limiter of the download speed in bps(bytes per second).
download_rate_limiter: Arc<RateLimiter>, download_rate_limiter: Arc<RateLimiter>,
// upload_rate_limiter is the rate limiter of the upload speed in bps(bytes per second). /// upload_rate_limiter is the rate limiter of the upload speed in bps(bytes per second).
upload_rate_limiter: Arc<RateLimiter>, upload_rate_limiter: Arc<RateLimiter>,
} }
// Piece implements the piece manager. /// Piece implements the piece manager.
impl Piece { impl Piece {
// new returns a new Piece. /// new returns a new Piece.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new( pub fn new(
config: Arc<Config>, config: Arc<Config>,
@ -110,13 +110,13 @@ impl Piece {
} }
} }
// get gets a piece from the local storage. /// get gets a piece from the local storage.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn get(&self, task_id: &str, number: u32) -> Result<Option<metadata::Piece>> { pub fn get(&self, task_id: &str, number: u32) -> Result<Option<metadata::Piece>> {
self.storage.get_piece(task_id, number) self.storage.get_piece(task_id, number)
} }
// calculate_interested calculates the interested pieces by content_length and range. /// calculate_interested calculates the interested pieces by content_length and range.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn calculate_interested( pub fn calculate_interested(
&self, &self,
@ -230,7 +230,7 @@ impl Piece {
Ok(pieces) Ok(pieces)
} }
// remove_finished_from_interested removes the finished pieces from interested pieces. /// remove_finished_from_interested removes the finished pieces from interested pieces.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn remove_finished_from_interested( pub fn remove_finished_from_interested(
&self, &self,
@ -248,7 +248,7 @@ impl Piece {
.collect::<Vec<metadata::Piece>>() .collect::<Vec<metadata::Piece>>()
} }
// merge_finished_pieces merges the finished pieces with the already finished pieces. /// merge_finished_pieces merges the finished pieces with the already finished pieces.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn merge_finished_pieces( pub fn merge_finished_pieces(
&self, &self,
@ -269,7 +269,7 @@ impl Piece {
pieces.into_values().collect() pieces.into_values().collect()
} }
// calculate_piece_length calculates the piece length by content_length. /// calculate_piece_length calculates the piece length by content_length.
pub fn calculate_piece_length( pub fn calculate_piece_length(
&self, &self,
strategy: PieceLengthStrategy, strategy: PieceLengthStrategy,
@ -292,7 +292,7 @@ impl Piece {
} }
} }
// upload_from_local_peer_into_async_read uploads a single piece from a local peer. /// upload_from_local_peer_into_async_read uploads a single piece from a local peer.
#[instrument(skip_all, fields(piece_id))] #[instrument(skip_all, fields(piece_id))]
pub async fn upload_from_local_peer_into_async_read( pub async fn upload_from_local_peer_into_async_read(
&self, &self,
@ -323,7 +323,7 @@ impl Piece {
}) })
} }
// download_from_local_peer_into_async_read downloads a single piece from a local peer. /// download_from_local_peer_into_async_read downloads a single piece from a local peer.
#[instrument(skip_all, fields(piece_id))] #[instrument(skip_all, fields(piece_id))]
pub async fn download_from_local_peer_into_async_read( pub async fn download_from_local_peer_into_async_read(
&self, &self,
@ -345,8 +345,8 @@ impl Piece {
self.storage.upload_piece(task_id, number, range).await self.storage.upload_piece(task_id, number, range).await
} }
// download_from_local_peer downloads a single piece from a local peer. It fakes the piece /// download_from_local_peer downloads a single piece from a local peer. It fakes the piece
// download from the local peer and only collects the metrics. /// download from the local peer and only collects the metrics.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_from_local_peer(&self, task_id: &str, length: u64) { pub fn download_from_local_peer(&self, task_id: &str, length: u64) {
collect_download_piece_traffic_metrics( collect_download_piece_traffic_metrics(
@ -356,7 +356,7 @@ impl Piece {
); );
} }
// download_from_remote_peer downloads a single piece from a remote peer. /// download_from_remote_peer downloads a single piece from a remote peer.
#[instrument(skip_all, fields(piece_id))] #[instrument(skip_all, fields(piece_id))]
pub async fn download_from_remote_peer( pub async fn download_from_remote_peer(
&self, &self,
@ -482,7 +482,7 @@ impl Piece {
}) })
} }
// download_from_source downloads a single piece from the source. /// download_from_source downloads a single piece from the source.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
#[instrument(skip_all, fields(piece_id))] #[instrument(skip_all, fields(piece_id))]
pub async fn download_from_source( pub async fn download_from_source(

View File

@ -29,51 +29,51 @@ use tokio::task::JoinSet;
use tokio_stream::StreamExt; use tokio_stream::StreamExt;
use tracing::{error, info, instrument, Instrument}; use tracing::{error, info, instrument, Instrument};
// CollectedParent is the parent peer collected from the remote peer. /// CollectedParent is the parent peer collected from the remote peer.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct CollectedParent { pub struct CollectedParent {
// id is the id of the parent. /// id is the id of the parent.
pub id: String, pub id: String,
// host is the host of the parent. /// host is the host of the parent.
pub host: Option<Host>, pub host: Option<Host>,
} }
// CollectedPiece is the piece collected from a peer. /// CollectedPiece is the piece collected from a peer.
pub struct CollectedPiece { pub struct CollectedPiece {
// number is the piece number. /// number is the piece number.
pub number: u32, pub number: u32,
// length is the piece length. /// length is the piece length.
pub length: u64, pub length: u64,
// parent is the parent peer. /// parent is the parent peer.
pub parent: CollectedParent, pub parent: CollectedParent,
} }
// PieceCollector is used to collect pieces from peers. /// PieceCollector is used to collect pieces from peers.
pub struct PieceCollector { pub struct PieceCollector {
// config is the configuration of the dfdaemon. /// config is the configuration of the dfdaemon.
config: Arc<Config>, config: Arc<Config>,
// host_id is the id of the host. /// host_id is the id of the host.
host_id: String, host_id: String,
// task_id is the id of the task. /// task_id is the id of the task.
task_id: String, task_id: String,
// parents is the parent peers. /// parents is the parent peers.
parents: Vec<CollectedParent>, parents: Vec<CollectedParent>,
// interested_pieces is the pieces the collector is interested in. /// interested_pieces is the pieces the collector is interested in.
interested_pieces: Vec<metadata::Piece>, interested_pieces: Vec<metadata::Piece>,
// collected_pieces is the pieces collected from peers. /// collected_pieces is the pieces collected from peers.
collected_pieces: Arc<DashMap<u32, String>>, collected_pieces: Arc<DashMap<u32, String>>,
} }
impl PieceCollector { impl PieceCollector {
// new creates a new PieceCollector. /// new creates a new PieceCollector.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new( pub fn new(
config: Arc<Config>, config: Arc<Config>,
@ -100,7 +100,7 @@ impl PieceCollector {
} }
} }
// run runs the piece collector. /// run runs the piece collector.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn run(&self) -> Receiver<CollectedPiece> { pub async fn run(&self) -> Receiver<CollectedPiece> {
let host_id = self.host_id.clone(); let host_id = self.host_id.clone();
@ -132,7 +132,7 @@ impl PieceCollector {
collected_piece_rx collected_piece_rx
} }
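
For orientation, consuming the returned receiver usually looks like the following; this assumes the receiver is a tokio mpsc::Receiver<CollectedPiece> and that the collector has already been constructed and run elsewhere.

// A hedged sketch of draining the collected pieces; CollectedPiece comes from this module.
async fn drain_collected_pieces(mut rx: tokio::sync::mpsc::Receiver<CollectedPiece>) {
    while let Some(piece) = rx.recv().await {
        // Each collected piece tells the downloader which parent can serve which piece.
        println!(
            "piece {} (length {}) is available from parent {}",
            piece.number, piece.length, piece.parent.id
        );
    }
}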
// collect_from_remote_peers collects pieces from remote peers. /// collect_from_remote_peers collects pieces from remote peers.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn collect_from_remote_peers( async fn collect_from_remote_peers(
host_id: String, host_id: String,

View File

@ -64,30 +64,30 @@ use tracing::{error, info, instrument, Instrument};
use super::*; use super::*;
// Task represents a task manager. /// Task represents a task manager.
pub struct Task { pub struct Task {
// config is the configuration of the dfdaemon. /// config is the configuration of the dfdaemon.
config: Arc<Config>, config: Arc<Config>,
// id_generator is the id generator. /// id_generator is the id generator.
pub id_generator: Arc<IDGenerator>, pub id_generator: Arc<IDGenerator>,
// storage is the local storage. /// storage is the local storage.
storage: Arc<Storage>, storage: Arc<Storage>,
// scheduler_client is the grpc client of the scheduler. /// scheduler_client is the grpc client of the scheduler.
pub scheduler_client: Arc<SchedulerClient>, pub scheduler_client: Arc<SchedulerClient>,
// backend_factory is the backend factory. /// backend_factory is the backend factory.
pub backend_factory: Arc<BackendFactory>, pub backend_factory: Arc<BackendFactory>,
// piece is the piece manager. /// piece is the piece manager.
pub piece: Arc<piece::Piece>, pub piece: Arc<piece::Piece>,
} }
// Task implements the task manager. /// Task implements the task manager.
impl Task { impl Task {
// new returns a new Task. /// new returns a new Task.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new( pub fn new(
config: Arc<Config>, config: Arc<Config>,
@ -114,7 +114,7 @@ impl Task {
} }
} }
// download_started updates the metadata of the task when the task download starts. /// download_started updates the metadata of the task when the task download starts.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download_started( pub async fn download_started(
&self, &self,
@ -208,31 +208,31 @@ impl Task {
) )
} }
// download_finished updates the metadata of the task when the task download finishes. /// download_finished updates the metadata of the task when the task download finishes.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn download_finished(&self, id: &str) -> ClientResult<metadata::Task> { pub fn download_finished(&self, id: &str) -> ClientResult<metadata::Task> {
self.storage.download_task_finished(id) self.storage.download_task_finished(id)
} }
// download_failed updates the metadata of the task when the task download fails. /// download_failed updates the metadata of the task when the task download fails.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download_failed(&self, id: &str) -> ClientResult<()> { pub async fn download_failed(&self, id: &str) -> ClientResult<()> {
self.storage.download_task_failed(id).await.map(|_| ()) self.storage.download_task_failed(id).await.map(|_| ())
} }
// prefetch_task_started updates the metadata of the task when the task prefetch starts. /// prefetch_task_started updates the metadata of the task when the task prefetch starts.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn prefetch_task_started(&self, id: &str) -> ClientResult<metadata::Task> { pub async fn prefetch_task_started(&self, id: &str) -> ClientResult<metadata::Task> {
self.storage.prefetch_task_started(id).await self.storage.prefetch_task_started(id).await
} }
// prefetch_task_failed updates the metadata of the task when the task prefetch fails. /// prefetch_task_failed updates the metadata of the task when the task prefetch fails.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn prefetch_task_failed(&self, id: &str) -> ClientResult<metadata::Task> { pub async fn prefetch_task_failed(&self, id: &str) -> ClientResult<metadata::Task> {
self.storage.prefetch_task_failed(id).await self.storage.prefetch_task_failed(id).await
} }
// hard_link_or_copy hard links or copies the task content to the destination. /// hard_link_or_copy hard links or copies the task content to the destination.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn hard_link_or_copy( pub async fn hard_link_or_copy(
&self, &self,
@ -243,7 +243,7 @@ impl Task {
self.storage.hard_link_or_copy_task(task, to, range).await self.storage.hard_link_or_copy_task(task, to, range).await
} }
// download downloads a task. /// download downloads a task.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn download( pub async fn download(
@ -462,7 +462,7 @@ impl Task {
Ok(()) Ok(())
} }
// download_partial_with_scheduler downloads a partial task with scheduler. /// download_partial_with_scheduler downloads a partial task with scheduler.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
#[instrument(skip_all)] #[instrument(skip_all)]
async fn download_partial_with_scheduler( async fn download_partial_with_scheduler(
@ -894,7 +894,7 @@ impl Task {
Ok(finished_pieces) Ok(finished_pieces)
} }
// download_partial_with_scheduler_from_remote_peer downloads a partial task with scheduler from a remote peer. /// download_partial_with_scheduler_from_remote_peer downloads a partial task with scheduler from a remote peer.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
#[instrument(skip_all)] #[instrument(skip_all)]
async fn download_partial_with_scheduler_from_remote_peer( async fn download_partial_with_scheduler_from_remote_peer(
@ -1160,7 +1160,7 @@ impl Task {
Ok(finished_pieces) Ok(finished_pieces)
} }
// download_partial_with_scheduler_from_source downloads a partial task with scheduler from the source. /// download_partial_with_scheduler_from_source downloads a partial task with scheduler from the source.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
#[instrument(skip_all)] #[instrument(skip_all)]
async fn download_partial_with_scheduler_from_source( async fn download_partial_with_scheduler_from_source(
@ -1407,7 +1407,7 @@ impl Task {
Ok(finished_pieces) Ok(finished_pieces)
} }
// download_partial_from_local_peer downloads a partial task from a local peer. /// download_partial_from_local_peer downloads a partial task from a local peer.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
#[instrument(skip_all)] #[instrument(skip_all)]
async fn download_partial_from_local_peer( async fn download_partial_from_local_peer(
@ -1500,7 +1500,7 @@ impl Task {
Ok(finished_pieces) Ok(finished_pieces)
} }
// download_partial_from_source downloads a partial task from the source. /// download_partial_from_source downloads a partial task from the source.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
#[instrument(skip_all)] #[instrument(skip_all)]
async fn download_partial_from_source( async fn download_partial_from_source(
@ -1663,7 +1663,7 @@ impl Task {
)) ))
} }
// stat returns the task metadata. /// stat returns the task metadata.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn stat(&self, task_id: &str, host_id: &str) -> ClientResult<CommonTask> { pub async fn stat(&self, task_id: &str, host_id: &str) -> ClientResult<CommonTask> {
let task = self let task = self
@ -1681,7 +1681,7 @@ impl Task {
Ok(task) Ok(task)
} }
// Delete a task and reclaim local storage. /// Delete a task and reclaim local storage.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn delete(&self, task_id: &str, host_id: &str) -> ClientResult<()> { pub async fn delete(&self, task_id: &str, host_id: &str) -> ClientResult<()> {
let task = self.storage.get_task(task_id).map_err(|err| { let task = self.storage.get_task(task_id).map_err(|err| {

View File

@ -18,22 +18,22 @@ use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::broadcast; use tokio::sync::broadcast;
use tracing::info; use tracing::info;
// Shutdown is a signal to shut down. /// Shutdown is a signal to shut down.
#[derive(Debug)] #[derive(Debug)]
pub struct Shutdown { pub struct Shutdown {
// is_shutdown is true if the shutdown signal has been received. /// is_shutdown is true if the shutdown signal has been received.
is_shutdown: bool, is_shutdown: bool,
// sender is used to send the shutdown signal. /// sender is used to send the shutdown signal.
sender: broadcast::Sender<()>, sender: broadcast::Sender<()>,
// receiver is used to receive the shutdown signal. /// receiver is used to receive the shutdown signal.
receiver: broadcast::Receiver<()>, receiver: broadcast::Receiver<()>,
} }
// Shutdown implements the shutdown signal. /// Shutdown implements the shutdown signal.
impl Shutdown { impl Shutdown {
// new creates a new Shutdown. /// new creates a new Shutdown.
pub fn new() -> Shutdown { pub fn new() -> Shutdown {
let (sender, receiver) = broadcast::channel(1); let (sender, receiver) = broadcast::channel(1);
Self { Self {
@ -43,17 +43,17 @@ impl Shutdown {
} }
} }
// is_shutdown returns true if the shutdown signal has been received. /// is_shutdown returns true if the shutdown signal has been received.
pub fn is_shutdown(&self) -> bool { pub fn is_shutdown(&self) -> bool {
self.is_shutdown self.is_shutdown
} }
// trigger triggers the shutdown signal. /// trigger triggers the shutdown signal.
pub fn trigger(&self) { pub fn trigger(&self) {
let _ = self.sender.send(()); let _ = self.sender.send(());
} }
// recv waits for the shutdown signal. /// recv waits for the shutdown signal.
pub async fn recv(&mut self) { pub async fn recv(&mut self) {
// Return immediately if the shutdown signal has already been received. // Return immediately if the shutdown signal has already been received.
if self.is_shutdown { if self.is_shutdown {
@ -76,9 +76,9 @@ impl Default for Shutdown {
} }
} }
// Shutdown implements the Clone trait. /// Shutdown implements the Clone trait.
impl Clone for Shutdown { impl Clone for Shutdown {
// clone returns a new Shutdown. /// clone returns a new Shutdown.
fn clone(&self) -> Self { fn clone(&self) -> Self {
let sender = self.sender.clone(); let sender = self.sender.clone();
let receiver = self.sender.subscribe(); let receiver = self.sender.subscribe();
@ -90,8 +90,8 @@ impl Clone for Shutdown {
} }
} }
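
A minimal, self-contained sketch of the same broadcast-based shutdown pattern used here; it only mirrors the idea (subscribe a receiver per task, send once to wake every subscriber) and does not reuse the Shutdown type above.

use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    let (sender, mut receiver) = broadcast::channel::<()>(1);

    let worker = tokio::spawn(async move {
        // The worker waits until the shutdown signal is broadcast.
        let _ = receiver.recv().await;
        println!("worker: shutdown received, cleaning up");
    });

    // Trigger the shutdown; every subscriber observes it.
    let _ = sender.send(());
    worker.await.unwrap();
}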
// shutdown_signal returns a future that will resolve when a SIGINT, SIGTERM or SIGQUIT signal is /// shutdown_signal returns a future that will resolve when a SIGINT, SIGTERM or SIGQUIT signal is
// received by the process. /// received by the process.
pub async fn shutdown_signal() { pub async fn shutdown_signal() {
let mut sigint = signal(SignalKind::interrupt()).unwrap(); let mut sigint = signal(SignalKind::interrupt()).unwrap();
let mut sigterm = signal(SignalKind::terminate()).unwrap(); let mut sigterm = signal(SignalKind::terminate()).unwrap();

View File

@ -24,24 +24,24 @@ use tokio::sync::mpsc;
use tracing::{error, info, instrument}; use tracing::{error, info, instrument};
use warp::{Filter, Rejection, Reply}; use warp::{Filter, Rejection, Reply};
// DEFAULT_PROFILER_SECONDS is the default number of seconds to profile for. /// DEFAULT_PROFILER_SECONDS is the default number of seconds to profile for.
const DEFAULT_PROFILER_SECONDS: u64 = 10; const DEFAULT_PROFILER_SECONDS: u64 = 10;
// DEFAULT_PROFILER_FREQUENCY is the default sampling frequency for profiling. /// DEFAULT_PROFILER_FREQUENCY is the default sampling frequency for profiling.
const DEFAULT_PROFILER_FREQUENCY: i32 = 1000; const DEFAULT_PROFILER_FREQUENCY: i32 = 1000;
// PProfProfileQueryParams is the query params to start profiling. /// PProfProfileQueryParams is the query params to start profiling.
#[derive(Deserialize, Serialize)] #[derive(Deserialize, Serialize)]
#[serde(default)] #[serde(default)]
pub struct PProfProfileQueryParams { pub struct PProfProfileQueryParams {
// seconds is the number of seconds to profile for. /// seconds is the number of seconds to profile for.
pub seconds: u64, pub seconds: u64,
// frequency is the sampling frequency for profiling. /// frequency is the sampling frequency for profiling.
pub frequency: i32, pub frequency: i32,
} }
// PProfProfileQueryParams implements the Default trait. /// PProfProfileQueryParams implements the Default trait.
impl Default for PProfProfileQueryParams { impl Default for PProfProfileQueryParams {
fn default() -> Self { fn default() -> Self {
Self { Self {
@ -51,22 +51,22 @@ impl Default for PProfProfileQueryParams {
} }
} }
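
To make the effect of #[serde(default)] together with this Default impl concrete, here is a small sketch that parses a query string directly with serde_urlencoded (an assumption about the parsing layer; warp's query filter typically relies on the same mechanism): missing parameters fall back to the defaults.

use serde::{Deserialize, Serialize};

#[derive(Deserialize, Serialize)]
#[serde(default)]
struct PProfProfileQueryParams {
    seconds: u64,
    frequency: i32,
}

impl Default for PProfProfileQueryParams {
    fn default() -> Self {
        Self {
            seconds: 10,
            frequency: 1000,
        }
    }
}

fn main() {
    // Only seconds is given, so frequency falls back to the Default impl.
    let params: PProfProfileQueryParams = serde_urlencoded::from_str("seconds=30").unwrap();
    assert_eq!(params.seconds, 30);
    assert_eq!(params.frequency, 1000);
}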
// Stats is the stats server. /// Stats is the stats server.
#[derive(Debug)] #[derive(Debug)]
pub struct Stats { pub struct Stats {
// addr is the address of the stats server. /// addr is the address of the stats server.
addr: SocketAddr, addr: SocketAddr,
// shutdown is used to shutdown the stats server. /// shutdown is used to shutdown the stats server.
shutdown: shutdown::Shutdown, shutdown: shutdown::Shutdown,
// _shutdown_complete is used to notify the stats server is shutdown. /// _shutdown_complete is used to notify the stats server is shutdown.
_shutdown_complete: mpsc::UnboundedSender<()>, _shutdown_complete: mpsc::UnboundedSender<()>,
} }
// Stats implements the stats server. /// Stats implements the stats server.
impl Stats { impl Stats {
// new creates a new Stats. /// new creates a new Stats.
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn new( pub fn new(
addr: SocketAddr, addr: SocketAddr,
@ -80,7 +80,7 @@ impl Stats {
} }
} }
// run starts the stats server. /// run starts the stats server.
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn run(&self) { pub async fn run(&self) {
// Clone the shutdown channel. // Clone the shutdown channel.
@ -114,7 +114,7 @@ impl Stats {
} }
} }
// pprof_profile_handler handles the pprof profile request. /// pprof_profile_handler handles the pprof profile request.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn pprof_profile_handler( async fn pprof_profile_handler(
query_params: PProfProfileQueryParams, query_params: PProfProfileQueryParams,
@ -149,7 +149,7 @@ impl Stats {
Ok(body) Ok(body)
} }
// pprof_heap_handler handles the pprof heap request. /// pprof_heap_handler handles the pprof heap request.
#[instrument(skip_all)] #[instrument(skip_all)]
async fn pprof_heap_handler() -> Result<impl Reply, Rejection> { async fn pprof_heap_handler() -> Result<impl Reply, Rejection> {
info!("start heap profiling"); info!("start heap profiling");

View File

@ -31,7 +31,7 @@ use tracing_subscriber::{
EnvFilter, Registry, EnvFilter, Registry,
}; };
// init_tracing initializes the tracing system. /// init_tracing initializes the tracing system.
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub fn init_tracing( pub fn init_tracing(
name: &str, name: &str,
@ -141,7 +141,7 @@ pub fn init_tracing(
guards guards
} }
// redirect_stderr_to_file redirects stderr to a file. /// redirect_stderr_to_file redirects stderr to a file.
fn redirect_stderr_to_file(log_dir: PathBuf) { fn redirect_stderr_to_file(log_dir: PathBuf) {
let log_path = log_dir.join("stderr.log"); let log_path = log_dir.join("stderr.log");
let file = OpenOptions::new() let file = OpenOptions::new()