api/proto/common.proto

/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package common.v2;
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
// SizeScope represents size scope of task.
enum SizeScope {
// size > one piece size.
NORMAL = 0;
// 128 bytes < size <= one piece size and the task is of plain type.
SMALL = 1;
// size <= 128 bytes and the task is of plain type.
TINY = 2;
// size == 0 bytes and the task is of plain type.
EMPTY = 3;
}
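
// An illustrative mapping of sizes to scopes, assuming a piece size of 4 MiB
// (the piece size itself is configured per task and is not defined in this file):
//
//   size == 0 bytes                       -> EMPTY
//   0 bytes < size <= 128 bytes (plain)   -> TINY
//   128 bytes < size <= 4 MiB (plain)     -> SMALL
//   size > 4 MiB                          -> NORMAL
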
// TaskType represents type of task.
enum TaskType {
// STANDARD is the standard type of task; it can be downloaded from the source, a
// remote peer, or a local peer (local cache). When a standard task has never been
// downloaded in the P2P cluster, dfdaemon downloads the task from the source. When
// the standard task has already been downloaded in the P2P cluster, dfdaemon
// downloads the task from a remote peer or the local peer (local cache).
STANDARD = 0;
// PERSISTENT is the persistent type of task; files can be imported into and
// exported from the P2P cluster. When a persistent task is imported into the
// P2P cluster, dfdaemon stores the task on the peer's disk and copies multiple
// replicas to remote peers to prevent data loss.
PERSISTENT = 1;
// PERSISTENT_CACHE is the persistent cache type of task; files can be imported into
// and exported from the P2P cluster. When a persistent cache task is imported into
// the P2P cluster, dfdaemon stores the task on the peer's disk and copies multiple
// replicas to remote peers to prevent data loss. When the expiration time is reached,
// the task is deleted from the P2P cluster.
PERSISTENT_CACHE = 2;
}
// TrafficType represents type of traffic.
enum TrafficType {
// BACK_TO_SOURCE is traffic downloaded from the source.
BACK_TO_SOURCE = 0;
// REMOTE_PEER is traffic downloaded from a remote peer.
REMOTE_PEER = 1;
// LOCAL_PEER is traffic downloaded from the local peer.
LOCAL_PEER = 2;
}
// Priority represents priority of application.
enum Priority {
// LEVEL0 has no special meaning for scheduler.
LEVEL0 = 0;
// LEVEL1 represents that the download task is forbidden,
// and an error code is returned during registration.
LEVEL1 = 1;
// LEVEL2 represents that when the task is downloaded for the first time,
// peers are allowed to download from other peers,
// but not back-to-source. When the task is not downloaded for
// the first time, it is scheduled normally.
LEVEL2 = 2;
// LEVEL3 represents that when the task is downloaded for the first time,
// a normal peer is triggered first to download back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL3 = 3;
// LEVEL4 represents that when the task is downloaded for the first time,
// a weak peer is triggered first to download back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL4 = 4;
// LEVEL5 represents that when the task is downloaded for the first time,
// a strong peer is triggered first to download back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL5 = 5;
// LEVEL6 represents that when the task is downloaded for the first time,
// a super peer is triggered first to download back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL6 = 6;
}
// Peer metadata.
message Peer {
// Peer id.
string id = 1;
// Range is the URL range of the request.
optional Range range = 2;
// Peer priority.
Priority priority = 3;
// Pieces of peer.
repeated Piece pieces = 4;
// Time cost of the peer's download.
google.protobuf.Duration cost = 5;
// Peer state.
string state = 6;
// Task info.
Task task = 7;
// Host info.
Host host = 8;
// NeedBackToSource indicates whether the peer needs to download from the source.
bool need_back_to_source = 9;
// Peer create time.
google.protobuf.Timestamp created_at = 10;
// Peer update time.
google.protobuf.Timestamp updated_at = 11;
}
// PersistentCachePeer metadata.
message PersistentCachePeer {
// Peer id.
string id = 1;
// Persistent represents whether the persistent cache peer is persistent.
// If the persistent cache peer is persistent, the persistent cache peer will
// not be deleted when dfdaemon runs garbage collection. It is only deleted
// when the task is deleted by the user.
bool persistent = 2;
// Time cost of the peer's download.
google.protobuf.Duration cost = 3;
// Peer state.
string state = 4;
// Persistent task info.
PersistentCacheTask task = 5;
// Host info.
Host host = 6;
// Peer create time.
google.protobuf.Timestamp created_at = 7;
// Peer update time.
google.protobuf.Timestamp updated_at = 8;
}
// Task metadata.
message Task {
// Task id.
string id = 1;
// Task type.
TaskType type = 2;
// Download url.
string url = 3;
// Digest of the task content, for example blake3:xxx or sha256:yyy.
optional string digest = 4;
// URL tag identifies different tasks for the same URL.
optional string tag = 5;
// Application of task.
optional string application = 6;
// Filtered query params to generate the task id.
// When filter is ["Signature", "Expires", "ns"], for example:
// http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
// will generate the same task id.
// Default value includes the filtered query params of s3, gcs, oss, obs, cos.
repeated string filtered_query_params = 7;
// Task request headers.
map<string, string> request_header = 8;
// Task content length.
uint64 content_length = 9;
// Task piece count.
uint32 piece_count = 10;
// Task size scope.
SizeScope size_scope = 11;
// Pieces of task.
repeated Piece pieces = 12;
// Task state.
string state = 13;
// Task peer count.
uint32 peer_count = 14;
// Whether the task has available peers.
bool has_available_peer = 15;
// Task create time.
google.protobuf.Timestamp created_at = 16;
// Task update time.
google.protobuf.Timestamp updated_at = 17;
}
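
// A minimal text-format (textproto) sketch of a Task; all values below are
// illustrative placeholders. In practice the id is derived from the url,
// piece_length, tag, application, and filtered_query_params (see
// content_for_calculating_task_id in Download):
//
//   id: "example-task-id"
//   type: STANDARD
//   url: "http://example.com/xyz"
//   digest: "sha256:yyy"
//   tag: "v1"
//   filtered_query_params: "Signature"
//   filtered_query_params: "Expires"
//   request_header { key: "User-Agent" value: "dfdaemon" }
//   content_length: 1048576
//   piece_count: 1
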
// PersistentCacheTask metadata.
message PersistentCacheTask {
// Task id.
string id = 1;
// Replica count of the persistent cache task. The persistent cache task will
// not be deleted when dfdaemon runs garbage collection. It is only deleted
// when the task is deleted by the user.
uint64 persistent_replica_count = 2;
// Current replica count of the persistent cache task. The persistent cache task
// will not be deleted when dfdaemon runs garbage collection. It is only deleted
// when the task is deleted by the user.
uint64 current_persistent_replica_count = 3;
// Current replica count of the cache task. If the cache task is not persistent,
// it will be deleted when dfdaemon runs garbage collection.
uint64 current_replica_count = 4;
// Tag is used to distinguish different persistent cache tasks.
optional string tag = 5;
// Application of task.
optional string application = 6;
// Task piece length.
uint64 piece_length = 7;
// Task content length.
uint64 content_length = 8;
// Task piece count.
uint32 piece_count = 9;
// Task state.
string state = 10;
// TTL of the persistent cache task.
google.protobuf.Duration ttl = 11;
// Task create time.
google.protobuf.Timestamp created_at = 12;
// Task update time.
google.protobuf.Timestamp updated_at = 13;
}
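
// A worked sketch of the replica counters above; the numbers are illustrative:
//
//   persistent_replica_count: 3          # persistent replicas to keep in the cluster
//   current_persistent_replica_count: 2  # persistent replicas that exist right now
//   current_replica_count: 5             # all replicas that exist right now, including
//                                        # non-persistent ones that garbage collection may remove
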
// Host metadata.
message Host {
// Host id.
string id = 1;
// Host type.
uint32 type = 2;
// Hostname.
string hostname = 3;
// Host ip.
string ip = 4;
// Port of grpc service.
int32 port = 5;
// Port of download server.
int32 download_port = 6;
// Host OS.
string os = 7;
// Host platform.
string platform = 8;
// Host platform family.
string platform_family = 9;
// Host platform version.
string platform_version = 10;
// Host kernel version.
string kernel_version = 11;
// CPU Stat.
optional CPU cpu = 12;
// Memory Stat.
optional Memory memory = 13;
// Network Stat.
optional Network network = 14;
// Disk Stat.
optional Disk disk = 15;
// Build information.
optional Build build = 16;
// ID of the cluster to which the host belongs.
uint64 scheduler_cluster_id = 17;
// Disable shared data for other peers.
bool disable_shared = 18;
}
// CPU Stat.
message CPU {
// Number of logical cores in the system.
uint32 logical_count = 1;
// Number of physical cores in the system.
uint32 physical_count = 2;
// Percent calculates the percentage of CPU used.
double percent = 3;
// Calculates the percentage of CPU used by the process.
double process_percent = 4;
// CPUTimes contains the amounts of time the CPU has spent performing different kinds of work.
optional CPUTimes times = 5;
}
// CPUTimes contains the amounts of time the CPU has spent performing different
// kinds of work. Time units are in seconds.
message CPUTimes {
// CPU time of user.
double user = 1;
// CPU time of system.
double system = 2;
// CPU time of idle.
double idle = 3;
// CPU time of nice.
double nice = 4;
// CPU time of iowait.
double iowait = 5;
// CPU time of irq.
double irq = 6;
// CPU time of softirq.
double softirq = 7;
// CPU time of steal.
double steal = 8;
// CPU time of guest.
double guest = 9;
// CPU time of guest nice.
double guest_nice = 10;
}
// Memory Stat.
message Memory {
// Total amount of RAM on this system.
uint64 total = 1;
// RAM available for programs to allocate.
uint64 available = 2;
// RAM used by programs.
uint64 used = 3;
// Percentage of RAM used by programs.
double used_percent = 4;
// Calculates the percentage of memory used by the process.
double process_used_percent = 5;
// This is the kernel's notion of free memory.
uint64 free = 6;
}
// Network Stat.
message Network {
// Count of TCP connections that are open and in the ESTABLISHED state.
uint32 tcp_connection_count = 1;
// Count of upload TCP connections that are open and in the ESTABLISHED state.
uint32 upload_tcp_connection_count = 2;
// Location path (area|country|province|city|...).
optional string location = 3;
// IDC where the peer host is located.
optional string idc = 4;
// Download rate is received bytes per second.
uint64 download_rate = 5;
// Download rate limit is the limit of received bytes per second.
uint64 download_rate_limit = 6;
// Upload rate is transmitted bytes per second.
uint64 upload_rate = 7;
// Upload rate limit is the limit of transmitted bytes per second.
uint64 upload_rate_limit = 8;
}
// Disk Stat.
message Disk {
// Total amount of disk on the data path of dragonfly.
uint64 total = 1;
// Free amount of disk on the data path of dragonfly.
uint64 free = 2;
// Used amount of disk on the data path of dragonfly.
uint64 used = 3;
// Used percent of disk on the data path of dragonfly directory.
double used_percent = 4;
// Total amount of inodes on the data path of dragonfly directory.
uint64 inodes_total = 5;
// Used amount of inodes on the data path of dragonfly directory.
uint64 inodes_used = 6;
// Free amount of inodes on the data path of dragonfly directory.
uint64 inodes_free = 7;
// Used percent of inodes on the data path of dragonfly directory.
double inodes_used_percent = 8;
// Disk read bandwidth, in bytes per second.
uint64 read_bandwidth = 9;
// Disk write bandwidth, in bytes per second.
uint64 write_bandwidth = 10;
}
// Build information.
message Build {
// Git version.
string git_version = 1;
// Git commit.
optional string git_commit = 2;
// Golang version.
optional string go_version = 3;
// Rust version.
optional string rust_version = 4;
// Build platform.
optional string platform = 5;
}
// Download information.
message Download {
// Download url.
string url = 1;
// Digest of the task content, for example blake3:xxx or sha256:yyy.
optional string digest = 2;
// Range is the URL range of the request. If the protocol is HTTP, the range
// is set in the request header. For other protocols, the range is set in the
// range field.
optional Range range = 3;
// Task type.
TaskType type = 4;
// URL tag identifies different tasks for the same URL.
optional string tag = 5;
// Application of task.
optional string application = 6;
// Peer priority.
Priority priority = 7;
// Filtered query params to generate the task id.
// When filter is ["Signature", "Expires", "ns"], for example:
// http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
// will generate the same task id.
// Default value includes the filtered query params of s3, gcs, oss, obs, cos.
repeated string filtered_query_params = 8;
// Task request headers.
map<string, string> request_header = 9;
// Task piece length.
optional uint64 piece_length = 10;
// Output path for the downloaded file. If output_path is set, the downloaded file will be saved to the specified path.
// Dfdaemon will try to create hard link to the output path before starting the download. If hard link creation fails,
// it will copy the file to the output path after the download is completed.
// For more details refer to https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md.
optional string output_path = 11;
// Download timeout.
optional google.protobuf.Duration timeout = 12;
// Dfdaemon cannot download the task from the source if disable_back_to_source is true.
bool disable_back_to_source = 13;
// The scheduler needs to schedule the task to download from the source if need_back_to_source is true.
bool need_back_to_source = 14;
// certificate_chain is the client certificates in DER format for the backend client to download back-to-source.
repeated bytes certificate_chain = 15;
// Prefetch pre-downloads all pieces of the task when the download task request is a range request.
bool prefetch = 16;
// Object storage protocol information.
optional ObjectStorage object_storage = 17;
// HDFS protocol information.
optional HDFS hdfs = 18;
// is_prefetch is the flag to indicate whether the request is a prefetch request.
bool is_prefetch = 19;
// need_piece_content is the flag to indicate whether the response needs to return piece content.
bool need_piece_content = 20;
// load_to_cache indicates whether the content downloaded will be stored in the cache storage.
// Cache storage is designed to store downloaded piece content from preheat tasks,
// allowing other peers to access the content from memory instead of disk.
bool load_to_cache = 21;
// force_hard_link is the flag to indicate whether the download file must be hard linked to the output path.
// For more details refer to https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md.
bool force_hard_link = 22;
// content_for_calculating_task_id is the content used to calculate the task id.
// If content_for_calculating_task_id is set, use its value to calculate the task ID.
// Otherwise, calculate the task ID based on url, piece_length, tag, application, and filtered_query_params.
optional string content_for_calculating_task_id = 23;
}
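
// A minimal text-format (textproto) sketch of a Download request for a plain
// HTTP source; all values are illustrative, and the object_storage/hdfs fields
// are omitted because they only apply to those protocols:
//
//   url: "http://example.com/xyz"
//   type: STANDARD
//   tag: "v1"
//   application: "example-app"
//   priority: LEVEL0
//   filtered_query_params: "Signature"
//   filtered_query_params: "Expires"
//   piece_length: 4194304              # 4 MiB, chosen arbitrarily for this sketch
//   output_path: "/tmp/xyz"
//   timeout { seconds: 300 }
//   disable_back_to_source: false
//   prefetch: false
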
// Object Storage related information.
message ObjectStorage {
// Region is the region of the object storage service.
optional string region = 1;
// Endpoint is the endpoint of the object storage service.
optional string endpoint = 2;
// Access key ID used to access the object storage service.
optional string access_key_id = 3;
// Access key secret used to access the object storage service.
optional string access_key_secret = 4;
// Session token used to access the S3 storage service.
optional string session_token = 5;
// Local path to the credential file for Google Cloud Storage service OAuth2 authentication.
optional string credential_path = 6;
// Predefined ACL used for the Google Cloud Storage service.
optional string predefined_acl = 7;
}
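
// An illustrative sketch of ObjectStorage fields for an S3-style backend; the
// region, endpoint, and credentials below are placeholders, not defaults:
//
//   region: "us-east-1"
//   endpoint: "https://s3.us-east-1.amazonaws.com"
//   access_key_id: "AKIA..."
//   access_key_secret: "..."
//   session_token: "..."   # typically only set for temporary credentials
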
// HDFS related information.
message HDFS {
// Delegation token for the WebHDFS operator.
optional string delegation_token = 1;
}
// Range represents download range.
message Range {
// Start of range.
uint64 start = 1;
// Length of range.
uint64 length = 2;
}
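
// For example, the HTTP request header "Range: bytes=1024-2047" (an inclusive
// byte range) corresponds to:
//
//   start: 1024
//   length: 1024
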
// Piece represents information of piece.
message Piece {
// Piece number.
uint32 number = 1;
// Parent peer id.
optional string parent_id = 2;
// Piece offset.
uint64 offset = 3;
// Piece length.
uint64 length = 4;
// Digest of the piece data, for example blake3:xxx or sha256:yyy.
string digest = 5;
// Piece content.
optional bytes content = 6;
// Traffic type.
optional TrafficType traffic_type = 7;
// Time cost of downloading the piece.
google.protobuf.Duration cost = 8;
// Piece create time.
google.protobuf.Timestamp created_at = 9;
}
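
// An illustrative Piece, assuming a fixed piece length of 4 MiB so that
// offset = number * piece length (all values are placeholders):
//
//   number: 2
//   offset: 8388608        # 2 * 4194304
//   length: 4194304
//   digest: "blake3:xxx"
//   traffic_type: REMOTE_PEER
//   cost { seconds: 1 }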