mirror of https://github.com/dragonflyoss/api.git
/*
 * Copyright 2022 The Dragonfly Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

syntax = "proto3";

package common.v2;

import "validate/validate.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";

option go_package = "d7y.io/api/v2/pkg/apis/common/v2;common";

// SizeScope represents the size scope of a task.
enum SizeScope {
  // NORMAL task has more than one piece.
  NORMAL = 0;

  // SMALL task's content length is more than 128 bytes and has only one piece.
  SMALL = 1;

  // TINY task's content length is less than 128 bytes.
  TINY = 2;

  // EMPTY task's content length is equal to zero.
  EMPTY = 3;

  // UNKNOW task has an invalid size scope.
  UNKNOW = 4;
}
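
// Illustrative mapping from task content to size scope, as implied by the
// comments above (boundary handling at exactly 128 bytes is left to the
// implementation, which also decides piece boundaries):
//
//   content_length == 0                     -> EMPTY
//   content_length < 128 bytes              -> TINY
//   content_length > 128 bytes, one piece   -> SMALL
//   more than one piece                     -> NORMAL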

// TaskType represents the type of a task.
enum TaskType {
  // STANDARD is the standard type of task, it can download from the source, remote peers and
  // the local peer (local cache). When the standard task has never been downloaded in the
  // P2P cluster, dfdaemon will download the task from the source. When the standard
  // task has been downloaded in the P2P cluster, dfdaemon will download the task from
  // the remote peer or local peer (local cache), where peers use disk storage to store tasks.
  STANDARD = 0;

  // PERSISTENT is the persistent type of task, it can import and export files in the P2P cluster.
  // When the persistent task is imported into the P2P cluster, dfdaemon will store
  // the task on the peer's disk and copy multiple replicas to remote peers to
  // prevent data loss.
  PERSISTENT = 1;

  // PERSISTENT_CACHE is the persistent cache type of task, it can import and export files in the P2P cluster.
  // When the persistent cache task is imported into the P2P cluster, dfdaemon will store
  // the task on the peer's disk and copy multiple replicas to remote peers to prevent data loss.
  // When the expiration time is reached, the task will be deleted from the P2P cluster.
  PERSISTENT_CACHE = 2;

  // CACHE is the cache type of task, it can download from the source, remote peers and
  // the local peer (local cache). When the cache task has never been downloaded in the
  // P2P cluster, dfdaemon will download the cache task from the source. When the cache
  // task has been downloaded in the P2P cluster, dfdaemon will download the cache task from
  // the remote peer or local peer (local cache), where peers use memory storage to store cache tasks.
  CACHE = 3;
}

// TrafficType represents the type of traffic.
enum TrafficType {
  // BACK_TO_SOURCE is traffic downloaded from the source.
  BACK_TO_SOURCE = 0;

  // REMOTE_PEER is traffic downloaded from a remote peer.
  REMOTE_PEER = 1;

  // LOCAL_PEER is traffic downloaded from the local peer.
  LOCAL_PEER = 2;
}

// Priority represents the priority of an application.
enum Priority {
  // LEVEL0 has no special meaning for the scheduler.
  LEVEL0 = 0;

  // LEVEL1 represents that the download task is forbidden,
  // and an error code is returned during registration.
  LEVEL1 = 1;

  // LEVEL2 represents that when the task is downloaded for the first time,
  // peers are allowed to download from other peers,
  // but not back-to-source. When the task is not downloaded for
  // the first time, it is scheduled normally.
  LEVEL2 = 2;

  // LEVEL3 represents that when the task is downloaded for the first time,
  // a normal peer is triggered first to download back-to-source.
  // When the task is not downloaded for the first time, it is scheduled normally.
  LEVEL3 = 3;

  // LEVEL4 represents that when the task is downloaded for the first time,
  // a weak peer is triggered first to download back-to-source.
  // When the task is not downloaded for the first time, it is scheduled normally.
  LEVEL4 = 4;

  // LEVEL5 represents that when the task is downloaded for the first time,
  // a strong peer is triggered first to download back-to-source.
  // When the task is not downloaded for the first time, it is scheduled normally.
  LEVEL5 = 5;

  // LEVEL6 represents that when the task is downloaded for the first time,
  // a super peer is triggered first to download back-to-source.
  // When the task is not downloaded for the first time, it is scheduled normally.
  LEVEL6 = 6;
}

// Peer metadata.
message Peer {
  // Peer id.
  string id = 1 [(validate.rules).string.min_len = 1];
  // Range is the URL range of the request.
  optional Range range = 2;
  // Peer priority.
  Priority priority = 3 [(validate.rules).enum.defined_only = true];
  // Pieces of the peer.
  repeated Piece pieces = 4 [(validate.rules).repeated = {min_items: 1, ignore_empty: true}];
  // Time cost of the peer's download.
  google.protobuf.Duration cost = 5 [(validate.rules).duration.required = true];
  // Peer state.
  string state = 6 [(validate.rules).string.min_len = 1];
  // Task info.
  Task task = 7 [(validate.rules).message.required = true];
  // Host info.
  Host host = 8 [(validate.rules).message.required = true];
  // NeedBackToSource indicates the peer needs to download from the source.
  bool need_back_to_source = 9;
  // Peer create time.
  google.protobuf.Timestamp created_at = 10 [(validate.rules).timestamp.required = true];
  // Peer update time.
  google.protobuf.Timestamp updated_at = 11 [(validate.rules).timestamp.required = true];
}
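
// A minimal Peer instance in textproto form. All values are hypothetical, and
// the required nested Task and Host messages are elided for brevity; the state
// string is whatever the scheduler assigns:
//
//   id: "peer-5a1e6b"
//   priority: LEVEL0
//   state: "Running"
//   cost { seconds: 12 }
//   need_back_to_source: false
//   created_at { seconds: 1735689600 }
//   updated_at { seconds: 1735689612 }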

// CachePeer metadata.
message CachePeer {
  // Peer id.
  string id = 1 [(validate.rules).string.min_len = 1];
  // Range is the URL range of the request.
  optional Range range = 2;
  // Peer priority.
  Priority priority = 3 [(validate.rules).enum.defined_only = true];
  // Pieces of the peer.
  repeated Piece pieces = 4 [(validate.rules).repeated = {min_items: 1, ignore_empty: true}];
  // Time cost of the peer's download.
  google.protobuf.Duration cost = 5 [(validate.rules).duration.required = true];
  // Peer state.
  string state = 6 [(validate.rules).string.min_len = 1];
  // CacheTask info.
  CacheTask task = 7 [(validate.rules).message.required = true];
  // Host info.
  Host host = 8 [(validate.rules).message.required = true];
  // NeedBackToSource indicates the peer needs to download from the source.
  bool need_back_to_source = 9;
  // Peer create time.
  google.protobuf.Timestamp created_at = 10 [(validate.rules).timestamp.required = true];
  // Peer update time.
  google.protobuf.Timestamp updated_at = 11 [(validate.rules).timestamp.required = true];
}

// PersistentCachePeer metadata.
message PersistentCachePeer {
  // Peer id.
  string id = 1 [(validate.rules).string.min_len = 1];
  // Persistent represents whether the persistent cache peer is persistent.
  // If the persistent cache peer is persistent, it will not be deleted when
  // dfdaemon runs garbage collection. It will only be deleted when the task
  // is deleted by the user.
  bool persistent = 2;
  // Time cost of the peer's download.
  google.protobuf.Duration cost = 3 [(validate.rules).duration.required = true];
  // Peer state.
  string state = 4 [(validate.rules).string.min_len = 1];
  // Task info.
  PersistentCacheTask task = 5 [(validate.rules).message.required = true];
  // Host info.
  Host host = 6 [(validate.rules).message.required = true];
  // Peer create time.
  google.protobuf.Timestamp created_at = 7 [(validate.rules).timestamp.required = true];
  // Peer update time.
  google.protobuf.Timestamp updated_at = 8 [(validate.rules).timestamp.required = true];
}

// Task metadata.
message Task {
  // Task id.
  string id = 1 [(validate.rules).string.min_len = 1];
  // Task type.
  TaskType type = 2 [(validate.rules).enum.defined_only = true];
  // Download url.
  string url = 3 [(validate.rules).string.uri = true];
  // Verifies task data integrity after download using a digest. Supports CRC32, SHA256, and
  // SHA512 algorithms (the validation pattern below also accepts MD5, SHA1, and BLAKE3 digests).
  // Format: `<algorithm>:<hash>`, e.g., `crc32:xxx`, `sha256:yyy`, `sha512:zzz`.
  // Returns an error if the computed digest mismatches the expected value.
  //
  // Performance: digest calculation increases processing time. Enable only when data
  // integrity verification is critical.
  optional string digest = 4 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]+)$", ignore_empty: true}];
  // Tag distinguishes different tasks for the same url.
  optional string tag = 5;
  // Application of the task.
  optional string application = 6;
  // Filtered query params used to generate the task id.
  // When the filter is ["Signature", "Expires", "ns"], for example:
  // http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
  // will generate the same task id.
  // The default value includes the filtered query params of s3, gcs, oss, obs and cos.
  repeated string filtered_query_params = 7;
  // Task request headers.
  map<string, string> request_header = 8;
  // Task content length.
  uint64 content_length = 9;
  // Task piece count.
  uint32 piece_count = 10;
  // Task size scope.
  SizeScope size_scope = 11;
  // Pieces of the task.
  repeated Piece pieces = 12 [(validate.rules).repeated = {min_items: 1, ignore_empty: true}];
  // Task state.
  string state = 13 [(validate.rules).string.min_len = 1];
  // Task peer count.
  uint32 peer_count = 14;
  // Whether the task contains an available peer.
  bool has_available_peer = 15;
  // Task create time.
  google.protobuf.Timestamp created_at = 16 [(validate.rules).timestamp.required = true];
  // Task update time.
  google.protobuf.Timestamp updated_at = 17 [(validate.rules).timestamp.required = true];
}
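
// An illustrative Task in textproto form (all values hypothetical). Note how
// the digest follows the `<algorithm>:<hash>` pattern enforced above, and how
// the filtered query params strip the volatile signing parameters from the
// url before the task id is generated:
//
//   id: "task-7c9f2d"
//   type: STANDARD
//   url: "http://example.com/xyz?Expires=e1&Signature=s1"
//   digest: "crc32:a3c4f2d1"
//   filtered_query_params: "Expires"
//   filtered_query_params: "Signature"
//   content_length: 33554432
//   piece_count: 8
//   size_scope: NORMAL
//   state: "Succeeded"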

// CacheTask metadata.
message CacheTask {
  // Task id.
  string id = 1 [(validate.rules).string.min_len = 1];
  // Task type.
  TaskType type = 2 [(validate.rules).enum.defined_only = true];
  // Download url.
  string url = 3 [(validate.rules).string.uri = true];
  // Verifies task data integrity after download using a digest. Supports CRC32, SHA256, and
  // SHA512 algorithms (the validation pattern below also accepts MD5, SHA1, and BLAKE3 digests).
  // Format: `<algorithm>:<hash>`, e.g., `crc32:xxx`, `sha256:yyy`, `sha512:zzz`.
  // Returns an error if the computed digest mismatches the expected value.
  //
  // Performance: digest calculation increases processing time. Enable only when data
  // integrity verification is critical.
  optional string digest = 4 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]+)$", ignore_empty: true}];
  // Tag distinguishes different tasks for the same url.
  optional string tag = 5;
  // Application of the task.
  optional string application = 6;
  // Filtered query params used to generate the task id.
  // When the filter is ["Signature", "Expires", "ns"], for example:
  // http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
  // will generate the same task id.
  // The default value includes the filtered query params of s3, gcs, oss, obs and cos.
  repeated string filtered_query_params = 7;
  // Task request headers.
  map<string, string> request_header = 8;
  // Task content length.
  uint64 content_length = 9;
  // Task piece count.
  uint32 piece_count = 10;
  // Task size scope.
  SizeScope size_scope = 11;
  // Pieces of the task.
  repeated Piece pieces = 12 [(validate.rules).repeated = {min_items: 1, ignore_empty: true}];
  // Task state.
  string state = 13 [(validate.rules).string.min_len = 1];
  // Task peer count.
  uint32 peer_count = 14;
  // Whether the task contains an available peer.
  bool has_available_peer = 15;
  // Task create time.
  google.protobuf.Timestamp created_at = 16 [(validate.rules).timestamp.required = true];
  // Task update time.
  google.protobuf.Timestamp updated_at = 17 [(validate.rules).timestamp.required = true];
}

// PersistentCacheTask metadata.
message PersistentCacheTask {
  // Task id.
  string id = 1 [(validate.rules).string.min_len = 1];
  // Replica count of the persistent cache task. The persistent cache task will
  // not be deleted when dfdaemon runs garbage collection. It will only be deleted
  // when the task is deleted by the user.
  uint64 persistent_replica_count = 2 [(validate.rules).uint64.gte = 1];
  // Current replica count of the persistent cache task. The persistent cache task
  // will not be deleted when dfdaemon runs garbage collection. It will only be deleted
  // when the task is deleted by the user.
  uint64 current_persistent_replica_count = 3;
  // Current replica count of the cache task. If the cache task is not persistent,
  // it will be deleted when dfdaemon runs garbage collection.
  uint64 current_replica_count = 4;
  // Tag is used to distinguish different persistent cache tasks.
  optional string tag = 5;
  // Application of the task.
  optional string application = 6;
  // Task piece length.
  uint64 piece_length = 7 [(validate.rules).uint64 = {gte: 4194304, lte: 67108864}];
  // Task content length.
  uint64 content_length = 8;
  // Task piece count.
  uint32 piece_count = 9;
  // Task state.
  string state = 10 [(validate.rules).string.min_len = 1];
  // TTL of the persistent cache task.
  google.protobuf.Duration ttl = 11 [(validate.rules).duration.required = true];
  // Task create time.
  google.protobuf.Timestamp created_at = 12 [(validate.rules).timestamp.required = true];
  // Task update time.
  google.protobuf.Timestamp updated_at = 13 [(validate.rules).timestamp.required = true];
}
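
// A sketch of a PersistentCacheTask in textproto form (all values
// hypothetical). The piece_length must fall within the validated
// 4 MiB..64 MiB window, and the ttl bounds how long the task stays in the
// cluster:
//
//   id: "pct-1f8e3a"
//   persistent_replica_count: 3
//   current_persistent_replica_count: 2
//   piece_length: 4194304
//   content_length: 134217728
//   piece_count: 32
//   state: "Succeeded"
//   ttl { seconds: 86400 }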

// Host metadata.
message Host {
  // Host id.
  string id = 1 [(validate.rules).string.min_len = 1];
  // Host type.
  uint32 type = 2 [(validate.rules).uint32.lte = 3];
  // Hostname.
  string hostname = 3 [(validate.rules).string.min_len = 1];
  // Host ip.
  string ip = 4 [(validate.rules).string.ip = true];
  // Port of the gRPC service.
  int32 port = 5 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
  // Port of the download server.
  int32 download_port = 6 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
  // Host OS.
  string os = 7;
  // Host platform.
  string platform = 8;
  // Host platform family.
  string platform_family = 9;
  // Host platform version.
  string platform_version = 10;
  // Host kernel version.
  string kernel_version = 11;
  // CPU Stat.
  optional CPU cpu = 12;
  // Memory Stat.
  optional Memory memory = 13;
  // Network Stat.
  optional Network network = 14;
  // Disk Stat.
  optional Disk disk = 15;
  // Build information.
  optional Build build = 16;
  // ID of the cluster to which the host belongs.
  uint64 scheduler_cluster_id = 17;
  // Disable sharing data with other peers.
  bool disable_shared = 18;
}
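
// A sketch of a Host in textproto form (all values hypothetical). The port
// fields must fall within the validated 1024..65534 window:
//
//   id: "host-9d2c4e"
//   type: 0
//   hostname: "peer-01.example.internal"
//   ip: "192.0.2.10"
//   port: 4000
//   download_port: 4001
//   os: "linux"
//   scheduler_cluster_id: 1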

// CPU Stat.
message CPU {
  // Number of logical cores in the system.
  uint32 logical_count = 1;
  // Number of physical cores in the system.
  uint32 physical_count = 2;
  // Percentage of CPU used.
  double percent = 3 [(validate.rules).double.gte = 0];
  // Percentage of CPU used by the process.
  double process_percent = 4 [(validate.rules).double.gte = 0];
  // CPUTimes contains the amounts of time the CPU has spent performing different kinds of work.
  optional CPUTimes times = 5;
}

// CPUTimes contains the amounts of time the CPU has spent performing different
// kinds of work. Time units are in seconds.
message CPUTimes {
  // CPU time of user.
  double user = 1 [(validate.rules).double.gte = 0];
  // CPU time of system.
  double system = 2 [(validate.rules).double.gte = 0];
  // CPU time of idle.
  double idle = 3 [(validate.rules).double.gte = 0];
  // CPU time of nice.
  double nice = 4 [(validate.rules).double.gte = 0];
  // CPU time of iowait.
  double iowait = 5 [(validate.rules).double.gte = 0];
  // CPU time of irq.
  double irq = 6 [(validate.rules).double.gte = 0];
  // CPU time of softirq.
  double softirq = 7 [(validate.rules).double.gte = 0];
  // CPU time of steal.
  double steal = 8 [(validate.rules).double.gte = 0];
  // CPU time of guest.
  double guest = 9 [(validate.rules).double.gte = 0];
  // CPU time of guest nice.
  double guest_nice = 10 [(validate.rules).double.gte = 0];
}

// Memory Stat.
message Memory {
  // Total amount of RAM on this system.
  uint64 total = 1;
  // RAM available for programs to allocate.
  uint64 available = 2;
  // RAM used by programs.
  uint64 used = 3;
  // Percentage of RAM used by programs.
  double used_percent = 4 [(validate.rules).double = {gte: 0, lte: 100}];
  // Percentage of memory used by the process.
  double process_used_percent = 5 [(validate.rules).double = {gte: 0, lte: 100}];
  // This is the kernel's notion of free memory.
  uint64 free = 6;
}
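
// Worked example of the percentage fields (hypothetical numbers): with
// total = 16 GiB (17179869184 bytes) and used = 4 GiB (4294967296 bytes),
// used_percent = used / total * 100 = 25.0, which satisfies the validated
// 0..100 range.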

// Network Stat.
message Network {
  // Count of TCP connections that are open and in the ESTABLISHED state.
  uint32 tcp_connection_count = 1;
  // Count of upload TCP connections that are open and in the ESTABLISHED state.
  uint32 upload_tcp_connection_count = 2;
  // Location path (area|country|province|city|...).
  optional string location = 3;
  // IDC where the peer host is located.
  optional string idc = 4;
  // Maximum receive bandwidth of the network interface, in bps (bits per second).
  uint64 max_rx_bandwidth = 9;
  // Receive bandwidth of the network interface, in bps (bits per second).
  uint64 rx_bandwidth = 10;
  // Maximum transmit bandwidth of the network interface, in bps (bits per second).
  uint64 max_tx_bandwidth = 11;
  // Transmit bandwidth of the network interface, in bps (bits per second).
  uint64 tx_bandwidth = 12;

  reserved 5;
  reserved "download_rate";
  reserved 6;
  reserved "download_rate_limit";
  reserved 7;
  reserved "upload_rate";
  reserved 8;
  reserved "upload_rate_limit";
}
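
// Note that the bandwidth fields are expressed in bits per second, so a
// hypothetical 10 Gbps NIC reports max_rx_bandwidth: 10000000000. The
// reserved statements keep the retired rate fields (numbers 5-8 and their
// names) from being reused by future revisions.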

// Disk Stat.
message Disk {
  // Total amount of disk on the data path of dragonfly.
  uint64 total = 1;
  // Free amount of disk on the data path of dragonfly.
  uint64 free = 2;
  // Used amount of disk on the data path of dragonfly.
  uint64 used = 3;
  // Used percent of disk on the data path of the dragonfly directory.
  double used_percent = 4 [(validate.rules).double = {gte: 0, lte: 100}];
  // Total number of inodes on the data path of the dragonfly directory.
  uint64 inodes_total = 5;
  // Used number of inodes on the data path of the dragonfly directory.
  uint64 inodes_used = 6;
  // Free number of inodes on the data path of the dragonfly directory.
  uint64 inodes_free = 7;
  // Used percent of inodes on the data path of the dragonfly directory.
  double inodes_used_percent = 8 [(validate.rules).double = {gte: 0, lte: 100}];
  // Disk read bandwidth, in bytes per second.
  uint64 read_bandwidth = 9;
  // Disk write bandwidth, in bytes per second.
  uint64 write_bandwidth = 10;
}

// Build information.
message Build {
  // Git version.
  string git_version = 1;
  // Git commit.
  optional string git_commit = 2;
  // Golang version.
  optional string go_version = 3;
  // Rust version.
  optional string rust_version = 4;
  // Build platform.
  optional string platform = 5;
}

// Download information.
message Download {
  // Download url.
  string url = 1 [(validate.rules).string.uri = true];
  // Digest of the task data, for example blake3:xxx or sha256:yyy.
  optional string digest = 2 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]+)$", ignore_empty: true}];
  // Range is the URL range of the request. If the protocol is HTTP, the range
  // is set in the request header. For other protocols, the range is set in
  // this field.
  optional Range range = 3;
  // Task type.
  TaskType type = 4 [(validate.rules).enum.defined_only = true];
  // Tag distinguishes different tasks for the same url.
  optional string tag = 5;
  // Application of the task.
  optional string application = 6;
  // Peer priority.
  Priority priority = 7 [(validate.rules).enum.defined_only = true];
  // Filtered query params used to generate the task id.
  // When the filter is ["Signature", "Expires", "ns"], for example:
  // http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
  // will generate the same task id.
  // The default value includes the filtered query params of s3, gcs, oss, obs and cos.
  repeated string filtered_query_params = 8;
  // Task request headers.
  map<string, string> request_header = 9;
  // Piece length is the piece length (in bytes) for the downloaded file. The value must be
  // at least 4 MiB (4,194,304 bytes) and at most 64 MiB (67,108,864 bytes),
  // for example: 4194304 (4 MiB), 8388608 (8 MiB). If the piece length is not specified,
  // it will be calculated according to the file size.
  optional uint64 piece_length = 10 [(validate.rules).uint64 = {gte: 4194304, lte: 67108864, ignore_empty: true}];
  // File path to be downloaded. If output_path is set, the downloaded file will be saved to the specified path.
  // Dfdaemon will try to create a hard link to the output path before starting the download. If hard link creation fails,
  // it will copy the file to the output path after the download is completed.
  // For more details refer to https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md.
  optional string output_path = 11 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
  // Download timeout.
  optional google.protobuf.Duration timeout = 12;
  // Dfdaemon cannot download the task from the source if disable_back_to_source is true.
  bool disable_back_to_source = 13;
  // Scheduler needs to schedule the task to download from the source if need_back_to_source is true.
  bool need_back_to_source = 14;
  // certificate_chain is the client certificates in DER format for the backend client downloading back-to-source.
  repeated bytes certificate_chain = 15;
  // Prefetch pre-downloads all pieces of the task when the download request is a range request.
  bool prefetch = 16;
  // Object storage protocol information.
  optional ObjectStorage object_storage = 17;
  // HDFS protocol information.
  optional HDFS hdfs = 18;
  // is_prefetch is the flag to indicate whether the request is a prefetch request.
  bool is_prefetch = 19;
  // need_piece_content is the flag to indicate whether the response needs to return piece content.
  bool need_piece_content = 20;
  // force_hard_link is the flag to indicate whether the downloaded file must be hard linked to the output path.
  // For more details refer to https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md.
  bool force_hard_link = 22;
  // content_for_calculating_task_id is the content used to calculate the task id.
  // If content_for_calculating_task_id is set, use its value to calculate the task id.
  // Otherwise, calculate the task id based on url, piece_length, tag, application, and filtered_query_params.
  optional string content_for_calculating_task_id = 23;
  // remote_ip represents the IP address of the client initiating the download request.
  // For proxy requests, it is set to the IP address of the request source.
  // For dfget requests, it is set to the IP address of dfget.
  optional string remote_ip = 24 [(validate.rules).string = {ip: true, ignore_empty: true}];

  reserved 21;
  reserved "load_to_cache";
}
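
// A sketch of a range download request in textproto form (all values
// hypothetical). The range requests bytes 0..4194303, and prefetch asks
// dfdaemon to pre-download the remaining pieces of the task as well:
//
//   url: "https://example.com/data.bin"
//   type: STANDARD
//   priority: LEVEL0
//   range { start: 0 length: 4194304 }
//   piece_length: 4194304
//   output_path: "/tmp/data.bin"
//   timeout { seconds: 300 }
//   prefetch: true
//   remote_ip: "192.0.2.10"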

// Object storage related information.
message ObjectStorage {
  // Region is the region of the object storage service.
  optional string region = 1 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
  // Endpoint is the endpoint of the object storage service.
  optional string endpoint = 2 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
  // Access key ID used to access the object storage service.
  optional string access_key_id = 3 [(validate.rules).string.min_len = 1];
  // Access key secret used to access the object storage service.
  optional string access_key_secret = 4 [(validate.rules).string.min_len = 1];
  // Session token used to access the S3 storage service.
  optional string session_token = 5 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
  // Local path to the credential file for Google Cloud Storage service OAuth2 authentication.
  optional string credential_path = 6 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
  // Predefined ACL used for the Google Cloud Storage service.
  optional string predefined_acl = 7 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
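
// A sketch of ObjectStorage settings for an S3-compatible backend (all values
// hypothetical; session_token is only needed for temporary credentials):
//
//   region: "us-east-1"
//   endpoint: "https://s3.us-east-1.amazonaws.com"
//   access_key_id: "AKIAEXAMPLE"
//   access_key_secret: "example-secret"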

// HDFS related information.
message HDFS {
  // Delegation token for the WebHDFS operator.
  optional string delegation_token = 1 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}

// Range represents a download range.
message Range {
  // Start of the range.
  uint64 start = 1;
  // Length of the range.
  uint64 length = 2;
}
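
// For HTTP downloads the range maps onto a standard Range header:
// start: 100 length: 50 corresponds to `Range: bytes=100-149`, i.e.
// bytes=start-(start+length-1).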

// Piece represents information of a piece.
message Piece {
  // Piece number.
  uint32 number = 1;
  // Parent peer id.
  optional string parent_id = 2 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
  // Piece offset.
  uint64 offset = 3;
  // Piece length.
  uint64 length = 4;
  // Digest of the piece data, for example blake3:xxx or sha256:yyy.
  string digest = 5 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]+)$", ignore_empty: true}];
  // Piece content.
  optional bytes content = 6 [(validate.rules).bytes = {min_len: 1, ignore_empty: true}];
  // Traffic type.
  optional TrafficType traffic_type = 7;
  // Time cost of downloading the piece.
  google.protobuf.Duration cost = 8 [(validate.rules).duration.required = true];
  // Piece create time.
  google.protobuf.Timestamp created_at = 9 [(validate.rules).timestamp.required = true];
}
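
// A sketch of a Piece in textproto form (all values hypothetical). offset and
// length locate the piece within the task content, and the digest follows the
// same `<algorithm>:<hash>` pattern used for tasks:
//
//   number: 0
//   offset: 0
//   length: 4194304
//   digest: "crc32:9f3b2c1a"
//   traffic_type: REMOTE_PEER
//   cost { nanos: 500000000 }
//   created_at { seconds: 1735689600 }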