/*
 *     Copyright 2022 The Dragonfly Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

syntax = "proto3";

package common.v2;

import "validate/validate.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";

option go_package = "d7y.io/api/v2/pkg/apis/common/v2;common";

// SizeScope represents size scope of task.
enum SizeScope {
  // NORMAL task has more than one piece.
  NORMAL = 0;

  // SMALL task's content length is more than 128 bytes and it has only one piece.
  SMALL = 1;

  // TINY task's content length is less than 128 bytes.
  TINY = 2;

  // EMPTY task's content length is equal to zero.
  EMPTY = 3;

  // UNKNOW task has an invalid size scope.
  UNKNOW = 4;
}
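
// For illustration, derived from the definitions above: a zero-length task is
// EMPTY, a 100-byte task is TINY, a 1 KiB task with a single piece is SMALL,
// and a task split into more than one piece is NORMAL.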

// TaskType represents type of task.
enum TaskType {
  // STANDARD is the standard type of task. It can be downloaded from the source,
  // a remote peer, or the local peer (local cache). When a standard task has never
  // been downloaded in the P2P cluster, dfdaemon downloads it from the source.
  // When the task has already been downloaded in the P2P cluster, dfdaemon
  // downloads it from a remote peer or the local peer (local cache).
  STANDARD = 0;

  // PERSISTENT is the persistent type of task. It can be used to import and
  // export files in the P2P cluster. When a persistent task is imported into
  // the P2P cluster, dfdaemon stores it on the peer's disk and copies multiple
  // replicas to remote peers to prevent data loss.
  PERSISTENT = 1;

  // PERSISTENT_CACHE is the persistent cache type of task. It can be used to
  // import and export files in the P2P cluster. When a persistent cache task is
  // imported into the P2P cluster, dfdaemon stores it on the peer's disk and
  // copies multiple replicas to remote peers to prevent data loss. When the
  // expiration time is reached, the task is deleted from the P2P cluster.
  PERSISTENT_CACHE = 2;
}

// TrafficType represents type of traffic.
enum TrafficType {
  // BACK_TO_SOURCE is traffic downloaded from the source.
  BACK_TO_SOURCE = 0;

  // REMOTE_PEER is traffic downloaded from a remote peer.
  REMOTE_PEER = 1;

  // LOCAL_PEER is traffic downloaded from the local peer.
  LOCAL_PEER = 2;
}

// Priority represents priority of application.
enum Priority {
  // LEVEL0 has no special meaning for scheduler.
  LEVEL0 = 0;

  // LEVEL1 represents that the download task is forbidden,
  // and an error code is returned during registration.
  LEVEL1 = 1;

  // LEVEL2 represents that when the task is downloaded for the first time,
  // peers are allowed to download from other peers, but not back-to-source.
  // When the task is not downloaded for the first time, it is scheduled normally.
  LEVEL2 = 2;

  // LEVEL3 represents that when the task is downloaded for the first time,
  // the normal peer is first triggered to download back-to-source.
  // When the task is not downloaded for the first time, it is scheduled normally.
  LEVEL3 = 3;

  // LEVEL4 represents that when the task is downloaded for the first time,
  // the weak peer is first triggered to download back-to-source.
  // When the task is not downloaded for the first time, it is scheduled normally.
  LEVEL4 = 4;

  // LEVEL5 represents that when the task is downloaded for the first time,
  // the strong peer is first triggered to download back-to-source.
  // When the task is not downloaded for the first time, it is scheduled normally.
  LEVEL5 = 5;

  // LEVEL6 represents that when the task is downloaded for the first time,
  // the super peer is first triggered to download back-to-source.
  // When the task is not downloaded for the first time, it is scheduled normally.
  LEVEL6 = 6;
}

// Peer metadata.
message Peer {
  // Peer id.
  string id = 1 [(validate.rules).string.min_len = 1];
  // Range is the URL range of the request.
  optional Range range = 2;
  // Peer priority.
  Priority priority = 3 [(validate.rules).enum.defined_only = true];
  // Pieces of peer.
  repeated Piece pieces = 4 [(validate.rules).repeated = {min_items: 1, ignore_empty: true}];
  // Time cost of the peer's download.
  google.protobuf.Duration cost = 5 [(validate.rules).duration.required = true];
  // Peer state.
  string state = 6 [(validate.rules).string.min_len = 1];
  // Task info.
  Task task = 7 [(validate.rules).message.required = true];
  // Host info.
  Host host = 8 [(validate.rules).message.required = true];
  // NeedBackToSource indicates whether the peer needs to download from the source.
  bool need_back_to_source = 9;
  // Peer create time.
  google.protobuf.Timestamp created_at = 10 [(validate.rules).timestamp.required = true];
  // Peer update time.
  google.protobuf.Timestamp updated_at = 11 [(validate.rules).timestamp.required = true];
}

// PersistentCachePeer metadata.
message PersistentCachePeer {
  // Peer id.
  string id = 1 [(validate.rules).string.min_len = 1];
  // Persistent represents whether the persistent cache peer is persistent.
  // If the persistent cache peer is persistent, it will not be deleted when
  // dfdaemon runs garbage collection; it is only deleted when the user
  // deletes the task.
  bool persistent = 2;
  // Time cost of the peer's download.
  google.protobuf.Duration cost = 3 [(validate.rules).duration.required = true];
  // Peer state.
  string state = 4 [(validate.rules).string.min_len = 1];
  // Task info.
  PersistentCacheTask task = 5 [(validate.rules).message.required = true];
  // Host info.
  Host host = 6 [(validate.rules).message.required = true];
  // Peer create time.
  google.protobuf.Timestamp created_at = 7 [(validate.rules).timestamp.required = true];
  // Peer update time.
  google.protobuf.Timestamp updated_at = 8 [(validate.rules).timestamp.required = true];
}

// Task metadata.
message Task {
  // Task id.
  string id = 1 [(validate.rules).string.min_len = 1];
  // Task type.
  TaskType type = 2 [(validate.rules).enum.defined_only = true];
  // Download url.
  string url = 3 [(validate.rules).string.uri = true];
  // Digest of the task content, for example: blake3:xxx or sha256:yyy.
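  // For illustration, the sha256 digest of the ASCII string "hello world" is
  // sha256:b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9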
  optional string digest = 4 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]{8})$", ignore_empty: true}];
  // URL tag identifies different tasks for the same URL.
  optional string tag = 5;
  // Application of task.
  optional string application = 6;
  // Filtered query params used to generate the task id.
  // For example, when the filter is ["Signature", "Expires", "ns"],
  // http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
  // generate the same task id.
  // The default value includes the filtered query params of s3, gcs, oss, obs and cos.
  repeated string filtered_query_params = 7;
  // Task request headers.
  map<string, string> request_header = 8;
  // Task piece length.
  uint64 piece_length = 9 [(validate.rules).uint64.gte = 1];
  // Task content length.
  uint64 content_length = 10;
  // Task piece count.
  uint32 piece_count = 11;
  // Task size scope.
  SizeScope size_scope = 12;
  // Pieces of task.
  repeated Piece pieces = 13 [(validate.rules).repeated = {min_items: 1, ignore_empty: true}];
  // Task state.
  string state = 14 [(validate.rules).string.min_len = 1];
  // Task peer count.
  uint32 peer_count = 15;
  // Whether the task contains an available peer.
  bool has_available_peer = 16;
  // Task create time.
  google.protobuf.Timestamp created_at = 17 [(validate.rules).timestamp.required = true];
  // Task update time.
  google.protobuf.Timestamp updated_at = 18 [(validate.rules).timestamp.required = true];
}

// PersistentCacheTask metadata.
message PersistentCacheTask {
  // Task id.
  string id = 1 [(validate.rules).string.min_len = 1];
  // Replica count of the persistent cache task. The persistent cache task will
  // not be deleted when dfdaemon runs garbage collection; it is only deleted
  // when the user deletes the task.
  uint64 persistent_replica_count = 2 [(validate.rules).uint64.gte = 1];
  // Replica count of the cache task. If the cache task is not persistent,
  // it will be deleted when dfdaemon runs garbage collection.
  uint64 replica_count = 3 [(validate.rules).uint64.gte = 1];
  // Digest of the task content, for example: blake3:xxx or sha256:yyy.
  string digest = 4 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]{8})$"}];
  // Tag is used to distinguish different persistent cache tasks.
  optional string tag = 5;
  // Application of task.
  optional string application = 6;
  // Task piece length.
  uint64 piece_length = 7 [(validate.rules).uint64.gte = 1];
  // Task content length.
  uint64 content_length = 8;
  // Task piece count.
  uint32 piece_count = 9;
  // Task state.
  string state = 10 [(validate.rules).string.min_len = 1];
  // Task create time.
  google.protobuf.Timestamp created_at = 11 [(validate.rules).timestamp.required = true];
  // Task update time.
  google.protobuf.Timestamp updated_at = 12 [(validate.rules).timestamp.required = true];
}

// Host metadata.
message Host {
  // Host id.
  string id = 1 [(validate.rules).string.min_len = 1];
  // Host type.
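  // The validation bound (lte = 3) implies four host types; presumably these
  // distinguish the normal peer from the weak, strong and super peers
  // referenced by Priority above, though the exact mapping is not defined
  // in this file.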
  uint32 type = 2 [(validate.rules).uint32.lte = 3];
  // Hostname.
  string hostname = 3 [(validate.rules).string.min_len = 1];
  // Host ip.
  string ip = 4 [(validate.rules).string.ip = true];
  // Port of grpc service.
  int32 port = 5 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
  // Port of download server.
  int32 download_port = 6 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
  // Host OS.
  string os = 7;
  // Host platform.
  string platform = 8;
  // Host platform family.
  string platform_family = 9;
  // Host platform version.
  string platform_version = 10;
  // Host kernel version.
  string kernel_version = 11;
  // CPU Stat.
  optional CPU cpu = 12;
  // Memory Stat.
  optional Memory memory = 13;
  // Network Stat.
  optional Network network = 14;
  // Disk Stat.
  optional Disk disk = 15;
  // Build information.
  optional Build build = 16;
  // ID of the scheduler cluster to which the host belongs.
  uint64 scheduler_cluster_id = 17;
  // Disable sharing data with other peers.
  bool disable_shared = 18;
}

// CPU Stat.
message CPU {
  // Number of logical cores in the system.
  uint32 logical_count = 1;
  // Number of physical cores in the system.
  uint32 physical_count = 2;
  // Percentage of CPU used.
  double percent = 3 [(validate.rules).double.gte = 0];
  // Percentage of CPU used by the process.
  double process_percent = 4 [(validate.rules).double.gte = 0];
  // CPUTimes contains the amounts of time the CPU has spent performing different kinds of work.
  optional CPUTimes times = 5;
}

// CPUTimes contains the amounts of time the CPU has spent performing different
// kinds of work. Time units are in seconds.
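// On Linux these typically correspond to the per-CPU fields reported in
// /proc/stat (user, nice, system, idle, iowait, irq, softirq, steal, guest,
// guest_nice).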
message CPUTimes {
  // CPU time of user.
  double user = 1 [(validate.rules).double.gte = 0];
  // CPU time of system.
  double system = 2 [(validate.rules).double.gte = 0];
  // CPU time of idle.
  double idle = 3 [(validate.rules).double.gte = 0];
  // CPU time of nice.
  double nice = 4 [(validate.rules).double.gte = 0];
  // CPU time of iowait.
  double iowait = 5 [(validate.rules).double.gte = 0];
  // CPU time of irq.
  double irq = 6 [(validate.rules).double.gte = 0];
  // CPU time of softirq.
  double softirq = 7 [(validate.rules).double.gte = 0];
  // CPU time of steal.
  double steal = 8 [(validate.rules).double.gte = 0];
  // CPU time of guest.
  double guest = 9 [(validate.rules).double.gte = 0];
  // CPU time of guest nice.
  double guest_nice = 10 [(validate.rules).double.gte = 0];
}

// Memory Stat.
message Memory {
  // Total amount of RAM on this system.
  uint64 total = 1;
  // RAM available for programs to allocate.
  uint64 available = 2;
  // RAM used by programs.
  uint64 used = 3;
  // Percentage of RAM used by programs.
  double used_percent = 4 [(validate.rules).double = {gte: 0, lte: 100}];
  // Percentage of memory used by the process.
  double process_used_percent = 5 [(validate.rules).double = {gte: 0, lte: 100}];
  // This is the kernel's notion of free memory.
  uint64 free = 6;
}

// Network Stat.
message Network {
  // Count of open TCP connections whose status is ESTABLISHED.
  uint32 tcp_connection_count = 1;
  // Count of open TCP connections for uploading whose status is ESTABLISHED.
  uint32 upload_tcp_connection_count = 2;
  // Location path (area|country|province|city|...).
  optional string location = 3;
  // IDC where the peer host is located.
  optional string idc = 4;
  // Download rate is the received bytes per second.
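  // For example, a download_rate of 125000000 bytes per second corresponds to
  // 1 Gbps (10^9 bits per second divided by 8 bits per byte).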
  uint64 download_rate = 5;
  // Download rate limit is the maximum received bytes per second.
  uint64 download_rate_limit = 6;
  // Upload rate is the transmitted bytes per second.
  uint64 upload_rate = 7;
  // Upload rate limit is the maximum transmitted bytes per second.
  uint64 upload_rate_limit = 8;
}

// Disk Stat.
message Disk {
  // Total amount of disk on the data path of dragonfly.
  uint64 total = 1;
  // Free amount of disk on the data path of dragonfly.
  uint64 free = 2;
  // Used amount of disk on the data path of dragonfly.
  uint64 used = 3;
  // Used percent of disk on the data path of dragonfly.
  double used_percent = 4 [(validate.rules).double = {gte: 0, lte: 100}];
  // Total amount of inodes on the data path of dragonfly.
  uint64 inodes_total = 5;
  // Used amount of inodes on the data path of dragonfly.
  uint64 inodes_used = 6;
  // Free amount of inodes on the data path of dragonfly.
  uint64 inodes_free = 7;
  // Used percent of inodes on the data path of dragonfly.
  double inodes_used_percent = 8 [(validate.rules).double = {gte: 0, lte: 100}];
}

// Build information.
message Build {
  // Git version.
  string git_version = 1;
  // Git commit.
  optional string git_commit = 2;
  // Golang version.
  optional string go_version = 3;
  // Rust version.
  optional string rust_version = 4;
  // Build platform.
  optional string platform = 5;
}

// Download information.
message Download {
  // Download url.
  string url = 1 [(validate.rules).string.uri = true];
  // Digest of the task content, for example: blake3:xxx or sha256:yyy.
  optional string digest = 2 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]{8})$", ignore_empty: true}];
  // Range is the URL range of the request. If the protocol is HTTP, the range
  // is set in the request header. For other protocols, the range is set in
  // this field.
  optional Range range = 3;
  // Task type.
  TaskType type = 4 [(validate.rules).enum.defined_only = true];
  // URL tag identifies different tasks for the same URL.
  optional string tag = 5;
  // Application of task.
  optional string application = 6;
  // Peer priority.
  Priority priority = 7 [(validate.rules).enum.defined_only = true];
  // Filtered query params used to generate the task id.
  // For example, when the filter is ["Signature", "Expires", "ns"],
  // http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
  // generate the same task id.
  // The default value includes the filtered query params of s3, gcs, oss, obs and cos.
  repeated string filtered_query_params = 8;
  // Task request headers.
  map<string, string> request_header = 9;
  // Task piece length.
  optional uint64 piece_length = 10 [(validate.rules).uint64 = {gte: 1, ignore_empty: true}];
  // File path to be exported.
  optional string output_path = 11 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
  // Download timeout.
  optional google.protobuf.Duration timeout = 12;
  // If disable_back_to_source is true, dfdaemon will disable downloading back-to-source by itself.
  bool disable_back_to_source = 13;
  // If need_back_to_source is true, the scheduler will trigger the peer to download back-to-source.
  bool need_back_to_source = 14;
  // certificate_chain is the client certificates in DER format for the backend client to download back-to-source.
  repeated bytes certificate_chain = 15;
  // Prefetch pre-downloads all pieces of the task when the task is downloaded with a range request.
  bool prefetch = 16;
  // Object Storage related information.
  optional ObjectStorage object_storage = 17;
}
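
// For illustration, a minimal Download in protobuf text format (the URL and
// field values below are hypothetical):
//
//   url: "https://example.com/path/to/file"
//   type: STANDARD
//   priority: LEVEL0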

// Object Storage related information.
message ObjectStorage {
  // Region is the region of the object storage service.
  optional string region = 1 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
  // Endpoint is the endpoint of the object storage service.
  optional string endpoint = 2 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
  // Access key ID used to access the object storage service.
  optional string access_key_id = 3 [(validate.rules).string.min_len = 1];
  // Access key secret used to access the object storage service.
  optional string access_key_secret = 4 [(validate.rules).string.min_len = 1];
  // Session token used to access the S3 storage service.
  optional string session_token = 5 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
  // Local path to credential file for Google Cloud Storage service OAuth2 authentication.
  optional string credential_path = 6 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
  // Predefined ACL that used for the Google Cloud Storage service.
  optional string predefined_acl = 7 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}

// Range represents download range.
message Range {
  // Start of range.
  uint64 start = 1;
  // Length of range.
  uint64 length = 2;
}
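
// For illustration, assuming standard HTTP semantics: the request header
// "Range: bytes=1024-2047" corresponds to the following Range in protobuf
// text format:
//
//   start: 1024
//   length: 1024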

// Piece represents information of piece.
message Piece {
  // Piece number.
  uint32 number = 1;
  // Parent peer id.
  optional string parent_id = 2 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
  // Piece offset.
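  // Assuming pieces are laid out contiguously with a fixed piece length
  // (the last piece may be shorter), the offset of piece number n is
  // typically n * piece_length of the task.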
  uint64 offset = 3;
  // Piece length.
  uint64 length = 4;
  // Digest of the piece data, for example blake3:xxx or sha256:yyy.
  string digest = 5 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]{8})$", ignore_empty: true}];
  // Piece content.
  optional bytes content = 6 [(validate.rules).bytes = {min_len: 1, ignore_empty: true}];
  // Traffic type.
  optional TrafficType traffic_type = 7;
  // Time cost of downloading the piece.
  google.protobuf.Duration cost = 8 [(validate.rules).duration.required = true];
  // Piece create time.
  google.protobuf.Timestamp created_at = 9 [(validate.rules).timestamp.required = true];
}