diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index d571985..26b927b 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -6,12 +6,9 @@ on: pull_request: branches: [ main ] -env: - GO_VERSION: 1.19 - jobs: - lint: - name: Lint + golang-lint: + name: Golang Lint runs-on: ubuntu-latest timeout-minutes: 10 steps: @@ -23,6 +20,43 @@ jobs: with: version: v1.46.2 + rust-lint: + name: Rust Lint + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Install protobuf-compiler + run: sudo apt-get install protobuf-compiler + + - name: Install toolchain + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + override: true + + - name: Run cargo check + uses: actions-rs/cargo@v1 + with: + command: check + + - name: Cargo clippy + uses: actions-rs/cargo@v1 + with: + command: clippy + args: -- -D warnings + + markdown-lint: + name: Markdown Lint + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - name: Checkout code + uses: actions/checkout@v3 + - name: Markdown lint uses: docker://avtodev/markdown-lint:v1 with: diff --git a/.gitignore b/.gitignore index a0d9990..4b74240 100644 --- a/.gitignore +++ b/.gitignore @@ -62,3 +62,17 @@ Temporary Items .apdisk artifacts + +# Generated by Cargo +# will have compiled files and executables +/target/ + +# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries +# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html +Cargo.lock + +# These are backup files generated by rustfmt +**/*.rs.bk + +# Added by cargo +/target diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..741f553 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "api" +version = "0.1.0" +authors = ["Gaius "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://d7y.io" +repository = "https://github.com/dragonflyoss/api" +readme = 
"README.md" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[dependencies] +tonic = "0.8.3" +prost = "0.11" +prost-types = "0.11" +tokio = { version = "1.8.2", features = ["rt-multi-thread", "macros"] } + +[build-dependencies] +tonic-build = "0.8.3" diff --git a/Makefile b/Makefile index e2ca7f3..43c6528 100644 --- a/Makefile +++ b/Makefile @@ -32,10 +32,21 @@ generate: protoc .PHONY: generate # Generate grpc protos -protoc: - @./hack/protoc.sh +protoc: go-protoc rust-protoc .PHONY: protoc +# Generate grpc protos of golang +go-protoc: + @echo "Begin to generate grpc protos of golang." + @./hack/protoc.sh +.PHONY: go-protoc + +# Generate grpc protos of rust +rust-protoc: + @echo "Begin to generate grpc protos of rust." + @cargo build --release +.PHONY: rust-protoc + # Clear compiled files clean: @go clean @@ -47,4 +58,6 @@ help: @echo "make markdownlint run markdown lint" @echo "make generate run go generate" @echo "make protoc generate grpc protos" + @echo "make go-protoc generate grpc protos of golang" + @echo "make rust-protoc generate grpc protos of rust" @echo "make clean clean" diff --git a/build.rs b/build.rs new file mode 100644 index 0000000..920e370 --- /dev/null +++ b/build.rs @@ -0,0 +1,18 @@ +fn main() -> Result<(), Box> { + tonic_build::configure() + .build_client(true) + .build_server(true) + .out_dir("src") + .compile( + &[ + "proto/common.proto", + "proto/security.proto", + "proto/errordetails.proto", + "proto/dfdaemon.proto", + "proto/manager.proto", + "proto/scheduler.proto", + ], + &["proto/"], + )?; + Ok(()) +} diff --git a/pkg/apis/dfdaemon/v1/dfdaemon.pb.go b/pkg/apis/dfdaemon/v1/dfdaemon.pb.go index 432559a..6d2fccb 100644 --- a/pkg/apis/dfdaemon/v1/dfdaemon.pb.go +++ b/pkg/apis/dfdaemon/v1/dfdaemon.pb.go @@ -22,14 +22,13 @@ package v1 import ( - reflect "reflect" - sync "sync" - v1 "d7y.io/api/pkg/apis/common/v1" _ "github.com/envoyproxy/protoc-gen-validate/validate" protoreflect 
"google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" ) const ( diff --git a/pkg/apis/dfdaemon/v2/dfdaemon.proto b/pkg/apis/dfdaemon/v2/dfdaemon.proto index f91f551..d03a513 100644 --- a/pkg/apis/dfdaemon/v2/dfdaemon.proto +++ b/pkg/apis/dfdaemon/v2/dfdaemon.proto @@ -94,7 +94,7 @@ message StatTaskRequest { // StatTaskResponse represents response of StatTask. message StatTaskResponse { - common.Task task = 1[(validate.rules).message.required = true]; + common.Task task = 1 [(validate.rules).message.required = true]; } // ImportTaskRequest represents request of ImportTask. diff --git a/pkg/apis/manager/v2/manager.pb.go b/pkg/apis/manager/v2/manager.pb.go index 17044e3..dda8ad5 100644 --- a/pkg/apis/manager/v2/manager.pb.go +++ b/pkg/apis/manager/v2/manager.pb.go @@ -284,27 +284,27 @@ type SeedPeer struct { // Seed peer type. Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` // Seed peer idc. - Idc string `protobuf:"bytes,5,opt,name=idc,proto3" json:"idc,omitempty"` + Idc string `protobuf:"bytes,4,opt,name=idc,proto3" json:"idc,omitempty"` // Seed peer network topology. - NetTopology string `protobuf:"bytes,6,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"` + NetTopology string `protobuf:"bytes,5,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"` // Seed peer location. - Location string `protobuf:"bytes,7,opt,name=location,proto3" json:"location,omitempty"` + Location string `protobuf:"bytes,6,opt,name=location,proto3" json:"location,omitempty"` // Seed peer ip. - Ip string `protobuf:"bytes,8,opt,name=ip,proto3" json:"ip,omitempty"` + Ip string `protobuf:"bytes,7,opt,name=ip,proto3" json:"ip,omitempty"` // Seed peer grpc port. 
- Port int32 `protobuf:"varint,9,opt,name=port,proto3" json:"port,omitempty"` + Port int32 `protobuf:"varint,8,opt,name=port,proto3" json:"port,omitempty"` // Seed peer download port. - DownloadPort int32 `protobuf:"varint,10,opt,name=download_port,json=downloadPort,proto3" json:"download_port,omitempty"` + DownloadPort int32 `protobuf:"varint,9,opt,name=download_port,json=downloadPort,proto3" json:"download_port,omitempty"` // Seed peer state. - State string `protobuf:"bytes,11,opt,name=state,proto3" json:"state,omitempty"` + State string `protobuf:"bytes,10,opt,name=state,proto3" json:"state,omitempty"` // ID of the cluster to which the seed peer belongs. - SeedPeerClusterId uint64 `protobuf:"varint,12,opt,name=seed_peer_cluster_id,json=seedPeerClusterId,proto3" json:"seed_peer_cluster_id,omitempty"` + SeedPeerClusterId uint64 `protobuf:"varint,11,opt,name=seed_peer_cluster_id,json=seedPeerClusterId,proto3" json:"seed_peer_cluster_id,omitempty"` // Cluster to which the seed peer belongs. - SeedPeerCluster *SeedPeerCluster `protobuf:"bytes,13,opt,name=seed_peer_cluster,json=seedPeerCluster,proto3" json:"seed_peer_cluster,omitempty"` + SeedPeerCluster *SeedPeerCluster `protobuf:"bytes,12,opt,name=seed_peer_cluster,json=seedPeerCluster,proto3" json:"seed_peer_cluster,omitempty"` // Schedulers included in seed peer. - Schedulers []*Scheduler `protobuf:"bytes,14,rep,name=schedulers,proto3" json:"schedulers,omitempty"` + Schedulers []*Scheduler `protobuf:"bytes,13,rep,name=schedulers,proto3" json:"schedulers,omitempty"` // Seed peer object storage port. - ObjectStoragePort int32 `protobuf:"varint,15,opt,name=object_storage_port,json=objectStoragePort,proto3" json:"object_storage_port,omitempty"` + ObjectStoragePort int32 `protobuf:"varint,14,opt,name=object_storage_port,json=objectStoragePort,proto3" json:"object_storage_port,omitempty"` } func (x *SeedPeer) Reset() { @@ -526,21 +526,21 @@ type UpdateSeedPeerRequest struct { // Seed peer type. 
Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` // Seed peer idc. - Idc string `protobuf:"bytes,5,opt,name=idc,proto3" json:"idc,omitempty"` + Idc string `protobuf:"bytes,4,opt,name=idc,proto3" json:"idc,omitempty"` // Seed peer network topology. - NetTopology string `protobuf:"bytes,6,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"` + NetTopology string `protobuf:"bytes,5,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"` // Seed peer location. - Location string `protobuf:"bytes,7,opt,name=location,proto3" json:"location,omitempty"` + Location string `protobuf:"bytes,6,opt,name=location,proto3" json:"location,omitempty"` // Seed peer ip. - Ip string `protobuf:"bytes,8,opt,name=ip,proto3" json:"ip,omitempty"` + Ip string `protobuf:"bytes,7,opt,name=ip,proto3" json:"ip,omitempty"` // Seed peer port. - Port int32 `protobuf:"varint,9,opt,name=port,proto3" json:"port,omitempty"` + Port int32 `protobuf:"varint,8,opt,name=port,proto3" json:"port,omitempty"` // Seed peer download port. - DownloadPort int32 `protobuf:"varint,10,opt,name=download_port,json=downloadPort,proto3" json:"download_port,omitempty"` + DownloadPort int32 `protobuf:"varint,9,opt,name=download_port,json=downloadPort,proto3" json:"download_port,omitempty"` // ID of the cluster to which the seed peer belongs. - SeedPeerClusterId uint64 `protobuf:"varint,11,opt,name=seed_peer_cluster_id,json=seedPeerClusterId,proto3" json:"seed_peer_cluster_id,omitempty"` + SeedPeerClusterId uint64 `protobuf:"varint,10,opt,name=seed_peer_cluster_id,json=seedPeerClusterId,proto3" json:"seed_peer_cluster_id,omitempty"` // Seed peer object storage port. 
- ObjectStoragePort int32 `protobuf:"varint,12,opt,name=object_storage_port,json=objectStoragePort,proto3" json:"object_storage_port,omitempty"` + ObjectStoragePort int32 `protobuf:"varint,11,opt,name=object_storage_port,json=objectStoragePort,proto3" json:"object_storage_port,omitempty"` } func (x *UpdateSeedPeerRequest) Reset() { @@ -784,9 +784,9 @@ type Scheduler struct { // Cluster to which the scheduler belongs. SchedulerCluster *SchedulerCluster `protobuf:"bytes,11,opt,name=scheduler_cluster,json=schedulerCluster,proto3" json:"scheduler_cluster,omitempty"` // Seed peers to which the scheduler belongs. - SeedPeers []*SeedPeer `protobuf:"bytes,13,rep,name=seed_peers,json=seedPeers,proto3" json:"seed_peers,omitempty"` + SeedPeers []*SeedPeer `protobuf:"bytes,12,rep,name=seed_peers,json=seedPeers,proto3" json:"seed_peers,omitempty"` // Scheduler network topology. - NetTopology string `protobuf:"bytes,14,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"` + NetTopology string `protobuf:"bytes,13,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"` } func (x *Scheduler) Reset() { @@ -1131,11 +1131,11 @@ type ListSchedulersRequest struct { // Source service ip. Ip string `protobuf:"bytes,3,opt,name=ip,proto3" json:"ip,omitempty"` // Source service host information. - HostInfo map[string]string `protobuf:"bytes,5,rep,name=host_info,json=hostInfo,proto3" json:"host_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + HostInfo map[string]string `protobuf:"bytes,4,rep,name=host_info,json=hostInfo,proto3" json:"host_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Dfdaemon version. - Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` + Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"` // Dfdaemon commit. 
- Commit string `protobuf:"bytes,7,opt,name=commit,proto3" json:"commit,omitempty"` + Commit string `protobuf:"bytes,6,opt,name=commit,proto3" json:"commit,omitempty"` } func (x *ListSchedulersRequest) Reset() { @@ -3091,30 +3091,30 @@ var file_pkg_apis_manager_v2_manager_proto_rawDesc = []byte{ 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, - 0x0a, 0x03, 0x69, 0x64, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x63, + 0x0a, 0x03, 0x69, 0x64, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x63, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, - 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, + 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x64, 0x6f, 0x77, 0x6e, + 0x70, 0x6f, 0x72, 0x74, 0x18, 0x09, 0x20, 
0x01, 0x28, 0x05, 0x52, 0x0c, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x65, + 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x44, 0x0a, 0x11, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, + 0x73, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0f, 0x73, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x61, 0x6e, 0x61, + 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74, 0x6f, 
0x72, 0x61, 0x67, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xd0, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, @@ -3140,26 +3140,26 @@ var file_pkg_apis_manager_v2_manager_proto_rawDesc = []byte{ 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1a, 0xfa, 0x42, 0x17, 0x72, 0x15, 0x52, 0x05, 0x73, 0x75, 0x70, 0x65, 0x72, 0x52, 0x06, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x03, - 0x69, 0x64, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, + 0x69, 0x64, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x03, 0x69, 0x64, 0x63, 0x12, 0x30, 0x0a, - 0x0c, 0x6e, 0x65, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x18, 0x06, 0x20, + 0x0c, 0x6e, 0x65, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x12, - 0x27, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x27, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0x18, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x08, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x08, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x70, 0x01, 0x52, 0x02, 0x69, - 0x70, 0x12, 0x20, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x09, 0x20, 0x01, 
0x28, 0x05, 0x42, + 0x70, 0x12, 0x20, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x1a, 0x07, 0x10, 0xff, 0xff, 0x03, 0x28, 0x80, 0x08, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x31, 0x0a, 0x0d, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x1a, + 0x70, 0x6f, 0x72, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x1a, 0x07, 0x10, 0xff, 0xff, 0x03, 0x28, 0x80, 0x08, 0x52, 0x0c, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x38, 0x0a, 0x14, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x70, - 0x65, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0b, + 0x65, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, 0x28, 0x01, 0x52, 0x11, 0x73, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3e, 0x0a, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0e, 0xfa, + 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x1a, 0x09, 0x10, 0xff, 0xff, 0x03, 0x28, 0x80, 0x08, 0x40, 0x01, 0x52, 0x11, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xdc, 0x01, 0x0a, 0x10, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x43, 0x6c, @@ -3197,11 +3197,11 @@ var file_pkg_apis_manager_v2_manager_proto_rawDesc = []byte{ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x43, 
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, - 0x30, 0x0a, 0x0a, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, + 0x30, 0x0a, 0x0a, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x09, 0x73, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, - 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, + 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x22, 0xd2, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, @@ -3254,14 +3254,14 @@ var file_pkg_apis_manager_v2_manager_proto_rawDesc = []byte{ 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x70, 0x01, 0x52, 0x02, 0x69, 0x70, 0x12, 0x53, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x6e, 0x66, - 0x6f, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x6f, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x9a, 0x01, 0x02, 0x30, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x27, 0x0a, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 
0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, + 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x1a, 0x3b, 0x0a, 0x0d, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, diff --git a/pkg/apis/manager/v2/manager.proto b/pkg/apis/manager/v2/manager.proto index 662e34b..eb3505f 100644 --- a/pkg/apis/manager/v2/manager.proto +++ b/pkg/apis/manager/v2/manager.proto @@ -74,27 +74,27 @@ message SeedPeer { // Seed peer type. string type = 3; // Seed peer idc. - string idc = 5; + string idc = 4; // Seed peer network topology. - string net_topology = 6; + string net_topology = 5; // Seed peer location. - string location = 7; + string location = 6; // Seed peer ip. - string ip = 8; + string ip = 7; // Seed peer grpc port. - int32 port = 9; + int32 port = 8; // Seed peer download port. - int32 download_port = 10; + int32 download_port = 9; // Seed peer state. - string state = 11; + string state = 10; // ID of the cluster to which the seed peer belongs. - uint64 seed_peer_cluster_id = 12; + uint64 seed_peer_cluster_id = 11; // Cluster to which the seed peer belongs. - SeedPeerCluster seed_peer_cluster = 13; + SeedPeerCluster seed_peer_cluster = 12; // Schedulers included in seed peer. - repeated Scheduler schedulers = 14; + repeated Scheduler schedulers = 13; // Seed peer object storage port. 
- int32 object_storage_port = 15; + int32 object_storage_port = 14; } // GetSeedPeerRequest represents request of GetSeedPeer. @@ -118,21 +118,21 @@ message UpdateSeedPeerRequest { // Seed peer type. string type = 3 [(validate.rules).string = {in: ["super", "strong", "weak"]}]; // Seed peer idc. - string idc = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; + string idc = 4 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; // Seed peer network topology. - string net_topology = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; + string net_topology = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; // Seed peer location. - string location = 7 [(validate.rules).string = {max_len: 1024, ignore_empty: true}]; + string location = 6 [(validate.rules).string = {max_len: 1024, ignore_empty: true}]; // Seed peer ip. - string ip = 8 [(validate.rules).string = {ip: true}]; + string ip = 7 [(validate.rules).string = {ip: true}]; // Seed peer port. - int32 port = 9 [(validate.rules).int32 = {gte: 1024, lt: 65535}]; + int32 port = 8 [(validate.rules).int32 = {gte: 1024, lt: 65535}]; // Seed peer download port. - int32 download_port = 10 [(validate.rules).int32 = {gte: 1024, lt: 65535}]; + int32 download_port = 9 [(validate.rules).int32 = {gte: 1024, lt: 65535}]; // ID of the cluster to which the seed peer belongs. - uint64 seed_peer_cluster_id = 11 [(validate.rules).uint64 = {gte: 1}]; + uint64 seed_peer_cluster_id = 10 [(validate.rules).uint64 = {gte: 1}]; // Seed peer object storage port. - int32 object_storage_port = 12 [(validate.rules).int32 = {gte: 1024, lt: 65535, ignore_empty: true}]; + int32 object_storage_port = 11 [(validate.rules).int32 = {gte: 1024, lt: 65535, ignore_empty: true}]; } // SeedPeerCluster represents cluster of scheduler. @@ -178,9 +178,9 @@ message Scheduler { // Cluster to which the scheduler belongs. 
SchedulerCluster scheduler_cluster = 11; // Seed peers to which the scheduler belongs. - repeated SeedPeer seed_peers = 13; + repeated SeedPeer seed_peers = 12; // Scheduler network topology. - string net_topology = 14; + string net_topology = 13; } // GetSchedulerRequest represents request of GetScheduler. @@ -228,11 +228,11 @@ message ListSchedulersRequest { // Source service ip. string ip = 3 [(validate.rules).string.ip = true]; // Source service host information. - map host_info = 5 [(validate.rules).map.ignore_empty = true]; + map host_info = 4 [(validate.rules).map.ignore_empty = true]; // Dfdaemon version. - string version = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; + string version = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; // Dfdaemon commit. - string commit = 7 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; + string commit = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; } // ListSchedulersResponse represents response of ListSchedulers. diff --git a/proto/common.proto b/proto/common.proto new file mode 100644 index 0000000..883b709 --- /dev/null +++ b/proto/common.proto @@ -0,0 +1,231 @@ +/* + * Copyright 2022 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +syntax = "proto3"; + +package common; + +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +// SizeScope represents size scope of task. +enum SizeScope { + // size > one piece size. + NORMAL = 0; + + // 128 byte < size <= one piece size and be plain type. + SMALL = 1; + + // size <= 128 byte and be plain type. + TINY = 2; + + // size == 0 byte and be plain type. + EMPTY = 3; +} + +// TaskType represents type of task. +enum TaskType { + // DFDAEMON is dfdaemon type of task, + // dfdaemon task is a normal p2p task. + DFDAEMON = 0; + + // DFCACHE is dfcache type of task, + // dfcache task is a cache task, and the task url is fake url. + // It can only be used for caching and cannot be downloaded back to source. + DFCACHE = 1; + + // DFSTORE is dfstore type of task, + // dfstore task is a persistent task in backend. + DFSTORE = 2; +} + +// TrafficType represents type of traffic. +enum TrafficType { + // BACK_TO_SOURCE is to download traffic from the source. + BACK_TO_SOURCE = 0; + + // REMOTE_PEER is to download traffic from the remote peer. + REMOTE_PEER = 1; + + // LOCAL_PEER is to download traffic from the local peer. + LOCAL_PEER = 2; +} + +// Priority represents priority of application. +enum Priority { + // LEVEL0 has no special meaning for scheduler. + LEVEL0 = 0; + + // LEVEL1 represents the download task is forbidden, + // and an error code is returned during the registration. + LEVEL1 = 1; + + // LEVEL2 represents when the task is downloaded for the first time, + // allow peers to download from the other peers, + // but not back-to-source. When the task is not downloaded for + // the first time, it is scheduled normally. + LEVEL2 = 2; + + // LEVEL3 represents when the task is downloaded for the first time, + // the normal peer is first to download back-to-source. + // When the task is not downloaded for the first time, it is scheduled normally.
+ LEVEL3 = 3; + + // LEVEL4 represents when the task is downloaded for the first time, + // the weak peer is first triggered to back-to-source. + // When the task is not downloaded for the first time, it is scheduled normally. + LEVEL4 = 4; + + // LEVEL5 represents when the task is downloaded for the first time, + // the strong peer is first triggered to back-to-source. + // When the task is not downloaded for the first time, it is scheduled normally. + LEVEL5 = 5; + + // LEVEL6 represents when the task is downloaded for the first time, + // the super peer is first triggered to back-to-source. + // When the task is not downloaded for the first time, it is scheduled normally. + LEVEL6 = 6; +} + +// Peer metadata. +message Peer { + // Peer id. + string id = 1; + // Pieces of peer. + repeated Piece pieces = 2; + // Task info. + Task task = 3; + // Host info. + Host host = 4; + // Peer state. + string state = 5; + // Peer create time. + google.protobuf.Timestamp created_at = 6; + // Peer update time. + google.protobuf.Timestamp updated_at = 7; +} + +// Task metadata. +message Task { + // Task id. + string id = 1; + // Host type. + string type = 2; + // Task size scope. + SizeScope size_scope = 3; + // Pieces of task. + repeated Piece pieces = 4; + // Task state. + string state = 5; + // Task metadata. + Metadata metadata = 6; + // Task content length. + int64 content_length = 7; + // Task peer count. + int32 peer_count = 8; + // Task contains available peer. + bool hasAvailablePeer = 9; + // Task create time. + google.protobuf.Timestamp created_at = 10; + // Task update time. + google.protobuf.Timestamp updated_at = 11; +} + +// Host metadata. +message Host { + // Host id. + string id = 1; + // Host ipv4. + string ipv4 = 2; + // Host ipv6. + string ipv6 = 3; + // Peer hostname. + string hostname = 4; + // Port of grpc service. + int32 port = 5; + // Port of download server. + int32 download_port = 6; + // Security domain for network. 
+ string security_domain = 7; + // Host location(area, country, province, city, etc.). + repeated string location = 8; + // IDC where the peer host is located. + string idc = 9; + // Network topology(switch, router, etc.). + repeated string net_topology = 10; +} + +// Range represents download range. +message Range { + // Begin of range. + uint64 begin = 1; + // End of range. + uint64 end = 2; +} + +// Metadata represents metadata of task. +message Metadata { + // Download url. + string url = 1; + // Digest of the pieces digest, for example md5:xxx or sha256:yyy. + string digest = 2; + // Range is url range of request. + Range range = 3; + // Task type. + common.TaskType type = 4; + // URL tag identifies different task for same url. + string tag = 5; + // Application of task. + string application = 6; + // Peer priority. + Priority priority = 7; + // Filter url used to generate task id. + repeated string filters = 8; + // Task request headers. + map header = 9; + // Task piece size. + int32 piece_size = 10; +} + +// Piece represents information of piece. +message Piece { + // Piece number. + uint32 number = 1; + // Parent peer id. + string parent_id = 2; + // Piece offset. + uint64 offset = 3; + // Piece size. + uint64 size = 4; + // Digest of the piece data, for example md5:xxx or sha256:yyy. + string digest = 5; + // Traffic type. + TrafficType traffic_type = 6; + // Downloading piece costs time. + google.protobuf.Duration cost = 7; + // Piece create time. + google.protobuf.Timestamp created_at = 8; +} + +// ExtendAttribute represents extend of attribution. 
+message ExtendAttribute { + // Task response header, eg: HTTP Response Header + map header = 1; + // Task response code, eg: HTTP Status Code + int32 status_code = 2; + // Task response status, eg: HTTP Status + string status = 3; +} diff --git a/proto/dfdaemon.proto b/proto/dfdaemon.proto new file mode 100644 index 0000000..8fcd04d --- /dev/null +++ b/proto/dfdaemon.proto @@ -0,0 +1,142 @@ +/* + * Copyright 2022 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +syntax = "proto3"; + +package dfdaemon; + +import "common.proto"; +import "errordetails.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; + +// InterestedAllPiecesRequest represents interested all pieces request of SyncPiecesRequest. +message InterestedAllPiecesRequest { +} + +// InterestedPiecesRequest represents interested pieces request of SyncPiecesRequest. +message InterestedPiecesRequest { + // Interested piece numbers. + repeated uint32 piece_numbers = 1; +} + +// StatMetadata represents stat metadata request of SyncPiecesRequest. +message StatMetadataRequest { +} + +// SyncPiecesRequest represents request of AnnouncePeer. 
+message SyncPiecesRequest{ + oneof request { + InterestedAllPiecesRequest interested_all_pieces_request = 1; + InterestedPiecesRequest interested_pieces_request = 2; + StatMetadataRequest stat_metadata_request = 3; + } +} + +// InterestedPiecesResponse represents interested pieces response of SyncPiecesResponse. +message InterestedPiecesResponse { + // Interested pieces of task. + repeated common.Piece pieces = 1; +} + +// StatMetadata represents stat metadata request of SyncPiecesResponse. +message StatMetadataResponse { + // Task metadata. + common.Metadata metadata = 1; +} + +// SyncPiecesResponse represents response of SyncPieces. +message SyncPiecesResponse { + oneof response { + InterestedPiecesResponse interested_pieces_response = 1; + StatMetadataResponse stat_metadata_response = 2; + } + + oneof errordetails { + errordetails.SyncPiecesFailed sync_pieces_failed = 3; + errordetails.StatMetadataFailed stat_metadata_failed = 4; + } +} + +// TriggerTaskRequest represents request of TriggerTask. +message TriggerTaskRequest { + // Task id. + string task_id = 1; + // Task metadata. + common.Metadata metadata = 2; +} + +// StatTaskRequest represents request of StatTask. +message StatTaskRequest { + // Task id. + string task_id = 1; +} + +// StatTaskResponse represents response of StatTask. +message StatTaskResponse { + common.Task task = 1; +} + +// ImportTaskRequest represents request of ImportTask. +message ImportTaskRequest { + // Task metadata. + common.Metadata metadata = 1; + // File path to be imported. + string path = 2; +} + +// ExportTaskRequest represents request of ExportTask. +message ExportTaskRequest { + // Task metadata. + common.Metadata metadata = 1; + // File path to be exported. + string path = 2; + // Download timeout. + google.protobuf.Duration timeout = 3; + // Download rate limit in bytes per second. + double download_rate_limit = 4; + // User id. + uint64 uid = 5; + // Group id. 
+ uint64 gid = 6; +} + +// DeleteTaskRequest represents request of DeleteTask. +message DeleteTaskRequest { + // Task id. + string task_id = 1; +} + +// Dfdaemon RPC Service. +service Dfdaemon{ + // SyncPieces syncs pieces from the other peers. + rpc SyncPieces(stream SyncPiecesRequest)returns(stream SyncPiecesResponse); + + // TriggerTask triggers task back-to-source download. + rpc TriggerTask(TriggerTaskRequest) returns(google.protobuf.Empty); + + // StatTask stats task information. + rpc StatTask(StatTaskRequest) returns(common.Task); + + // ImportTask imports task to p2p network. + rpc ImportTask(ImportTaskRequest) returns(google.protobuf.Empty); + + // ExportTask exports task from p2p network. + rpc ExportTask(ExportTaskRequest) returns(google.protobuf.Empty); + + // DeleteTask deletes task from p2p network. + rpc DeleteTask(DeleteTaskRequest) returns(google.protobuf.Empty); +} diff --git a/proto/errordetails.proto b/proto/errordetails.proto new file mode 100644 index 0000000..21c390a --- /dev/null +++ b/proto/errordetails.proto @@ -0,0 +1,81 @@ +/* + * Copyright 2022 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +syntax = "proto3"; + +package errordetails; + +import "common.proto"; + +// DownloadPeerBackToSourceFailed is error detail of downloading peer back-to-source. +message DownloadPeerBackToSourceFailed { + // The description of the error. 
+ string description = 1; +} + +// DownloadPieceBackToSourceFailed is error detail of downloading piece back-to-source. +message DownloadPieceBackToSourceFailed { + // Temporary recoverable error of source. + bool temporary = 1; + // Source response metadata, eg: HTTP Status Code, HTTP Status, HTTP Header + common.ExtendAttribute metadata = 2; + // The number of piece. + uint32 piece_number = 3; + // The description of the error. + string description = 4; +} + +// DownloadPieceFailed is error detail of downloading piece. +message DownloadPieceFailed { + // Temporary recoverable error of parent peer. + bool temporary = 1; + // Source response metadata, eg: HTTP Status Code, HTTP Status, HTTP Header + common.ExtendAttribute metadata = 2; + // Piece is information of piece. + string parent_id = 3; + // The number of piece. + uint32 piece_number = 4; + // The description of the error. + string description = 5; +} + +// SchedulePeerForbidden is error detail of forbidden. +message SchedulePeerForbidden { + // The description of the error. + string description = 1; +} + +// SchedulePeerFailed is error detail of scheduling. +message SchedulePeerFailed { + // The description of the error. + string description = 1; +} + +// SyncPiecesFailed is error detail of syncing pieces. +message SyncPiecesFailed { + // Temporary recoverable error of parent peer. + bool temporary = 1; + // Parent peer id. + string parent_id = 2; + // The description of the error. + string description = 3; +} + +// StatMetadataFailed is error detail of stat metadata. +message StatMetadataFailed { + // The description of the error. + string description = 1; +} diff --git a/proto/manager.proto b/proto/manager.proto new file mode 100644 index 0000000..2acccb5 --- /dev/null +++ b/proto/manager.proto @@ -0,0 +1,575 @@ +/* + * Copyright 2022 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +syntax = "proto3"; + +package manager; + +import "common.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +// Request source type. +enum SourceType { + // Scheduler service. + SCHEDULER_SOURCE = 0; + // Peer service. + PEER_SOURCE = 1; + // SeedPeer service. + SEED_PEER_SOURCE = 2; +} + +// SecurityGroup represents security group of cluster. +message SecurityGroup { + // Group id. + uint64 id = 1; + // Group name. + string name = 2; + // Group biography. + string bio = 3; + // Group domain. + string domain = 4; + // Group proxy domain. + string proxy_domain = 5; +} + +// SeedPeerCluster represents cluster of seed peer. +message SeedPeerCluster { + // Cluster id. + uint64 id = 1; + // Cluster name. + string name = 2; + // Cluster biography. + string bio = 3; + // Cluster configuration. + bytes config = 4; + // Cluster scopes. + bytes scopes = 5; + // Security group to which the seed peer cluster belongs. + SecurityGroup security_group = 6; +} + +// SeedPeer represents seed peer for network. +message SeedPeer { + // Seed peer id. + uint64 id = 1; + // Seed peer hostname. + string host_name = 2; + // Seed peer type. + string type = 3; + // Seed peer idc. + string idc = 5; + // Seed peer network topology. + string net_topology = 6; + // Seed peer location. + string location = 7; + // Seed peer ip. + string ip = 8; + // Seed peer grpc port. + int32 port = 9; + // Seed peer download port. + int32 download_port = 10; + // Seed peer state. 
+ string state = 11; + // ID of the cluster to which the seed peer belongs. + uint64 seed_peer_cluster_id = 12; + // Cluster to which the seed peer belongs. + SeedPeerCluster seed_peer_cluster = 13; + // Schedulers included in seed peer. + repeated Scheduler schedulers = 14; + // Seed peer object storage port. + int32 object_storage_port = 15; +} + +// GetSeedPeerRequest represents request of GetSeedPeer. +message GetSeedPeerRequest { + // Request source type. + SourceType source_type = 1; + // Seed peer hostname. + string host_name = 2; + // ID of the cluster to which the seed peer belongs. + uint64 seed_peer_cluster_id = 3; + // Seed peer ip. + string ip = 4; +} + +// UpdateSeedPeerRequest represents request of UpdateSeedPeer. +message UpdateSeedPeerRequest { + // Request source type. + SourceType source_type = 1; + // Seed peer hostname. + string host_name = 2; + // Seed peer type. + string type = 3; + // Seed peer idc. + string idc = 4; + // Seed peer network topology. + string net_topology = 5; + // Seed peer location. + string location = 6; + // Seed peer ip. + string ip = 7; + // Seed peer port. + int32 port = 8; + // Seed peer download port. + int32 download_port = 9; + // ID of the cluster to which the seed peer belongs. + uint64 seed_peer_cluster_id = 10; + // Seed peer object storage port. + int32 object_storage_port = 11; +} + +// SeedPeerCluster represents cluster of scheduler. +message SchedulerCluster { + // Cluster id. + uint64 id = 1; + // Cluster name. + string name = 2; + // Cluster biography. + string bio = 3; + // Cluster config. + bytes config = 4; + // Cluster client config. + bytes client_config = 5; + // Cluster scopes. + bytes scopes = 6; + // Security group to which the scheduler cluster belongs. + SecurityGroup security_group = 7; +} + +// SeedPeerCluster represents scheduler for network. +message Scheduler { + // Scheduler id. + uint64 id = 1; + // Scheduler hostname. + string host_name = 2; + // Deprecated: Do not use. 
+ string vips = 3; + // Scheduler idc. + string idc = 4; + // Scheduler location. + string location = 5; + // Deprecated: Use net_topology instead. + bytes net_config = 6; + // Scheduler ip. + string ip = 7; + // Scheduler grpc port. + int32 port = 8; + // Scheduler state. + string state = 9; + // ID of the cluster to which the scheduler belongs. + uint64 scheduler_cluster_id = 10; + // Cluster to which the scheduler belongs. + SchedulerCluster scheduler_cluster = 11; + // Seed peers to which the scheduler belongs. + repeated SeedPeer seed_peers = 12; + // Scheduler network topology. + string net_topology = 13; +} + +// GetSchedulerRequest represents request of GetScheduler. +message GetSchedulerRequest { + // Request source type. + SourceType source_type = 1; + // Scheduler hostname. + string host_name = 2; + // ID of the cluster to which the scheduler belongs. + uint64 scheduler_cluster_id = 3; + // Scheduler ip. + string ip = 4; +} + +// UpdateSchedulerRequest represents request of UpdateScheduler. +message UpdateSchedulerRequest { + // Request source type. + SourceType source_type = 1; + // Scheduler hostname. + string host_name = 2; + // ID of the cluster to which the scheduler belongs. + uint64 scheduler_cluster_id = 3; + // Deprecated: Do not use. + string vips = 4; + // Scheduler idc. + string idc = 5; + // Scheduler location. + string location = 6; + // Deprecated: Use net_topology instead. + bytes net_config = 7; + // Scheduler ip. + string ip = 8; + // Scheduler port. + int32 port = 9; + // Scheduler network topology. + string net_topology = 10; +} + +// ListSchedulersRequest represents request of ListSchedulers. +message ListSchedulersRequest { + // Request source type. + SourceType source_type = 1; + // Source service hostname. + string host_name = 2; + // Source service ip. + string ip = 3; + // Source service host information. + map host_info = 4; + // Dfdaemon version. + string version = 5; + // Dfdaemon commit. 
+ string commit = 6; +} + +// ListSchedulersResponse represents response of ListSchedulers. +message ListSchedulersResponse { + // Schedulers to which the source service belongs. + repeated Scheduler schedulers = 1; +} + +// ObjectStorage represents config of object storage. +message ObjectStorage { + // Object storage name of type. + string name = 1; + // Storage region. + string region = 2; + // Datacenter endpoint. + string endpoint = 3; + // Access key id. + string access_key = 4; + // Access key secret. + string secret_key = 5; +} + +// GetObjectStorageRequest represents request of GetObjectStorage. +message GetObjectStorageRequest { + // Request source type. + SourceType source_type = 1; + // Source service hostname. + string host_name = 2; + // Source service ip. + string ip = 3; +} + +// Bucket represents config of bucket. +message Bucket { + // Bucket name. + string name = 1; +} + +// ListSchedulersRequest represents request of ListBuckets. +message ListBucketsRequest { + // Request source type. + SourceType source_type = 1; + // Source service hostname. + string host_name = 2; + // Source service ip. + string ip = 3; +} + +// ListBucketsResponse represents response of ListBuckets. +message ListBucketsResponse { + // Bucket configs. + repeated Bucket buckets = 1; +} + +// Model represents information of model. +message Model { + // Model id. + string model_id = 1; + // Model name. + string name = 2; + // Model version id. + string version_id = 3; + // Scheduler id. + uint64 scheduler_id = 4; + // Scheduler hostname. + string host_name = 5; + // Scheduler ip. + string ip = 6; + // Model create time. + google.protobuf.Timestamp created_at = 7; + // Model update time. + google.protobuf.Timestamp updated_at = 8; +} + +// ListModelsRequest represents request of ListModels. +message ListModelsRequest { + // Scheduler id. + uint64 scheduler_id = 1; +} + +// ListModelsResponse represents response of ListModels. 
+message ListModelsResponse { + // Model informations. + repeated Model models = 1; +} + +// GetModelRequest represents request of GetModel. +message GetModelRequest { + // Scheduler id. + uint64 scheduler_id = 1; + // Model id. + string model_id = 2; +} + +// CreateModelRequest represents request of CreateModel. +message CreateModelRequest { + // Model id. + string model_id = 1; + // Model name. + string name = 2; + // Model version id. + string version_id = 3; + // Scheduler id. + uint64 scheduler_id = 4; + // Scheduler hostname. + string host_name = 5; + // Scheduler ip. + string ip = 6; +} + +// UpdateModelRequest represents request of UpdateModel. +message UpdateModelRequest { + // Model id. + string model_id = 1; + // Model name. + string name = 2; + // Model version id. + string version_id = 3; + // Scheduler id. + uint64 scheduler_id = 4; + // Scheduler hostname. + string host_name = 5; + // Scheduler ip. + string ip = 6; +} + +// DeleteModelRequest represents request of DeleteModel. +message DeleteModelRequest { + // Scheduler id. + uint64 scheduler_id = 1; + // Model id. + string model_id = 2; +} + +// ModelVersion represents information of model version. +message ModelVersion { + // Model version id. + string version_id = 1; + // Model version data. + bytes data = 2; + // Model version mae. + double mae = 3; + // Model version mse. + double mse = 4; + // Model version rmse. + double rmse = 5; + // Model version r^2. + double r2 = 6; + // Model create time. + google.protobuf.Timestamp created_at = 7; + // Model update time. + google.protobuf.Timestamp updated_at = 8; +} + +// ListModelVersionsRequest represents request of ListModelVersions. +message ListModelVersionsRequest { + // Scheduler id. + uint64 scheduler_id = 1; + // Model id. + string model_id = 2; +} + +// ListModelVersionsResponse represents response of ListModelVersions. +message ListModelVersionsResponse { + // Model version informations. 
+ repeated ModelVersion model_versions = 1; +} + +// GetModelVersionRequest represents request of GetModelVersion. +message GetModelVersionRequest { + // Scheduler id. + uint64 scheduler_id = 1; + // Model id. + string model_id = 2; + // Model version id. + string version_id = 3; +} + +// CreateModelVersionRequest represents request of CreateModelVersion. +message CreateModelVersionRequest { + // Scheduler id. + uint64 scheduler_id = 1; + // Model id. + string model_id = 2; + // Model version data. + bytes data = 3; + // Model version mae. + double mae = 4; + // Model version mse. + double mse = 5; + // Model version rmse. + double rmse = 6; + // Model version r^2. + double r2 = 7; +} + +// UpdateModelVersionRequest represents request of UpdateModelVersion. +message UpdateModelVersionRequest { + // Model version id. + string version_id = 1; + // Scheduler id. + uint64 scheduler_id = 2; + // Model id. + string model_id = 3; + // Model version data. + bytes data = 4; + // Model version mae. + double mae = 5; + // Model version mse. + double mse = 6; + // Model version rmse. + double rmse = 7; + // Model version r^2. + double r2 = 8; +} + +// DeleteModelVersionRequest represents request of DeleteModelVersion. +message DeleteModelVersionRequest { + // Scheduler id. + uint64 scheduler_id = 1; + // Model id. + string model_id = 2; + // Model version id. + string version_id = 3; +} + +// URLPriority represents config of url priority. +message URLPriority { + // URL regex. + string regex = 1; + // URL priority value. + common.Priority value = 2; +} + +// ApplicationPriority represents config of application priority. +message ApplicationPriority { + // Priority value. + common.Priority value = 1; + // URL priority. + repeated URLPriority urls = 2; +} + +// Application represents config of application. +message Application { + // Application id. + uint64 id = 1; + // Application name. + string name = 2; + // Application url. + string url = 3; + // Application biography. 
+ string bio = 4; + // Application priority. + ApplicationPriority priority = 5; +} + +// ListApplicationsRequest represents request of ListApplications. +message ListApplicationsRequest { + // Request source type. + SourceType source_type = 1; + // Source service hostname. + string host_name = 2; + // Source service ip. + string ip = 3; +} + +// ListApplicationsResponse represents response of ListApplications. +message ListApplicationsResponse { + // Application configs. + repeated Application applications = 1; +} + +// KeepAliveRequest represents request of KeepAlive. +message KeepAliveRequest { + // Request source type. + SourceType source_type = 1; + // Source service hostname. + string host_name = 2; + // ID of the cluster to which the source service belongs. + uint64 cluster_id = 3; + // Source service ip. + string ip = 4; +} + +// Manager RPC Service. +service Manager { + // Get SeedPeer and SeedPeer cluster configuration. + rpc GetSeedPeer(GetSeedPeerRequest) returns(SeedPeer); + + // Update SeedPeer configuration. + rpc UpdateSeedPeer(UpdateSeedPeerRequest) returns(SeedPeer); + + // Get Scheduler and Scheduler cluster configuration. + rpc GetScheduler(GetSchedulerRequest)returns(Scheduler); + + // Update scheduler configuration. + rpc UpdateScheduler(UpdateSchedulerRequest) returns(Scheduler); + + // List acitve schedulers configuration. + rpc ListSchedulers(ListSchedulersRequest)returns(ListSchedulersResponse); + + // Get ObjectStorage configuration. + rpc GetObjectStorage(GetObjectStorageRequest) returns(ObjectStorage); + + // List buckets configuration. + rpc ListBuckets(ListBucketsRequest)returns(ListBucketsResponse); + + // List models information. + rpc ListModels(ListModelsRequest)returns(ListModelsResponse); + + // Get model information. + rpc GetModel(GetModelRequest)returns(Model); + + // Create model information. + rpc CreateModel(CreateModelRequest)returns(Model); + + // Update model information. 
+ rpc UpdateModel(UpdateModelRequest)returns(Model); + + // Delete model information. + rpc DeleteModel(DeleteModelRequest)returns(google.protobuf.Empty); + + // List model versions information. + rpc ListModelVersions(ListModelVersionsRequest)returns(ListModelVersionsResponse); + + // Get model version information. + rpc GetModelVersion(GetModelVersionRequest)returns(ModelVersion); + + // Create model version information. + rpc CreateModelVersion(CreateModelVersionRequest)returns(ModelVersion); + + // Update model version information. + rpc UpdateModelVersion(UpdateModelVersionRequest)returns(ModelVersion); + + // Delete model version information. + rpc DeleteModelVersion(DeleteModelVersionRequest)returns(google.protobuf.Empty); + + // List applications configuration. + rpc ListApplications(ListApplicationsRequest)returns(ListApplicationsResponse); + + // KeepAlive with manager. + rpc KeepAlive(stream KeepAliveRequest)returns(google.protobuf.Empty); +} diff --git a/proto/scheduler.proto b/proto/scheduler.proto new file mode 100644 index 0000000..b75b42b --- /dev/null +++ b/proto/scheduler.proto @@ -0,0 +1,335 @@ +/* + * Copyright 2022 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +syntax = "proto3"; + +package scheduler; + +import "common.proto"; +import "errordetails.proto"; +import "google/protobuf/empty.proto"; + +// RegisterPeerRequest represents peer registered request of AnnouncePeerRequest. 
+message RegisterPeerRequest { + // Task id. + string task_id = 1; + // Peer id. + string peer_id = 2; + // Task metadata. + common.Metadata metadata = 3; +} + +// DownloadPeerStartedRequest represents peer download started request of AnnouncePeerRequest. +message DownloadPeerStartedRequest { +} + +// DownloadPeerBackToSourceStartedRequest represents peer download back-to-source started request of AnnouncePeerRequest. +message DownloadPeerBackToSourceStartedRequest { + // Download back-to-source reason. + string reason = 1; +} + +// DownloadPeerFinishedRequest represents peer download finished request of AnnouncePeerRequest. +message DownloadPeerFinishedRequest { + // Total content length. + int64 content_length = 1; + // Total piece count. + int64 piece_count = 2; +} + +// DownloadPeerBackToSourceFinishedRequest represents peer download back-to-source finished request of AnnouncePeerRequest. +message DownloadPeerBackToSourceFinishedRequest { + // Total content length. + int64 content_length = 1; + // Total piece count. + int64 piece_count = 2; +} + +// DownloadPieceFinishedRequest represents piece download finished request of AnnouncePeerRequest. +message DownloadPieceFinishedRequest { + // Piece info. + common.Piece piece = 1; +} + +// DownloadPieceBackToSourceFinishedRequest represents piece download back-to-source finished request of AnnouncePeerRequest. +message DownloadPieceBackToSourceFinishedRequest { + // Piece info. + common.Piece piece = 1; +} + +// AnnouncePeerRequest represents request of AnnouncePeer. 
+message AnnouncePeerRequest { + oneof request { + RegisterPeerRequest register_peer_request = 1; + DownloadPeerStartedRequest download_peer_started_request = 2; + DownloadPeerBackToSourceStartedRequest download_peer_back_to_source_started_request = 3; + DownloadPeerFinishedRequest download_peer_finished_request = 4; + DownloadPeerBackToSourceFinishedRequest download_peer_back_to_source_finished_request = 5; + DownloadPieceFinishedRequest download_piece_finished_request = 6; + DownloadPieceBackToSourceFinishedRequest download_piece_back_to_source_finished_request = 7; + } + + oneof errordetails { + errordetails.DownloadPeerBackToSourceFailed download_peer_back_to_source_failed = 8; + errordetails.DownloadPieceBackToSourceFailed download_piece_back_to_source_failed = 9; + errordetails.SyncPiecesFailed sync_pieces_failed = 10; + errordetails.DownloadPieceFailed download_piece_failed = 11; + } +} + +// TinyTaskResponse represents tiny task response of AnnouncePeerResponse. +message TinyTaskResponse { + bytes data = 1; +} + +// SmallTaskResponse represents small task response of AnnouncePeerResponse. +message SmallTaskResponse { + // Piece info. + common.Piece piece = 1; +} + +// NormalTaskResponse represents normal task response of AnnouncePeerResponse. +message NormalTaskResponse { + // Candidate parents. + repeated common.Peer candidate_parents = 1; + // Concurrent downloading count from main peer. + int32 parallel_count = 2; +} + +// NeedBackToSourceResponse represents need back-to-source response of AnnouncePeerResponse. +message NeedBackToSourceResponse { + // Download back-to-source reason. + string reason = 1; +} + +// AnnouncePeerResponse represents response of AnnouncePeer. 
+message AnnouncePeerResponse { + oneof response { + TinyTaskResponse tiny_task_response = 1; + SmallTaskResponse small_task_response = 2; + NormalTaskResponse normal_task_response = 3; + NeedBackToSourceResponse need_back_to_source_response = 4; + } + + oneof errordetails { + errordetails.SchedulePeerForbidden schedule_peer_forbidden = 5; + errordetails.SchedulePeerFailed schedule_peer_failed = 6; + } +} + +// StatPeerRequest represents request of StatPeer. +message StatPeerRequest { + // Task id. + string task_id = 1; + // Peer id. + string peer_id = 2; +} + +// TODO exchange peer request definition. +// ExchangePeerRequest represents request of ExchangePeer. +message ExchangePeerRequest { + // Task id. + string task_id = 1; + // Peer id. + string peer_id = 2; +} + +// TODO exchange peer response definition. +// ExchangePeerResponse represents response of ExchangePeer. +message ExchangePeerResponse { +} + +// LeavePeerRequest represents request of LeavePeer. +message LeavePeerRequest { + // Peer id. + string id = 1; +} + +// StatTaskRequest represents request of StatTask. +message StatTaskRequest { + // Task id. + string id = 1; +} + +// AnnounceHostRequest represents request of AnnounceHost. +message AnnounceHostRequest { + // Host id. + string id = 1; + // Host type. + uint32 type = 2; + // Hostname. + string hostname = 3; + // Host ip. + string ip = 4; + // Port of grpc service. + int32 port = 5; + // Port of download server. + int32 download_port = 6; + // Host OS. + string os = 7; + // Host platform. + string platform = 8; + // Host platform family. + string platform_family = 9; + // Host platform version. + string platform_version = 10; + // Host kernel version. + string kernel_version = 11; + // CPU Stat. + CPU cpu = 12; + // Memory Stat. + Memory memory = 13; + // Network Stat. + Network network = 14; + // Disk Stat. + Disk disk = 15; + // Build information. + Build build = 16; +} + +// CPU Stat. +message CPU { + // Number of logical cores in the system. 
+ uint32 logical_count = 1; + // Number of physical cores in the system + uint32 physical_count = 2; + // Percent calculates the percentage of cpu used. + double percent = 3; + // Calculates the percentage of cpu used by process. + double process_percent = 4; + // CPUTimes contains the amounts of time the CPU has spent performing different kinds of work. + CPUTimes times = 5; +} + +// CPUTimes contains the amounts of time the CPU has spent performing different +// kinds of work. Time units are in seconds. +message CPUTimes { + // CPU time of user. + double user = 1; + // CPU time of system. + double system = 2; + // CPU time of idle. + double idle = 3; + // CPU time of nice. + double nice = 4; + // CPU time of iowait. + double iowait = 5; + // CPU time of irq. + double irq = 6; + // CPU time of softirq. + double softirq = 7; + // CPU time of steal. + double steal = 8; + // CPU time of guest. + double guest = 9; + // CPU time of guest nice. + double guest_nice = 10; +} + +// Memory Stat. +message Memory { + // Total amount of RAM on this system. + uint64 total = 1; + // RAM available for programs to allocate. + uint64 available = 2; + // RAM used by programs. + uint64 used = 3; + // Percentage of RAM used by programs. + double used_percent = 4; + // Calculates the percentage of memory used by process. + double process_used_percent = 5; + // This is the kernel's notion of free memory. + uint64 free = 6; +} + +// Network Stat. +message Network { + // Return count of tcp connections opened and status is ESTABLISHED. + uint32 tcp_connection_count = 1; + // Return count of upload tcp connections opened and status is ESTABLISHED. + uint32 upload_tcp_connection_count = 2; + // Security domain for network. + string security_domain = 3; + // Location path(area|country|province|city|...). + string location = 4; + // IDC where the peer host is located + string idc = 5; + // Network topology(switch|router|...). + string net_topology = 6; +} + +// Disk Stat. 
+message Disk { + // Total amount of disk on the data path of dragonfly. + uint64 total = 1; + // Free amount of disk on the data path of dragonfly. + uint64 free = 2; + // Used amount of disk on the data path of dragonfly. + uint64 used = 3; + // Used percent of disk on the data path of dragonfly directory. + double used_percent = 4; + // Total amount of indoes on the data path of dragonfly directory. + uint64 inodes_total = 5; + // Used amount of indoes on the data path of dragonfly directory. + uint64 inodes_used = 6; + // Free amount of indoes on the data path of dragonfly directory. + uint64 inodes_free = 7; + // Used percent of indoes on the data path of dragonfly directory. + double inodes_used_percent = 8; +} + +// Build information. +message Build { + // Git version. + string git_version = 1; + // Git commit. + string git_commit = 2; + // Golang version. + string go_version = 3; + // Build platform. + string platform = 4; +} + +// LeaveHostRequest represents request of LeaveHost. +message LeaveHostRequest{ + // Host id. + string id = 1; +} + +// Scheduler RPC Service. +service Scheduler{ + // AnnouncePeer announces peer to scheduler. + rpc AnnouncePeer(stream AnnouncePeerRequest) returns(stream AnnouncePeerResponse); + + // Checks information of peer. + rpc StatPeer(StatPeerRequest)returns(common.Peer); + + // LeavePeer releases peer in scheduler. + rpc LeavePeer(LeavePeerRequest)returns(google.protobuf.Empty); + + // TODO exchange peer api definition. + // ExchangePeer exchanges peer information. + rpc ExchangePeer(ExchangePeerRequest)returns(ExchangePeerResponse); + + // Checks information of task. + rpc StatTask(StatTaskRequest)returns(common.Task); + + // AnnounceHost announces host to scheduler. + rpc AnnounceHost(AnnounceHostRequest)returns(google.protobuf.Empty); + + // LeaveHost releases host in scheduler. 
+ rpc LeaveHost(LeaveHostRequest)returns(google.protobuf.Empty); +} diff --git a/proto/security.proto b/proto/security.proto new file mode 100644 index 0000000..cf03b56 --- /dev/null +++ b/proto/security.proto @@ -0,0 +1,51 @@ +/* + * Copyright 2022 The Dragonfly Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +syntax = "proto3"; + +package security; + +import "google/protobuf/duration.proto"; + +// Refer: https://github.com/istio/api/blob/master/security/v1alpha1/ca.proto +// Istio defines similar api for signing certificate, but it's not applicable in Dragonfly. + +// Certificate request type. +// Dragonfly supports peers authentication with Mutual TLS(mTLS) +// For mTLS, all peers need to request TLS certificates for communicating +// The server side may overwrite any requested certificate field based on its policies. +message CertificateRequest { + // ASN.1 DER form certificate request. + // The public key in the CSR is used to generate the certificate, + // and other fields in the generated certificate may be overwritten by the CA. + bytes csr = 1; + // Optional: requested certificate validity period. + google.protobuf.Duration validity_period = 2; +} + +// Certificate response type. +message CertificateResponse { + // ASN.1 DER form certificate chain. + repeated bytes certificate_chain = 1; +} + +// Service for managing certificates issued by the CA. +service CertificateService { + // Using provided CSR, returns a signed certificate. 
+ rpc IssueCertificate(CertificateRequest) + returns (CertificateResponse) { + } +} diff --git a/src/common.rs b/src/common.rs new file mode 100644 index 0000000..bef35ca --- /dev/null +++ b/src/common.rs @@ -0,0 +1,362 @@ +/// Peer metadata. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Peer { + /// Peer id. + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Pieces of peer. + #[prost(message, repeated, tag = "2")] + pub pieces: ::prost::alloc::vec::Vec, + /// Task info. + #[prost(message, optional, tag = "3")] + pub task: ::core::option::Option, + /// Host info. + #[prost(message, optional, tag = "4")] + pub host: ::core::option::Option, + /// Peer state. + #[prost(string, tag = "5")] + pub state: ::prost::alloc::string::String, + /// Peer create time. + #[prost(message, optional, tag = "6")] + pub created_at: ::core::option::Option<::prost_types::Timestamp>, + /// Peer update time. + #[prost(message, optional, tag = "7")] + pub updated_at: ::core::option::Option<::prost_types::Timestamp>, +} +/// Task metadata. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Task { + /// Task id. + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Host type. + #[prost(string, tag = "2")] + pub r#type: ::prost::alloc::string::String, + /// Task size scope. + #[prost(enumeration = "SizeScope", tag = "3")] + pub size_scope: i32, + /// Pieces of task. + #[prost(message, repeated, tag = "4")] + pub pieces: ::prost::alloc::vec::Vec, + /// Task state. + #[prost(string, tag = "5")] + pub state: ::prost::alloc::string::String, + /// Task metadata. + #[prost(message, optional, tag = "6")] + pub metadata: ::core::option::Option, + /// Task content length. + #[prost(int64, tag = "7")] + pub content_length: i64, + /// Task peer count. + #[prost(int32, tag = "8")] + pub peer_count: i32, + /// Task contains available peer. 
+ #[prost(bool, tag = "9")] + pub has_available_peer: bool, + /// Task create time. + #[prost(message, optional, tag = "10")] + pub created_at: ::core::option::Option<::prost_types::Timestamp>, + /// Task update time. + #[prost(message, optional, tag = "11")] + pub updated_at: ::core::option::Option<::prost_types::Timestamp>, +} +/// Host metadata. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Host { + /// Host id. + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Host ipv4. + #[prost(string, tag = "2")] + pub ipv4: ::prost::alloc::string::String, + /// Host ipv6. + #[prost(string, tag = "3")] + pub ipv6: ::prost::alloc::string::String, + /// Peer hostname. + #[prost(string, tag = "4")] + pub hostname: ::prost::alloc::string::String, + /// Port of grpc service. + #[prost(int32, tag = "5")] + pub port: i32, + /// Port of download server. + #[prost(int32, tag = "6")] + pub download_port: i32, + /// Security domain for network. + #[prost(string, tag = "7")] + pub security_domain: ::prost::alloc::string::String, + /// Host location(area, country, province, city, etc.). + #[prost(string, repeated, tag = "8")] + pub location: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// IDC where the peer host is located. + #[prost(string, tag = "9")] + pub idc: ::prost::alloc::string::String, + /// Network topology(switch, router, etc.). + #[prost(string, repeated, tag = "10")] + pub net_topology: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Range represents download range. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Range { + /// Begin of range. + #[prost(uint64, tag = "1")] + pub begin: u64, + /// End of range. + #[prost(uint64, tag = "2")] + pub end: u64, +} +/// Metadata represents metadata of task. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Metadata { + /// Download url. + #[prost(string, tag = "1")] + pub url: ::prost::alloc::string::String, + /// Digest of the pieces digest, for example md5:xxx or sha256:yyy. + #[prost(string, tag = "2")] + pub digest: ::prost::alloc::string::String, + /// Range is url range of request. + #[prost(message, optional, tag = "3")] + pub range: ::core::option::Option, + /// Task type. + #[prost(enumeration = "TaskType", tag = "4")] + pub r#type: i32, + /// URL tag identifies different task for same url. + #[prost(string, tag = "5")] + pub tag: ::prost::alloc::string::String, + /// Application of task. + #[prost(string, tag = "6")] + pub application: ::prost::alloc::string::String, + /// Peer priority. + #[prost(enumeration = "Priority", tag = "7")] + pub priority: i32, + /// Filter url used to generate task id. + #[prost(string, repeated, tag = "8")] + pub filters: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Task request headers. + #[prost(map = "string, string", tag = "9")] + pub header: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + /// Task piece size. + #[prost(int32, tag = "10")] + pub piece_size: i32, +} +/// Piece represents information of piece. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Piece { + /// Piece number. + #[prost(uint32, tag = "1")] + pub number: u32, + /// Parent peer id. + #[prost(string, tag = "2")] + pub parent_id: ::prost::alloc::string::String, + /// Piece offset. + #[prost(uint64, tag = "3")] + pub offset: u64, + /// Piece size. + #[prost(uint64, tag = "4")] + pub size: u64, + /// Digest of the piece data, for example md5:xxx or sha256:yyy. + #[prost(string, tag = "5")] + pub digest: ::prost::alloc::string::String, + /// Traffic type. 
+ #[prost(enumeration = "TrafficType", tag = "6")] + pub traffic_type: i32, + /// Downloading piece costs time. + #[prost(message, optional, tag = "7")] + pub cost: ::core::option::Option<::prost_types::Duration>, + /// Piece create time. + #[prost(message, optional, tag = "8")] + pub created_at: ::core::option::Option<::prost_types::Timestamp>, +} +/// ExtendAttribute represents extend of attribution. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExtendAttribute { + /// Task response header, eg: HTTP Response Header + #[prost(map = "string, string", tag = "1")] + pub header: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + /// Task response code, eg: HTTP Status Code + #[prost(int32, tag = "2")] + pub status_code: i32, + /// Task response status, eg: HTTP Status + #[prost(string, tag = "3")] + pub status: ::prost::alloc::string::String, +} +/// SizeScope represents size scope of task. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum SizeScope { + /// size > one piece size. + Normal = 0, + /// 128 byte < size <= one piece size and be plain type. + Small = 1, + /// size <= 128 byte and be plain type. + Tiny = 2, + /// size == 0 byte and be plain type. + Empty = 3, +} +impl SizeScope { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SizeScope::Normal => "NORMAL", + SizeScope::Small => "SMALL", + SizeScope::Tiny => "TINY", + SizeScope::Empty => "EMPTY", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "NORMAL" => Some(Self::Normal), + "SMALL" => Some(Self::Small), + "TINY" => Some(Self::Tiny), + "EMPTY" => Some(Self::Empty), + _ => None, + } + } +} +/// TaskType represents type of task. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum TaskType { + /// DFDAEMON is dfdaemon type of task, + /// dfdaemon task is a normal p2p task. + Dfdaemon = 0, + /// DFCACHE is dfcache type of task, + /// dfcache task is a cache task, and the task url is fake url. + /// It can only be used for caching and cannot be downloaded back to source. + Dfcache = 1, + /// DFSTORE is dfstore type of task, + /// dfstore task is a persistent task in backend. + Dfstore = 2, +} +impl TaskType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + TaskType::Dfdaemon => "DFDAEMON", + TaskType::Dfcache => "DFCACHE", + TaskType::Dfstore => "DFSTORE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "DFDAEMON" => Some(Self::Dfdaemon), + "DFCACHE" => Some(Self::Dfcache), + "DFSTORE" => Some(Self::Dfstore), + _ => None, + } + } +} +/// TrafficType represents type of traffic. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum TrafficType { + /// BACK_TO_SOURCE is to download traffic from the source. + BackToSource = 0, + /// REMOTE_PEER is to download traffic from the remote peer. + RemotePeer = 1, + /// LOCAL_PEER is to download traffic from the local peer. 
+ LocalPeer = 2, +} +impl TrafficType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + TrafficType::BackToSource => "BACK_TO_SOURCE", + TrafficType::RemotePeer => "REMOTE_PEER", + TrafficType::LocalPeer => "LOCAL_PEER", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "BACK_TO_SOURCE" => Some(Self::BackToSource), + "REMOTE_PEER" => Some(Self::RemotePeer), + "LOCAL_PEER" => Some(Self::LocalPeer), + _ => None, + } + } +} +/// Priority represents priority of application. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum Priority { + /// LEVEL0 has no special meaning for scheduler. + Level0 = 0, + /// LEVEL1 represents the download task is forbidden, + /// and an error code is returned during the registration. + Level1 = 1, + /// LEVEL2 represents when the task is downloaded for the first time, + /// allow peers to download from the other peers, + /// but not back-to-source. When the task is not downloaded for + /// the first time, it is scheduled normally. + Level2 = 2, + /// LEVEL3 represents when the task is downloaded for the first time, + /// the normal peer is first to download back-to-source. + /// When the task is not downloaded for the first time, it is scheduled normally. + Level3 = 3, + /// LEVEL4 represents when the task is downloaded for the first time, + /// the weak peer is first triggered to back-to-source. + /// When the task is not downloaded for the first time, it is scheduled normally. 
+ Level4 = 4, + /// LEVEL5 represents when the task is downloaded for the first time, + /// the strong peer is first triggered to back-to-source. + /// When the task is not downloaded for the first time, it is scheduled normally. + Level5 = 5, + /// LEVEL6 represents when the task is downloaded for the first time, + /// the super peer is first triggered to back-to-source. + /// When the task is not downloaded for the first time, it is scheduled normally. + Level6 = 6, +} +impl Priority { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Priority::Level0 => "LEVEL0", + Priority::Level1 => "LEVEL1", + Priority::Level2 => "LEVEL2", + Priority::Level3 => "LEVEL3", + Priority::Level4 => "LEVEL4", + Priority::Level5 => "LEVEL5", + Priority::Level6 => "LEVEL6", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "LEVEL0" => Some(Self::Level0), + "LEVEL1" => Some(Self::Level1), + "LEVEL2" => Some(Self::Level2), + "LEVEL3" => Some(Self::Level3), + "LEVEL4" => Some(Self::Level4), + "LEVEL5" => Some(Self::Level5), + "LEVEL6" => Some(Self::Level6), + _ => None, + } + } +} diff --git a/src/dfdaemon.rs b/src/dfdaemon.rs new file mode 100644 index 0000000..e9e0c22 --- /dev/null +++ b/src/dfdaemon.rs @@ -0,0 +1,717 @@ +/// InterestedAllPiecesRequest represents interested all pieces request of SyncPiecesRequest. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InterestedAllPiecesRequest {} +/// InterestedPiecesRequest represents interested pieces request of SyncPiecesRequest. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InterestedPiecesRequest { + /// Interested piece numbers. + #[prost(uint32, repeated, tag = "1")] + pub piece_numbers: ::prost::alloc::vec::Vec, +} +/// StatMetadata represents stat metadata request of SyncPiecesRequest. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StatMetadataRequest {} +/// SyncPiecesRequest represents request of AnnouncePeer. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncPiecesRequest { + #[prost(oneof = "sync_pieces_request::Request", tags = "1, 2, 3")] + pub request: ::core::option::Option, +} +/// Nested message and enum types in `SyncPiecesRequest`. +pub mod sync_pieces_request { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Request { + #[prost(message, tag = "1")] + InterestedAllPiecesRequest(super::InterestedAllPiecesRequest), + #[prost(message, tag = "2")] + InterestedPiecesRequest(super::InterestedPiecesRequest), + #[prost(message, tag = "3")] + StatMetadataRequest(super::StatMetadataRequest), + } +} +/// InterestedPiecesResponse represents interested pieces response of SyncPiecesResponse. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InterestedPiecesResponse { + /// Interested pieces of task. + #[prost(message, repeated, tag = "1")] + pub pieces: ::prost::alloc::vec::Vec, +} +/// StatMetadata represents stat metadata request of SyncPiecesResponse. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StatMetadataResponse { + /// Task metadata. + #[prost(message, optional, tag = "1")] + pub metadata: ::core::option::Option, +} +/// SyncPiecesResponse represents response of SyncPieces. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncPiecesResponse { + #[prost(oneof = "sync_pieces_response::Response", tags = "1, 2")] + pub response: ::core::option::Option, + #[prost(oneof = "sync_pieces_response::Errordetails", tags = "3, 4")] + pub errordetails: ::core::option::Option, +} +/// Nested message and enum types in `SyncPiecesResponse`. +pub mod sync_pieces_response { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Response { + #[prost(message, tag = "1")] + InterestedPiecesResponse(super::InterestedPiecesResponse), + #[prost(message, tag = "2")] + StatMetadataResponse(super::StatMetadataResponse), + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Errordetails { + #[prost(message, tag = "3")] + SyncPiecesFailed(super::super::errordetails::SyncPiecesFailed), + #[prost(message, tag = "4")] + StatMetadataFailed(super::super::errordetails::StatMetadataFailed), + } +} +/// TriggerTaskRequest represents request of TriggerTask. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TriggerTaskRequest { + /// Task id. + #[prost(string, tag = "1")] + pub task_id: ::prost::alloc::string::String, + /// Task metadata. + #[prost(message, optional, tag = "2")] + pub metadata: ::core::option::Option, +} +/// StatTaskRequest represents request of StatTask. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StatTaskRequest { + /// Task id. + #[prost(string, tag = "1")] + pub task_id: ::prost::alloc::string::String, +} +/// StatTaskResponse represents response of StatTask. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StatTaskResponse { + #[prost(message, optional, tag = "1")] + pub task: ::core::option::Option, +} +/// ImportTaskRequest represents request of ImportTask. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ImportTaskRequest { + /// Task metadata. + #[prost(message, optional, tag = "1")] + pub metadata: ::core::option::Option, + /// File path to be imported. + #[prost(string, tag = "2")] + pub path: ::prost::alloc::string::String, +} +/// ExportTaskRequest represents request of ExportTask. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExportTaskRequest { + /// Task metadata. + #[prost(message, optional, tag = "1")] + pub metadata: ::core::option::Option, + /// File path to be exported. + #[prost(string, tag = "2")] + pub path: ::prost::alloc::string::String, + /// Download timeout. + #[prost(message, optional, tag = "3")] + pub timeout: ::core::option::Option<::prost_types::Duration>, + /// Download rate limit in bytes per second. + #[prost(double, tag = "4")] + pub download_rate_limit: f64, + /// User id. + #[prost(uint64, tag = "5")] + pub uid: u64, + /// Group id. + #[prost(uint64, tag = "6")] + pub gid: u64, +} +/// DeleteTaskRequest represents request of DeleteTask. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteTaskRequest { + /// Task id. + #[prost(string, tag = "1")] + pub task_id: ::prost::alloc::string::String, +} +/// Generated client implementations. +pub mod dfdaemon_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Dfdaemon RPC Service. 
+ #[derive(Debug, Clone)] + pub struct DfdaemonClient { + inner: tonic::client::Grpc, + } + impl DfdaemonClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: std::convert::TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl DfdaemonClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> DfdaemonClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + DfdaemonClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// SyncPieces syncs pieces from the other peers. 
+ pub async fn sync_pieces( + &mut self, + request: impl tonic::IntoStreamingRequest, + ) -> Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/dfdaemon.Dfdaemon/SyncPieces", + ); + self.inner.streaming(request.into_streaming_request(), path, codec).await + } + /// TriggerTask triggers task back-to-source download. + pub async fn trigger_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/dfdaemon.Dfdaemon/TriggerTask", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// StatTask stats task information. + pub async fn stat_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/dfdaemon.Dfdaemon/StatTask", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// ImportTask imports task to p2p network. 
+ pub async fn import_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/dfdaemon.Dfdaemon/ImportTask", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// ExportTask exports task from p2p network. + pub async fn export_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/dfdaemon.Dfdaemon/ExportTask", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// DeleteTask deletes task from p2p network. + pub async fn delete_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/dfdaemon.Dfdaemon/DeleteTask", + ); + self.inner.unary(request.into_request(), path, codec).await + } + } +} +/// Generated server implementations. +pub mod dfdaemon_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with DfdaemonServer. + #[async_trait] + pub trait Dfdaemon: Send + Sync + 'static { + /// Server streaming response type for the SyncPieces method. 
+ type SyncPiecesStream: futures_core::Stream< + Item = Result, + > + + Send + + 'static; + /// SyncPieces syncs pieces from the other peers. + async fn sync_pieces( + &self, + request: tonic::Request>, + ) -> Result, tonic::Status>; + /// TriggerTask triggers task back-to-source download. + async fn trigger_task( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// StatTask stats task information. + async fn stat_task( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// ImportTask imports task to p2p network. + async fn import_task( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// ExportTask exports task from p2p network. + async fn export_task( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// DeleteTask deletes task from p2p network. + async fn delete_task( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + } + /// Dfdaemon RPC Service. + #[derive(Debug)] + pub struct DfdaemonServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + } + struct _Inner(Arc); + impl DfdaemonServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + } + impl tonic::codegen::Service> for DfdaemonServer + where + T: Dfdaemon, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/dfdaemon.Dfdaemon/SyncPieces" => { + #[allow(non_camel_case_types)] + struct SyncPiecesSvc(pub Arc); + impl< + T: Dfdaemon, + > tonic::server::StreamingService + for SyncPiecesSvc { + type Response = super::SyncPiecesResponse; + type ResponseStream = T::SyncPiecesStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).sync_pieces(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = SyncPiecesSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/dfdaemon.Dfdaemon/TriggerTask" => { + #[allow(non_camel_case_types)] + struct TriggerTaskSvc(pub Arc); + impl< + T: Dfdaemon, + > tonic::server::UnaryService + for TriggerTaskSvc { + type Response = (); + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( 
+ &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).trigger_task(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = TriggerTaskSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/dfdaemon.Dfdaemon/StatTask" => { + #[allow(non_camel_case_types)] + struct StatTaskSvc(pub Arc); + impl tonic::server::UnaryService + for StatTaskSvc { + type Response = super::super::common::Task; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).stat_task(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = StatTaskSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/dfdaemon.Dfdaemon/ImportTask" => { + #[allow(non_camel_case_types)] + struct ImportTaskSvc(pub Arc); + impl< + T: Dfdaemon, + > tonic::server::UnaryService + for ImportTaskSvc { + type Response = (); + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut 
self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).import_task(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ImportTaskSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/dfdaemon.Dfdaemon/ExportTask" => { + #[allow(non_camel_case_types)] + struct ExportTaskSvc(pub Arc); + impl< + T: Dfdaemon, + > tonic::server::UnaryService + for ExportTaskSvc { + type Response = (); + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).export_task(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ExportTaskSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/dfdaemon.Dfdaemon/DeleteTask" => { + #[allow(non_camel_case_types)] + struct DeleteTaskSvc(pub Arc); + impl< + T: Dfdaemon, + > tonic::server::UnaryService + for DeleteTaskSvc { + type Response = (); + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + 
request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).delete_task(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = DeleteTaskSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for DfdaemonServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(self.0.clone()) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for DfdaemonServer { + const NAME: &'static str = "dfdaemon.Dfdaemon"; + } +} diff --git a/src/errordetails.rs b/src/errordetails.rs new file mode 100644 index 0000000..cbc98c6 --- /dev/null +++ b/src/errordetails.rs @@ -0,0 +1,83 @@ +/// DownloadPeerBackToSourceFailed is error detail of downloading peer back-to-source. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DownloadPeerBackToSourceFailed { + /// The description of the error. 
+ #[prost(string, tag = "1")] + pub description: ::prost::alloc::string::String, +} +/// DownloadPieceBackToSourceFailed is error detail of downloading piece back-to-source. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DownloadPieceBackToSourceFailed { + /// Temporary recoverable error of source. + #[prost(bool, tag = "1")] + pub temporary: bool, + /// Source response metadata, eg: HTTP Status Code, HTTP Status, HTTP Header + #[prost(message, optional, tag = "2")] + pub metadata: ::core::option::Option, + /// The number of piece. + #[prost(uint32, tag = "3")] + pub piece_number: u32, + /// The description of the error. + #[prost(string, tag = "4")] + pub description: ::prost::alloc::string::String, +} +/// DownloadPieceFailed is error detail of downloading piece. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DownloadPieceFailed { + /// Temporary recoverable error of parent peer. + #[prost(bool, tag = "1")] + pub temporary: bool, + /// Source response metadata, eg: HTTP Status Code, HTTP Status, HTTP Header + #[prost(message, optional, tag = "2")] + pub metadata: ::core::option::Option, + /// Piece is information of piece. + #[prost(string, tag = "3")] + pub parent_id: ::prost::alloc::string::String, + /// The number of piece. + #[prost(uint32, tag = "4")] + pub piece_number: u32, + /// The description of the error. + #[prost(string, tag = "5")] + pub description: ::prost::alloc::string::String, +} +/// SchedulePeerForbidden is error detail of forbidden. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SchedulePeerForbidden { + /// The description of the error. + #[prost(string, tag = "1")] + pub description: ::prost::alloc::string::String, +} +/// SchedulePeerFailed is error detail of scheduling. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SchedulePeerFailed { + /// The description of the error. + #[prost(string, tag = "1")] + pub description: ::prost::alloc::string::String, +} +/// SyncPiecesFailed is error detail of syncing pieces. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncPiecesFailed { + /// Temporary recoverable error of parent peer. + #[prost(bool, tag = "1")] + pub temporary: bool, + /// Parent peer id. + #[prost(string, tag = "2")] + pub parent_id: ::prost::alloc::string::String, + /// The description of the error. + #[prost(string, tag = "3")] + pub description: ::prost::alloc::string::String, +} +/// StatMetadataFailed is error detail of stat metadata. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StatMetadataFailed { + /// The description of the error. + #[prost(string, tag = "1")] + pub description: ::prost::alloc::string::String, +} diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 0000000..a37e5ed --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,6 @@ +pub mod common; +pub mod dfdaemon; +pub mod errordetails; +pub mod manager; +pub mod scheduler; +pub mod security; diff --git a/src/manager.rs b/src/manager.rs new file mode 100644 index 0000000..b63bee5 --- /dev/null +++ b/src/manager.rs @@ -0,0 +1,2131 @@ +/// SecurityGroup represents security group of cluster. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SecurityGroup { + /// Group id. + #[prost(uint64, tag = "1")] + pub id: u64, + /// Group name. + #[prost(string, tag = "2")] + pub name: ::prost::alloc::string::String, + /// Group biography. + #[prost(string, tag = "3")] + pub bio: ::prost::alloc::string::String, + /// Group domain. + #[prost(string, tag = "4")] + pub domain: ::prost::alloc::string::String, + /// Group proxy domain. 
+ #[prost(string, tag = "5")] + pub proxy_domain: ::prost::alloc::string::String, +} +/// SeedPeerCluster represents cluster of seed peer. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SeedPeerCluster { + /// Cluster id. + #[prost(uint64, tag = "1")] + pub id: u64, + /// Cluster name. + #[prost(string, tag = "2")] + pub name: ::prost::alloc::string::String, + /// Cluster biography. + #[prost(string, tag = "3")] + pub bio: ::prost::alloc::string::String, + /// Cluster configuration. + #[prost(bytes = "vec", tag = "4")] + pub config: ::prost::alloc::vec::Vec, + /// Cluster scopes. + #[prost(bytes = "vec", tag = "5")] + pub scopes: ::prost::alloc::vec::Vec, + /// Security group to which the seed peer cluster belongs. + #[prost(message, optional, tag = "6")] + pub security_group: ::core::option::Option, +} +/// SeedPeer represents seed peer for network. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SeedPeer { + /// Seed peer id. + #[prost(uint64, tag = "1")] + pub id: u64, + /// Seed peer hostname. + #[prost(string, tag = "2")] + pub host_name: ::prost::alloc::string::String, + /// Seed peer type. + #[prost(string, tag = "3")] + pub r#type: ::prost::alloc::string::String, + /// Seed peer idc. + #[prost(string, tag = "5")] + pub idc: ::prost::alloc::string::String, + /// Seed peer network topology. + #[prost(string, tag = "6")] + pub net_topology: ::prost::alloc::string::String, + /// Seed peer location. + #[prost(string, tag = "7")] + pub location: ::prost::alloc::string::String, + /// Seed peer ip. + #[prost(string, tag = "8")] + pub ip: ::prost::alloc::string::String, + /// Seed peer grpc port. + #[prost(int32, tag = "9")] + pub port: i32, + /// Seed peer download port. + #[prost(int32, tag = "10")] + pub download_port: i32, + /// Seed peer state. 
+ #[prost(string, tag = "11")] + pub state: ::prost::alloc::string::String, + /// ID of the cluster to which the seed peer belongs. + #[prost(uint64, tag = "12")] + pub seed_peer_cluster_id: u64, + /// Cluster to which the seed peer belongs. + #[prost(message, optional, tag = "13")] + pub seed_peer_cluster: ::core::option::Option, + /// Schedulers included in seed peer. + #[prost(message, repeated, tag = "14")] + pub schedulers: ::prost::alloc::vec::Vec, + /// Seed peer object storage port. + #[prost(int32, tag = "15")] + pub object_storage_port: i32, +} +/// GetSeedPeerRequest represents request of GetSeedPeer. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetSeedPeerRequest { + /// Request source type. + #[prost(enumeration = "SourceType", tag = "1")] + pub source_type: i32, + /// Seed peer hostname. + #[prost(string, tag = "2")] + pub host_name: ::prost::alloc::string::String, + /// ID of the cluster to which the seed peer belongs. + #[prost(uint64, tag = "3")] + pub seed_peer_cluster_id: u64, + /// Seed peer ip. + #[prost(string, tag = "4")] + pub ip: ::prost::alloc::string::String, +} +/// UpdateSeedPeerRequest represents request of UpdateSeedPeer. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateSeedPeerRequest { + /// Request source type. + #[prost(enumeration = "SourceType", tag = "1")] + pub source_type: i32, + /// Seed peer hostname. + #[prost(string, tag = "2")] + pub host_name: ::prost::alloc::string::String, + /// Seed peer type. + #[prost(string, tag = "3")] + pub r#type: ::prost::alloc::string::String, + /// Seed peer idc. + #[prost(string, tag = "4")] + pub idc: ::prost::alloc::string::String, + /// Seed peer network topology. + #[prost(string, tag = "5")] + pub net_topology: ::prost::alloc::string::String, + /// Seed peer location. 
+ #[prost(string, tag = "6")] + pub location: ::prost::alloc::string::String, + /// Seed peer ip. + #[prost(string, tag = "7")] + pub ip: ::prost::alloc::string::String, + /// Seed peer port. + #[prost(int32, tag = "8")] + pub port: i32, + /// Seed peer download port. + #[prost(int32, tag = "9")] + pub download_port: i32, + /// ID of the cluster to which the seed peer belongs. + #[prost(uint64, tag = "10")] + pub seed_peer_cluster_id: u64, + /// Seed peer object storage port. + #[prost(int32, tag = "11")] + pub object_storage_port: i32, +} +/// SeedPeerCluster represents cluster of scheduler. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SchedulerCluster { + /// Cluster id. + #[prost(uint64, tag = "1")] + pub id: u64, + /// Cluster name. + #[prost(string, tag = "2")] + pub name: ::prost::alloc::string::String, + /// Cluster biography. + #[prost(string, tag = "3")] + pub bio: ::prost::alloc::string::String, + /// Cluster config. + #[prost(bytes = "vec", tag = "4")] + pub config: ::prost::alloc::vec::Vec, + /// Cluster client config. + #[prost(bytes = "vec", tag = "5")] + pub client_config: ::prost::alloc::vec::Vec, + /// Cluster scopes. + #[prost(bytes = "vec", tag = "6")] + pub scopes: ::prost::alloc::vec::Vec, + /// Security group to which the scheduler cluster belongs. + #[prost(message, optional, tag = "7")] + pub security_group: ::core::option::Option, +} +/// SeedPeerCluster represents scheduler for network. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Scheduler { + /// Scheduler id. + #[prost(uint64, tag = "1")] + pub id: u64, + /// Scheduler hostname. + #[prost(string, tag = "2")] + pub host_name: ::prost::alloc::string::String, + /// Deprecated: Do not use. + #[prost(string, tag = "3")] + pub vips: ::prost::alloc::string::String, + /// Scheduler idc. 
+ #[prost(string, tag = "4")] + pub idc: ::prost::alloc::string::String, + /// Scheduler location. + #[prost(string, tag = "5")] + pub location: ::prost::alloc::string::String, + /// Deprecated: Use net_topology instead. + #[prost(bytes = "vec", tag = "6")] + pub net_config: ::prost::alloc::vec::Vec, + /// Scheduler ip. + #[prost(string, tag = "7")] + pub ip: ::prost::alloc::string::String, + /// Scheduler grpc port. + #[prost(int32, tag = "8")] + pub port: i32, + /// Scheduler state. + #[prost(string, tag = "9")] + pub state: ::prost::alloc::string::String, + /// ID of the cluster to which the scheduler belongs. + #[prost(uint64, tag = "10")] + pub scheduler_cluster_id: u64, + /// Cluster to which the scheduler belongs. + #[prost(message, optional, tag = "11")] + pub scheduler_cluster: ::core::option::Option, + /// Seed peers to which the scheduler belongs. + #[prost(message, repeated, tag = "12")] + pub seed_peers: ::prost::alloc::vec::Vec, + /// Scheduler network topology. + #[prost(string, tag = "13")] + pub net_topology: ::prost::alloc::string::String, +} +/// GetSchedulerRequest represents request of GetScheduler. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetSchedulerRequest { + /// Request source type. + #[prost(enumeration = "SourceType", tag = "1")] + pub source_type: i32, + /// Scheduler hostname. + #[prost(string, tag = "2")] + pub host_name: ::prost::alloc::string::String, + /// ID of the cluster to which the scheduler belongs. + #[prost(uint64, tag = "3")] + pub scheduler_cluster_id: u64, + /// Scheduler ip. + #[prost(string, tag = "4")] + pub ip: ::prost::alloc::string::String, +} +/// UpdateSchedulerRequest represents request of UpdateScheduler. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateSchedulerRequest { + /// Request source type. 
+ #[prost(enumeration = "SourceType", tag = "1")] + pub source_type: i32, + /// Scheduler hostname. + #[prost(string, tag = "2")] + pub host_name: ::prost::alloc::string::String, + /// ID of the cluster to which the scheduler belongs. + #[prost(uint64, tag = "3")] + pub scheduler_cluster_id: u64, + /// Deprecated: Do not use. + #[prost(string, tag = "4")] + pub vips: ::prost::alloc::string::String, + /// Scheduler idc. + #[prost(string, tag = "5")] + pub idc: ::prost::alloc::string::String, + /// Scheduler location. + #[prost(string, tag = "6")] + pub location: ::prost::alloc::string::String, + /// Deprecated: Use net_topology instead. + #[prost(bytes = "vec", tag = "7")] + pub net_config: ::prost::alloc::vec::Vec, + /// Scheduler ip. + #[prost(string, tag = "8")] + pub ip: ::prost::alloc::string::String, + /// Scheduler port. + #[prost(int32, tag = "9")] + pub port: i32, + /// Scheduler network topology. + #[prost(string, tag = "10")] + pub net_topology: ::prost::alloc::string::String, +} +/// ListSchedulersRequest represents request of ListSchedulers. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListSchedulersRequest { + /// Request source type. + #[prost(enumeration = "SourceType", tag = "1")] + pub source_type: i32, + /// Source service hostname. + #[prost(string, tag = "2")] + pub host_name: ::prost::alloc::string::String, + /// Source service ip. + #[prost(string, tag = "3")] + pub ip: ::prost::alloc::string::String, + /// Source service host information. + #[prost(map = "string, string", tag = "4")] + pub host_info: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + /// Dfdaemon version. + #[prost(string, tag = "5")] + pub version: ::prost::alloc::string::String, + /// Dfdaemon commit. + #[prost(string, tag = "6")] + pub commit: ::prost::alloc::string::String, +} +/// ListSchedulersResponse represents response of ListSchedulers. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListSchedulersResponse { + /// Schedulers to which the source service belongs. + #[prost(message, repeated, tag = "1")] + pub schedulers: ::prost::alloc::vec::Vec, +} +/// ObjectStorage represents config of object storage. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ObjectStorage { + /// Object storage name of type. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// Storage region. + #[prost(string, tag = "2")] + pub region: ::prost::alloc::string::String, + /// Datacenter endpoint. + #[prost(string, tag = "3")] + pub endpoint: ::prost::alloc::string::String, + /// Access key id. + #[prost(string, tag = "4")] + pub access_key: ::prost::alloc::string::String, + /// Access key secret. + #[prost(string, tag = "5")] + pub secret_key: ::prost::alloc::string::String, +} +/// GetObjectStorageRequest represents request of GetObjectStorage. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetObjectStorageRequest { + /// Request source type. + #[prost(enumeration = "SourceType", tag = "1")] + pub source_type: i32, + /// Source service hostname. + #[prost(string, tag = "2")] + pub host_name: ::prost::alloc::string::String, + /// Source service ip. + #[prost(string, tag = "3")] + pub ip: ::prost::alloc::string::String, +} +/// Bucket represents config of bucket. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Bucket { + /// Bucket name. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, +} +/// ListSchedulersRequest represents request of ListBuckets. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListBucketsRequest { + /// Request source type. 
+ #[prost(enumeration = "SourceType", tag = "1")] + pub source_type: i32, + /// Source service hostname. + #[prost(string, tag = "2")] + pub host_name: ::prost::alloc::string::String, + /// Source service ip. + #[prost(string, tag = "3")] + pub ip: ::prost::alloc::string::String, +} +/// ListBucketsResponse represents response of ListBuckets. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListBucketsResponse { + /// Bucket configs. + #[prost(message, repeated, tag = "1")] + pub buckets: ::prost::alloc::vec::Vec, +} +/// Model represents information of model. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Model { + /// Model id. + #[prost(string, tag = "1")] + pub model_id: ::prost::alloc::string::String, + /// Model name. + #[prost(string, tag = "2")] + pub name: ::prost::alloc::string::String, + /// Model version id. + #[prost(string, tag = "3")] + pub version_id: ::prost::alloc::string::String, + /// Scheduler id. + #[prost(uint64, tag = "4")] + pub scheduler_id: u64, + /// Scheduler hostname. + #[prost(string, tag = "5")] + pub host_name: ::prost::alloc::string::String, + /// Scheduler ip. + #[prost(string, tag = "6")] + pub ip: ::prost::alloc::string::String, + /// Model create time. + #[prost(message, optional, tag = "7")] + pub created_at: ::core::option::Option<::prost_types::Timestamp>, + /// Model update time. + #[prost(message, optional, tag = "8")] + pub updated_at: ::core::option::Option<::prost_types::Timestamp>, +} +/// ListModelsRequest represents request of ListModels. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListModelsRequest { + /// Scheduler id. + #[prost(uint64, tag = "1")] + pub scheduler_id: u64, +} +/// ListModelsResponse represents response of ListModels. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListModelsResponse { + /// Model informations. + #[prost(message, repeated, tag = "1")] + pub models: ::prost::alloc::vec::Vec, +} +/// GetModelRequest represents request of GetModel. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetModelRequest { + /// Scheduler id. + #[prost(uint64, tag = "1")] + pub scheduler_id: u64, + /// Model id. + #[prost(string, tag = "2")] + pub model_id: ::prost::alloc::string::String, +} +/// CreateModelRequest represents request of CreateModel. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateModelRequest { + /// Model id. + #[prost(string, tag = "1")] + pub model_id: ::prost::alloc::string::String, + /// Model name. + #[prost(string, tag = "2")] + pub name: ::prost::alloc::string::String, + /// Model version id. + #[prost(string, tag = "3")] + pub version_id: ::prost::alloc::string::String, + /// Scheduler id. + #[prost(uint64, tag = "4")] + pub scheduler_id: u64, + /// Scheduler hostname. + #[prost(string, tag = "5")] + pub host_name: ::prost::alloc::string::String, + /// Scheduler ip. + #[prost(string, tag = "6")] + pub ip: ::prost::alloc::string::String, +} +/// UpdateModelRequest represents request of UpdateModel. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateModelRequest { + /// Model id. + #[prost(string, tag = "1")] + pub model_id: ::prost::alloc::string::String, + /// Model name. + #[prost(string, tag = "2")] + pub name: ::prost::alloc::string::String, + /// Model version id. + #[prost(string, tag = "3")] + pub version_id: ::prost::alloc::string::String, + /// Scheduler id. + #[prost(uint64, tag = "4")] + pub scheduler_id: u64, + /// Scheduler hostname. 
+ #[prost(string, tag = "5")] + pub host_name: ::prost::alloc::string::String, + /// Scheduler ip. + #[prost(string, tag = "6")] + pub ip: ::prost::alloc::string::String, +} +/// DeleteModelRequest represents request of DeleteModel. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteModelRequest { + /// Scheduler id. + #[prost(uint64, tag = "1")] + pub scheduler_id: u64, + /// Model id. + #[prost(string, tag = "2")] + pub model_id: ::prost::alloc::string::String, +} +/// ModelVersion represents information of model version. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ModelVersion { + /// Model version id. + #[prost(string, tag = "1")] + pub version_id: ::prost::alloc::string::String, + /// Model version data. + #[prost(bytes = "vec", tag = "2")] + pub data: ::prost::alloc::vec::Vec, + /// Model version mae. + #[prost(double, tag = "3")] + pub mae: f64, + /// Model version mse. + #[prost(double, tag = "4")] + pub mse: f64, + /// Model version rmse. + #[prost(double, tag = "5")] + pub rmse: f64, + /// Model version r^2. + #[prost(double, tag = "6")] + pub r2: f64, + /// Model create time. + #[prost(message, optional, tag = "7")] + pub created_at: ::core::option::Option<::prost_types::Timestamp>, + /// Model update time. + #[prost(message, optional, tag = "8")] + pub updated_at: ::core::option::Option<::prost_types::Timestamp>, +} +/// ListModelVersionsRequest represents request of ListModelVersions. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListModelVersionsRequest { + /// Scheduler id. + #[prost(uint64, tag = "1")] + pub scheduler_id: u64, + /// Model id. + #[prost(string, tag = "2")] + pub model_id: ::prost::alloc::string::String, +} +/// ListModelVersionsResponse represents response of ListModelVersions. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListModelVersionsResponse { + /// Model version informations. + #[prost(message, repeated, tag = "1")] + pub model_versions: ::prost::alloc::vec::Vec, +} +/// GetModelVersionRequest represents request of GetModelVersion. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetModelVersionRequest { + /// Scheduler id. + #[prost(uint64, tag = "1")] + pub scheduler_id: u64, + /// Model id. + #[prost(string, tag = "2")] + pub model_id: ::prost::alloc::string::String, + /// Model version id. + #[prost(string, tag = "3")] + pub version_id: ::prost::alloc::string::String, +} +/// CreateModelVersionRequest represents request of CreateModelVersion. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateModelVersionRequest { + /// Scheduler id. + #[prost(uint64, tag = "1")] + pub scheduler_id: u64, + /// Model id. + #[prost(string, tag = "2")] + pub model_id: ::prost::alloc::string::String, + /// Model version data. + #[prost(bytes = "vec", tag = "3")] + pub data: ::prost::alloc::vec::Vec, + /// Model version mae. + #[prost(double, tag = "4")] + pub mae: f64, + /// Model version mse. + #[prost(double, tag = "5")] + pub mse: f64, + /// Model version rmse. + #[prost(double, tag = "6")] + pub rmse: f64, + /// Model version r^2. + #[prost(double, tag = "7")] + pub r2: f64, +} +/// UpdateModelVersionRequest represents request of UpdateModelVersion. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateModelVersionRequest { + /// Model version id. + #[prost(string, tag = "1")] + pub version_id: ::prost::alloc::string::String, + /// Scheduler id. + #[prost(uint64, tag = "2")] + pub scheduler_id: u64, + /// Model id. 
+ #[prost(string, tag = "3")] + pub model_id: ::prost::alloc::string::String, + /// Model version data. + #[prost(bytes = "vec", tag = "4")] + pub data: ::prost::alloc::vec::Vec, + /// Model version mae. + #[prost(double, tag = "5")] + pub mae: f64, + /// Model version mse. + #[prost(double, tag = "6")] + pub mse: f64, + /// Model version rmse. + #[prost(double, tag = "7")] + pub rmse: f64, + /// Model version r^2. + #[prost(double, tag = "8")] + pub r2: f64, +} +/// DeleteModelVersionRequest represents request of DeleteModelVersion. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteModelVersionRequest { + /// Scheduler id. + #[prost(uint64, tag = "1")] + pub scheduler_id: u64, + /// Model id. + #[prost(string, tag = "2")] + pub model_id: ::prost::alloc::string::String, + /// Model version id. + #[prost(string, tag = "3")] + pub version_id: ::prost::alloc::string::String, +} +/// URLPriority represents config of url priority. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UrlPriority { + /// URL regex. + #[prost(string, tag = "1")] + pub regex: ::prost::alloc::string::String, + /// URL priority value. + #[prost(enumeration = "super::common::Priority", tag = "2")] + pub value: i32, +} +/// ApplicationPriority represents config of application priority. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ApplicationPriority { + /// Priority value. + #[prost(enumeration = "super::common::Priority", tag = "1")] + pub value: i32, + /// URL priority. + #[prost(message, repeated, tag = "2")] + pub urls: ::prost::alloc::vec::Vec, +} +/// Application represents config of application. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Application { + /// Application id. + #[prost(uint64, tag = "1")] + pub id: u64, + /// Application name. 
+ #[prost(string, tag = "2")] + pub name: ::prost::alloc::string::String, + /// Application url. + #[prost(string, tag = "3")] + pub url: ::prost::alloc::string::String, + /// Application biography. + #[prost(string, tag = "4")] + pub bio: ::prost::alloc::string::String, + /// Application priority. + #[prost(message, optional, tag = "5")] + pub priority: ::core::option::Option, +} +/// ListApplicationsRequest represents request of ListApplications. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListApplicationsRequest { + /// Request source type. + #[prost(enumeration = "SourceType", tag = "1")] + pub source_type: i32, + /// Source service hostname. + #[prost(string, tag = "2")] + pub host_name: ::prost::alloc::string::String, + /// Source service ip. + #[prost(string, tag = "3")] + pub ip: ::prost::alloc::string::String, +} +/// ListApplicationsResponse represents response of ListApplications. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListApplicationsResponse { + /// Application configs. + #[prost(message, repeated, tag = "1")] + pub applications: ::prost::alloc::vec::Vec, +} +/// KeepAliveRequest represents request of KeepAlive. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct KeepAliveRequest { + /// Request source type. + #[prost(enumeration = "SourceType", tag = "1")] + pub source_type: i32, + /// Source service hostname. + #[prost(string, tag = "2")] + pub host_name: ::prost::alloc::string::String, + /// ID of the cluster to which the source service belongs. + #[prost(uint64, tag = "3")] + pub cluster_id: u64, + /// Source service ip. + #[prost(string, tag = "4")] + pub ip: ::prost::alloc::string::String, +} +/// Request source type. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum SourceType { + /// Scheduler service. + SchedulerSource = 0, + /// Peer service. + PeerSource = 1, + /// SeedPeer service. + SeedPeerSource = 2, +} +impl SourceType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SourceType::SchedulerSource => "SCHEDULER_SOURCE", + SourceType::PeerSource => "PEER_SOURCE", + SourceType::SeedPeerSource => "SEED_PEER_SOURCE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SCHEDULER_SOURCE" => Some(Self::SchedulerSource), + "PEER_SOURCE" => Some(Self::PeerSource), + "SEED_PEER_SOURCE" => Some(Self::SeedPeerSource), + _ => None, + } + } +} +/// Generated client implementations. +pub mod manager_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Manager RPC Service. + #[derive(Debug, Clone)] + pub struct ManagerClient { + inner: tonic::client::Grpc, + } + impl ManagerClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: std::convert::TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl ManagerClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> ManagerClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + ManagerClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Get SeedPeer and SeedPeer cluster configuration. 
+ pub async fn get_seed_peer( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/GetSeedPeer", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// Update SeedPeer configuration. + pub async fn update_seed_peer( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/UpdateSeedPeer", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// Get Scheduler and Scheduler cluster configuration. + pub async fn get_scheduler( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/GetScheduler", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// Update scheduler configuration. 
+ pub async fn update_scheduler( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/UpdateScheduler", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// List acitve schedulers configuration. + pub async fn list_schedulers( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/ListSchedulers", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// Get ObjectStorage configuration. + pub async fn get_object_storage( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/GetObjectStorage", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// List buckets configuration. 
+ pub async fn list_buckets( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/ListBuckets", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// List models information. + pub async fn list_models( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/ListModels", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// Get model information. + pub async fn get_model( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/manager.Manager/GetModel"); + self.inner.unary(request.into_request(), path, codec).await + } + /// Create model information. 
+ pub async fn create_model( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/CreateModel", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// Update model information. + pub async fn update_model( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/UpdateModel", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// Delete model information. + pub async fn delete_model( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/DeleteModel", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// List model versions information. 
+ pub async fn list_model_versions( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/ListModelVersions", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// Get model version information. + pub async fn get_model_version( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/GetModelVersion", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// Create model version information. + pub async fn create_model_version( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/CreateModelVersion", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// Update model version information. 
+ pub async fn update_model_version( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/UpdateModelVersion", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// Delete model version information. + pub async fn delete_model_version( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/DeleteModelVersion", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// List applications configuration. + pub async fn list_applications( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/ListApplications", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// KeepAlive with manager. 
+ pub async fn keep_alive( + &mut self, + request: impl tonic::IntoStreamingRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/manager.Manager/KeepAlive", + ); + self.inner + .client_streaming(request.into_streaming_request(), path, codec) + .await + } + } +} +/// Generated server implementations. +pub mod manager_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with ManagerServer. + #[async_trait] + pub trait Manager: Send + Sync + 'static { + /// Get SeedPeer and SeedPeer cluster configuration. + async fn get_seed_peer( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// Update SeedPeer configuration. + async fn update_seed_peer( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// Get Scheduler and Scheduler cluster configuration. + async fn get_scheduler( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// Update scheduler configuration. + async fn update_scheduler( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// List acitve schedulers configuration. + async fn list_schedulers( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// Get ObjectStorage configuration. + async fn get_object_storage( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// List buckets configuration. + async fn list_buckets( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// List models information. + async fn list_models( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// Get model information. 
+ async fn get_model( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// Create model information. + async fn create_model( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// Update model information. + async fn update_model( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// Delete model information. + async fn delete_model( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// List model versions information. + async fn list_model_versions( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// Get model version information. + async fn get_model_version( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// Create model version information. + async fn create_model_version( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// Update model version information. + async fn update_model_version( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// Delete model version information. + async fn delete_model_version( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// List applications configuration. + async fn list_applications( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// KeepAlive with manager. + async fn keep_alive( + &self, + request: tonic::Request>, + ) -> Result, tonic::Status>; + } + /// Manager RPC Service. 
+ #[derive(Debug)] + pub struct ManagerServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + } + struct _Inner(Arc); + impl ManagerServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + } + impl tonic::codegen::Service> for ManagerServer + where + T: Manager, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/manager.Manager/GetSeedPeer" => { + #[allow(non_camel_case_types)] + struct GetSeedPeerSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for GetSeedPeerSvc { + type Response = super::SeedPeer; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut 
= async move { + (*inner).get_seed_peer(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetSeedPeerSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/UpdateSeedPeer" => { + #[allow(non_camel_case_types)] + struct UpdateSeedPeerSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for UpdateSeedPeerSvc { + type Response = super::SeedPeer; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).update_seed_peer(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = UpdateSeedPeerSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/GetScheduler" => { + #[allow(non_camel_case_types)] + struct GetSchedulerSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for GetSchedulerSvc { + type Response = super::Scheduler; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> 
Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).get_scheduler(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetSchedulerSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/UpdateScheduler" => { + #[allow(non_camel_case_types)] + struct UpdateSchedulerSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for UpdateSchedulerSvc { + type Response = super::Scheduler; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).update_scheduler(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = UpdateSchedulerSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/ListSchedulers" => { + #[allow(non_camel_case_types)] + struct ListSchedulersSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for ListSchedulersSvc { + type Response = super::ListSchedulersResponse; + type Future = BoxFuture< + tonic::Response, + 
tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).list_schedulers(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListSchedulersSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/GetObjectStorage" => { + #[allow(non_camel_case_types)] + struct GetObjectStorageSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for GetObjectStorageSvc { + type Response = super::ObjectStorage; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).get_object_storage(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetObjectStorageSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/ListBuckets" => { + #[allow(non_camel_case_types)] + struct ListBucketsSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for ListBucketsSvc { + type Response 
= super::ListBucketsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).list_buckets(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListBucketsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/ListModels" => { + #[allow(non_camel_case_types)] + struct ListModelsSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for ListModelsSvc { + type Response = super::ListModelsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).list_models(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListModelsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/GetModel" => { + #[allow(non_camel_case_types)] + struct GetModelSvc(pub Arc); + impl tonic::server::UnaryService + for GetModelSvc { + 
type Response = super::Model; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).get_model(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetModelSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/CreateModel" => { + #[allow(non_camel_case_types)] + struct CreateModelSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for CreateModelSvc { + type Response = super::Model; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).create_model(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateModelSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/UpdateModel" => { + #[allow(non_camel_case_types)] + struct UpdateModelSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for 
UpdateModelSvc { + type Response = super::Model; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).update_model(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = UpdateModelSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/DeleteModel" => { + #[allow(non_camel_case_types)] + struct DeleteModelSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for DeleteModelSvc { + type Response = (); + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).delete_model(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = DeleteModelSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/ListModelVersions" => { + #[allow(non_camel_case_types)] + struct ListModelVersionsSvc(pub Arc); + impl< + T: Manager, + > 
tonic::server::UnaryService + for ListModelVersionsSvc { + type Response = super::ListModelVersionsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).list_model_versions(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListModelVersionsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/GetModelVersion" => { + #[allow(non_camel_case_types)] + struct GetModelVersionSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for GetModelVersionSvc { + type Response = super::ModelVersion; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).get_model_version(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetModelVersionSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/CreateModelVersion" 
=> { + #[allow(non_camel_case_types)] + struct CreateModelVersionSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for CreateModelVersionSvc { + type Response = super::ModelVersion; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).create_model_version(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateModelVersionSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/UpdateModelVersion" => { + #[allow(non_camel_case_types)] + struct UpdateModelVersionSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for UpdateModelVersionSvc { + type Response = super::ModelVersion; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).update_model_version(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = UpdateModelVersionSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let 
res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/DeleteModelVersion" => { + #[allow(non_camel_case_types)] + struct DeleteModelVersionSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for DeleteModelVersionSvc { + type Response = (); + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).delete_model_version(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = DeleteModelVersionSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/ListApplications" => { + #[allow(non_camel_case_types)] + struct ListApplicationsSvc(pub Arc); + impl< + T: Manager, + > tonic::server::UnaryService + for ListApplicationsSvc { + type Response = super::ListApplicationsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).list_applications(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListApplicationsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + 
.apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/manager.Manager/KeepAlive" => { + #[allow(non_camel_case_types)] + struct KeepAliveSvc(pub Arc); + impl< + T: Manager, + > tonic::server::ClientStreamingService + for KeepAliveSvc { + type Response = (); + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).keep_alive(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = KeepAliveSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.client_streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for ManagerServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(self.0.clone()) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for ManagerServer { + const NAME: &'static str = 
"manager.Manager"; + } +} diff --git a/src/scheduler.rs b/src/scheduler.rs new file mode 100644 index 0000000..f764610 --- /dev/null +++ b/src/scheduler.rs @@ -0,0 +1,1077 @@ +/// RegisterPeerRequest represents peer registered request of AnnouncePeerRequest. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RegisterPeerRequest { + /// Task id. + #[prost(string, tag = "1")] + pub task_id: ::prost::alloc::string::String, + /// Peer id. + #[prost(string, tag = "2")] + pub peer_id: ::prost::alloc::string::String, + /// Task metadata. + #[prost(message, optional, tag = "3")] + pub metadata: ::core::option::Option, +} +/// DownloadPeerStartedRequest represents peer download started request of AnnouncePeerRequest. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DownloadPeerStartedRequest {} +/// DownloadPeerBackToSourceStartedRequest represents peer download back-to-source started request of AnnouncePeerRequest. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DownloadPeerBackToSourceStartedRequest { + /// Download back-to-source reason. + #[prost(string, tag = "1")] + pub reason: ::prost::alloc::string::String, +} +/// DownloadPeerFinishedRequest represents peer download finished request of AnnouncePeerRequest. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DownloadPeerFinishedRequest { + /// Total content length. + #[prost(int64, tag = "1")] + pub content_length: i64, + /// Total piece count. + #[prost(int64, tag = "2")] + pub piece_count: i64, +} +/// DownloadPeerBackToSourceFinishedRequest represents peer download back-to-source finished request of AnnouncePeerRequest. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DownloadPeerBackToSourceFinishedRequest { + /// Total content length. + #[prost(int64, tag = "1")] + pub content_length: i64, + /// Total piece count. + #[prost(int64, tag = "2")] + pub piece_count: i64, +} +/// DownloadPieceFinishedRequest represents piece download finished request of AnnouncePeerRequest. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DownloadPieceFinishedRequest { + /// Piece info. + #[prost(message, optional, tag = "1")] + pub piece: ::core::option::Option, +} +/// DownloadPieceBackToSourceFinishedRequest represents piece download back-to-source finished request of AnnouncePeerRequest. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DownloadPieceBackToSourceFinishedRequest { + /// Piece info. + #[prost(message, optional, tag = "1")] + pub piece: ::core::option::Option, +} +/// AnnouncePeerRequest represents request of AnnouncePeer. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AnnouncePeerRequest { + #[prost(oneof = "announce_peer_request::Request", tags = "1, 2, 3, 4, 5, 6, 7")] + pub request: ::core::option::Option, + #[prost(oneof = "announce_peer_request::Errordetails", tags = "8, 9, 10, 11")] + pub errordetails: ::core::option::Option, +} +/// Nested message and enum types in `AnnouncePeerRequest`. 
+pub mod announce_peer_request { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Request { + #[prost(message, tag = "1")] + RegisterPeerRequest(super::RegisterPeerRequest), + #[prost(message, tag = "2")] + DownloadPeerStartedRequest(super::DownloadPeerStartedRequest), + #[prost(message, tag = "3")] + DownloadPeerBackToSourceStartedRequest( + super::DownloadPeerBackToSourceStartedRequest, + ), + #[prost(message, tag = "4")] + DownloadPeerFinishedRequest(super::DownloadPeerFinishedRequest), + #[prost(message, tag = "5")] + DownloadPeerBackToSourceFinishedRequest( + super::DownloadPeerBackToSourceFinishedRequest, + ), + #[prost(message, tag = "6")] + DownloadPieceFinishedRequest(super::DownloadPieceFinishedRequest), + #[prost(message, tag = "7")] + DownloadPieceBackToSourceFinishedRequest( + super::DownloadPieceBackToSourceFinishedRequest, + ), + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Errordetails { + #[prost(message, tag = "8")] + DownloadPeerBackToSourceFailed( + super::super::errordetails::DownloadPeerBackToSourceFailed, + ), + #[prost(message, tag = "9")] + DownloadPieceBackToSourceFailed( + super::super::errordetails::DownloadPieceBackToSourceFailed, + ), + #[prost(message, tag = "10")] + SyncPiecesFailed(super::super::errordetails::SyncPiecesFailed), + #[prost(message, tag = "11")] + DownloadPieceFailed(super::super::errordetails::DownloadPieceFailed), + } +} +/// TinyTaskResponse represents tiny task response of AnnouncePeerResponse. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TinyTaskResponse { + #[prost(bytes = "vec", tag = "1")] + pub data: ::prost::alloc::vec::Vec, +} +/// SmallTaskResponse represents small task response of AnnouncePeerResponse. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SmallTaskResponse { + /// Piece info. + #[prost(message, optional, tag = "1")] + pub piece: ::core::option::Option, +} +/// NormalTaskResponse represents normal task response of AnnouncePeerResponse. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NormalTaskResponse { + /// Candidate parents. + #[prost(message, repeated, tag = "1")] + pub candidate_parents: ::prost::alloc::vec::Vec, + /// Concurrent downloading count from main peer. + #[prost(int32, tag = "2")] + pub parallel_count: i32, +} +/// NeedBackToSourceResponse represents need back-to-source response of AnnouncePeerResponse. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NeedBackToSourceResponse { + /// Download back-to-source reason. + #[prost(string, tag = "1")] + pub reason: ::prost::alloc::string::String, +} +/// AnnouncePeerResponse represents response of AnnouncePeer. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AnnouncePeerResponse { + #[prost(oneof = "announce_peer_response::Response", tags = "1, 2, 3, 4")] + pub response: ::core::option::Option, + #[prost(oneof = "announce_peer_response::Errordetails", tags = "5, 6")] + pub errordetails: ::core::option::Option, +} +/// Nested message and enum types in `AnnouncePeerResponse`. 
+pub mod announce_peer_response { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Response { + #[prost(message, tag = "1")] + TinyTaskResponse(super::TinyTaskResponse), + #[prost(message, tag = "2")] + SmallTaskResponse(super::SmallTaskResponse), + #[prost(message, tag = "3")] + NormalTaskResponse(super::NormalTaskResponse), + #[prost(message, tag = "4")] + NeedBackToSourceResponse(super::NeedBackToSourceResponse), + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Errordetails { + #[prost(message, tag = "5")] + SchedulePeerForbidden(super::super::errordetails::SchedulePeerForbidden), + #[prost(message, tag = "6")] + SchedulePeerFailed(super::super::errordetails::SchedulePeerFailed), + } +} +/// StatPeerRequest represents request of StatPeer. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StatPeerRequest { + /// Task id. + #[prost(string, tag = "1")] + pub task_id: ::prost::alloc::string::String, + /// Peer id. + #[prost(string, tag = "2")] + pub peer_id: ::prost::alloc::string::String, +} +/// TODO exchange peer request definition. +/// ExchangePeerRequest represents request of ExchangePeer. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExchangePeerRequest { + /// Task id. + #[prost(string, tag = "1")] + pub task_id: ::prost::alloc::string::String, + /// Peer id. + #[prost(string, tag = "2")] + pub peer_id: ::prost::alloc::string::String, +} +/// TODO exchange peer response definition. +/// ExchangePeerResponse represents response of ExchangePeer. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExchangePeerResponse {} +/// LeavePeerRequest represents request of LeavePeer. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct LeavePeerRequest { + /// Peer id. + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, +} +/// StatTaskRequest represents request of StatTask. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StatTaskRequest { + /// Task id. + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, +} +/// AnnounceHostRequest represents request of AnnounceHost. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AnnounceHostRequest { + /// Host id. + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// Host type. + #[prost(uint32, tag = "2")] + pub r#type: u32, + /// Hostname. + #[prost(string, tag = "3")] + pub hostname: ::prost::alloc::string::String, + /// Host ip. + #[prost(string, tag = "4")] + pub ip: ::prost::alloc::string::String, + /// Port of grpc service. + #[prost(int32, tag = "5")] + pub port: i32, + /// Port of download server. + #[prost(int32, tag = "6")] + pub download_port: i32, + /// Host OS. + #[prost(string, tag = "7")] + pub os: ::prost::alloc::string::String, + /// Host platform. + #[prost(string, tag = "8")] + pub platform: ::prost::alloc::string::String, + /// Host platform family. + #[prost(string, tag = "9")] + pub platform_family: ::prost::alloc::string::String, + /// Host platform version. + #[prost(string, tag = "10")] + pub platform_version: ::prost::alloc::string::String, + /// Host kernel version. + #[prost(string, tag = "11")] + pub kernel_version: ::prost::alloc::string::String, + /// CPU Stat. + #[prost(message, optional, tag = "12")] + pub cpu: ::core::option::Option, + /// Memory Stat. + #[prost(message, optional, tag = "13")] + pub memory: ::core::option::Option, + /// Network Stat. 
+ #[prost(message, optional, tag = "14")] + pub network: ::core::option::Option, + /// Disk Stat. + #[prost(message, optional, tag = "15")] + pub disk: ::core::option::Option, + /// Build information. + #[prost(message, optional, tag = "16")] + pub build: ::core::option::Option, +} +/// CPU Stat. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Cpu { + /// Number of logical cores in the system. + #[prost(uint32, tag = "1")] + pub logical_count: u32, + /// Number of physical cores in the system + #[prost(uint32, tag = "2")] + pub physical_count: u32, + /// Percent calculates the percentage of cpu used. + #[prost(double, tag = "3")] + pub percent: f64, + /// Calculates the percentage of cpu used by process. + #[prost(double, tag = "4")] + pub process_percent: f64, + /// CPUTimes contains the amounts of time the CPU has spent performing different kinds of work. + #[prost(message, optional, tag = "5")] + pub times: ::core::option::Option, +} +/// CPUTimes contains the amounts of time the CPU has spent performing different +/// kinds of work. Time units are in seconds. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CpuTimes { + /// CPU time of user. + #[prost(double, tag = "1")] + pub user: f64, + /// CPU time of system. + #[prost(double, tag = "2")] + pub system: f64, + /// CPU time of idle. + #[prost(double, tag = "3")] + pub idle: f64, + /// CPU time of nice. + #[prost(double, tag = "4")] + pub nice: f64, + /// CPU time of iowait. + #[prost(double, tag = "5")] + pub iowait: f64, + /// CPU time of irq. + #[prost(double, tag = "6")] + pub irq: f64, + /// CPU time of softirq. + #[prost(double, tag = "7")] + pub softirq: f64, + /// CPU time of steal. + #[prost(double, tag = "8")] + pub steal: f64, + /// CPU time of guest. + #[prost(double, tag = "9")] + pub guest: f64, + /// CPU time of guest nice. 
+ #[prost(double, tag = "10")] + pub guest_nice: f64, +} +/// Memory Stat. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Memory { + /// Total amount of RAM on this system. + #[prost(uint64, tag = "1")] + pub total: u64, + /// RAM available for programs to allocate. + #[prost(uint64, tag = "2")] + pub available: u64, + /// RAM used by programs. + #[prost(uint64, tag = "3")] + pub used: u64, + /// Percentage of RAM used by programs. + #[prost(double, tag = "4")] + pub used_percent: f64, + /// Calculates the percentage of memory used by process. + #[prost(double, tag = "5")] + pub process_used_percent: f64, + /// This is the kernel's notion of free memory. + #[prost(uint64, tag = "6")] + pub free: u64, +} +/// Network Stat. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Network { + /// Return count of tcp connections opened and status is ESTABLISHED. + #[prost(uint32, tag = "1")] + pub tcp_connection_count: u32, + /// Return count of upload tcp connections opened and status is ESTABLISHED. + #[prost(uint32, tag = "2")] + pub upload_tcp_connection_count: u32, + /// Security domain for network. + #[prost(string, tag = "3")] + pub security_domain: ::prost::alloc::string::String, + /// Location path(area|country|province|city|...). + #[prost(string, tag = "4")] + pub location: ::prost::alloc::string::String, + /// IDC where the peer host is located + #[prost(string, tag = "5")] + pub idc: ::prost::alloc::string::String, + /// Network topology(switch|router|...). + #[prost(string, tag = "6")] + pub net_topology: ::prost::alloc::string::String, +} +/// Disk Stat. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Disk { + /// Total amount of disk on the data path of dragonfly. + #[prost(uint64, tag = "1")] + pub total: u64, + /// Free amount of disk on the data path of dragonfly. 
+ #[prost(uint64, tag = "2")] + pub free: u64, + /// Used amount of disk on the data path of dragonfly. + #[prost(uint64, tag = "3")] + pub used: u64, + /// Used percent of disk on the data path of dragonfly directory. + #[prost(double, tag = "4")] + pub used_percent: f64, + /// Total amount of indoes on the data path of dragonfly directory. + #[prost(uint64, tag = "5")] + pub inodes_total: u64, + /// Used amount of indoes on the data path of dragonfly directory. + #[prost(uint64, tag = "6")] + pub inodes_used: u64, + /// Free amount of indoes on the data path of dragonfly directory. + #[prost(uint64, tag = "7")] + pub inodes_free: u64, + /// Used percent of indoes on the data path of dragonfly directory. + #[prost(double, tag = "8")] + pub inodes_used_percent: f64, +} +/// Build information. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Build { + /// Git version. + #[prost(string, tag = "1")] + pub git_version: ::prost::alloc::string::String, + /// Git commit. + #[prost(string, tag = "2")] + pub git_commit: ::prost::alloc::string::String, + /// Golang version. + #[prost(string, tag = "3")] + pub go_version: ::prost::alloc::string::String, + /// Build platform. + #[prost(string, tag = "4")] + pub platform: ::prost::alloc::string::String, +} +/// LeaveHostRequest represents request of LeaveHost. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct LeaveHostRequest { + /// Host id. + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, +} +/// Generated client implementations. +pub mod scheduler_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Scheduler RPC Service. 
+ #[derive(Debug, Clone)] + pub struct SchedulerClient { + inner: tonic::client::Grpc, + } + impl SchedulerClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: std::convert::TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl SchedulerClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> SchedulerClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + SchedulerClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// AnnouncePeer announces peer to scheduler. 
+ pub async fn announce_peer( + &mut self, + request: impl tonic::IntoStreamingRequest< + Message = super::AnnouncePeerRequest, + >, + ) -> Result< + tonic::Response>, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/scheduler.Scheduler/AnnouncePeer", + ); + self.inner.streaming(request.into_streaming_request(), path, codec).await + } + /// Checks information of peer. + pub async fn stat_peer( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/scheduler.Scheduler/StatPeer", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// LeavePeer releases peer in scheduler. + pub async fn leave_peer( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/scheduler.Scheduler/LeavePeer", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// TODO exchange peer api definition. + /// ExchangePeer exchanges peer information. 
+ pub async fn exchange_peer( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/scheduler.Scheduler/ExchangePeer", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// Checks information of task. + pub async fn stat_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/scheduler.Scheduler/StatTask", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// AnnounceHost announces host to scheduler. + pub async fn announce_host( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/scheduler.Scheduler/AnnounceHost", + ); + self.inner.unary(request.into_request(), path, codec).await + } + /// LeaveHost releases host in scheduler. 
+ pub async fn leave_host( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/scheduler.Scheduler/LeaveHost", + ); + self.inner.unary(request.into_request(), path, codec).await + } + } +} +/// Generated server implementations. +pub mod scheduler_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with SchedulerServer. + #[async_trait] + pub trait Scheduler: Send + Sync + 'static { + /// Server streaming response type for the AnnouncePeer method. + type AnnouncePeerStream: futures_core::Stream< + Item = Result, + > + + Send + + 'static; + /// AnnouncePeer announces peer to scheduler. + async fn announce_peer( + &self, + request: tonic::Request>, + ) -> Result, tonic::Status>; + /// Checks information of peer. + async fn stat_peer( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// LeavePeer releases peer in scheduler. + async fn leave_peer( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// TODO exchange peer api definition. + /// ExchangePeer exchanges peer information. + async fn exchange_peer( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// Checks information of task. + async fn stat_task( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// AnnounceHost announces host to scheduler. + async fn announce_host( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + /// LeaveHost releases host in scheduler. + async fn leave_host( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + } + /// Scheduler RPC Service. 
+ #[derive(Debug)] + pub struct SchedulerServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + } + struct _Inner(Arc); + impl SchedulerServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + } + impl tonic::codegen::Service> for SchedulerServer + where + T: Scheduler, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/scheduler.Scheduler/AnnouncePeer" => { + #[allow(non_camel_case_types)] + struct AnnouncePeerSvc(pub Arc); + impl< + T: Scheduler, + > tonic::server::StreamingService + for AnnouncePeerSvc { + type Response = super::AnnouncePeerResponse; + type ResponseStream = T::AnnouncePeerStream; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + 
request: tonic::Request< + tonic::Streaming, + >, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).announce_peer(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = AnnouncePeerSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.streaming(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/scheduler.Scheduler/StatPeer" => { + #[allow(non_camel_case_types)] + struct StatPeerSvc(pub Arc); + impl< + T: Scheduler, + > tonic::server::UnaryService + for StatPeerSvc { + type Response = super::super::common::Peer; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).stat_peer(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = StatPeerSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/scheduler.Scheduler/LeavePeer" => { + #[allow(non_camel_case_types)] + struct LeavePeerSvc(pub Arc); + impl< + T: Scheduler, + > tonic::server::UnaryService + for LeavePeerSvc { + type Response = (); + type Future = BoxFuture< + tonic::Response, + 
tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).leave_peer(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = LeavePeerSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/scheduler.Scheduler/ExchangePeer" => { + #[allow(non_camel_case_types)] + struct ExchangePeerSvc(pub Arc); + impl< + T: Scheduler, + > tonic::server::UnaryService + for ExchangePeerSvc { + type Response = super::ExchangePeerResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).exchange_peer(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ExchangePeerSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/scheduler.Scheduler/StatTask" => { + #[allow(non_camel_case_types)] + struct StatTaskSvc(pub Arc); + impl< + T: Scheduler, + > tonic::server::UnaryService + for StatTaskSvc { + type Response = 
super::super::common::Task; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).stat_task(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = StatTaskSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/scheduler.Scheduler/AnnounceHost" => { + #[allow(non_camel_case_types)] + struct AnnounceHostSvc(pub Arc); + impl< + T: Scheduler, + > tonic::server::UnaryService + for AnnounceHostSvc { + type Response = (); + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).announce_host(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = AnnounceHostSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/scheduler.Scheduler/LeaveHost" => { + #[allow(non_camel_case_types)] + struct LeaveHostSvc(pub Arc); + impl< + T: Scheduler, + > tonic::server::UnaryService + for 
LeaveHostSvc { + type Response = (); + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { (*inner).leave_host(request).await }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = LeaveHostSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for SchedulerServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(self.0.clone()) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for SchedulerServer { + const NAME: &'static str = "scheduler.Scheduler"; + } +} diff --git a/src/security.rs b/src/security.rs new file mode 100644 index 0000000..f3fe013 --- /dev/null +++ b/src/security.rs @@ -0,0 +1,269 @@ +/// Certificate request type. 
+/// Dragonfly supports peers authentication with Mutual TLS(mTLS) +/// For mTLS, all peers need to request TLS certificates for communicating +/// The server side may overwrite any requested certificate field based on its policies. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CertificateRequest { + /// ASN.1 DER form certificate request. + /// The public key in the CSR is used to generate the certificate, + /// and other fields in the generated certificate may be overwritten by the CA. + #[prost(bytes = "vec", tag = "1")] + pub csr: ::prost::alloc::vec::Vec, + /// Optional: requested certificate validity period. + #[prost(message, optional, tag = "2")] + pub validity_period: ::core::option::Option<::prost_types::Duration>, +} +/// Certificate response type. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CertificateResponse { + /// ASN.1 DER form certificate chain. + #[prost(bytes = "vec", repeated, tag = "1")] + pub certificate_chain: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, +} +/// Generated client implementations. +pub mod certificate_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Service for managing certificates issued by the CA. + #[derive(Debug, Clone)] + pub struct CertificateServiceClient { + inner: tonic::client::Grpc, + } + impl CertificateServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: std::convert::TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl CertificateServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> CertificateServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + CertificateServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Using provided CSR, returns a signed certificate. 
+ pub async fn issue_certificate( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/security.CertificateService/IssueCertificate", + ); + self.inner.unary(request.into_request(), path, codec).await + } + } +} +/// Generated server implementations. +pub mod certificate_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with CertificateServiceServer. + #[async_trait] + pub trait CertificateService: Send + Sync + 'static { + /// Using provided CSR, returns a signed certificate. + async fn issue_certificate( + &self, + request: tonic::Request, + ) -> Result, tonic::Status>; + } + /// Service for managing certificates issued by the CA. + #[derive(Debug)] + pub struct CertificateServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + } + struct _Inner(Arc); + impl CertificateServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. 
+ #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + } + impl tonic::codegen::Service> for CertificateServiceServer + where + T: CertificateService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/security.CertificateService/IssueCertificate" => { + #[allow(non_camel_case_types)] + struct IssueCertificateSvc(pub Arc); + impl< + T: CertificateService, + > tonic::server::UnaryService + for IssueCertificateSvc { + type Response = super::CertificateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = self.0.clone(); + let fut = async move { + (*inner).issue_certificate(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = IssueCertificateSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + 
http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for CertificateServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(self.0.clone()) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService + for CertificateServiceServer { + const NAME: &'static str = "security.CertificateService"; + } +}