feat: init rust grpc protoc (#51)

Signed-off-by: Gaius <gaius.qi@gmail.com>
This commit is contained in:
Gaius 2023-01-04 15:35:17 +08:00 committed by GitHub
parent 58fdcc6ff5
commit 7b8158b461
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
22 changed files with 6240 additions and 83 deletions

View File

@ -6,12 +6,9 @@ on:
pull_request:
branches: [ main ]
env:
GO_VERSION: 1.19
jobs:
lint:
name: Lint
golang-lint:
name: Golang Lint
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
@ -23,6 +20,43 @@ jobs:
with:
version: v1.46.2
rust-lint:
name: Rust Lint
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Install protobuf-compiler
run: sudo apt-get install protobuf-compiler
- name: Install toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Run cargo check
uses: actions-rs/cargo@v1
with:
command: check
- name: Cargo clippy
uses: actions-rs/cargo@v1
with:
command: clippy
args: -- -D warnings
markdown-lint:
name: Markdown Lint
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Markdown lint
uses: docker://avtodev/markdown-lint:v1
with:

14
.gitignore vendored
View File

@ -62,3 +62,17 @@ Temporary Items
.apdisk
artifacts
# Generated by Cargo
# will have compiled files and executables
/target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# Added by cargo
/target

19
Cargo.toml Normal file
View File

@ -0,0 +1,19 @@
[package]
name = "api"
version = "0.1.0"
authors = ["Gaius <gaius.qi@gmail.com>"]
edition = "2021"
license = "Apache-2.0"
homepage = "https://d7y.io"
repository = "https://github.com/dragonflyoss/api"
readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tonic = "0.8.3"
prost = "0.11"
prost-types = "0.11"
tokio = { version = "1.8.2", features = ["rt-multi-thread", "macros"] }
[build-dependencies]
tonic-build = "0.8.3"

View File

@ -32,10 +32,21 @@ generate: protoc
.PHONY: generate
# Generate grpc protos
protoc:
@./hack/protoc.sh
protoc: go-protoc rust-protoc
.PHONY: protoc
# Generate grpc protos of golang
go-protoc:
@echo "Begin to generate grpc protos of golang."
@./hack/protoc.sh
.PHONY: go-protoc
# Generate grpc protos of rust
rust-protoc:
@echo "Begin to generate grpc protos of rust."
@cargo build --release
.PHONY: rust-protoc
# Clear compiled files
clean:
@go clean
@ -47,4 +58,6 @@ help:
@echo "make markdownlint run markdown lint"
@echo "make generate run go generate"
@echo "make protoc generate grpc protos"
@echo "make go-protoc generate grpc protos of golang"
@echo "make rust-protoc generate grpc protos of rust"
@echo "make clean clean"

18
build.rs Normal file
View File

@ -0,0 +1,18 @@
/// Build script: compiles the crate's protobuf definitions into Rust
/// gRPC client and server stubs using `tonic-build`.
///
/// Generated modules are written into `src` (rather than `OUT_DIR`) so
/// the generated code lives alongside the hand-written sources.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Every .proto file that must be compiled; `proto/` is also the
    // include path used to resolve their mutual imports.
    let protos = [
        "proto/common.proto",
        "proto/security.proto",
        "proto/errordetails.proto",
        "proto/dfdaemon.proto",
        "proto/manager.proto",
        "proto/scheduler.proto",
    ];

    let builder = tonic_build::configure()
        .build_client(true)
        .build_server(true)
        .out_dir("src");
    builder.compile(&protos, &["proto/"])?;

    Ok(())
}

View File

@ -22,14 +22,13 @@
package v1
import (
reflect "reflect"
sync "sync"
v1 "d7y.io/api/pkg/apis/common/v1"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
)
const (

View File

@ -284,27 +284,27 @@ type SeedPeer struct {
// Seed peer type.
Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"`
// Seed peer idc.
Idc string `protobuf:"bytes,5,opt,name=idc,proto3" json:"idc,omitempty"`
Idc string `protobuf:"bytes,4,opt,name=idc,proto3" json:"idc,omitempty"`
// Seed peer network topology.
NetTopology string `protobuf:"bytes,6,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"`
NetTopology string `protobuf:"bytes,5,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"`
// Seed peer location.
Location string `protobuf:"bytes,7,opt,name=location,proto3" json:"location,omitempty"`
Location string `protobuf:"bytes,6,opt,name=location,proto3" json:"location,omitempty"`
// Seed peer ip.
Ip string `protobuf:"bytes,8,opt,name=ip,proto3" json:"ip,omitempty"`
Ip string `protobuf:"bytes,7,opt,name=ip,proto3" json:"ip,omitempty"`
// Seed peer grpc port.
Port int32 `protobuf:"varint,9,opt,name=port,proto3" json:"port,omitempty"`
Port int32 `protobuf:"varint,8,opt,name=port,proto3" json:"port,omitempty"`
// Seed peer download port.
DownloadPort int32 `protobuf:"varint,10,opt,name=download_port,json=downloadPort,proto3" json:"download_port,omitempty"`
DownloadPort int32 `protobuf:"varint,9,opt,name=download_port,json=downloadPort,proto3" json:"download_port,omitempty"`
// Seed peer state.
State string `protobuf:"bytes,11,opt,name=state,proto3" json:"state,omitempty"`
State string `protobuf:"bytes,10,opt,name=state,proto3" json:"state,omitempty"`
// ID of the cluster to which the seed peer belongs.
SeedPeerClusterId uint64 `protobuf:"varint,12,opt,name=seed_peer_cluster_id,json=seedPeerClusterId,proto3" json:"seed_peer_cluster_id,omitempty"`
SeedPeerClusterId uint64 `protobuf:"varint,11,opt,name=seed_peer_cluster_id,json=seedPeerClusterId,proto3" json:"seed_peer_cluster_id,omitempty"`
// Cluster to which the seed peer belongs.
SeedPeerCluster *SeedPeerCluster `protobuf:"bytes,13,opt,name=seed_peer_cluster,json=seedPeerCluster,proto3" json:"seed_peer_cluster,omitempty"`
SeedPeerCluster *SeedPeerCluster `protobuf:"bytes,12,opt,name=seed_peer_cluster,json=seedPeerCluster,proto3" json:"seed_peer_cluster,omitempty"`
// Schedulers included in seed peer.
Schedulers []*Scheduler `protobuf:"bytes,14,rep,name=schedulers,proto3" json:"schedulers,omitempty"`
Schedulers []*Scheduler `protobuf:"bytes,13,rep,name=schedulers,proto3" json:"schedulers,omitempty"`
// Seed peer object storage port.
ObjectStoragePort int32 `protobuf:"varint,15,opt,name=object_storage_port,json=objectStoragePort,proto3" json:"object_storage_port,omitempty"`
ObjectStoragePort int32 `protobuf:"varint,14,opt,name=object_storage_port,json=objectStoragePort,proto3" json:"object_storage_port,omitempty"`
}
func (x *SeedPeer) Reset() {
@ -526,21 +526,21 @@ type UpdateSeedPeerRequest struct {
// Seed peer type.
Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"`
// Seed peer idc.
Idc string `protobuf:"bytes,5,opt,name=idc,proto3" json:"idc,omitempty"`
Idc string `protobuf:"bytes,4,opt,name=idc,proto3" json:"idc,omitempty"`
// Seed peer network topology.
NetTopology string `protobuf:"bytes,6,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"`
NetTopology string `protobuf:"bytes,5,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"`
// Seed peer location.
Location string `protobuf:"bytes,7,opt,name=location,proto3" json:"location,omitempty"`
Location string `protobuf:"bytes,6,opt,name=location,proto3" json:"location,omitempty"`
// Seed peer ip.
Ip string `protobuf:"bytes,8,opt,name=ip,proto3" json:"ip,omitempty"`
Ip string `protobuf:"bytes,7,opt,name=ip,proto3" json:"ip,omitempty"`
// Seed peer port.
Port int32 `protobuf:"varint,9,opt,name=port,proto3" json:"port,omitempty"`
Port int32 `protobuf:"varint,8,opt,name=port,proto3" json:"port,omitempty"`
// Seed peer download port.
DownloadPort int32 `protobuf:"varint,10,opt,name=download_port,json=downloadPort,proto3" json:"download_port,omitempty"`
DownloadPort int32 `protobuf:"varint,9,opt,name=download_port,json=downloadPort,proto3" json:"download_port,omitempty"`
// ID of the cluster to which the seed peer belongs.
SeedPeerClusterId uint64 `protobuf:"varint,11,opt,name=seed_peer_cluster_id,json=seedPeerClusterId,proto3" json:"seed_peer_cluster_id,omitempty"`
SeedPeerClusterId uint64 `protobuf:"varint,10,opt,name=seed_peer_cluster_id,json=seedPeerClusterId,proto3" json:"seed_peer_cluster_id,omitempty"`
// Seed peer object storage port.
ObjectStoragePort int32 `protobuf:"varint,12,opt,name=object_storage_port,json=objectStoragePort,proto3" json:"object_storage_port,omitempty"`
ObjectStoragePort int32 `protobuf:"varint,11,opt,name=object_storage_port,json=objectStoragePort,proto3" json:"object_storage_port,omitempty"`
}
func (x *UpdateSeedPeerRequest) Reset() {
@ -784,9 +784,9 @@ type Scheduler struct {
// Cluster to which the scheduler belongs.
SchedulerCluster *SchedulerCluster `protobuf:"bytes,11,opt,name=scheduler_cluster,json=schedulerCluster,proto3" json:"scheduler_cluster,omitempty"`
// Seed peers to which the scheduler belongs.
SeedPeers []*SeedPeer `protobuf:"bytes,13,rep,name=seed_peers,json=seedPeers,proto3" json:"seed_peers,omitempty"`
SeedPeers []*SeedPeer `protobuf:"bytes,12,rep,name=seed_peers,json=seedPeers,proto3" json:"seed_peers,omitempty"`
// Scheduler network topology.
NetTopology string `protobuf:"bytes,14,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"`
NetTopology string `protobuf:"bytes,13,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"`
}
func (x *Scheduler) Reset() {
@ -1131,11 +1131,11 @@ type ListSchedulersRequest struct {
// Source service ip.
Ip string `protobuf:"bytes,3,opt,name=ip,proto3" json:"ip,omitempty"`
// Source service host information.
HostInfo map[string]string `protobuf:"bytes,5,rep,name=host_info,json=hostInfo,proto3" json:"host_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
HostInfo map[string]string `protobuf:"bytes,4,rep,name=host_info,json=hostInfo,proto3" json:"host_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Dfdaemon version.
Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"`
Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"`
// Dfdaemon commit.
Commit string `protobuf:"bytes,7,opt,name=commit,proto3" json:"commit,omitempty"`
Commit string `protobuf:"bytes,6,opt,name=commit,proto3" json:"commit,omitempty"`
}
func (x *ListSchedulersRequest) Reset() {
@ -3091,30 +3091,30 @@ var file_pkg_apis_manager_v2_manager_proto_rawDesc = []byte{
0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79,
0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10,
0x0a, 0x03, 0x69, 0x64, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x63,
0x0a, 0x03, 0x69, 0x64, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x63,
0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79,
0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c,
0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c,
0x6f, 0x67, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12,
0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70,
0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12,
0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70,
0x6f, 0x72, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
0x70, 0x6f, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x64, 0x6f, 0x77, 0x6e,
0x70, 0x6f, 0x72, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x64, 0x6f, 0x77, 0x6e,
0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74,
0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f,
0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f,
0x0a, 0x14, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73,
0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x65,
0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x65,
0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12,
0x44, 0x0a, 0x11, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75,
0x73, 0x74, 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e,
0x73, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e,
0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6c, 0x75,
0x73, 0x74, 0x65, 0x72, 0x52, 0x0f, 0x73, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6c,
0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c,
0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x61, 0x6e, 0x61,
0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x61, 0x6e, 0x61,
0x67, 0x65, 0x72, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x0a, 0x73,
0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x6f, 0x62, 0x6a,
0x65, 0x63, 0x74, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74,
0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74,
0x18, 0x0e, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74,
0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xd0, 0x01, 0x0a, 0x12, 0x47, 0x65,
0x74, 0x53, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
@ -3140,26 +3140,26 @@ var file_pkg_apis_manager_v2_manager_proto_rawDesc = []byte{
0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1a, 0xfa, 0x42, 0x17, 0x72,
0x15, 0x52, 0x05, 0x73, 0x75, 0x70, 0x65, 0x72, 0x52, 0x06, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67,
0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x03,
0x69, 0x64, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08,
0x69, 0x64, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08,
0x10, 0x01, 0x18, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x03, 0x69, 0x64, 0x63, 0x12, 0x30, 0x0a,
0x0c, 0x6e, 0x65, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x18, 0x06, 0x20,
0x0c, 0x6e, 0x65, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x18, 0x05, 0x20,
0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, 0x80, 0x08, 0xd0,
0x01, 0x01, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x12,
0x27, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28,
0x27, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28,
0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0x18, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x08,
0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x08,
0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x07,
0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x70, 0x01, 0x52, 0x02, 0x69,
0x70, 0x12, 0x20, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x42,
0x70, 0x12, 0x20, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x42,
0x0c, 0xfa, 0x42, 0x09, 0x1a, 0x07, 0x10, 0xff, 0xff, 0x03, 0x28, 0x80, 0x08, 0x52, 0x04, 0x70,
0x6f, 0x72, 0x74, 0x12, 0x31, 0x0a, 0x0d, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
0x70, 0x6f, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x1a,
0x70, 0x6f, 0x72, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x1a,
0x07, 0x10, 0xff, 0xff, 0x03, 0x28, 0x80, 0x08, 0x52, 0x0c, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f,
0x61, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x38, 0x0a, 0x14, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x70,
0x65, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0b,
0x65, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0a,
0x20, 0x01, 0x28, 0x04, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, 0x28, 0x01, 0x52, 0x11, 0x73,
0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64,
0x12, 0x3e, 0x0a, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61,
0x67, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0e, 0xfa,
0x67, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0e, 0xfa,
0x42, 0x0b, 0x1a, 0x09, 0x10, 0xff, 0xff, 0x03, 0x28, 0x80, 0x08, 0x40, 0x01, 0x52, 0x11, 0x6f,
0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x6f, 0x72, 0x74,
0x22, 0xdc, 0x01, 0x0a, 0x10, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x43, 0x6c,
@ -3197,11 +3197,11 @@ var file_pkg_apis_manager_v2_manager_proto_rawDesc = []byte{
0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x63, 0x68,
0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x10, 0x73,
0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12,
0x30, 0x0a, 0x0a, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20,
0x30, 0x0a, 0x0a, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x0c, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65,
0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x09, 0x73, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72,
0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67,
0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f,
0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f,
0x6c, 0x6f, 0x67, 0x79, 0x22, 0xd2, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65,
0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0b,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
@ -3254,14 +3254,14 @@ var file_pkg_apis_manager_v2_manager_proto_rawDesc = []byte{
0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x02, 0x69,
0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x70, 0x01,
0x52, 0x02, 0x69, 0x70, 0x12, 0x53, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x6e, 0x66,
0x6f, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
0x6f, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x73,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f,
0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x9a, 0x01, 0x02, 0x30, 0x01, 0x52,
0x08, 0x68, 0x6f, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x27, 0x0a, 0x07, 0x76, 0x65, 0x72,
0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72,
0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72,
0x08, 0x10, 0x01, 0x18, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01,
0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01,
0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, 0x80, 0x08, 0xd0, 0x01,
0x01, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x1a, 0x3b, 0x0a, 0x0d, 0x48, 0x6f, 0x73,
0x74, 0x49, 0x6e, 0x66, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,

View File

@ -74,27 +74,27 @@ message SeedPeer {
// Seed peer type.
string type = 3;
// Seed peer idc.
string idc = 5;
string idc = 4;
// Seed peer network topology.
string net_topology = 6;
string net_topology = 5;
// Seed peer location.
string location = 7;
string location = 6;
// Seed peer ip.
string ip = 8;
string ip = 7;
// Seed peer grpc port.
int32 port = 9;
int32 port = 8;
// Seed peer download port.
int32 download_port = 10;
int32 download_port = 9;
// Seed peer state.
string state = 11;
string state = 10;
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 12;
uint64 seed_peer_cluster_id = 11;
// Cluster to which the seed peer belongs.
SeedPeerCluster seed_peer_cluster = 13;
SeedPeerCluster seed_peer_cluster = 12;
// Schedulers included in seed peer.
repeated Scheduler schedulers = 14;
repeated Scheduler schedulers = 13;
// Seed peer object storage port.
int32 object_storage_port = 15;
int32 object_storage_port = 14;
}
// GetSeedPeerRequest represents request of GetSeedPeer.
@ -118,21 +118,21 @@ message UpdateSeedPeerRequest {
// Seed peer type.
string type = 3 [(validate.rules).string = {in: ["super", "strong", "weak"]}];
// Seed peer idc.
string idc = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
string idc = 4 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Seed peer network topology.
string net_topology = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
string net_topology = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Seed peer location.
string location = 7 [(validate.rules).string = {max_len: 1024, ignore_empty: true}];
string location = 6 [(validate.rules).string = {max_len: 1024, ignore_empty: true}];
// Seed peer ip.
string ip = 8 [(validate.rules).string = {ip: true}];
string ip = 7 [(validate.rules).string = {ip: true}];
// Seed peer port.
int32 port = 9 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
int32 port = 8 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Seed peer download port.
int32 download_port = 10 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
int32 download_port = 9 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 11 [(validate.rules).uint64 = {gte: 1}];
uint64 seed_peer_cluster_id = 10 [(validate.rules).uint64 = {gte: 1}];
// Seed peer object storage port.
int32 object_storage_port = 12 [(validate.rules).int32 = {gte: 1024, lt: 65535, ignore_empty: true}];
int32 object_storage_port = 11 [(validate.rules).int32 = {gte: 1024, lt: 65535, ignore_empty: true}];
}
// SeedPeerCluster represents cluster of scheduler.
@ -178,9 +178,9 @@ message Scheduler {
// Cluster to which the scheduler belongs.
SchedulerCluster scheduler_cluster = 11;
// Seed peers to which the scheduler belongs.
repeated SeedPeer seed_peers = 13;
repeated SeedPeer seed_peers = 12;
// Scheduler network topology.
string net_topology = 14;
string net_topology = 13;
}
// GetSchedulerRequest represents request of GetScheduler.
@ -228,11 +228,11 @@ message ListSchedulersRequest {
// Source service ip.
string ip = 3 [(validate.rules).string.ip = true];
// Source service host information.
map<string, string> host_info = 5 [(validate.rules).map.ignore_empty = true];
map<string, string> host_info = 4 [(validate.rules).map.ignore_empty = true];
// Dfdaemon version.
string version = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
string version = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Dfdaemon commit.
string commit = 7 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
string commit = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
}
// ListSchedulersResponse represents response of ListSchedulers.

231
proto/common.proto Normal file
View File

@ -0,0 +1,231 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package common;
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
// SizeScope represents the size scope of a task; the scheduler uses it
// to decide how the task content is delivered.
enum SizeScope {
// size > one piece size.
NORMAL = 0;
// 128 byte < size <= one piece size and the content is plain type.
SMALL = 1;
// size <= 128 byte and the content is plain type.
TINY = 2;
// size == 0 byte and the content is plain type.
EMPTY = 3;
}
// TaskType represents the type of a task.
enum TaskType {
// DFDAEMON is dfdaemon type of task,
// dfdaemon task is a normal p2p task.
DFDAEMON = 0;
// DFCACHE is dfcache type of task,
// dfcache task is a cache task, and the task url is fake url.
// It can only be used for caching and cannot be downloaded back to source.
DFCACHE = 1;
// DFSTORE is dfstore type of task,
// dfstore task is a persistent task in backend.
DFSTORE = 2;
}
// TrafficType represents where downloaded traffic came from.
enum TrafficType {
// BACK_TO_SOURCE is to download traffic from the source.
BACK_TO_SOURCE = 0;
// REMOTE_PEER is to download traffic from the remote peer.
REMOTE_PEER = 1;
// LOCAL_PEER is to download traffic from the local peer.
LOCAL_PEER = 2;
}
// Priority represents the download priority of an application.
// Higher levels escalate which class of peer is triggered to
// back-to-source on a task's first download.
enum Priority {
// LEVEL0 has no special meaning for scheduler.
LEVEL0 = 0;
// LEVEL1 represents the download task is forbidden,
// and an error code is returned during the registration.
LEVEL1 = 1;
// LEVEL2 represents when the task is downloaded for the first time,
// allow peers to download from the other peers,
// but not back-to-source. When the task is not downloaded for
// the first time, it is scheduled normally.
LEVEL2 = 2;
// LEVEL3 represents when the task is downloaded for the first time,
// the normal peer is first to download back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL3 = 3;
// LEVEL4 represents when the task is downloaded for the first time,
// the weak peer is first triggered to back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL4 = 4;
// LEVEL5 represents when the task is downloaded for the first time,
// the strong peer is first triggered to back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL5 = 5;
// LEVEL6 represents when the task is downloaded for the first time,
// the super peer is first triggered to back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL6 = 6;
}
// Peer metadata: one participant in the p2p download of a task.
message Peer {
// Peer id.
string id = 1;
// Pieces held by this peer.
repeated Piece pieces = 2;
// Task the peer is downloading.
Task task = 3;
// Host the peer runs on.
Host host = 4;
// Peer state (state-machine name; values defined by the scheduler — see callers).
string state = 5;
// Peer create time.
google.protobuf.Timestamp created_at = 6;
// Peer update time.
google.protobuf.Timestamp updated_at = 7;
}
// Task metadata: one downloadable resource shared between peers.
message Task {
// Task id.
string id = 1;
// Task type (string form of TaskType — TODO confirm why this is not the enum).
string type = 2;
// Task size scope.
SizeScope size_scope = 3;
// Pieces of task.
repeated Piece pieces = 4;
// Task state.
string state = 5;
// Task metadata.
Metadata metadata = 6;
// Task content length in bytes.
int64 content_length = 7;
// Task peer count.
int32 peer_count = 8;
// Task contains available peer.
// NOTE(review): camelCase field name breaks the snake_case convention used
// everywhere else; renaming now would change generated accessors/JSON names,
// so it is flagged rather than fixed.
bool hasAvailablePeer = 9;
// Task create time.
google.protobuf.Timestamp created_at = 10;
// Task update time.
google.protobuf.Timestamp updated_at = 11;
}
// Host metadata: the machine a peer runs on.
message Host {
// Host id.
string id = 1;
// Host ipv4.
string ipv4 = 2;
// Host ipv6.
string ipv6 = 3;
// Peer hostname.
string hostname = 4;
// Port of grpc service.
int32 port = 5;
// Port of download server.
int32 download_port = 6;
// Security domain for network.
string security_domain = 7;
// Host location (area, country, province, city, etc.).
repeated string location = 8;
// IDC where the peer host is located.
string idc = 9;
// Network topology (switch, router, etc.).
repeated string net_topology = 10;
}
// Range represents a byte range of a download request.
message Range {
// Begin of range.
uint64 begin = 1;
// End of range (inclusivity not specified here — confirm against callers).
uint64 end = 2;
}
// Metadata represents the download metadata of a task.
message Metadata {
// Download url.
string url = 1;
// Digest of the pieces digest, for example md5:xxx or sha256:yyy.
string digest = 2;
// Range is url range of request.
Range range = 3;
// Task type.
common.TaskType type = 4;
// URL tag identifies different task for same url.
string tag = 5;
// Application of task.
string application = 6;
// Peer priority.
Priority priority = 7;
// Filter url used to generate task id
// (presumably query parameters stripped from the url — TODO confirm).
repeated string filters = 8;
// Task request headers.
map<string, string> header = 9;
// Task piece size in bytes.
int32 piece_size = 10;
}
// Piece represents information of a single piece of a task.
message Piece {
// Piece number.
uint32 number = 1;
// Parent peer id (the peer this piece was downloaded from).
string parent_id = 2;
// Piece offset in bytes within the task content.
uint64 offset = 3;
// Piece size in bytes.
uint64 size = 4;
// Digest of the piece data, for example md5:xxx or sha256:yyy.
string digest = 5;
// Traffic type.
TrafficType traffic_type = 6;
// Time cost of downloading this piece.
google.protobuf.Duration cost = 7;
// Piece create time.
google.protobuf.Timestamp created_at = 8;
}
// ExtendAttribute carries protocol-specific response attributes of a task.
message ExtendAttribute {
// Task response header, e.g. HTTP response headers.
map<string, string> header = 1;
// Task response code, e.g. HTTP status code.
int32 status_code = 2;
// Task response status, e.g. HTTP status text.
string status = 3;
}

142
proto/dfdaemon.proto Normal file
View File

@ -0,0 +1,142 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package dfdaemon;
import "common.proto";
import "errordetails.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/empty.proto";
// InterestedAllPiecesRequest represents the "interested in all pieces"
// variant of SyncPiecesRequest. It carries no fields.
message InterestedAllPiecesRequest {
}
// InterestedPiecesRequest represents the "interested in specific pieces"
// variant of SyncPiecesRequest.
message InterestedPiecesRequest {
// Interested piece numbers.
repeated uint32 piece_numbers = 1;
}
// StatMetadataRequest represents stat metadata request of SyncPiecesRequest.
message StatMetadataRequest {
}
// SyncPiecesRequest represents request of SyncPieces.
message SyncPiecesRequest{
oneof request {
InterestedAllPiecesRequest interested_all_pieces_request = 1;
InterestedPiecesRequest interested_pieces_request = 2;
StatMetadataRequest stat_metadata_request = 3;
}
}
// InterestedPiecesResponse represents interested pieces response of SyncPiecesResponse.
message InterestedPiecesResponse {
// Interested pieces of task.
repeated common.Piece pieces = 1;
}
// StatMetadataResponse represents stat metadata response of SyncPiecesResponse.
message StatMetadataResponse {
// Task metadata.
common.Metadata metadata = 1;
}
// SyncPiecesResponse represents response of SyncPieces.
message SyncPiecesResponse {
oneof response {
InterestedPiecesResponse interested_pieces_response = 1;
StatMetadataResponse stat_metadata_response = 2;
}
// Error details returned when a sync or stat request fails.
oneof errordetails {
errordetails.SyncPiecesFailed sync_pieces_failed = 3;
errordetails.StatMetadataFailed stat_metadata_failed = 4;
}
}
// TriggerTaskRequest represents request of TriggerTask.
message TriggerTaskRequest {
// Task id.
string task_id = 1;
// Task metadata.
common.Metadata metadata = 2;
}
// StatTaskRequest represents request of StatTask.
message StatTaskRequest {
// Task id.
string task_id = 1;
}
// StatTaskResponse represents response of StatTask.
message StatTaskResponse {
// Task information.
common.Task task = 1;
}
// ImportTaskRequest represents request of ImportTask.
message ImportTaskRequest {
// Task metadata.
common.Metadata metadata = 1;
// File path to be imported.
string path = 2;
}
// ExportTaskRequest represents request of ExportTask.
message ExportTaskRequest {
// Task metadata.
common.Metadata metadata = 1;
// File path to be exported.
string path = 2;
// Download timeout.
google.protobuf.Duration timeout = 3;
// Download rate limit in bytes per second.
double download_rate_limit = 4;
// User id.
uint64 uid = 5;
// Group id.
uint64 gid = 6;
}
// DeleteTaskRequest represents request of DeleteTask.
message DeleteTaskRequest {
// Task id.
string task_id = 1;
}
// Dfdaemon RPC Service.
service Dfdaemon{
// SyncPieces syncs pieces from the other peers.
rpc SyncPieces(stream SyncPiecesRequest)returns(stream SyncPiecesResponse);
// TriggerTask triggers task back-to-source download.
rpc TriggerTask(TriggerTaskRequest) returns(google.protobuf.Empty);
// StatTask stats task information.
rpc StatTask(StatTaskRequest) returns(common.Task);
// ImportTask imports task to p2p network.
rpc ImportTask(ImportTaskRequest) returns(google.protobuf.Empty);
// ExportTask exports task from p2p network.
rpc ExportTask(ExportTaskRequest) returns(google.protobuf.Empty);
// DeleteTask deletes task from p2p network.
rpc DeleteTask(DeleteTaskRequest) returns(google.protobuf.Empty);
}

81
proto/errordetails.proto Normal file
View File

@ -0,0 +1,81 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package errordetails;
import "common.proto";
// DownloadPeerBackToSourceFailed is error detail of downloading peer back-to-source.
message DownloadPeerBackToSourceFailed {
// The description of the error.
string description = 1;
}
// DownloadPieceBackToSourceFailed is error detail of downloading piece back-to-source.
message DownloadPieceBackToSourceFailed {
// Temporary recoverable error of source.
bool temporary = 1;
// Source response metadata, e.g. HTTP status code, HTTP status, HTTP header.
common.ExtendAttribute metadata = 2;
// The number of piece.
uint32 piece_number = 3;
// The description of the error.
string description = 4;
}
// DownloadPieceFailed is error detail of downloading piece.
message DownloadPieceFailed {
// Temporary recoverable error of parent peer.
bool temporary = 1;
// Parent peer response metadata, e.g. HTTP status code, HTTP status, HTTP header.
// NOTE(review): original comment said "Source response metadata" — appears copied
// from DownloadPieceBackToSourceFailed; this message reports a parent-peer failure.
common.ExtendAttribute metadata = 2;
// Parent peer id.
string parent_id = 3;
// The number of piece.
uint32 piece_number = 4;
// The description of the error.
string description = 5;
}
// SchedulePeerForbidden is error detail of forbidden.
message SchedulePeerForbidden {
// The description of the error.
string description = 1;
}
// SchedulePeerFailed is error detail of scheduling.
message SchedulePeerFailed {
// The description of the error.
string description = 1;
}
// SyncPiecesFailed is error detail of syncing pieces.
message SyncPiecesFailed {
// Temporary recoverable error of parent peer.
bool temporary = 1;
// Parent peer id.
string parent_id = 2;
// The description of the error.
string description = 3;
}
// StatMetadataFailed is error detail of stat metadata.
message StatMetadataFailed {
// The description of the error.
string description = 1;
}

575
proto/manager.proto Normal file
View File

@ -0,0 +1,575 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package manager;
import "common.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
// Request source type.
enum SourceType {
// Scheduler service.
SCHEDULER_SOURCE = 0;
// Peer service.
PEER_SOURCE = 1;
// SeedPeer service.
SEED_PEER_SOURCE = 2;
}
// SecurityGroup represents security group of cluster.
message SecurityGroup {
// Group id.
uint64 id = 1;
// Group name.
string name = 2;
// Group biography.
string bio = 3;
// Group domain.
string domain = 4;
// Group proxy domain.
string proxy_domain = 5;
}
// SeedPeerCluster represents cluster of seed peer.
message SeedPeerCluster {
// Cluster id.
uint64 id = 1;
// Cluster name.
string name = 2;
// Cluster biography.
string bio = 3;
// Cluster configuration.
bytes config = 4;
// Cluster scopes.
bytes scopes = 5;
// Security group to which the seed peer cluster belongs.
SecurityGroup security_group = 6;
}
// SeedPeer represents seed peer for network.
message SeedPeer {
// Seed peer id.
uint64 id = 1;
// Seed peer hostname.
string host_name = 2;
// Seed peer type.
string type = 3;
// NOTE(review): field number 4 is skipped; if a field was removed it should be
// declared `reserved 4;` to prevent accidental reuse — confirm upstream.
// Seed peer idc.
string idc = 5;
// Seed peer network topology.
string net_topology = 6;
// Seed peer location.
string location = 7;
// Seed peer ip.
string ip = 8;
// Seed peer grpc port.
int32 port = 9;
// Seed peer download port.
int32 download_port = 10;
// Seed peer state.
string state = 11;
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 12;
// Cluster to which the seed peer belongs.
SeedPeerCluster seed_peer_cluster = 13;
// Schedulers included in seed peer.
repeated Scheduler schedulers = 14;
// Seed peer object storage port.
int32 object_storage_port = 15;
}
// GetSeedPeerRequest represents request of GetSeedPeer.
message GetSeedPeerRequest {
// Request source type.
SourceType source_type = 1;
// Seed peer hostname.
string host_name = 2;
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 3;
// Seed peer ip.
string ip = 4;
}
// UpdateSeedPeerRequest represents request of UpdateSeedPeer.
message UpdateSeedPeerRequest {
// Request source type.
SourceType source_type = 1;
// Seed peer hostname.
string host_name = 2;
// Seed peer type.
string type = 3;
// Seed peer idc.
string idc = 4;
// Seed peer network topology.
string net_topology = 5;
// Seed peer location.
string location = 6;
// Seed peer ip.
string ip = 7;
// Seed peer port.
int32 port = 8;
// Seed peer download port.
int32 download_port = 9;
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 10;
// Seed peer object storage port.
int32 object_storage_port = 11;
}
// SchedulerCluster represents cluster of scheduler.
message SchedulerCluster {
// Cluster id.
uint64 id = 1;
// Cluster name.
string name = 2;
// Cluster biography.
string bio = 3;
// Cluster config.
bytes config = 4;
// Cluster client config.
bytes client_config = 5;
// Cluster scopes.
bytes scopes = 6;
// Security group to which the scheduler cluster belongs.
SecurityGroup security_group = 7;
}
// Scheduler represents scheduler for network.
message Scheduler {
// Scheduler id.
uint64 id = 1;
// Scheduler hostname.
string host_name = 2;
// Deprecated: Do not use.
string vips = 3;
// Scheduler idc.
string idc = 4;
// Scheduler location.
string location = 5;
// Deprecated: Use net_topology instead.
bytes net_config = 6;
// Scheduler ip.
string ip = 7;
// Scheduler grpc port.
int32 port = 8;
// Scheduler state.
string state = 9;
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 10;
// Cluster to which the scheduler belongs.
SchedulerCluster scheduler_cluster = 11;
// Seed peers to which the scheduler belongs.
repeated SeedPeer seed_peers = 12;
// Scheduler network topology.
string net_topology = 13;
}
// GetSchedulerRequest represents request of GetScheduler.
message GetSchedulerRequest {
// Request source type.
SourceType source_type = 1;
// Scheduler hostname.
string host_name = 2;
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 3;
// Scheduler ip.
string ip = 4;
}
// UpdateSchedulerRequest represents request of UpdateScheduler.
message UpdateSchedulerRequest {
// Request source type.
SourceType source_type = 1;
// Scheduler hostname.
string host_name = 2;
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 3;
// Deprecated: Do not use.
string vips = 4;
// Scheduler idc.
string idc = 5;
// Scheduler location.
string location = 6;
// Deprecated: Use net_topology instead.
bytes net_config = 7;
// Scheduler ip.
string ip = 8;
// Scheduler port.
int32 port = 9;
// Scheduler network topology.
string net_topology = 10;
}
// ListSchedulersRequest represents request of ListSchedulers.
message ListSchedulersRequest {
// Request source type.
SourceType source_type = 1;
// Source service hostname.
string host_name = 2;
// Source service ip.
string ip = 3;
// Source service host information.
map<string, string> host_info = 4;
// Dfdaemon version.
string version = 5;
// Dfdaemon commit.
string commit = 6;
}
// ListSchedulersResponse represents response of ListSchedulers.
message ListSchedulersResponse {
// Schedulers to which the source service belongs.
repeated Scheduler schedulers = 1;
}
// ObjectStorage represents config of object storage.
message ObjectStorage {
// Object storage name of type.
string name = 1;
// Storage region.
string region = 2;
// Datacenter endpoint.
string endpoint = 3;
// Access key id.
string access_key = 4;
// Access key secret.
string secret_key = 5;
}
// GetObjectStorageRequest represents request of GetObjectStorage.
message GetObjectStorageRequest {
// Request source type.
SourceType source_type = 1;
// Source service hostname.
string host_name = 2;
// Source service ip.
string ip = 3;
}
// Bucket represents config of bucket.
message Bucket {
// Bucket name.
string name = 1;
}
// ListBucketsRequest represents request of ListBuckets.
message ListBucketsRequest {
// Request source type.
SourceType source_type = 1;
// Source service hostname.
string host_name = 2;
// Source service ip.
string ip = 3;
}
// ListBucketsResponse represents response of ListBuckets.
message ListBucketsResponse {
// Bucket configs.
repeated Bucket buckets = 1;
}
// Model represents information of model.
message Model {
// Model id.
string model_id = 1;
// Model name.
string name = 2;
// Model version id.
string version_id = 3;
// Scheduler id.
uint64 scheduler_id = 4;
// Scheduler hostname.
string host_name = 5;
// Scheduler ip.
string ip = 6;
// Model create time.
google.protobuf.Timestamp created_at = 7;
// Model update time.
google.protobuf.Timestamp updated_at = 8;
}
// ListModelsRequest represents request of ListModels.
message ListModelsRequest {
// Scheduler id.
uint64 scheduler_id = 1;
}
// ListModelsResponse represents response of ListModels.
message ListModelsResponse {
// Model information.
repeated Model models = 1;
}
// GetModelRequest represents request of GetModel.
message GetModelRequest {
// Scheduler id.
uint64 scheduler_id = 1;
// Model id.
string model_id = 2;
}
// CreateModelRequest represents request of CreateModel.
message CreateModelRequest {
// Model id.
string model_id = 1;
// Model name.
string name = 2;
// Model version id.
string version_id = 3;
// Scheduler id.
uint64 scheduler_id = 4;
// Scheduler hostname.
string host_name = 5;
// Scheduler ip.
string ip = 6;
}
// UpdateModelRequest represents request of UpdateModel.
message UpdateModelRequest {
// Model id.
string model_id = 1;
// Model name.
string name = 2;
// Model version id.
string version_id = 3;
// Scheduler id.
uint64 scheduler_id = 4;
// Scheduler hostname.
string host_name = 5;
// Scheduler ip.
string ip = 6;
}
// DeleteModelRequest represents request of DeleteModel.
message DeleteModelRequest {
// Scheduler id.
uint64 scheduler_id = 1;
// Model id.
string model_id = 2;
}
// ModelVersion represents information of model version.
message ModelVersion {
// Model version id.
string version_id = 1;
// Model version data.
bytes data = 2;
// Model version mae.
double mae = 3;
// Model version mse.
double mse = 4;
// Model version rmse.
double rmse = 5;
// Model version r^2.
double r2 = 6;
// Model create time.
google.protobuf.Timestamp created_at = 7;
// Model update time.
google.protobuf.Timestamp updated_at = 8;
}
// ListModelVersionsRequest represents request of ListModelVersions.
message ListModelVersionsRequest {
// Scheduler id.
uint64 scheduler_id = 1;
// Model id.
string model_id = 2;
}
// ListModelVersionsResponse represents response of ListModelVersions.
message ListModelVersionsResponse {
// Model version information.
repeated ModelVersion model_versions = 1;
}
// GetModelVersionRequest represents request of GetModelVersion.
message GetModelVersionRequest {
// Scheduler id.
uint64 scheduler_id = 1;
// Model id.
string model_id = 2;
// Model version id.
string version_id = 3;
}
// CreateModelVersionRequest represents request of CreateModelVersion.
message CreateModelVersionRequest {
// Scheduler id.
uint64 scheduler_id = 1;
// Model id.
string model_id = 2;
// Model version data.
bytes data = 3;
// Model version mae.
double mae = 4;
// Model version mse.
double mse = 5;
// Model version rmse.
double rmse = 6;
// Model version r^2.
double r2 = 7;
}
// UpdateModelVersionRequest represents request of UpdateModelVersion.
message UpdateModelVersionRequest {
// Model version id.
string version_id = 1;
// Scheduler id.
uint64 scheduler_id = 2;
// Model id.
string model_id = 3;
// Model version data.
bytes data = 4;
// Model version mae.
double mae = 5;
// Model version mse.
double mse = 6;
// Model version rmse.
double rmse = 7;
// Model version r^2.
double r2 = 8;
}
// DeleteModelVersionRequest represents request of DeleteModelVersion.
message DeleteModelVersionRequest {
// Scheduler id.
uint64 scheduler_id = 1;
// Model id.
string model_id = 2;
// Model version id.
string version_id = 3;
}
// URLPriority represents config of url priority.
message URLPriority {
// URL regex.
string regex = 1;
// URL priority value.
common.Priority value = 2;
}
// ApplicationPriority represents config of application priority.
message ApplicationPriority {
// Priority value.
common.Priority value = 1;
// URL priority.
repeated URLPriority urls = 2;
}
// Application represents config of application.
message Application {
// Application id.
uint64 id = 1;
// Application name.
string name = 2;
// Application url.
string url = 3;
// Application biography.
string bio = 4;
// Application priority.
ApplicationPriority priority = 5;
}
// ListApplicationsRequest represents request of ListApplications.
message ListApplicationsRequest {
// Request source type.
SourceType source_type = 1;
// Source service hostname.
string host_name = 2;
// Source service ip.
string ip = 3;
}
// ListApplicationsResponse represents response of ListApplications.
message ListApplicationsResponse {
// Application configs.
repeated Application applications = 1;
}
// KeepAliveRequest represents request of KeepAlive.
message KeepAliveRequest {
// Request source type.
SourceType source_type = 1;
// Source service hostname.
string host_name = 2;
// ID of the cluster to which the source service belongs.
uint64 cluster_id = 3;
// Source service ip.
string ip = 4;
}
// Manager RPC Service.
service Manager {
// Get SeedPeer and SeedPeer cluster configuration.
rpc GetSeedPeer(GetSeedPeerRequest) returns(SeedPeer);
// Update SeedPeer configuration.
rpc UpdateSeedPeer(UpdateSeedPeerRequest) returns(SeedPeer);
// Get Scheduler and Scheduler cluster configuration.
rpc GetScheduler(GetSchedulerRequest)returns(Scheduler);
// Update scheduler configuration.
rpc UpdateScheduler(UpdateSchedulerRequest) returns(Scheduler);
// List active schedulers configuration.
rpc ListSchedulers(ListSchedulersRequest)returns(ListSchedulersResponse);
// Get ObjectStorage configuration.
rpc GetObjectStorage(GetObjectStorageRequest) returns(ObjectStorage);
// List buckets configuration.
rpc ListBuckets(ListBucketsRequest)returns(ListBucketsResponse);
// List models information.
rpc ListModels(ListModelsRequest)returns(ListModelsResponse);
// Get model information.
rpc GetModel(GetModelRequest)returns(Model);
// Create model information.
rpc CreateModel(CreateModelRequest)returns(Model);
// Update model information.
rpc UpdateModel(UpdateModelRequest)returns(Model);
// Delete model information.
rpc DeleteModel(DeleteModelRequest)returns(google.protobuf.Empty);
// List model versions information.
rpc ListModelVersions(ListModelVersionsRequest)returns(ListModelVersionsResponse);
// Get model version information.
rpc GetModelVersion(GetModelVersionRequest)returns(ModelVersion);
// Create model version information.
rpc CreateModelVersion(CreateModelVersionRequest)returns(ModelVersion);
// Update model version information.
rpc UpdateModelVersion(UpdateModelVersionRequest)returns(ModelVersion);
// Delete model version information.
rpc DeleteModelVersion(DeleteModelVersionRequest)returns(google.protobuf.Empty);
// List applications configuration.
rpc ListApplications(ListApplicationsRequest)returns(ListApplicationsResponse);
// KeepAlive with manager.
rpc KeepAlive(stream KeepAliveRequest)returns(google.protobuf.Empty);
}

335
proto/scheduler.proto Normal file
View File

@ -0,0 +1,335 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package scheduler;
import "common.proto";
import "errordetails.proto";
import "google/protobuf/empty.proto";
// RegisterPeerRequest represents peer registered request of AnnouncePeerRequest.
message RegisterPeerRequest {
// Task id.
string task_id = 1;
// Peer id.
string peer_id = 2;
// Task metadata.
common.Metadata metadata = 3;
}
// DownloadPeerStartedRequest represents peer download started request of AnnouncePeerRequest.
message DownloadPeerStartedRequest {
}
// DownloadPeerBackToSourceStartedRequest represents peer download back-to-source started request of AnnouncePeerRequest.
message DownloadPeerBackToSourceStartedRequest {
// Download back-to-source reason.
string reason = 1;
}
// DownloadPeerFinishedRequest represents peer download finished request of AnnouncePeerRequest.
message DownloadPeerFinishedRequest {
// Total content length.
int64 content_length = 1;
// Total piece count.
int64 piece_count = 2;
}
// DownloadPeerBackToSourceFinishedRequest represents peer download back-to-source finished request of AnnouncePeerRequest.
message DownloadPeerBackToSourceFinishedRequest {
// Total content length.
int64 content_length = 1;
// Total piece count.
int64 piece_count = 2;
}
// DownloadPieceFinishedRequest represents piece download finished request of AnnouncePeerRequest.
message DownloadPieceFinishedRequest {
// Piece info.
common.Piece piece = 1;
}
// DownloadPieceBackToSourceFinishedRequest represents piece download back-to-source finished request of AnnouncePeerRequest.
message DownloadPieceBackToSourceFinishedRequest {
// Piece info.
common.Piece piece = 1;
}
// AnnouncePeerRequest represents request of AnnouncePeer.
message AnnouncePeerRequest {
oneof request {
RegisterPeerRequest register_peer_request = 1;
DownloadPeerStartedRequest download_peer_started_request = 2;
DownloadPeerBackToSourceStartedRequest download_peer_back_to_source_started_request = 3;
DownloadPeerFinishedRequest download_peer_finished_request = 4;
DownloadPeerBackToSourceFinishedRequest download_peer_back_to_source_finished_request = 5;
DownloadPieceFinishedRequest download_piece_finished_request = 6;
DownloadPieceBackToSourceFinishedRequest download_piece_back_to_source_finished_request = 7;
}
oneof errordetails {
errordetails.DownloadPeerBackToSourceFailed download_peer_back_to_source_failed = 8;
errordetails.DownloadPieceBackToSourceFailed download_piece_back_to_source_failed = 9;
errordetails.SyncPiecesFailed sync_pieces_failed = 10;
errordetails.DownloadPieceFailed download_piece_failed = 11;
}
}
// TinyTaskResponse represents tiny task response of AnnouncePeerResponse.
message TinyTaskResponse {
bytes data = 1;
}
// SmallTaskResponse represents small task response of AnnouncePeerResponse.
message SmallTaskResponse {
// Piece info.
common.Piece piece = 1;
}
// NormalTaskResponse represents normal task response of AnnouncePeerResponse.
message NormalTaskResponse {
// Candidate parents.
repeated common.Peer candidate_parents = 1;
// Concurrent downloading count from main peer.
int32 parallel_count = 2;
}
// NeedBackToSourceResponse represents need back-to-source response of AnnouncePeerResponse.
message NeedBackToSourceResponse {
// Download back-to-source reason.
string reason = 1;
}
// AnnouncePeerResponse represents response of AnnouncePeer.
message AnnouncePeerResponse {
oneof response {
TinyTaskResponse tiny_task_response = 1;
SmallTaskResponse small_task_response = 2;
NormalTaskResponse normal_task_response = 3;
NeedBackToSourceResponse need_back_to_source_response = 4;
}
oneof errordetails {
errordetails.SchedulePeerForbidden schedule_peer_forbidden = 5;
errordetails.SchedulePeerFailed schedule_peer_failed = 6;
}
}
// StatPeerRequest represents request of StatPeer.
message StatPeerRequest {
// Task id.
string task_id = 1;
// Peer id.
string peer_id = 2;
}
// TODO exchange peer request definition.
// ExchangePeerRequest represents request of ExchangePeer.
message ExchangePeerRequest {
// Task id.
string task_id = 1;
// Peer id.
string peer_id = 2;
}
// TODO exchange peer response definition.
// ExchangePeerResponse represents response of ExchangePeer.
message ExchangePeerResponse {
}
// LeavePeerRequest represents request of LeavePeer.
message LeavePeerRequest {
// Peer id.
string id = 1;
}
// StatTaskRequest represents request of StatTask.
message StatTaskRequest {
// Task id.
// NOTE(review): named `id` here but `task_id` in dfdaemon.StatTaskRequest —
// consider aligning the two for consistency.
string id = 1;
}
// AnnounceHostRequest represents request of AnnounceHost.
message AnnounceHostRequest {
// Host id.
string id = 1;
// Host type.
uint32 type = 2;
// Hostname.
string hostname = 3;
// Host ip.
string ip = 4;
// Port of grpc service.
int32 port = 5;
// Port of download server.
int32 download_port = 6;
// Host OS.
string os = 7;
// Host platform.
string platform = 8;
// Host platform family.
string platform_family = 9;
// Host platform version.
string platform_version = 10;
// Host kernel version.
string kernel_version = 11;
// CPU Stat.
CPU cpu = 12;
// Memory Stat.
Memory memory = 13;
// Network Stat.
Network network = 14;
// Disk Stat.
Disk disk = 15;
// Build information.
Build build = 16;
}
// CPU Stat.
message CPU {
// Number of logical cores in the system.
uint32 logical_count = 1;
// Number of physical cores in the system
uint32 physical_count = 2;
// Percent calculates the percentage of cpu used.
double percent = 3;
// Calculates the percentage of cpu used by process.
double process_percent = 4;
// CPUTimes contains the amounts of time the CPU has spent performing different kinds of work.
CPUTimes times = 5;
}
// CPUTimes contains the amounts of time the CPU has spent performing different
// kinds of work. Time units are in seconds.
message CPUTimes {
// CPU time of user.
double user = 1;
// CPU time of system.
double system = 2;
// CPU time of idle.
double idle = 3;
// CPU time of nice.
double nice = 4;
// CPU time of iowait.
double iowait = 5;
// CPU time of irq.
double irq = 6;
// CPU time of softirq.
double softirq = 7;
// CPU time of steal.
double steal = 8;
// CPU time of guest.
double guest = 9;
// CPU time of guest nice.
double guest_nice = 10;
}
// Memory Stat.
message Memory {
// Total amount of RAM on this system.
uint64 total = 1;
// RAM available for programs to allocate.
uint64 available = 2;
// RAM used by programs.
uint64 used = 3;
// Percentage of RAM used by programs.
double used_percent = 4;
// Calculates the percentage of memory used by process.
double process_used_percent = 5;
// This is the kernel's notion of free memory.
uint64 free = 6;
}
// Network Stat.
message Network {
// Return count of tcp connections opened and status is ESTABLISHED.
uint32 tcp_connection_count = 1;
// Return count of upload tcp connections opened and status is ESTABLISHED.
uint32 upload_tcp_connection_count = 2;
// Security domain for network.
string security_domain = 3;
// Location path(area|country|province|city|...).
string location = 4;
// IDC where the peer host is located
string idc = 5;
// Network topology(switch|router|...).
string net_topology = 6;
}
// Disk Stat.
message Disk {
// Total amount of disk on the data path of dragonfly.
uint64 total = 1;
// Free amount of disk on the data path of dragonfly.
uint64 free = 2;
// Used amount of disk on the data path of dragonfly.
uint64 used = 3;
// Used percent of disk on the data path of dragonfly directory.
double used_percent = 4;
// Total amount of inodes on the data path of dragonfly directory.
uint64 inodes_total = 5;
// Used amount of inodes on the data path of dragonfly directory.
uint64 inodes_used = 6;
// Free amount of inodes on the data path of dragonfly directory.
uint64 inodes_free = 7;
// Used percent of inodes on the data path of dragonfly directory.
double inodes_used_percent = 8;
}
// Build information.
message Build {
// Git version.
string git_version = 1;
// Git commit.
string git_commit = 2;
// Golang version.
string go_version = 3;
// Build platform.
string platform = 4;
}
// LeaveHostRequest represents request of LeaveHost.
message LeaveHostRequest{
// Host id.
string id = 1;
}
// Scheduler RPC Service.
service Scheduler{
// AnnouncePeer announces peer to scheduler.
rpc AnnouncePeer(stream AnnouncePeerRequest) returns(stream AnnouncePeerResponse);
// Checks information of peer.
rpc StatPeer(StatPeerRequest)returns(common.Peer);
// LeavePeer releases peer in scheduler.
rpc LeavePeer(LeavePeerRequest)returns(google.protobuf.Empty);
// TODO exchange peer api definition.
// ExchangePeer exchanges peer information.
rpc ExchangePeer(ExchangePeerRequest)returns(ExchangePeerResponse);
// Checks information of task.
rpc StatTask(StatTaskRequest)returns(common.Task);
// AnnounceHost announces host to scheduler.
rpc AnnounceHost(AnnounceHostRequest)returns(google.protobuf.Empty);
// LeaveHost releases host in scheduler.
rpc LeaveHost(LeaveHostRequest)returns(google.protobuf.Empty);
}

51
proto/security.proto Normal file
View File

@ -0,0 +1,51 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package security;
import "google/protobuf/duration.proto";
// Refer: https://github.com/istio/api/blob/master/security/v1alpha1/ca.proto
// Istio defines similar api for signing certificate, but it's not applicable in Dragonfly.
// Certificate request type.
// Dragonfly supports peers authentication with Mutual TLS(mTLS)
// For mTLS, all peers need to request TLS certificates for communicating
// The server side may overwrite any requested certificate field based on its policies.
message CertificateRequest {
// ASN.1 DER form certificate request.
// The public key in the CSR is used to generate the certificate,
// and other fields in the generated certificate may be overwritten by the CA.
bytes csr = 1;
// Optional: requested certificate validity period.
google.protobuf.Duration validity_period = 2;
}
// Certificate response type.
message CertificateResponse {
// ASN.1 DER form certificate chain.
repeated bytes certificate_chain = 1;
}
// Service for managing certificates issued by the CA.
service CertificateService {
// Using provided CSR, returns a signed certificate.
rpc IssueCertificate(CertificateRequest)
returns (CertificateResponse) {
}
}

362
src/common.rs Normal file
View File

@ -0,0 +1,362 @@
/// Peer metadata.
///
/// NOTE(review): this file is prost-generated output (see the `::prost::Message`
/// derives) — fix comments in the proto source and regenerate rather than
/// editing by hand.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Peer {
/// Peer id.
#[prost(string, tag = "1")]
pub id: ::prost::alloc::string::String,
/// Pieces of peer.
#[prost(message, repeated, tag = "2")]
pub pieces: ::prost::alloc::vec::Vec<Piece>,
/// Task info.
#[prost(message, optional, tag = "3")]
pub task: ::core::option::Option<Task>,
/// Host info.
#[prost(message, optional, tag = "4")]
pub host: ::core::option::Option<Host>,
/// Peer state.
#[prost(string, tag = "5")]
pub state: ::prost::alloc::string::String,
/// Peer create time.
#[prost(message, optional, tag = "6")]
pub created_at: ::core::option::Option<::prost_types::Timestamp>,
/// Peer update time.
#[prost(message, optional, tag = "7")]
pub updated_at: ::core::option::Option<::prost_types::Timestamp>,
}
/// Task metadata.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Task {
/// Task id.
#[prost(string, tag = "1")]
pub id: ::prost::alloc::string::String,
/// Task type. NOTE(review): the generated comment read "Host type." — likely a
/// copy-paste error in common.proto; fix upstream and regenerate.
#[prost(string, tag = "2")]
pub r#type: ::prost::alloc::string::String,
/// Task size scope.
#[prost(enumeration = "SizeScope", tag = "3")]
pub size_scope: i32,
/// Pieces of task.
#[prost(message, repeated, tag = "4")]
pub pieces: ::prost::alloc::vec::Vec<Piece>,
/// Task state.
#[prost(string, tag = "5")]
pub state: ::prost::alloc::string::String,
/// Task metadata.
#[prost(message, optional, tag = "6")]
pub metadata: ::core::option::Option<Metadata>,
/// Task content length.
#[prost(int64, tag = "7")]
pub content_length: i64,
/// Task peer count.
#[prost(int32, tag = "8")]
pub peer_count: i32,
/// Task contains available peer.
#[prost(bool, tag = "9")]
pub has_available_peer: bool,
/// Task create time.
#[prost(message, optional, tag = "10")]
pub created_at: ::core::option::Option<::prost_types::Timestamp>,
/// Task update time.
#[prost(message, optional, tag = "11")]
pub updated_at: ::core::option::Option<::prost_types::Timestamp>,
}
/// Host metadata (prost-generated message).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Host {
    /// Host id.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// Host ipv4.
    #[prost(string, tag = "2")]
    pub ipv4: ::prost::alloc::string::String,
    /// Host ipv6.
    #[prost(string, tag = "3")]
    pub ipv6: ::prost::alloc::string::String,
    /// Peer hostname.
    #[prost(string, tag = "4")]
    pub hostname: ::prost::alloc::string::String,
    /// Port of grpc service.
    #[prost(int32, tag = "5")]
    pub port: i32,
    /// Port of download server.
    #[prost(int32, tag = "6")]
    pub download_port: i32,
    /// Security domain for network.
    #[prost(string, tag = "7")]
    pub security_domain: ::prost::alloc::string::String,
    /// Host location(area, country, province, city, etc.).
    /// NOTE(review): element ordering convention is not visible here — confirm.
    #[prost(string, repeated, tag = "8")]
    pub location: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// IDC where the peer host is located.
    #[prost(string, tag = "9")]
    pub idc: ::prost::alloc::string::String,
    /// Network topology(switch, router, etc.).
    #[prost(string, repeated, tag = "10")]
    pub net_topology: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Range represents download range (prost-generated message).
/// NOTE(review): whether `end` is inclusive or exclusive is not visible
/// here — confirm against the download implementation.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Range {
    /// Begin of range.
    #[prost(uint64, tag = "1")]
    pub begin: u64,
    /// End of range.
    #[prost(uint64, tag = "2")]
    pub end: u64,
}
/// Metadata represents metadata of task (prost-generated message).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Metadata {
    /// Download url.
    #[prost(string, tag = "1")]
    pub url: ::prost::alloc::string::String,
    /// Digest of the pieces digest, for example md5:xxx or sha256:yyy.
    #[prost(string, tag = "2")]
    pub digest: ::prost::alloc::string::String,
    /// Range is url range of request.
    #[prost(message, optional, tag = "3")]
    pub range: ::core::option::Option<Range>,
    /// Task type, stored as a raw `i32`; interpret via the `TaskType` enum.
    #[prost(enumeration = "TaskType", tag = "4")]
    pub r#type: i32,
    /// URL tag identifies different task for same url.
    #[prost(string, tag = "5")]
    pub tag: ::prost::alloc::string::String,
    /// Application of task.
    #[prost(string, tag = "6")]
    pub application: ::prost::alloc::string::String,
    /// Peer priority, stored as a raw `i32`; interpret via the `Priority` enum.
    #[prost(enumeration = "Priority", tag = "7")]
    pub priority: i32,
    /// Filter url used to generate task id.
    #[prost(string, repeated, tag = "8")]
    pub filters: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// Task request headers.
    #[prost(map = "string, string", tag = "9")]
    pub header: ::std::collections::HashMap<
        ::prost::alloc::string::String,
        ::prost::alloc::string::String,
    >,
    /// Task piece size in bytes.
    #[prost(int32, tag = "10")]
    pub piece_size: i32,
}
/// Piece represents information of piece (prost-generated message).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Piece {
    /// Piece number.
    #[prost(uint32, tag = "1")]
    pub number: u32,
    /// Parent peer id.
    #[prost(string, tag = "2")]
    pub parent_id: ::prost::alloc::string::String,
    /// Piece offset in bytes.
    #[prost(uint64, tag = "3")]
    pub offset: u64,
    /// Piece size in bytes.
    #[prost(uint64, tag = "4")]
    pub size: u64,
    /// Digest of the piece data, for example md5:xxx or sha256:yyy.
    #[prost(string, tag = "5")]
    pub digest: ::prost::alloc::string::String,
    /// Traffic type, stored as a raw `i32`; interpret via the `TrafficType` enum.
    #[prost(enumeration = "TrafficType", tag = "6")]
    pub traffic_type: i32,
    /// Downloading piece costs time.
    #[prost(message, optional, tag = "7")]
    pub cost: ::core::option::Option<::prost_types::Duration>,
    /// Piece create time.
    #[prost(message, optional, tag = "8")]
    pub created_at: ::core::option::Option<::prost_types::Timestamp>,
}
/// ExtendAttribute represents extend of attribution (prost-generated message).
/// Carries source-protocol response details, e.g. for HTTP back-to-source.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExtendAttribute {
    /// Task response header, eg: HTTP Response Header
    #[prost(map = "string, string", tag = "1")]
    pub header: ::std::collections::HashMap<
        ::prost::alloc::string::String,
        ::prost::alloc::string::String,
    >,
    /// Task response code, eg: HTTP Status Code
    #[prost(int32, tag = "2")]
    pub status_code: i32,
    /// Task response status, eg: HTTP Status
    #[prost(string, tag = "3")]
    pub status: ::prost::alloc::string::String,
}
/// SizeScope represents size scope of task.
/// Stored on the wire as `i32` (see `Task::size_scope`).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum SizeScope {
    /// size > one piece size.
    Normal = 0,
    /// 128 byte < size <= one piece size and be plain type.
    Small = 1,
    /// size <= 128 byte and be plain type.
    Tiny = 2,
    /// size == 0 byte and be plain type.
    Empty = 3,
}
impl SizeScope {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
SizeScope::Normal => "NORMAL",
SizeScope::Small => "SMALL",
SizeScope::Tiny => "TINY",
SizeScope::Empty => "EMPTY",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"NORMAL" => Some(Self::Normal),
"SMALL" => Some(Self::Small),
"TINY" => Some(Self::Tiny),
"EMPTY" => Some(Self::Empty),
_ => None,
}
}
}
/// TaskType represents type of task.
/// Stored on the wire as `i32` (see `Metadata::type`).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum TaskType {
    /// DFDAEMON is dfdeamon type of task,
    /// dfdeamon task is a normal p2p task.
    Dfdaemon = 0,
    /// DFCACHE is dfcache type of task,
    /// dfcache task is a cache task, and the task url is fake url.
    /// It can only be used for caching and cannot be downloaded back to source.
    Dfcache = 1,
    /// DFSTORE is dfstore type of task,
    /// dfstore task is a persistent task in backend.
    Dfstore = 2,
}
impl TaskType {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
TaskType::Dfdaemon => "DFDAEMON",
TaskType::Dfcache => "DFCACHE",
TaskType::Dfstore => "DFSTORE",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"DFDAEMON" => Some(Self::Dfdaemon),
"DFCACHE" => Some(Self::Dfcache),
"DFSTORE" => Some(Self::Dfstore),
_ => None,
}
}
}
/// TrafficType represents type of traffic.
/// Stored on the wire as `i32` (see `Piece::traffic_type`).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum TrafficType {
    /// BACK_TO_SOURCE is to download traffic from the source.
    BackToSource = 0,
    /// REMOTE_PEER is to download traffic from the remote peer.
    RemotePeer = 1,
    /// LOCAL_PEER is to download traffic from the local peer.
    LocalPeer = 2,
}
impl TrafficType {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
TrafficType::BackToSource => "BACK_TO_SOURCE",
TrafficType::RemotePeer => "REMOTE_PEER",
TrafficType::LocalPeer => "LOCAL_PEER",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"BACK_TO_SOURCE" => Some(Self::BackToSource),
"REMOTE_PEER" => Some(Self::RemotePeer),
"LOCAL_PEER" => Some(Self::LocalPeer),
_ => None,
}
}
}
/// Priority represents priority of application.
/// Stored on the wire as `i32` (see `Metadata::priority`).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum Priority {
    /// LEVEL0 has no special meaning for scheduler.
    Level0 = 0,
    /// LEVEL1 represents the download task is forbidden,
    /// and an error code is returned during the registration.
    Level1 = 1,
    /// LEVEL2 represents when the task is downloaded for the first time,
    /// allow peers to download from the other peers,
    /// but not back-to-source. When the task is not downloaded for
    /// the first time, it is scheduled normally.
    Level2 = 2,
    /// LEVEL3 represents when the task is downloaded for the first time,
    /// the normal peer is first to download back-to-source.
    /// When the task is not downloaded for the first time, it is scheduled normally.
    Level3 = 3,
    /// LEVEL4 represents when the task is downloaded for the first time,
    /// the weak peer is first triggered to back-to-source.
    /// When the task is not downloaded for the first time, it is scheduled normally.
    Level4 = 4,
    /// LEVEL5 represents when the task is downloaded for the first time,
    /// the strong peer is first triggered to back-to-source.
    /// When the task is not downloaded for the first time, it is scheduled normally.
    Level5 = 5,
    /// LEVEL6 represents when the task is downloaded for the first time,
    /// the super peer is first triggered to back-to-source.
    /// When the task is not downloaded for the first time, it is scheduled normally.
    Level6 = 6,
}
impl Priority {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Priority::Level0 => "LEVEL0",
Priority::Level1 => "LEVEL1",
Priority::Level2 => "LEVEL2",
Priority::Level3 => "LEVEL3",
Priority::Level4 => "LEVEL4",
Priority::Level5 => "LEVEL5",
Priority::Level6 => "LEVEL6",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"LEVEL0" => Some(Self::Level0),
"LEVEL1" => Some(Self::Level1),
"LEVEL2" => Some(Self::Level2),
"LEVEL3" => Some(Self::Level3),
"LEVEL4" => Some(Self::Level4),
"LEVEL5" => Some(Self::Level5),
"LEVEL6" => Some(Self::Level6),
_ => None,
}
}
}

717
src/dfdaemon.rs Normal file
View File

@ -0,0 +1,717 @@
/// InterestedAllPiecesRequest represents interested all pieces request of SyncPiecesRequest.
/// Empty marker message: the request itself carries all the information.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct InterestedAllPiecesRequest {}
/// InterestedPiecesRequest represents interested pieces request of SyncPiecesRequest.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct InterestedPiecesRequest {
    /// Interested piece numbers.
    #[prost(uint32, repeated, tag = "1")]
    pub piece_numbers: ::prost::alloc::vec::Vec<u32>,
}
/// StatMetadataRequest represents stat metadata request of SyncPiecesRequest.
/// Empty marker message: the request itself carries all the information.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StatMetadataRequest {}
/// SyncPiecesRequest represents request of AnnouncePeer.
/// Exactly one of the oneof variants in `request` is set.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SyncPiecesRequest {
    /// The actual request payload (oneof over tags 1-3).
    #[prost(oneof = "sync_pieces_request::Request", tags = "1, 2, 3")]
    pub request: ::core::option::Option<sync_pieces_request::Request>,
}
/// Nested message and enum types in `SyncPiecesRequest`.
pub mod sync_pieces_request {
    /// Oneof payload of `SyncPiecesRequest`.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Request {
        /// Request all pieces of the task.
        #[prost(message, tag = "1")]
        InterestedAllPiecesRequest(super::InterestedAllPiecesRequest),
        /// Request a specific set of piece numbers.
        #[prost(message, tag = "2")]
        InterestedPiecesRequest(super::InterestedPiecesRequest),
        /// Request the task metadata.
        #[prost(message, tag = "3")]
        StatMetadataRequest(super::StatMetadataRequest),
    }
}
/// InterestedPiecesResponse represents interested pieces response of SyncPiecesResponse.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct InterestedPiecesResponse {
    /// Interested pieces of task.
    #[prost(message, repeated, tag = "1")]
    pub pieces: ::prost::alloc::vec::Vec<super::common::Piece>,
}
/// StatMetadataResponse represents stat metadata response of SyncPiecesResponse.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StatMetadataResponse {
    /// Task metadata.
    #[prost(message, optional, tag = "1")]
    pub metadata: ::core::option::Option<super::common::Metadata>,
}
/// SyncPiecesResponse represents response of SyncPieces.
/// Either `response` (success) or `errordetails` (failure) is expected to
/// be populated; both are oneofs sharing this message's tag space.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SyncPiecesResponse {
    /// Successful response payload (oneof over tags 1-2).
    #[prost(oneof = "sync_pieces_response::Response", tags = "1, 2")]
    pub response: ::core::option::Option<sync_pieces_response::Response>,
    /// Error details payload (oneof over tags 3-4).
    #[prost(oneof = "sync_pieces_response::Errordetails", tags = "3, 4")]
    pub errordetails: ::core::option::Option<sync_pieces_response::Errordetails>,
}
/// Nested message and enum types in `SyncPiecesResponse`.
pub mod sync_pieces_response {
    /// Successful-response oneof of `SyncPiecesResponse`.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        /// Pieces answering an interested-pieces request.
        #[prost(message, tag = "1")]
        InterestedPiecesResponse(super::InterestedPiecesResponse),
        /// Metadata answering a stat-metadata request.
        #[prost(message, tag = "2")]
        StatMetadataResponse(super::StatMetadataResponse),
    }
    /// Error-details oneof of `SyncPiecesResponse`.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Errordetails {
        /// Piece syncing failed.
        #[prost(message, tag = "3")]
        SyncPiecesFailed(super::super::errordetails::SyncPiecesFailed),
        /// Metadata stat failed.
        #[prost(message, tag = "4")]
        StatMetadataFailed(super::super::errordetails::StatMetadataFailed),
    }
}
/// TriggerTaskRequest represents request of TriggerTask.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TriggerTaskRequest {
    /// Task id.
    #[prost(string, tag = "1")]
    pub task_id: ::prost::alloc::string::String,
    /// Task metadata.
    #[prost(message, optional, tag = "2")]
    pub metadata: ::core::option::Option<super::common::Metadata>,
}
/// StatTaskRequest represents request of StatTask.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StatTaskRequest {
    /// Task id.
    #[prost(string, tag = "1")]
    pub task_id: ::prost::alloc::string::String,
}
/// StatTaskResponse represents response of StatTask.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StatTaskResponse {
    /// Task info of the queried task.
    #[prost(message, optional, tag = "1")]
    pub task: ::core::option::Option<super::common::Task>,
}
/// ImportTaskRequest represents request of ImportTask.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImportTaskRequest {
    /// Task metadata.
    #[prost(message, optional, tag = "1")]
    pub metadata: ::core::option::Option<super::common::Metadata>,
    /// File path to be imported.
    #[prost(string, tag = "2")]
    pub path: ::prost::alloc::string::String,
}
/// ExportTaskRequest represents request of ExportTask.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExportTaskRequest {
    /// Task metadata.
    #[prost(message, optional, tag = "1")]
    pub metadata: ::core::option::Option<super::common::Metadata>,
    /// File path to be exported.
    #[prost(string, tag = "2")]
    pub path: ::prost::alloc::string::String,
    /// Download timeout.
    #[prost(message, optional, tag = "3")]
    pub timeout: ::core::option::Option<::prost_types::Duration>,
    /// Download rate limit in bytes per second.
    #[prost(double, tag = "4")]
    pub download_rate_limit: f64,
    /// User id. NOTE(review): presumably the unix uid to own the exported
    /// file — confirm against the dfdaemon implementation.
    #[prost(uint64, tag = "5")]
    pub uid: u64,
    /// Group id. NOTE(review): presumably the unix gid to own the exported
    /// file — confirm against the dfdaemon implementation.
    #[prost(uint64, tag = "6")]
    pub gid: u64,
}
/// DeleteTaskRequest represents request of DeleteTask.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteTaskRequest {
    /// Task id.
    #[prost(string, tag = "1")]
    pub task_id: ::prost::alloc::string::String,
}
/// Generated client implementations.
pub mod dfdaemon_client {
    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// Dfdaemon RPC Service.
    #[derive(Debug, Clone)]
    pub struct DfdaemonClient<T> {
        /// Underlying gRPC channel/service the RPCs are dispatched over.
        inner: tonic::client::Grpc<T>,
    }
    impl DfdaemonClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: std::convert::TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> DfdaemonClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::BoxBody>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + Send,
    {
        /// Wraps an already-connected service in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Wraps a service, overriding the URI origin for all requests.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wraps a service with an interceptor that runs on every request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> DfdaemonClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
            >>::Error: Into<StdError> + Send + Sync,
        {
            DfdaemonClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// SyncPieces syncs pieces from the other peers.
        /// Bidirectional streaming RPC: callers stream `SyncPiecesRequest`s and
        /// receive a stream of `SyncPiecesResponse`s.
        pub async fn sync_pieces(
            &mut self,
            request: impl tonic::IntoStreamingRequest<Message = super::SyncPiecesRequest>,
        ) -> Result<
            tonic::Response<tonic::codec::Streaming<super::SyncPiecesResponse>>,
            tonic::Status,
        > {
            // Wait for the channel to be ready before dispatching the call.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/dfdaemon.Dfdaemon/SyncPieces",
            );
            self.inner.streaming(request.into_streaming_request(), path, codec).await
        }
        /// TriggerTask triggers task back-to-source download.
        pub async fn trigger_task(
            &mut self,
            request: impl tonic::IntoRequest<super::TriggerTaskRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/dfdaemon.Dfdaemon/TriggerTask",
            );
            self.inner.unary(request.into_request(), path, codec).await
        }
        /// StatTask stats task information.
        pub async fn stat_task(
            &mut self,
            request: impl tonic::IntoRequest<super::StatTaskRequest>,
        ) -> Result<tonic::Response<super::super::common::Task>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/dfdaemon.Dfdaemon/StatTask",
            );
            self.inner.unary(request.into_request(), path, codec).await
        }
        /// ImportTask imports task to p2p network.
        pub async fn import_task(
            &mut self,
            request: impl tonic::IntoRequest<super::ImportTaskRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/dfdaemon.Dfdaemon/ImportTask",
            );
            self.inner.unary(request.into_request(), path, codec).await
        }
        /// ExportTask exports task from p2p network.
        pub async fn export_task(
            &mut self,
            request: impl tonic::IntoRequest<super::ExportTaskRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/dfdaemon.Dfdaemon/ExportTask",
            );
            self.inner.unary(request.into_request(), path, codec).await
        }
        /// DeleteTask deletes task from p2p network.
        pub async fn delete_task(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteTaskRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/dfdaemon.Dfdaemon/DeleteTask",
            );
            self.inner.unary(request.into_request(), path, codec).await
        }
    }
}
/// Generated server implementations.
pub mod dfdaemon_server {
    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
    use tonic::codegen::*;
    /// Generated trait containing gRPC methods that should be implemented for use with DfdaemonServer.
    #[async_trait]
    pub trait Dfdaemon: Send + Sync + 'static {
        /// Server streaming response type for the SyncPieces method.
        type SyncPiecesStream: futures_core::Stream<
                Item = Result<super::SyncPiecesResponse, tonic::Status>,
            >
            + Send
            + 'static;
        /// SyncPieces syncs pieces from the other peers.
        async fn sync_pieces(
            &self,
            request: tonic::Request<tonic::Streaming<super::SyncPiecesRequest>>,
        ) -> Result<tonic::Response<Self::SyncPiecesStream>, tonic::Status>;
        /// TriggerTask triggers task back-to-source download.
        async fn trigger_task(
            &self,
            request: tonic::Request<super::TriggerTaskRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status>;
        /// StatTask stats task information.
        async fn stat_task(
            &self,
            request: tonic::Request<super::StatTaskRequest>,
        ) -> Result<tonic::Response<super::super::common::Task>, tonic::Status>;
        /// ImportTask imports task to p2p network.
        async fn import_task(
            &self,
            request: tonic::Request<super::ImportTaskRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status>;
        /// ExportTask exports task from p2p network.
        async fn export_task(
            &self,
            request: tonic::Request<super::ExportTaskRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status>;
        /// DeleteTask deletes task from p2p network.
        async fn delete_task(
            &self,
            request: tonic::Request<super::DeleteTaskRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status>;
    }
    /// Dfdaemon RPC Service.
    #[derive(Debug)]
    pub struct DfdaemonServer<T: Dfdaemon> {
        /// Shared handle to the user-supplied service implementation.
        inner: _Inner<T>,
        accept_compression_encodings: EnabledCompressionEncodings,
        send_compression_encodings: EnabledCompressionEncodings,
    }
    /// Newtype over `Arc<T>` so the implementation can be cloned into
    /// per-request service shims.
    struct _Inner<T>(Arc<T>);
    impl<T: Dfdaemon> DfdaemonServer<T> {
        pub fn new(inner: T) -> Self {
            Self::from_arc(Arc::new(inner))
        }
        pub fn from_arc(inner: Arc<T>) -> Self {
            let inner = _Inner(inner);
            Self {
                inner,
                accept_compression_encodings: Default::default(),
                send_compression_encodings: Default::default(),
            }
        }
        /// Wraps the server with an interceptor that runs on every request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> InterceptedService<Self, F>
        where
            F: tonic::service::Interceptor,
        {
            InterceptedService::new(Self::new(inner), interceptor)
        }
        /// Enable decompressing requests with the given encoding.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.accept_compression_encodings.enable(encoding);
            self
        }
        /// Compress responses with the given encoding, if the client supports it.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.send_compression_encodings.enable(encoding);
            self
        }
    }
    impl<T, B> tonic::codegen::Service<http::Request<B>> for DfdaemonServer<T>
    where
        T: Dfdaemon,
        B: Body + Send + 'static,
        B::Error: Into<StdError> + Send + 'static,
    {
        type Response = http::Response<tonic::body::BoxBody>;
        type Error = std::convert::Infallible;
        type Future = BoxFuture<Self::Response, Self::Error>;
        fn poll_ready(
            &mut self,
            _cx: &mut Context<'_>,
        ) -> Poll<Result<(), Self::Error>> {
            // Always ready: per-request work happens in `call`.
            Poll::Ready(Ok(()))
        }
        // Routes each incoming HTTP request to the matching RPC handler by
        // its URI path; unknown paths get gRPC status 12 (UNIMPLEMENTED).
        fn call(&mut self, req: http::Request<B>) -> Self::Future {
            let inner = self.inner.clone();
            match req.uri().path() {
                "/dfdaemon.Dfdaemon/SyncPieces" => {
                    #[allow(non_camel_case_types)]
                    struct SyncPiecesSvc<T: Dfdaemon>(pub Arc<T>);
                    impl<
                        T: Dfdaemon,
                    > tonic::server::StreamingService<super::SyncPiecesRequest>
                    for SyncPiecesSvc<T> {
                        type Response = super::SyncPiecesResponse;
                        type ResponseStream = T::SyncPiecesStream;
                        type Future = BoxFuture<
                            tonic::Response<Self::ResponseStream>,
                            tonic::Status,
                        >;
                        fn call(
                            &mut self,
                            request: tonic::Request<
                                tonic::Streaming<super::SyncPiecesRequest>,
                            >,
                        ) -> Self::Future {
                            let inner = self.0.clone();
                            let fut = async move { (*inner).sync_pieces(request).await };
                            Box::pin(fut)
                        }
                    }
                    let accept_compression_encodings = self.accept_compression_encodings;
                    let send_compression_encodings = self.send_compression_encodings;
                    let inner = self.inner.clone();
                    let fut = async move {
                        let inner = inner.0;
                        let method = SyncPiecesSvc(inner);
                        let codec = tonic::codec::ProstCodec::default();
                        let mut grpc = tonic::server::Grpc::new(codec)
                            .apply_compression_config(
                                accept_compression_encodings,
                                send_compression_encodings,
                            );
                        let res = grpc.streaming(method, req).await;
                        Ok(res)
                    };
                    Box::pin(fut)
                }
                "/dfdaemon.Dfdaemon/TriggerTask" => {
                    #[allow(non_camel_case_types)]
                    struct TriggerTaskSvc<T: Dfdaemon>(pub Arc<T>);
                    impl<
                        T: Dfdaemon,
                    > tonic::server::UnaryService<super::TriggerTaskRequest>
                    for TriggerTaskSvc<T> {
                        type Response = ();
                        type Future = BoxFuture<
                            tonic::Response<Self::Response>,
                            tonic::Status,
                        >;
                        fn call(
                            &mut self,
                            request: tonic::Request<super::TriggerTaskRequest>,
                        ) -> Self::Future {
                            let inner = self.0.clone();
                            let fut = async move {
                                (*inner).trigger_task(request).await
                            };
                            Box::pin(fut)
                        }
                    }
                    let accept_compression_encodings = self.accept_compression_encodings;
                    let send_compression_encodings = self.send_compression_encodings;
                    let inner = self.inner.clone();
                    let fut = async move {
                        let inner = inner.0;
                        let method = TriggerTaskSvc(inner);
                        let codec = tonic::codec::ProstCodec::default();
                        let mut grpc = tonic::server::Grpc::new(codec)
                            .apply_compression_config(
                                accept_compression_encodings,
                                send_compression_encodings,
                            );
                        let res = grpc.unary(method, req).await;
                        Ok(res)
                    };
                    Box::pin(fut)
                }
                "/dfdaemon.Dfdaemon/StatTask" => {
                    #[allow(non_camel_case_types)]
                    struct StatTaskSvc<T: Dfdaemon>(pub Arc<T>);
                    impl<T: Dfdaemon> tonic::server::UnaryService<super::StatTaskRequest>
                    for StatTaskSvc<T> {
                        type Response = super::super::common::Task;
                        type Future = BoxFuture<
                            tonic::Response<Self::Response>,
                            tonic::Status,
                        >;
                        fn call(
                            &mut self,
                            request: tonic::Request<super::StatTaskRequest>,
                        ) -> Self::Future {
                            let inner = self.0.clone();
                            let fut = async move { (*inner).stat_task(request).await };
                            Box::pin(fut)
                        }
                    }
                    let accept_compression_encodings = self.accept_compression_encodings;
                    let send_compression_encodings = self.send_compression_encodings;
                    let inner = self.inner.clone();
                    let fut = async move {
                        let inner = inner.0;
                        let method = StatTaskSvc(inner);
                        let codec = tonic::codec::ProstCodec::default();
                        let mut grpc = tonic::server::Grpc::new(codec)
                            .apply_compression_config(
                                accept_compression_encodings,
                                send_compression_encodings,
                            );
                        let res = grpc.unary(method, req).await;
                        Ok(res)
                    };
                    Box::pin(fut)
                }
                "/dfdaemon.Dfdaemon/ImportTask" => {
                    #[allow(non_camel_case_types)]
                    struct ImportTaskSvc<T: Dfdaemon>(pub Arc<T>);
                    impl<
                        T: Dfdaemon,
                    > tonic::server::UnaryService<super::ImportTaskRequest>
                    for ImportTaskSvc<T> {
                        type Response = ();
                        type Future = BoxFuture<
                            tonic::Response<Self::Response>,
                            tonic::Status,
                        >;
                        fn call(
                            &mut self,
                            request: tonic::Request<super::ImportTaskRequest>,
                        ) -> Self::Future {
                            let inner = self.0.clone();
                            let fut = async move { (*inner).import_task(request).await };
                            Box::pin(fut)
                        }
                    }
                    let accept_compression_encodings = self.accept_compression_encodings;
                    let send_compression_encodings = self.send_compression_encodings;
                    let inner = self.inner.clone();
                    let fut = async move {
                        let inner = inner.0;
                        let method = ImportTaskSvc(inner);
                        let codec = tonic::codec::ProstCodec::default();
                        let mut grpc = tonic::server::Grpc::new(codec)
                            .apply_compression_config(
                                accept_compression_encodings,
                                send_compression_encodings,
                            );
                        let res = grpc.unary(method, req).await;
                        Ok(res)
                    };
                    Box::pin(fut)
                }
                "/dfdaemon.Dfdaemon/ExportTask" => {
                    #[allow(non_camel_case_types)]
                    struct ExportTaskSvc<T: Dfdaemon>(pub Arc<T>);
                    impl<
                        T: Dfdaemon,
                    > tonic::server::UnaryService<super::ExportTaskRequest>
                    for ExportTaskSvc<T> {
                        type Response = ();
                        type Future = BoxFuture<
                            tonic::Response<Self::Response>,
                            tonic::Status,
                        >;
                        fn call(
                            &mut self,
                            request: tonic::Request<super::ExportTaskRequest>,
                        ) -> Self::Future {
                            let inner = self.0.clone();
                            let fut = async move { (*inner).export_task(request).await };
                            Box::pin(fut)
                        }
                    }
                    let accept_compression_encodings = self.accept_compression_encodings;
                    let send_compression_encodings = self.send_compression_encodings;
                    let inner = self.inner.clone();
                    let fut = async move {
                        let inner = inner.0;
                        let method = ExportTaskSvc(inner);
                        let codec = tonic::codec::ProstCodec::default();
                        let mut grpc = tonic::server::Grpc::new(codec)
                            .apply_compression_config(
                                accept_compression_encodings,
                                send_compression_encodings,
                            );
                        let res = grpc.unary(method, req).await;
                        Ok(res)
                    };
                    Box::pin(fut)
                }
                "/dfdaemon.Dfdaemon/DeleteTask" => {
                    #[allow(non_camel_case_types)]
                    struct DeleteTaskSvc<T: Dfdaemon>(pub Arc<T>);
                    impl<
                        T: Dfdaemon,
                    > tonic::server::UnaryService<super::DeleteTaskRequest>
                    for DeleteTaskSvc<T> {
                        type Response = ();
                        type Future = BoxFuture<
                            tonic::Response<Self::Response>,
                            tonic::Status,
                        >;
                        fn call(
                            &mut self,
                            request: tonic::Request<super::DeleteTaskRequest>,
                        ) -> Self::Future {
                            let inner = self.0.clone();
                            let fut = async move { (*inner).delete_task(request).await };
                            Box::pin(fut)
                        }
                    }
                    let accept_compression_encodings = self.accept_compression_encodings;
                    let send_compression_encodings = self.send_compression_encodings;
                    let inner = self.inner.clone();
                    let fut = async move {
                        let inner = inner.0;
                        let method = DeleteTaskSvc(inner);
                        let codec = tonic::codec::ProstCodec::default();
                        let mut grpc = tonic::server::Grpc::new(codec)
                            .apply_compression_config(
                                accept_compression_encodings,
                                send_compression_encodings,
                            );
                        let res = grpc.unary(method, req).await;
                        Ok(res)
                    };
                    Box::pin(fut)
                }
                _ => {
                    // Unknown method: reply with gRPC status 12 (UNIMPLEMENTED)
                    // carried in headers over an HTTP 200, per the gRPC protocol.
                    Box::pin(async move {
                        Ok(
                            http::Response::builder()
                                .status(200)
                                .header("grpc-status", "12")
                                .header("content-type", "application/grpc")
                                .body(empty_body())
                                .unwrap(),
                        )
                    })
                }
            }
        }
    }
    impl<T: Dfdaemon> Clone for DfdaemonServer<T> {
        fn clone(&self) -> Self {
            let inner = self.inner.clone();
            Self {
                inner,
                accept_compression_encodings: self.accept_compression_encodings,
                send_compression_encodings: self.send_compression_encodings,
            }
        }
    }
    impl<T: Dfdaemon> Clone for _Inner<T> {
        fn clone(&self) -> Self {
            Self(self.0.clone())
        }
    }
    impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "{:?}", self.0)
        }
    }
    impl<T: Dfdaemon> tonic::server::NamedService for DfdaemonServer<T> {
        // Fully-qualified gRPC service name used for routing.
        const NAME: &'static str = "dfdaemon.Dfdaemon";
    }
}

83
src/errordetails.rs Normal file
View File

@ -0,0 +1,83 @@
/// DownloadPeerBackToSourceFailed is the error detail reported when a peer
/// fails to download back-to-source.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DownloadPeerBackToSourceFailed {
    /// Human-readable description of the error.
    #[prost(string, tag = "1")]
    pub description: ::prost::alloc::string::String,
}
/// DownloadPieceBackToSourceFailed is the error detail reported when a piece
/// fails to download back-to-source.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DownloadPieceBackToSourceFailed {
    /// Whether the source error is temporary and recoverable (i.e. a retry may succeed).
    #[prost(bool, tag = "1")]
    pub temporary: bool,
    /// Source response metadata, eg: HTTP Status Code, HTTP Status, HTTP Header
    #[prost(message, optional, tag = "2")]
    pub metadata: ::core::option::Option<super::common::ExtendAttribute>,
    /// The number of the piece that failed.
    #[prost(uint32, tag = "3")]
    pub piece_number: u32,
    /// Human-readable description of the error.
    #[prost(string, tag = "4")]
    pub description: ::prost::alloc::string::String,
}
/// DownloadPieceFailed is the error detail reported when a piece fails to
/// download from a parent peer.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DownloadPieceFailed {
    /// Whether the parent peer error is temporary and recoverable (i.e. a retry may succeed).
    #[prost(bool, tag = "1")]
    pub temporary: bool,
    /// Source response metadata, eg: HTTP Status Code, HTTP Status, HTTP Header
    /// NOTE(review): doc says "Source" but this failure concerns a parent peer — confirm against the proto.
    #[prost(message, optional, tag = "2")]
    pub metadata: ::core::option::Option<super::common::ExtendAttribute>,
    /// Parent peer id.
    #[prost(string, tag = "3")]
    pub parent_id: ::prost::alloc::string::String,
    /// The number of the piece that failed.
    #[prost(uint32, tag = "4")]
    pub piece_number: u32,
    /// Human-readable description of the error.
    #[prost(string, tag = "5")]
    pub description: ::prost::alloc::string::String,
}
/// SchedulePeerForbidden is the error detail reported when scheduling a peer
/// is forbidden.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SchedulePeerForbidden {
    /// Human-readable description of the error.
    #[prost(string, tag = "1")]
    pub description: ::prost::alloc::string::String,
}
/// SchedulePeerFailed is the error detail reported when scheduling a peer
/// fails.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SchedulePeerFailed {
    /// Human-readable description of the error.
    #[prost(string, tag = "1")]
    pub description: ::prost::alloc::string::String,
}
/// SyncPiecesFailed is the error detail reported when syncing pieces from a
/// parent peer fails.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SyncPiecesFailed {
    /// Whether the parent peer error is temporary and recoverable (i.e. a retry may succeed).
    #[prost(bool, tag = "1")]
    pub temporary: bool,
    /// Parent peer id.
    #[prost(string, tag = "2")]
    pub parent_id: ::prost::alloc::string::String,
    /// Human-readable description of the error.
    #[prost(string, tag = "3")]
    pub description: ::prost::alloc::string::String,
}
/// StatMetadataFailed is the error detail reported when a stat-metadata
/// operation fails.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StatMetadataFailed {
    /// Human-readable description of the error.
    #[prost(string, tag = "1")]
    pub description: ::prost::alloc::string::String,
}

6
src/lib.rs Normal file
View File

@ -0,0 +1,6 @@
/// Generated common message types shared across the Dragonfly services.
pub mod common;
/// Generated gRPC bindings for the dfdaemon peer service.
pub mod dfdaemon;
/// Generated structured error-detail message types.
pub mod errordetails;
/// Generated gRPC bindings for the manager service.
pub mod manager;
/// Generated gRPC bindings for the scheduler service.
pub mod scheduler;
/// Generated gRPC bindings for certificate issuance (mTLS support).
pub mod security;

2131
src/manager.rs Normal file

File diff suppressed because it is too large Load Diff

1077
src/scheduler.rs Normal file

File diff suppressed because it is too large Load Diff

269
src/security.rs Normal file
View File

@ -0,0 +1,269 @@
/// Certificate request type.
/// Dragonfly supports peers authentication with Mutual TLS (mTLS).
/// For mTLS, all peers need to request TLS certificates for communicating.
/// The server side may overwrite any requested certificate field based on its policies.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CertificateRequest {
    /// ASN.1 DER form certificate signing request (CSR).
    /// The public key in the CSR is used to generate the certificate,
    /// and other fields in the generated certificate may be overwritten by the CA.
    #[prost(bytes = "vec", tag = "1")]
    pub csr: ::prost::alloc::vec::Vec<u8>,
    /// Optional: requested certificate validity period.
    #[prost(message, optional, tag = "2")]
    pub validity_period: ::core::option::Option<::prost_types::Duration>,
}
/// Certificate response type.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CertificateResponse {
    /// ASN.1 DER form certificate chain; each entry is one DER-encoded certificate.
    #[prost(bytes = "vec", repeated, tag = "1")]
    pub certificate_chain: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
}
/// Generated client implementations.
/// NOTE(review): tonic/prost-generated code — manual edits here will be lost
/// when the bindings are regenerated from the .proto files.
pub mod certificate_service_client {
    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// Service for managing certificates issued by the CA.
    #[derive(Debug, Clone)]
    pub struct CertificateServiceClient<T> {
        // Generic over the transport so the client works with channels,
        // intercepted services, or any `GrpcService` implementation.
        inner: tonic::client::Grpc<T>,
    }
    impl CertificateServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: std::convert::TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> CertificateServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::BoxBody>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + Send,
    {
        /// Wraps an existing transport/service in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but directs requests at the given origin URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wraps the transport so `interceptor` runs on every outgoing request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> CertificateServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
            >>::Error: Into<StdError> + Send + Sync,
        {
            CertificateServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Using provided CSR, returns a signed certificate.
        pub async fn issue_certificate(
            &mut self,
            request: impl tonic::IntoRequest<super::CertificateRequest>,
        ) -> Result<tonic::Response<super::CertificateResponse>, tonic::Status> {
            // Wait for the underlying transport to become ready; transport
            // errors surface as `Unknown` gRPC status to the caller.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::new(
                        tonic::Code::Unknown,
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/security.CertificateService/IssueCertificate",
            );
            self.inner.unary(request.into_request(), path, codec).await
        }
    }
}
/// Generated server implementations.
/// NOTE(review): tonic/prost-generated code — manual edits here will be lost
/// when the bindings are regenerated from the .proto files.
pub mod certificate_service_server {
    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
    use tonic::codegen::*;
    /// Generated trait containing gRPC methods that should be implemented for use with CertificateServiceServer.
    #[async_trait]
    pub trait CertificateService: Send + Sync + 'static {
        /// Using provided CSR, returns a signed certificate.
        async fn issue_certificate(
            &self,
            request: tonic::Request<super::CertificateRequest>,
        ) -> Result<tonic::Response<super::CertificateResponse>, tonic::Status>;
    }
    /// Service for managing certificates issued by the CA.
    #[derive(Debug)]
    pub struct CertificateServiceServer<T: CertificateService> {
        inner: _Inner<T>,
        accept_compression_encodings: EnabledCompressionEncodings,
        send_compression_encodings: EnabledCompressionEncodings,
    }
    // Newtype around `Arc<T>` so the service instance can be shared cheaply
    // across server clones.
    struct _Inner<T>(Arc<T>);
    impl<T: CertificateService> CertificateServiceServer<T> {
        /// Wraps a service implementation in a new server with default settings.
        pub fn new(inner: T) -> Self {
            Self::from_arc(Arc::new(inner))
        }
        /// Builds a server from an already-shared service instance.
        pub fn from_arc(inner: Arc<T>) -> Self {
            let inner = _Inner(inner);
            Self {
                inner,
                accept_compression_encodings: Default::default(),
                send_compression_encodings: Default::default(),
            }
        }
        /// Wraps the server so `interceptor` runs on every incoming request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> InterceptedService<Self, F>
        where
            F: tonic::service::Interceptor,
        {
            InterceptedService::new(Self::new(inner), interceptor)
        }
        /// Enable decompressing requests with the given encoding.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.accept_compression_encodings.enable(encoding);
            self
        }
        /// Compress responses with the given encoding, if the client supports it.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.send_compression_encodings.enable(encoding);
            self
        }
    }
    impl<T, B> tonic::codegen::Service<http::Request<B>> for CertificateServiceServer<T>
    where
        T: CertificateService,
        B: Body + Send + 'static,
        B::Error: Into<StdError> + Send + 'static,
    {
        type Response = http::Response<tonic::body::BoxBody>;
        type Error = std::convert::Infallible;
        type Future = BoxFuture<Self::Response, Self::Error>;
        // The server is always ready to accept a request.
        fn poll_ready(
            &mut self,
            _cx: &mut Context<'_>,
        ) -> Poll<Result<(), Self::Error>> {
            Poll::Ready(Ok(()))
        }
        // Routes an incoming HTTP/2 request to the matching gRPC method by
        // its URI path.
        fn call(&mut self, req: http::Request<B>) -> Self::Future {
            let inner = self.inner.clone();
            match req.uri().path() {
                "/security.CertificateService/IssueCertificate" => {
                    // Per-method adapter that forwards the unary request to the
                    // user's `CertificateService::issue_certificate`.
                    #[allow(non_camel_case_types)]
                    struct IssueCertificateSvc<T: CertificateService>(pub Arc<T>);
                    impl<
                        T: CertificateService,
                    > tonic::server::UnaryService<super::CertificateRequest>
                    for IssueCertificateSvc<T> {
                        type Response = super::CertificateResponse;
                        type Future = BoxFuture<
                            tonic::Response<Self::Response>,
                            tonic::Status,
                        >;
                        fn call(
                            &mut self,
                            request: tonic::Request<super::CertificateRequest>,
                        ) -> Self::Future {
                            let inner = self.0.clone();
                            let fut = async move {
                                (*inner).issue_certificate(request).await
                            };
                            Box::pin(fut)
                        }
                    }
                    let accept_compression_encodings = self.accept_compression_encodings;
                    let send_compression_encodings = self.send_compression_encodings;
                    let inner = self.inner.clone();
                    let fut = async move {
                        let inner = inner.0;
                        let method = IssueCertificateSvc(inner);
                        let codec = tonic::codec::ProstCodec::default();
                        let mut grpc = tonic::server::Grpc::new(codec)
                            .apply_compression_config(
                                accept_compression_encodings,
                                send_compression_encodings,
                            );
                        let res = grpc.unary(method, req).await;
                        Ok(res)
                    };
                    Box::pin(fut)
                }
                // Unknown path: reply with gRPC status 12 (UNIMPLEMENTED)
                // and an empty body.
                _ => {
                    Box::pin(async move {
                        Ok(
                            http::Response::builder()
                                .status(200)
                                .header("grpc-status", "12")
                                .header("content-type", "application/grpc")
                                .body(empty_body())
                                .unwrap(),
                        )
                    })
                }
            }
        }
    }
    impl<T: CertificateService> Clone for CertificateServiceServer<T> {
        // Clones share the same service instance and compression settings.
        fn clone(&self) -> Self {
            let inner = self.inner.clone();
            Self {
                inner,
                accept_compression_encodings: self.accept_compression_encodings,
                send_compression_encodings: self.send_compression_encodings,
            }
        }
    }
    impl<T: CertificateService> Clone for _Inner<T> {
        // Cheap clone: bumps the refcount of the shared `Arc`.
        fn clone(&self) -> Self {
            Self(self.0.clone())
        }
    }
    impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {
        // Formats transparently via the wrapped service's `Debug` impl.
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "{:?}", self.0)
        }
    }
    /// Exposes the fully-qualified protobuf service name tonic uses to route
    /// incoming requests to this server.
    impl<T: CertificateService> tonic::server::NamedService
    for CertificateServiceServer<T> {
        // Must match the `package.Service` name in the .proto definition.
        const NAME: &'static str = "security.CertificateService";
    }
}