Compare commits


No commits in common. "main" and "v1.0.3" have entirely different histories.
main ... v1.0.3

84 changed files with 4356 additions and 59212 deletions


@ -1,25 +0,0 @@
---
name: Bug Report
about: Report a bug for dragonfly api definition
labels: bug
---
### Bug report:
<!-- Please describe what is actually happening -->
### Expected behavior:
<!-- Please describe what you expect to happen -->
### How to reproduce it:
<!-- How can a maintainer reproduce this issue (please be detailed) -->
### Environment:
- Dragonfly version:
- OS:
- Kernel (e.g. `uname -a`):
- Others:


@ -1,5 +1,5 @@
---
name: Custom
about: Custom issue template for dragonfly api definition
labels: kind/custom
---


@ -1,8 +1,7 @@
---
name: Feature Request
about: Request a new feature for dragonfly api definition
labels: enhancement
labels: kind/feature
---
### Feature request:


@ -4,11 +4,3 @@ updates:
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "cargo"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"


@ -22,15 +22,15 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@v3
uses: github/codeql-action/autobuild@v2
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
uses: github/codeql-action/analyze@v2


@ -6,58 +6,22 @@ on:
pull_request:
branches: [ main ]
env:
GO_VERSION: 1.18
jobs:
golang-lint:
name: Golang Lint
lint:
name: Lint
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: '0'
uses: actions/checkout@v2
- name: Golangci lint
uses: golangci/golangci-lint-action@v8
uses: golangci/golangci-lint-action@v2
with:
version: v2.1.6
rust-lint:
name: Rust Lint
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Install protobuf-compiler
run: sudo apt-get install protobuf-compiler
- name: Install toolchain
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- name: Run cargo check
uses: actions-rs/cargo@v1
with:
command: check
- name: Run cargo clippy
uses: actions-rs/cargo@v1
with:
command: clippy
args: -- -D warnings
markdown-lint:
name: Markdown Lint
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout code
uses: actions/checkout@v4
version: v1.46.2
- name: Markdown lint
uses: docker://avtodev/markdown-lint:v1

.gitignore (vendored): 10 changed lines

@ -62,13 +62,3 @@ Temporary Items
.apdisk
artifacts
# Generated by Cargo
# will have compiled files and executables
/target/
# These are backup files generated by rustfmt
**/*.rs.bk
# Added by cargo
/target


@ -1,53 +1,37 @@
version: "2"
run:
deadline: 3m
modules-download-mode: readonly
linters:
default: none
enable:
- errcheck
- goconst
- gocyclo
- govet
- misspell
- staticcheck
settings:
gocyclo:
min-complexity: 60
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
rules:
- linters:
- staticcheck
text: 'SA1019:'
paths:
- third_party$
- builtin$
- examples$
linters-settings:
gocyclo:
min-complexity: 60
gci:
sections:
- standard
- default
issues:
new: true
formatters:
exclude-rules:
- linters:
- staticcheck
text: "SA1019:"
linters:
disable-all: true
enable:
- gci
- gofmt
settings:
gci:
sections:
- standard
- default
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$
- golint
- misspell
- govet
- goconst
- deadcode
- gocyclo
- staticcheck
- errcheck
output:
formats:
text:
path: stdout
print-linter-name: true
print-issued-lines: true
format: colored-line-number
print-issued-lines: true
print-linter-name: true

Cargo.lock (generated): 1314 changed lines
File diff suppressed because it is too large.


@ -1,22 +0,0 @@
[package]
name = "dragonfly-api"
version = "2.1.47"
authors = ["Gaius <gaius.qi@gmail.com>"]
edition = "2021"
license = "Apache-2.0"
homepage = "https://d7y.io"
repository = "https://github.com/dragonflyoss/api"
description = "Canonical location of the Dragonfly API definition"
readme = "README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tonic = "0.12.3"
prost = "0.13.5"
prost-types = "0.14.1"
tokio = { version = "1.46.1", features = ["rt-multi-thread", "macros"] }
serde = { version = "1.0", features = ["derive"] }
prost-wkt-types = "0.6"
[build-dependencies]
tonic-build = "0.12.1"


@ -17,7 +17,7 @@ all: help
# Run code lint
lint: markdownlint
@echo "Begin to golangci-lint."
@./hack/golanglint.sh
@golangci-lint run
.PHONY: lint
# Run markdown lint
@ -32,20 +32,9 @@ generate: protoc
.PHONY: generate
# Generate grpc protos
protoc: go-protoc rust-protoc
.PHONY: protoc
# Generate grpc protos of golang
go-protoc:
@echo "Begin to generate grpc protos of golang."
protoc:
@./hack/protoc.sh
.PHONY: go-protoc
# Generate grpc protos of rust
rust-protoc:
@echo "Begin to generate grpc protos of rust."
@cargo build --release
.PHONY: rust-protoc
.PHONY: protoc
# Clear compiled files
clean:
@ -58,6 +47,4 @@ help:
@echo "make markdownlint run markdown lint"
@echo "make generate run go generate"
@echo "make protoc generate grpc protos"
@echo "make go-protoc generate grpc protos of golang"
@echo "make rust-protoc generate grpc protos of rust"
@echo "make clean clean"


@ -1,6 +1,6 @@
# api
[![Discussions](https://img.shields.io/badge/discussions-on%20github-blue?style=flat-square)](https://github.com/dragonflyoss/dragonfly/discussions)
[![Discussions](https://img.shields.io/badge/discussions-on%20github-blue?style=flat-square)](https://github.com/dragonflyoss/Dragonfly2/discussions)
[![LICENSE](https://img.shields.io/github/license/dragonflyoss/api.svg?style=flat-square)](https://github.com/dragonflyoss/api/blob/main/LICENSE)
Canonical location of the Dragonfly API definition.
@ -9,24 +9,26 @@ The project includes the api definitions of dragonfly services and the mocks of
## Note to developers
If developers need to change dragonfly api definition,
please contact [dragonfly maintainers](https://github.com/dragonflyoss/dragonfly/blob/main/MAINTAINERS.md).
please contact [dragonfly maintainers](https://github.com/dragonflyoss/Dragonfly2/blob/main/MAINTAINERS.md).
## Community
Join the conversation and help the community.
Developers are welcome to participate actively in community discussions
and to contribute code to Dragonfly. We keep an eye on the issues discussed
in the community and respond quickly.
- **Slack Channel**: [#dragonfly](https://cloud-native.slack.com/messages/dragonfly/) on [CNCF Slack](https://slack.cncf.io/)
- **Github Discussions**: [Dragonfly Discussion Forum](https://github.com/dragonflyoss/dragonfly/discussions)
- **Discussion Group**: <dragonfly-discuss@googlegroups.com>
- **Developer Group**: <dragonfly-developers@googlegroups.com>
- **Maintainer Group**: <dragonfly-maintainers@googlegroups.com>
- **Github Discussions**: [Dragonfly Discussion Forum][discussion]
- **Twitter**: [@dragonfly_oss](https://twitter.com/dragonfly_oss)
- **DingTalk**: [22880028764](https://qr.dingtalk.com/action/joingroup?code=v1,k1,pkV9IbsSyDusFQdByPSK3HfCG61ZCLeb8b/lpQ3uUqI=&_dt_no_comment=1&origin=11)
- **DingTalk**: [23304666](https://h5.dingtalk.com/circle/healthCheckin.html?dtaction=os&corpId=ding0ba5f94d8290b9f7f235fbadcd45de0c&f4462ef5-a7d=9bec3e94-b34&cbdbhh=qwertyuiop)
## Contributing
You should check out our
[CONTRIBUTING](https://github.com/dragonflyoss/dragonfly/blob/main/CONTRIBUTING.md) and develop the project together.
[CONTRIBUTING](https://github.com/dragonflyoss/Dragonfly2/blob/main/CONTRIBUTING.md) and develop the project together.
## Code of Conduct
Please refer to our [Code of Conduct](https://github.com/dragonflyoss/dragonfly/blob/main/CODE_OF_CONDUCT.md).
Please refer to our [Code of Conduct](https://github.com/dragonflyoss/Dragonfly2/blob/main/CODE_OF_CONDUCT.md).


@ -1,24 +0,0 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
tonic_build::configure()
.file_descriptor_set_path("src/descriptor.bin")
.protoc_arg("--experimental_allow_proto3_optional")
.type_attribute(".", "#[derive(serde::Serialize, serde::Deserialize)]")
.type_attribute(
"scheduler.v2.AnnouncePeerRequest.request",
"#[allow(clippy::large_enum_variant)]",
)
.extern_path(".google.protobuf.Timestamp", "::prost_wkt_types::Timestamp")
.extern_path(".google.protobuf.Duration", "::prost_wkt_types::Duration")
.out_dir("src")
.compile(
&[
"proto/common.proto",
"proto/errordetails.proto",
"proto/dfdaemon.proto",
"proto/manager.proto",
"proto/scheduler.proto",
],
&["proto/"],
)?;
Ok(())
}

go.mod: 21 changed lines

@ -1,17 +1,18 @@
module d7y.io/api/v2
module d7y.io/api
go 1.23.8
go 1.18
require (
github.com/envoyproxy/protoc-gen-validate v1.2.1
go.uber.org/mock v0.5.2
google.golang.org/grpc v1.73.0
google.golang.org/protobuf v1.36.6
github.com/envoyproxy/protoc-gen-validate v0.6.7
github.com/golang/mock v1.6.0
google.golang.org/grpc v1.48.0
google.golang.org/protobuf v1.28.1
)
require (
golang.org/x/net v0.38.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/text v0.23.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect
github.com/golang/protobuf v1.5.2 // indirect
golang.org/x/net v0.0.0-20220728211354-c7608f3a8462 // indirect
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect
golang.org/x/text v0.3.7 // indirect
google.golang.org/genproto v0.0.0-20220728213248-dd149ef739b9 // indirect
)
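The module path differs between the two sides (d7y.io/api/v2 on main, d7y.io/api on v1.0.3), so consumers import the generated packages from different locations depending on the release line. Below is a minimal, hypothetical consumer sketch; the task id value is a placeholder.

```go
package main

import (
	// main branch (module d7y.io/api/v2). On v1.0.3 the same package lives at
	// "d7y.io/api/pkg/apis/cdnsystem/v1" because that module has no /v2 suffix.
	cdnsystem "d7y.io/api/v2/pkg/apis/cdnsystem/v1"
)

func main() {
	// The generated types are identical on both release lines; only the import path moves.
	req := &cdnsystem.SeedRequest{TaskId: "task-1"} // the task id value is a placeholder
	println(req.GetTaskId())
}
```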

go.sum: 211 changed lines

@ -1,38 +1,173 @@
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.6.7 h1:qcZcULcd/abmQg6dwigimCNEyi4gg31M/xaciQlDml8=
github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c=
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220728211354-c7608f3a8462 h1:UreQrH7DbFXSi9ZFox6FNT3WBooWmdANpU+IfkT1T4I=
golang.org/x/net v0.0.0-20220728211354-c7608f3a8462/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912 h1:uCLL3g5wH2xjxVREVuAbP9JM5PPKjRbXKRa6IBjkzmU=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20220728213248-dd149ef739b9 h1:d3fKQZK+1rWQMg3xLKQbPMirUCo29I/NRdI2WarSzTg=
google.golang.org/genproto v0.0.0-20220728213248-dd149ef739b9/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=


@ -1,11 +0,0 @@
#!/bin/bash
LINT_DIR=/data
GOLANGCI_IMAGE=golangci/golangci-lint:v1.54
docker run --rm \
-w "${LINT_DIR}" \
-v "$(pwd):${LINT_DIR}:ro" \
${GOLANGCI_IMAGE} \
golangci-lint \
run -v


@ -1,11 +1,10 @@
#!/bin/bash
PROTOC_ALL_IMAGE=${PROTOC_ALL_IMAGE:-"namely/protoc-all:1.51_2"}
PROTOC_ALL_IMAGE=${PROTOC_ALL_IMAGE:-"namely/protoc-all:1.47_2"}
PROTO_PATH=pkg/apis
LANGUAGE=go
proto_modules="common/v1 common/v2 cdnsystem/v1 dfdaemon/v1 dfdaemon/v2
errordetails/v1 errordetails/v2 manager/v1 manager/v2 scheduler/v1 scheduler/v2"
proto_modules="common/v1 cdnsystem/v1 dfdaemon/v1 errordetails/v1 manager/v1 scheduler/v1"
echo "generate protos..."
@ -21,3 +20,4 @@ for module in ${proto_modules}; do
echo "generate protos ${module} failed"
fi
done


@ -15,15 +15,19 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.21.6
// protoc-gen-go v1.28.0
// protoc v3.19.4
// source: pkg/apis/cdnsystem/v1/cdnsystem.proto
package cdnsystem
package v1
import (
v1 "d7y.io/api/v2/pkg/apis/common/v1"
context "context"
v1 "d7y.io/api/pkg/apis/common/v1"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
@ -107,13 +111,10 @@ type PieceSeed struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// reuse already downloaded peer
Reuse bool `protobuf:"varint,1,opt,name=reuse,proto3" json:"reuse,omitempty"`
// peer id for cdn node, need suffix with _CDN
PeerId string `protobuf:"bytes,2,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
// cdn host id
HostId string `protobuf:"bytes,3,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"`
// piece info
HostId string `protobuf:"bytes,3,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"`
PieceInfo *v1.PieceInfo `protobuf:"bytes,4,opt,name=piece_info,json=pieceInfo,proto3" json:"piece_info,omitempty"`
// whether or not all seeds are downloaded
Done bool `protobuf:"varint,5,opt,name=done,proto3" json:"done,omitempty"`
@ -161,13 +162,6 @@ func (*PieceSeed) Descriptor() ([]byte, []int) {
return file_pkg_apis_cdnsystem_v1_cdnsystem_proto_rawDescGZIP(), []int{1}
}
func (x *PieceSeed) GetReuse() bool {
if x != nil {
return x.Reuse
}
return false
}
func (x *PieceSeed) GetPeerId() string {
if x != nil {
return x.PeerId
@ -248,47 +242,45 @@ var file_pkg_apis_cdnsystem_v1_cdnsystem_proto_rawDesc = []byte{
0x03, 0x88, 0x01, 0x01, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x2a, 0x0a, 0x08, 0x75, 0x72, 0x6c,
0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x6f,
0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x55, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x07, 0x75, 0x72,
0x6c, 0x4d, 0x65, 0x74, 0x61, 0x22, 0xfc, 0x02, 0x0a, 0x09, 0x50, 0x69, 0x65, 0x63, 0x65, 0x53,
0x65, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x75, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x75, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x07, 0x70, 0x65, 0x65,
0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72,
0x02, 0x10, 0x01, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x68,
0x6f, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42,
0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x68, 0x6f, 0x73, 0x74, 0x49, 0x64, 0x12, 0x30, 0x0a,
0x0a, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65,
0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x70, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64,
0x6f, 0x6e, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c,
0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e,
0x74, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f,
0x74, 0x61, 0x6c, 0x5f, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x69, 0x65, 0x63,
0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f,
0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x65, 0x67, 0x69,
0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d,
0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65,
0x12, 0x42, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69,
0x62, 0x75, 0x74, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6d,
0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62,
0x75, 0x74, 0x65, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69,
0x62, 0x75, 0x74, 0x65, 0x32, 0xcc, 0x01, 0x0a, 0x06, 0x53, 0x65, 0x65, 0x64, 0x65, 0x72, 0x12,
0x3d, 0x0a, 0x0b, 0x4f, 0x62, 0x74, 0x61, 0x69, 0x6e, 0x53, 0x65, 0x65, 0x64, 0x73, 0x12, 0x16,
0x2e, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x53, 0x65, 0x65, 0x64, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74,
0x65, 0x6d, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x53, 0x65, 0x65, 0x64, 0x30, 0x01, 0x12, 0x3e,
0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12,
0x18, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54, 0x61,
0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x6d, 0x6d,
0x6f, 0x6e, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x43,
0x0a, 0x0e, 0x53, 0x79, 0x6e, 0x63, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73,
0x12, 0x18, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54,
0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x6d,
0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x28,
0x01, 0x30, 0x01, 0x42, 0x2f, 0x5a, 0x2d, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70,
0x69, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x64,
0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x63, 0x64, 0x6e, 0x73, 0x79,
0x73, 0x74, 0x65, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x6c, 0x4d, 0x65, 0x74, 0x61, 0x22, 0xe6, 0x02, 0x0a, 0x09, 0x50, 0x69, 0x65, 0x63, 0x65, 0x53,
0x65, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x70,
0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x64,
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52,
0x06, 0x68, 0x6f, 0x73, 0x74, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x0a, 0x70, 0x69, 0x65, 0x63, 0x65,
0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f,
0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09,
0x70, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e,
0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x25, 0x0a,
0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18,
0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x65,
0x6e, 0x67, 0x74, 0x68, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x69,
0x65, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52,
0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x69, 0x65, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74,
0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08,
0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12,
0x19, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
0x04, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x10, 0x65, 0x78,
0x74, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x0a,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x78,
0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0f, 0x65,
0x78, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x32, 0xcc,
0x01, 0x0a, 0x06, 0x53, 0x65, 0x65, 0x64, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0b, 0x4f, 0x62, 0x74,
0x61, 0x69, 0x6e, 0x53, 0x65, 0x65, 0x64, 0x73, 0x12, 0x16, 0x2e, 0x63, 0x64, 0x6e, 0x73, 0x79,
0x73, 0x74, 0x65, 0x6d, 0x2e, 0x53, 0x65, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x14, 0x2e, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x50, 0x69, 0x65,
0x63, 0x65, 0x53, 0x65, 0x65, 0x64, 0x30, 0x01, 0x12, 0x3e, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50,
0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x18, 0x2e, 0x63, 0x6f, 0x6d, 0x6d,
0x6f, 0x6e, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x69, 0x65,
0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x43, 0x0a, 0x0e, 0x53, 0x79, 0x6e, 0x63,
0x50, 0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x18, 0x2e, 0x63, 0x6f, 0x6d,
0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x69,
0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x28, 0x01, 0x30, 0x01, 0x42, 0x22, 0x5a,
0x20, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6b, 0x67, 0x2f,
0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x76,
0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -380,3 +372,221 @@ func file_pkg_apis_cdnsystem_v1_cdnsystem_proto_init() {
file_pkg_apis_cdnsystem_v1_cdnsystem_proto_goTypes = nil
file_pkg_apis_cdnsystem_v1_cdnsystem_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// SeederClient is the client API for Seeder service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SeederClient interface {
// Generate seeds and return to scheduler
ObtainSeeds(ctx context.Context, in *SeedRequest, opts ...grpc.CallOption) (Seeder_ObtainSeedsClient, error)
// Get piece tasks from cdn
GetPieceTasks(ctx context.Context, in *v1.PieceTaskRequest, opts ...grpc.CallOption) (*v1.PiecePacket, error)
// Sync piece tasks with other peers
SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (Seeder_SyncPieceTasksClient, error)
}
type seederClient struct {
cc grpc.ClientConnInterface
}
func NewSeederClient(cc grpc.ClientConnInterface) SeederClient {
return &seederClient{cc}
}
func (c *seederClient) ObtainSeeds(ctx context.Context, in *SeedRequest, opts ...grpc.CallOption) (Seeder_ObtainSeedsClient, error) {
stream, err := c.cc.NewStream(ctx, &_Seeder_serviceDesc.Streams[0], "/cdnsystem.Seeder/ObtainSeeds", opts...)
if err != nil {
return nil, err
}
x := &seederObtainSeedsClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type Seeder_ObtainSeedsClient interface {
Recv() (*PieceSeed, error)
grpc.ClientStream
}
type seederObtainSeedsClient struct {
grpc.ClientStream
}
func (x *seederObtainSeedsClient) Recv() (*PieceSeed, error) {
m := new(PieceSeed)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *seederClient) GetPieceTasks(ctx context.Context, in *v1.PieceTaskRequest, opts ...grpc.CallOption) (*v1.PiecePacket, error) {
out := new(v1.PiecePacket)
err := c.cc.Invoke(ctx, "/cdnsystem.Seeder/GetPieceTasks", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *seederClient) SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (Seeder_SyncPieceTasksClient, error) {
stream, err := c.cc.NewStream(ctx, &_Seeder_serviceDesc.Streams[1], "/cdnsystem.Seeder/SyncPieceTasks", opts...)
if err != nil {
return nil, err
}
x := &seederSyncPieceTasksClient{stream}
return x, nil
}
type Seeder_SyncPieceTasksClient interface {
Send(*v1.PieceTaskRequest) error
Recv() (*v1.PiecePacket, error)
grpc.ClientStream
}
type seederSyncPieceTasksClient struct {
grpc.ClientStream
}
func (x *seederSyncPieceTasksClient) Send(m *v1.PieceTaskRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *seederSyncPieceTasksClient) Recv() (*v1.PiecePacket, error) {
m := new(v1.PiecePacket)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// SeederServer is the server API for Seeder service.
type SeederServer interface {
// Generate seeds and return to scheduler
ObtainSeeds(*SeedRequest, Seeder_ObtainSeedsServer) error
// Get piece tasks from cdn
GetPieceTasks(context.Context, *v1.PieceTaskRequest) (*v1.PiecePacket, error)
// Sync piece tasks with other peers
SyncPieceTasks(Seeder_SyncPieceTasksServer) error
}
// UnimplementedSeederServer can be embedded to have forward compatible implementations.
type UnimplementedSeederServer struct {
}
func (*UnimplementedSeederServer) ObtainSeeds(*SeedRequest, Seeder_ObtainSeedsServer) error {
return status.Errorf(codes.Unimplemented, "method ObtainSeeds not implemented")
}
func (*UnimplementedSeederServer) GetPieceTasks(context.Context, *v1.PieceTaskRequest) (*v1.PiecePacket, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetPieceTasks not implemented")
}
func (*UnimplementedSeederServer) SyncPieceTasks(Seeder_SyncPieceTasksServer) error {
return status.Errorf(codes.Unimplemented, "method SyncPieceTasks not implemented")
}
func RegisterSeederServer(s *grpc.Server, srv SeederServer) {
s.RegisterService(&_Seeder_serviceDesc, srv)
}
func _Seeder_ObtainSeeds_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(SeedRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(SeederServer).ObtainSeeds(m, &seederObtainSeedsServer{stream})
}
type Seeder_ObtainSeedsServer interface {
Send(*PieceSeed) error
grpc.ServerStream
}
type seederObtainSeedsServer struct {
grpc.ServerStream
}
func (x *seederObtainSeedsServer) Send(m *PieceSeed) error {
return x.ServerStream.SendMsg(m)
}
func _Seeder_GetPieceTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(v1.PieceTaskRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SeederServer).GetPieceTasks(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/cdnsystem.Seeder/GetPieceTasks",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeederServer).GetPieceTasks(ctx, req.(*v1.PieceTaskRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Seeder_SyncPieceTasks_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SeederServer).SyncPieceTasks(&seederSyncPieceTasksServer{stream})
}
type Seeder_SyncPieceTasksServer interface {
Send(*v1.PiecePacket) error
Recv() (*v1.PieceTaskRequest, error)
grpc.ServerStream
}
type seederSyncPieceTasksServer struct {
grpc.ServerStream
}
func (x *seederSyncPieceTasksServer) Send(m *v1.PiecePacket) error {
return x.ServerStream.SendMsg(m)
}
func (x *seederSyncPieceTasksServer) Recv() (*v1.PieceTaskRequest, error) {
m := new(v1.PieceTaskRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _Seeder_serviceDesc = grpc.ServiceDesc{
ServiceName: "cdnsystem.Seeder",
HandlerType: (*SeederServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetPieceTasks",
Handler: _Seeder_GetPieceTasks_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "ObtainSeeds",
Handler: _Seeder_ObtainSeeds_Handler,
ServerStreams: true,
},
{
StreamName: "SyncPieceTasks",
Handler: _Seeder_SyncPieceTasks_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "pkg/apis/cdnsystem/v1/cdnsystem.proto",
}
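To make the generated API above concrete, here is a minimal client sketch against the v1.0.3 layout, where the service code lives in the .pb.go file and is imported from d7y.io/api/pkg/apis/cdnsystem/v1: dial a connection, build a SeederClient, open the ObtainSeeds server stream, and read PieceSeed messages until EOF. The seeder address and task id are placeholders, not values from this repository.

```go
package main

import (
	"context"
	"io"
	"log"

	cdnsystem "d7y.io/api/pkg/apis/cdnsystem/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// The seeder address is a placeholder; in a real deployment it comes from configuration.
	conn, err := grpc.Dial("seeder.example.com:8003", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := cdnsystem.NewSeederClient(conn)

	// ObtainSeeds opens a server-streaming RPC; each Recv returns one PieceSeed.
	stream, err := client.ObtainSeeds(context.Background(), &cdnsystem.SeedRequest{TaskId: "task-1"})
	if err != nil {
		log.Fatalf("ObtainSeeds: %v", err)
	}
	for {
		seed, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("recv: %v", err)
		}
		log.Printf("piece seed from peer %s on host %s (done=%v)", seed.GetPeerId(), seed.GetHostId(), seed.GetDone())
	}
}
```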


@ -1,7 +1,7 @@
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: pkg/apis/cdnsystem/v1/cdnsystem.proto
package cdnsystem
package v1
import (
"bytes"
@ -217,8 +217,6 @@ func (m *PieceSeed) validate(all bool) error {
var errors []error
// no validation rules for Reuse
if utf8.RuneCountInString(m.GetPeerId()) < 1 {
err := PieceSeedValidationError{
field: "PeerId",


@ -21,7 +21,7 @@ package cdnsystem;
import "pkg/apis/common/v1/common.proto";
import "validate/validate.proto";
option go_package = "d7y.io/api/v2/pkg/apis/cdnsystem/v1;cdnsystem";
option go_package = "d7y.io/api/pkg/apis/cdnsystem/v1";
message SeedRequest{
string task_id = 1 [(validate.rules).string.min_len = 1];
@ -32,14 +32,12 @@ message SeedRequest{
// keep piece meta and data separately
// check piece md5, md5s sign and total content length
message PieceSeed{
// reuse already downloaded peer
bool reuse = 1;
// peer id for cdn node, need suffix with _CDN
string peer_id = 2 [(validate.rules).string.min_len = 1];
// cdn host id
string host_id = 3 [(validate.rules).string.min_len = 1];
// piece info
common.PieceInfo piece_info = 4;
// whether or not all seeds are downloaded
bool done = 5;
// content total length for the url, content_length < 0 represent content length is unknown
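The (validate.rules) options above pair with the protoc-gen-validate output shown earlier: each generated message carries a Validate method that returns a typed error such as PieceSeedValidationError. A small sketch of that behavior, assuming the main-branch import path:

```go
package main

import (
	"fmt"

	cdnsystem "d7y.io/api/v2/pkg/apis/cdnsystem/v1"
)

func main() {
	// peer_id and host_id carry (validate.rules).string.min_len = 1, so an empty
	// PieceSeed fails validation with a PieceSeedValidationError.
	seed := &cdnsystem.PieceSeed{}
	if err := seed.Validate(); err != nil {
		fmt.Println("validation failed:", err) // e.g. reports that PeerId must be at least 1 character
	}
}
```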


@ -1,242 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.21.6
// source: pkg/apis/cdnsystem/v1/cdnsystem.proto
package cdnsystem
import (
context "context"
v1 "d7y.io/api/v2/pkg/apis/common/v1"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// SeederClient is the client API for Seeder service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type SeederClient interface {
// Generate seeds and return to scheduler
ObtainSeeds(ctx context.Context, in *SeedRequest, opts ...grpc.CallOption) (Seeder_ObtainSeedsClient, error)
// Get piece tasks from cdn
GetPieceTasks(ctx context.Context, in *v1.PieceTaskRequest, opts ...grpc.CallOption) (*v1.PiecePacket, error)
// Sync piece tasks with other peers
SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (Seeder_SyncPieceTasksClient, error)
}
type seederClient struct {
cc grpc.ClientConnInterface
}
func NewSeederClient(cc grpc.ClientConnInterface) SeederClient {
return &seederClient{cc}
}
func (c *seederClient) ObtainSeeds(ctx context.Context, in *SeedRequest, opts ...grpc.CallOption) (Seeder_ObtainSeedsClient, error) {
stream, err := c.cc.NewStream(ctx, &Seeder_ServiceDesc.Streams[0], "/cdnsystem.Seeder/ObtainSeeds", opts...)
if err != nil {
return nil, err
}
x := &seederObtainSeedsClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type Seeder_ObtainSeedsClient interface {
Recv() (*PieceSeed, error)
grpc.ClientStream
}
type seederObtainSeedsClient struct {
grpc.ClientStream
}
func (x *seederObtainSeedsClient) Recv() (*PieceSeed, error) {
m := new(PieceSeed)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *seederClient) GetPieceTasks(ctx context.Context, in *v1.PieceTaskRequest, opts ...grpc.CallOption) (*v1.PiecePacket, error) {
out := new(v1.PiecePacket)
err := c.cc.Invoke(ctx, "/cdnsystem.Seeder/GetPieceTasks", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *seederClient) SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (Seeder_SyncPieceTasksClient, error) {
stream, err := c.cc.NewStream(ctx, &Seeder_ServiceDesc.Streams[1], "/cdnsystem.Seeder/SyncPieceTasks", opts...)
if err != nil {
return nil, err
}
x := &seederSyncPieceTasksClient{stream}
return x, nil
}
type Seeder_SyncPieceTasksClient interface {
Send(*v1.PieceTaskRequest) error
Recv() (*v1.PiecePacket, error)
grpc.ClientStream
}
type seederSyncPieceTasksClient struct {
grpc.ClientStream
}
func (x *seederSyncPieceTasksClient) Send(m *v1.PieceTaskRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *seederSyncPieceTasksClient) Recv() (*v1.PiecePacket, error) {
m := new(v1.PiecePacket)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// SeederServer is the server API for Seeder service.
// All implementations should embed UnimplementedSeederServer
// for forward compatibility
type SeederServer interface {
// Generate seeds and return to scheduler
ObtainSeeds(*SeedRequest, Seeder_ObtainSeedsServer) error
// Get piece tasks from cdn
GetPieceTasks(context.Context, *v1.PieceTaskRequest) (*v1.PiecePacket, error)
// Sync piece tasks with other peers
SyncPieceTasks(Seeder_SyncPieceTasksServer) error
}
// UnimplementedSeederServer should be embedded to have forward compatible implementations.
type UnimplementedSeederServer struct {
}
func (UnimplementedSeederServer) ObtainSeeds(*SeedRequest, Seeder_ObtainSeedsServer) error {
return status.Errorf(codes.Unimplemented, "method ObtainSeeds not implemented")
}
func (UnimplementedSeederServer) GetPieceTasks(context.Context, *v1.PieceTaskRequest) (*v1.PiecePacket, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetPieceTasks not implemented")
}
func (UnimplementedSeederServer) SyncPieceTasks(Seeder_SyncPieceTasksServer) error {
return status.Errorf(codes.Unimplemented, "method SyncPieceTasks not implemented")
}
// UnsafeSeederServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SeederServer will
// result in compilation errors.
type UnsafeSeederServer interface {
mustEmbedUnimplementedSeederServer()
}
func RegisterSeederServer(s grpc.ServiceRegistrar, srv SeederServer) {
s.RegisterService(&Seeder_ServiceDesc, srv)
}
func _Seeder_ObtainSeeds_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(SeedRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(SeederServer).ObtainSeeds(m, &seederObtainSeedsServer{stream})
}
type Seeder_ObtainSeedsServer interface {
Send(*PieceSeed) error
grpc.ServerStream
}
type seederObtainSeedsServer struct {
grpc.ServerStream
}
func (x *seederObtainSeedsServer) Send(m *PieceSeed) error {
return x.ServerStream.SendMsg(m)
}
func _Seeder_GetPieceTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(v1.PieceTaskRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SeederServer).GetPieceTasks(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/cdnsystem.Seeder/GetPieceTasks",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeederServer).GetPieceTasks(ctx, req.(*v1.PieceTaskRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Seeder_SyncPieceTasks_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SeederServer).SyncPieceTasks(&seederSyncPieceTasksServer{stream})
}
type Seeder_SyncPieceTasksServer interface {
Send(*v1.PiecePacket) error
Recv() (*v1.PieceTaskRequest, error)
grpc.ServerStream
}
type seederSyncPieceTasksServer struct {
grpc.ServerStream
}
func (x *seederSyncPieceTasksServer) Send(m *v1.PiecePacket) error {
return x.ServerStream.SendMsg(m)
}
func (x *seederSyncPieceTasksServer) Recv() (*v1.PieceTaskRequest, error) {
m := new(v1.PieceTaskRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Seeder_ServiceDesc is the grpc.ServiceDesc for Seeder service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Seeder_ServiceDesc = grpc.ServiceDesc{
ServiceName: "cdnsystem.Seeder",
HandlerType: (*SeederServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetPieceTasks",
Handler: _Seeder_GetPieceTasks_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "ObtainSeeds",
Handler: _Seeder_ObtainSeeds_Handler,
ServerStreams: true,
},
{
StreamName: "SyncPieceTasks",
Handler: _Seeder_SyncPieceTasks_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "pkg/apis/cdnsystem/v1/cdnsystem.proto",
}
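The file deleted above holds the main-branch service code, where servers embed UnimplementedSeederServer and register through RegisterSeederServer. A hedged server-side sketch against that layout follows; the listen address and seed values are placeholders.

```go
package main

import (
	"log"
	"net"

	cdnsystem "d7y.io/api/v2/pkg/apis/cdnsystem/v1"
	"google.golang.org/grpc"
)

// seeder implements SeederServer. Embedding UnimplementedSeederServer keeps the
// implementation forward compatible: RPCs that are not overridden (GetPieceTasks,
// SyncPieceTasks) return codes.Unimplemented.
type seeder struct {
	cdnsystem.UnimplementedSeederServer
}

func (s *seeder) ObtainSeeds(req *cdnsystem.SeedRequest, stream cdnsystem.Seeder_ObtainSeedsServer) error {
	// Send one final PieceSeed for the requested task; a real seeder streams many.
	return stream.Send(&cdnsystem.PieceSeed{
		PeerId: "peer-1_CDN", // cdn peer ids carry the _CDN suffix, per the field comment above
		HostId: "host-1",
		Done:   true,
	})
}

func main() {
	lis, err := net.Listen("tcp", ":8003") // the listen address is a placeholder
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer()
	cdnsystem.RegisterSeederServer(srv, &seeder{})
	log.Fatal(srv.Serve(lis))
}
```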


@ -1,10 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: ../cdnsystem_grpc.pb.go
//
// Generated by this command:
//
// mockgen -destination cdnsystem_mock.go -source ../cdnsystem_grpc.pb.go -package mocks
//
// Source: ../cdnsystem.pb.go
// Package mocks is a generated GoMock package.
package mocks
@ -13,9 +8,9 @@ import (
context "context"
reflect "reflect"
cdnsystem "d7y.io/api/v2/pkg/apis/cdnsystem/v1"
common "d7y.io/api/v2/pkg/apis/common/v1"
gomock "go.uber.org/mock/gomock"
v1 "d7y.io/api/pkg/apis/cdnsystem/v1"
v10 "d7y.io/api/pkg/apis/common/v1"
gomock "github.com/golang/mock/gomock"
grpc "google.golang.org/grpc"
metadata "google.golang.org/grpc/metadata"
)
@ -24,7 +19,6 @@ import (
type MockSeederClient struct {
ctrl *gomock.Controller
recorder *MockSeederClientMockRecorder
isgomock struct{}
}
// MockSeederClientMockRecorder is the mock recorder for MockSeederClient.
@ -45,62 +39,62 @@ func (m *MockSeederClient) EXPECT() *MockSeederClientMockRecorder {
}
// GetPieceTasks mocks base method.
func (m *MockSeederClient) GetPieceTasks(ctx context.Context, in *common.PieceTaskRequest, opts ...grpc.CallOption) (*common.PiecePacket, error) {
func (m *MockSeederClient) GetPieceTasks(ctx context.Context, in *v10.PieceTaskRequest, opts ...grpc.CallOption) (*v10.PiecePacket, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetPieceTasks", varargs...)
ret0, _ := ret[0].(*common.PiecePacket)
ret0, _ := ret[0].(*v10.PiecePacket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPieceTasks indicates an expected call of GetPieceTasks.
func (mr *MockSeederClientMockRecorder) GetPieceTasks(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockSeederClientMockRecorder) GetPieceTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockSeederClient)(nil).GetPieceTasks), varargs...)
}
// ObtainSeeds mocks base method.
func (m *MockSeederClient) ObtainSeeds(ctx context.Context, in *cdnsystem.SeedRequest, opts ...grpc.CallOption) (cdnsystem.Seeder_ObtainSeedsClient, error) {
func (m *MockSeederClient) ObtainSeeds(ctx context.Context, in *v1.SeedRequest, opts ...grpc.CallOption) (v1.Seeder_ObtainSeedsClient, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ObtainSeeds", varargs...)
ret0, _ := ret[0].(cdnsystem.Seeder_ObtainSeedsClient)
ret0, _ := ret[0].(v1.Seeder_ObtainSeedsClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ObtainSeeds indicates an expected call of ObtainSeeds.
func (mr *MockSeederClientMockRecorder) ObtainSeeds(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockSeederClientMockRecorder) ObtainSeeds(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ObtainSeeds", reflect.TypeOf((*MockSeederClient)(nil).ObtainSeeds), varargs...)
}
// SyncPieceTasks mocks base method.
func (m *MockSeederClient) SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (cdnsystem.Seeder_SyncPieceTasksClient, error) {
func (m *MockSeederClient) SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (v1.Seeder_SyncPieceTasksClient, error) {
m.ctrl.T.Helper()
varargs := []any{ctx}
varargs := []interface{}{ctx}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "SyncPieceTasks", varargs...)
ret0, _ := ret[0].(cdnsystem.Seeder_SyncPieceTasksClient)
ret0, _ := ret[0].(v1.Seeder_SyncPieceTasksClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SyncPieceTasks indicates an expected call of SyncPieceTasks.
func (mr *MockSeederClientMockRecorder) SyncPieceTasks(ctx any, opts ...any) *gomock.Call {
func (mr *MockSeederClientMockRecorder) SyncPieceTasks(ctx interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx}, opts...)
varargs := append([]interface{}{ctx}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPieceTasks", reflect.TypeOf((*MockSeederClient)(nil).SyncPieceTasks), varargs...)
}
@ -108,7 +102,6 @@ func (mr *MockSeederClientMockRecorder) SyncPieceTasks(ctx any, opts ...any) *go
type MockSeeder_ObtainSeedsClient struct {
ctrl *gomock.Controller
recorder *MockSeeder_ObtainSeedsClientMockRecorder
isgomock struct{}
}
// MockSeeder_ObtainSeedsClientMockRecorder is the mock recorder for MockSeeder_ObtainSeedsClient.
@ -172,10 +165,10 @@ func (mr *MockSeeder_ObtainSeedsClientMockRecorder) Header() *gomock.Call {
}
// Recv mocks base method.
func (m *MockSeeder_ObtainSeedsClient) Recv() (*cdnsystem.PieceSeed, error) {
func (m *MockSeeder_ObtainSeedsClient) Recv() (*v1.PieceSeed, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*cdnsystem.PieceSeed)
ret0, _ := ret[0].(*v1.PieceSeed)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -187,7 +180,7 @@ func (mr *MockSeeder_ObtainSeedsClientMockRecorder) Recv() *gomock.Call {
}
// RecvMsg mocks base method.
func (m_2 *MockSeeder_ObtainSeedsClient) RecvMsg(m any) error {
func (m_2 *MockSeeder_ObtainSeedsClient) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
@ -195,13 +188,13 @@ func (m_2 *MockSeeder_ObtainSeedsClient) RecvMsg(m any) error {
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockSeeder_ObtainSeedsClientMockRecorder) RecvMsg(m any) *gomock.Call {
func (mr *MockSeeder_ObtainSeedsClientMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).RecvMsg), m)
}
// SendMsg mocks base method.
func (m_2 *MockSeeder_ObtainSeedsClient) SendMsg(m any) error {
func (m_2 *MockSeeder_ObtainSeedsClient) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
@ -209,7 +202,7 @@ func (m_2 *MockSeeder_ObtainSeedsClient) SendMsg(m any) error {
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockSeeder_ObtainSeedsClientMockRecorder) SendMsg(m any) *gomock.Call {
func (mr *MockSeeder_ObtainSeedsClientMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).SendMsg), m)
}
@ -232,7 +225,6 @@ func (mr *MockSeeder_ObtainSeedsClientMockRecorder) Trailer() *gomock.Call {
type MockSeeder_SyncPieceTasksClient struct {
ctrl *gomock.Controller
recorder *MockSeeder_SyncPieceTasksClientMockRecorder
isgomock struct{}
}
// MockSeeder_SyncPieceTasksClientMockRecorder is the mock recorder for MockSeeder_SyncPieceTasksClient.
@ -296,10 +288,10 @@ func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) Header() *gomock.Call {
}
// Recv mocks base method.
func (m *MockSeeder_SyncPieceTasksClient) Recv() (*common.PiecePacket, error) {
func (m *MockSeeder_SyncPieceTasksClient) Recv() (*v10.PiecePacket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*common.PiecePacket)
ret0, _ := ret[0].(*v10.PiecePacket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -311,7 +303,7 @@ func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) Recv() *gomock.Call {
}
// RecvMsg mocks base method.
func (m_2 *MockSeeder_SyncPieceTasksClient) RecvMsg(m any) error {
func (m_2 *MockSeeder_SyncPieceTasksClient) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
@ -319,13 +311,13 @@ func (m_2 *MockSeeder_SyncPieceTasksClient) RecvMsg(m any) error {
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) RecvMsg(m any) *gomock.Call {
func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockSeeder_SyncPieceTasksClient) Send(arg0 *common.PieceTaskRequest) error {
func (m *MockSeeder_SyncPieceTasksClient) Send(arg0 *v10.PieceTaskRequest) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
@ -333,13 +325,13 @@ func (m *MockSeeder_SyncPieceTasksClient) Send(arg0 *common.PieceTaskRequest) er
}
// Send indicates an expected call of Send.
func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) Send(arg0 any) *gomock.Call {
func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).Send), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockSeeder_SyncPieceTasksClient) SendMsg(m any) error {
func (m_2 *MockSeeder_SyncPieceTasksClient) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
@ -347,7 +339,7 @@ func (m_2 *MockSeeder_SyncPieceTasksClient) SendMsg(m any) error {
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) SendMsg(m any) *gomock.Call {
func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).SendMsg), m)
}
@ -370,7 +362,6 @@ func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) Trailer() *gomock.Call {
type MockSeederServer struct {
ctrl *gomock.Controller
recorder *MockSeederServerMockRecorder
isgomock struct{}
}
// MockSeederServerMockRecorder is the mock recorder for MockSeederServer.
@ -391,22 +382,22 @@ func (m *MockSeederServer) EXPECT() *MockSeederServerMockRecorder {
}
// GetPieceTasks mocks base method.
func (m *MockSeederServer) GetPieceTasks(arg0 context.Context, arg1 *common.PieceTaskRequest) (*common.PiecePacket, error) {
func (m *MockSeederServer) GetPieceTasks(arg0 context.Context, arg1 *v10.PieceTaskRequest) (*v10.PiecePacket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPieceTasks", arg0, arg1)
ret0, _ := ret[0].(*common.PiecePacket)
ret0, _ := ret[0].(*v10.PiecePacket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPieceTasks indicates an expected call of GetPieceTasks.
func (mr *MockSeederServerMockRecorder) GetPieceTasks(arg0, arg1 any) *gomock.Call {
func (mr *MockSeederServerMockRecorder) GetPieceTasks(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockSeederServer)(nil).GetPieceTasks), arg0, arg1)
}
// ObtainSeeds mocks base method.
func (m *MockSeederServer) ObtainSeeds(arg0 *cdnsystem.SeedRequest, arg1 cdnsystem.Seeder_ObtainSeedsServer) error {
func (m *MockSeederServer) ObtainSeeds(arg0 *v1.SeedRequest, arg1 v1.Seeder_ObtainSeedsServer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ObtainSeeds", arg0, arg1)
ret0, _ := ret[0].(error)
@ -414,13 +405,13 @@ func (m *MockSeederServer) ObtainSeeds(arg0 *cdnsystem.SeedRequest, arg1 cdnsyst
}
// ObtainSeeds indicates an expected call of ObtainSeeds.
func (mr *MockSeederServerMockRecorder) ObtainSeeds(arg0, arg1 any) *gomock.Call {
func (mr *MockSeederServerMockRecorder) ObtainSeeds(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ObtainSeeds", reflect.TypeOf((*MockSeederServer)(nil).ObtainSeeds), arg0, arg1)
}
// SyncPieceTasks mocks base method.
func (m *MockSeederServer) SyncPieceTasks(arg0 cdnsystem.Seeder_SyncPieceTasksServer) error {
func (m *MockSeederServer) SyncPieceTasks(arg0 v1.Seeder_SyncPieceTasksServer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SyncPieceTasks", arg0)
ret0, _ := ret[0].(error)
@ -428,52 +419,15 @@ func (m *MockSeederServer) SyncPieceTasks(arg0 cdnsystem.Seeder_SyncPieceTasksSe
}
// SyncPieceTasks indicates an expected call of SyncPieceTasks.
func (mr *MockSeederServerMockRecorder) SyncPieceTasks(arg0 any) *gomock.Call {
func (mr *MockSeederServerMockRecorder) SyncPieceTasks(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPieceTasks", reflect.TypeOf((*MockSeederServer)(nil).SyncPieceTasks), arg0)
}
// MockUnsafeSeederServer is a mock of UnsafeSeederServer interface.
type MockUnsafeSeederServer struct {
ctrl *gomock.Controller
recorder *MockUnsafeSeederServerMockRecorder
isgomock struct{}
}
// MockUnsafeSeederServerMockRecorder is the mock recorder for MockUnsafeSeederServer.
type MockUnsafeSeederServerMockRecorder struct {
mock *MockUnsafeSeederServer
}
// NewMockUnsafeSeederServer creates a new mock instance.
func NewMockUnsafeSeederServer(ctrl *gomock.Controller) *MockUnsafeSeederServer {
mock := &MockUnsafeSeederServer{ctrl: ctrl}
mock.recorder = &MockUnsafeSeederServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockUnsafeSeederServer) EXPECT() *MockUnsafeSeederServerMockRecorder {
return m.recorder
}
// mustEmbedUnimplementedSeederServer mocks base method.
func (m *MockUnsafeSeederServer) mustEmbedUnimplementedSeederServer() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "mustEmbedUnimplementedSeederServer")
}
// mustEmbedUnimplementedSeederServer indicates an expected call of mustEmbedUnimplementedSeederServer.
func (mr *MockUnsafeSeederServerMockRecorder) mustEmbedUnimplementedSeederServer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedSeederServer", reflect.TypeOf((*MockUnsafeSeederServer)(nil).mustEmbedUnimplementedSeederServer))
}
// MockSeeder_ObtainSeedsServer is a mock of Seeder_ObtainSeedsServer interface.
type MockSeeder_ObtainSeedsServer struct {
ctrl *gomock.Controller
recorder *MockSeeder_ObtainSeedsServerMockRecorder
isgomock struct{}
}
// MockSeeder_ObtainSeedsServerMockRecorder is the mock recorder for MockSeeder_ObtainSeedsServer.
@ -508,7 +462,7 @@ func (mr *MockSeeder_ObtainSeedsServerMockRecorder) Context() *gomock.Call {
}
// RecvMsg mocks base method.
func (m_2 *MockSeeder_ObtainSeedsServer) RecvMsg(m any) error {
func (m_2 *MockSeeder_ObtainSeedsServer) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
@ -516,13 +470,13 @@ func (m_2 *MockSeeder_ObtainSeedsServer) RecvMsg(m any) error {
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) RecvMsg(m any) *gomock.Call {
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockSeeder_ObtainSeedsServer) Send(arg0 *cdnsystem.PieceSeed) error {
func (m *MockSeeder_ObtainSeedsServer) Send(arg0 *v1.PieceSeed) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
@ -530,7 +484,7 @@ func (m *MockSeeder_ObtainSeedsServer) Send(arg0 *cdnsystem.PieceSeed) error {
}
// Send indicates an expected call of Send.
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) Send(arg0 any) *gomock.Call {
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).Send), arg0)
}
@ -544,13 +498,13 @@ func (m *MockSeeder_ObtainSeedsServer) SendHeader(arg0 metadata.MD) error {
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SendHeader(arg0 any) *gomock.Call {
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockSeeder_ObtainSeedsServer) SendMsg(m any) error {
func (m_2 *MockSeeder_ObtainSeedsServer) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
@ -558,7 +512,7 @@ func (m_2 *MockSeeder_ObtainSeedsServer) SendMsg(m any) error {
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SendMsg(m any) *gomock.Call {
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SendMsg), m)
}
@ -572,7 +526,7 @@ func (m *MockSeeder_ObtainSeedsServer) SetHeader(arg0 metadata.MD) error {
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SetHeader(arg0 any) *gomock.Call {
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SetHeader), arg0)
}
@ -584,7 +538,7 @@ func (m *MockSeeder_ObtainSeedsServer) SetTrailer(arg0 metadata.MD) {
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SetTrailer(arg0 any) *gomock.Call {
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SetTrailer), arg0)
}
@ -593,7 +547,6 @@ func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SetTrailer(arg0 any) *gomock
type MockSeeder_SyncPieceTasksServer struct {
ctrl *gomock.Controller
recorder *MockSeeder_SyncPieceTasksServerMockRecorder
isgomock struct{}
}
// MockSeeder_SyncPieceTasksServerMockRecorder is the mock recorder for MockSeeder_SyncPieceTasksServer.
@ -628,10 +581,10 @@ func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) Context() *gomock.Call {
}
// Recv mocks base method.
func (m *MockSeeder_SyncPieceTasksServer) Recv() (*common.PieceTaskRequest, error) {
func (m *MockSeeder_SyncPieceTasksServer) Recv() (*v10.PieceTaskRequest, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*common.PieceTaskRequest)
ret0, _ := ret[0].(*v10.PieceTaskRequest)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -643,7 +596,7 @@ func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) Recv() *gomock.Call {
}
// RecvMsg mocks base method.
func (m_2 *MockSeeder_SyncPieceTasksServer) RecvMsg(m any) error {
func (m_2 *MockSeeder_SyncPieceTasksServer) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
@ -651,13 +604,13 @@ func (m_2 *MockSeeder_SyncPieceTasksServer) RecvMsg(m any) error {
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) RecvMsg(m any) *gomock.Call {
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockSeeder_SyncPieceTasksServer) Send(arg0 *common.PiecePacket) error {
func (m *MockSeeder_SyncPieceTasksServer) Send(arg0 *v10.PiecePacket) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
@ -665,7 +618,7 @@ func (m *MockSeeder_SyncPieceTasksServer) Send(arg0 *common.PiecePacket) error {
}
// Send indicates an expected call of Send.
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) Send(arg0 any) *gomock.Call {
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).Send), arg0)
}
@ -679,13 +632,13 @@ func (m *MockSeeder_SyncPieceTasksServer) SendHeader(arg0 metadata.MD) error {
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SendHeader(arg0 any) *gomock.Call {
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockSeeder_SyncPieceTasksServer) SendMsg(m any) error {
func (m_2 *MockSeeder_SyncPieceTasksServer) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
@ -693,7 +646,7 @@ func (m_2 *MockSeeder_SyncPieceTasksServer) SendMsg(m any) error {
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SendMsg(m any) *gomock.Call {
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).SendMsg), m)
}
@ -707,7 +660,7 @@ func (m *MockSeeder_SyncPieceTasksServer) SetHeader(arg0 metadata.MD) error {
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SetHeader(arg0 any) *gomock.Call {
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).SetHeader), arg0)
}
@ -719,7 +672,7 @@ func (m *MockSeeder_SyncPieceTasksServer) SetTrailer(arg0 metadata.MD) {
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SetTrailer(arg0 any) *gomock.Call {
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).SetTrailer), arg0)
}

View File

@ -16,4 +16,4 @@
package mocks
//go:generate mockgen -destination cdnsystem_mock.go -source ../cdnsystem_grpc.pb.go -package mocks
//go:generate mockgen -destination cdnsystem_mock.go -source ../cdnsystem.pb.go -package mocks
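A minimal test sketch showing how the MockSeederClient generated above can be used with go.uber.org/mock; the mocks import path and the expectations are assumptions for illustration, not part of this diff.

package mocks_test

import (
    "context"
    "testing"

    cdnsystem "d7y.io/api/v2/pkg/apis/cdnsystem/v1"
    "d7y.io/api/v2/pkg/apis/cdnsystem/v1/mocks"
    "go.uber.org/mock/gomock"
)

func TestObtainSeedsIsCalledOnce(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    client := mocks.NewMockSeederClient(ctrl)
    // Expect exactly one ObtainSeeds call with any context and request, and
    // return a nil stream; gomock.Any() also covers the variadic call options.
    client.EXPECT().
        ObtainSeeds(gomock.Any(), gomock.Any()).
        Return(nil, nil).
        Times(1)

    if _, err := client.ObtainSeeds(context.Background(), &cdnsystem.SeedRequest{}); err != nil {
        t.Fatal(err)
    }
}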

File diff suppressed because it is too large

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: pkg/apis/common/v1/common.proto
package common
package v1
import (
"bytes"
@ -164,7 +164,7 @@ func (m *UrlMeta) validate(all bool) error {
if !_UrlMeta_Digest_Pattern.MatchString(m.GetDigest()) {
err := UrlMetaValidationError{
field: "Digest",
reason: "value does not match regex pattern \"^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128})$\"",
reason: "value does not match regex pattern \"^(md5)|(sha256):[A-Fa-f0-9]+$\"",
}
if !all {
return err
@ -197,8 +197,6 @@ func (m *UrlMeta) validate(all bool) error {
// no validation rules for Application
// no validation rules for Priority
if len(errors) > 0 {
return UrlMetaMultiError(errors)
}
@ -276,10 +274,142 @@ var _ interface {
ErrorName() string
} = UrlMetaValidationError{}
var _UrlMeta_Digest_Pattern = regexp.MustCompile("^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128})$")
var _UrlMeta_Digest_Pattern = regexp.MustCompile("^(md5)|(sha256):[A-Fa-f0-9]+$")
var _UrlMeta_Range_Pattern = regexp.MustCompile("^[0-9]+-[0-9]*$")
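To make the pattern change above concrete, a small standalone check (not part of the generated file) compares the two expressions; the sample inputs are arbitrary.

package main

import (
    "fmt"
    "regexp"
    "strings"
)

func main() {
    // Pattern on the main side: a full digest with an explicit length per algorithm.
    newPattern := regexp.MustCompile("^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128})$")
    // Pattern on the v1.0.3 side: the alternation is not grouped, so the
    // leading "^(md5)" branch matches without any hash at all.
    oldPattern := regexp.MustCompile("^(md5)|(sha256):[A-Fa-f0-9]+$")

    fmt.Println(oldPattern.MatchString("md5"))                               // true
    fmt.Println(newPattern.MatchString("md5"))                               // false
    fmt.Println(newPattern.MatchString("sha256:" + strings.Repeat("a", 64))) // true
    fmt.Println(newPattern.MatchString("sha256:" + strings.Repeat("a", 63))) // false
}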
// Validate checks the field values on HostLoad with the rules defined in the
// proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *HostLoad) Validate() error {
return m.validate(false)
}
// ValidateAll checks the field values on HostLoad with the rules defined in
// the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in HostLoadMultiError, or nil
// if none found.
func (m *HostLoad) ValidateAll() error {
return m.validate(true)
}
func (m *HostLoad) validate(all bool) error {
if m == nil {
return nil
}
var errors []error
if val := m.GetCpuRatio(); val < 0 || val > 1 {
err := HostLoadValidationError{
field: "CpuRatio",
reason: "value must be inside range [0, 1]",
}
if !all {
return err
}
errors = append(errors, err)
}
if val := m.GetMemRatio(); val < 0 || val > 1 {
err := HostLoadValidationError{
field: "MemRatio",
reason: "value must be inside range [0, 1]",
}
if !all {
return err
}
errors = append(errors, err)
}
if val := m.GetDiskRatio(); val < 0 || val > 1 {
err := HostLoadValidationError{
field: "DiskRatio",
reason: "value must be inside range [0, 1]",
}
if !all {
return err
}
errors = append(errors, err)
}
if len(errors) > 0 {
return HostLoadMultiError(errors)
}
return nil
}
// HostLoadMultiError is an error wrapping multiple validation errors returned
// by HostLoad.ValidateAll() if the designated constraints aren't met.
type HostLoadMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m HostLoadMultiError) Error() string {
var msgs []string
for _, err := range m {
msgs = append(msgs, err.Error())
}
return strings.Join(msgs, "; ")
}
// AllErrors returns a list of validation violation errors.
func (m HostLoadMultiError) AllErrors() []error { return m }
// HostLoadValidationError is the validation error returned by
// HostLoad.Validate if the designated constraints aren't met.
type HostLoadValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e HostLoadValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e HostLoadValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e HostLoadValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e HostLoadValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e HostLoadValidationError) ErrorName() string { return "HostLoadValidationError" }
// Error satisfies the builtin error interface
func (e HostLoadValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sHostLoad.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = HostLoadValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = HostLoadValidationError{}
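A short usage sketch of the HostLoad validation methods defined above, assuming the v1.0.3 import path d7y.io/api/pkg/apis/common/v1 (package v1) shown elsewhere in this diff; the ratio values are deliberately invalid.

package main

import (
    "fmt"

    v1 "d7y.io/api/pkg/apis/common/v1"
)

func main() {
    // Ratios must be inside [0, 1]; two of the three fields violate that.
    load := &v1.HostLoad{CpuRatio: 1.5, MemRatio: 0.4, DiskRatio: -0.1}

    // Validate stops at the first violation (CpuRatio).
    fmt.Println(load.Validate())

    // ValidateAll collects every violation into a HostLoadMultiError.
    if err := load.ValidateAll(); err != nil {
        for _, e := range err.(v1.HostLoadMultiError).AllErrors() {
            fmt.Println(e)
        }
    }
}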
// Validate checks the field values on PieceTaskRequest with the rules defined
// in the proto definition for this message. If any rules are violated, the
// first error encountered is returned, or nil if there are no violations.
@ -905,217 +1035,3 @@ var _ interface {
Cause() error
ErrorName() string
} = PiecePacketValidationError{}
// Validate checks the field values on Host with the rules defined in the proto
// definition for this message. If any rules are violated, the first error
// encountered is returned, or nil if there are no violations.
func (m *Host) Validate() error {
return m.validate(false)
}
// ValidateAll checks the field values on Host with the rules defined in the
// proto definition for this message. If any rules are violated, the result is
// a list of violation errors wrapped in HostMultiError, or nil if none found.
func (m *Host) ValidateAll() error {
return m.validate(true)
}
func (m *Host) validate(all bool) error {
if m == nil {
return nil
}
var errors []error
if utf8.RuneCountInString(m.GetId()) < 1 {
err := HostValidationError{
field: "Id",
reason: "value length must be at least 1 runes",
}
if !all {
return err
}
errors = append(errors, err)
}
if utf8.RuneCountInString(m.GetIp()) < 1 {
err := HostValidationError{
field: "Ip",
reason: "value length must be at least 1 runes",
}
if !all {
return err
}
errors = append(errors, err)
}
if err := m._validateHostname(m.GetHostname()); err != nil {
err = HostValidationError{
field: "Hostname",
reason: "value must be a valid hostname",
cause: err,
}
if !all {
return err
}
errors = append(errors, err)
}
if val := m.GetPort(); val < 1024 || val >= 65535 {
err := HostValidationError{
field: "Port",
reason: "value must be inside range [1024, 65535)",
}
if !all {
return err
}
errors = append(errors, err)
}
if val := m.GetDownloadPort(); val < 1024 || val >= 65535 {
err := HostValidationError{
field: "DownloadPort",
reason: "value must be inside range [1024, 65535)",
}
if !all {
return err
}
errors = append(errors, err)
}
if m.GetLocation() != "" {
if utf8.RuneCountInString(m.GetLocation()) < 1 {
err := HostValidationError{
field: "Location",
reason: "value length must be at least 1 runes",
}
if !all {
return err
}
errors = append(errors, err)
}
}
if m.GetIdc() != "" {
if utf8.RuneCountInString(m.GetIdc()) < 1 {
err := HostValidationError{
field: "Idc",
reason: "value length must be at least 1 runes",
}
if !all {
return err
}
errors = append(errors, err)
}
}
if len(errors) > 0 {
return HostMultiError(errors)
}
return nil
}
func (m *Host) _validateHostname(host string) error {
s := strings.ToLower(strings.TrimSuffix(host, "."))
if len(host) > 253 {
return errors.New("hostname cannot exceed 253 characters")
}
for _, part := range strings.Split(s, ".") {
if l := len(part); l == 0 || l > 63 {
return errors.New("hostname part must be non-empty and cannot exceed 63 characters")
}
if part[0] == '-' {
return errors.New("hostname parts cannot begin with hyphens")
}
if part[len(part)-1] == '-' {
return errors.New("hostname parts cannot end with hyphens")
}
for _, r := range part {
if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' {
return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r))
}
}
}
return nil
}
// HostMultiError is an error wrapping multiple validation errors returned by
// Host.ValidateAll() if the designated constraints aren't met.
type HostMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m HostMultiError) Error() string {
var msgs []string
for _, err := range m {
msgs = append(msgs, err.Error())
}
return strings.Join(msgs, "; ")
}
// AllErrors returns a list of validation violation errors.
func (m HostMultiError) AllErrors() []error { return m }
// HostValidationError is the validation error returned by Host.Validate if the
// designated constraints aren't met.
type HostValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e HostValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e HostValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e HostValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e HostValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e HostValidationError) ErrorName() string { return "HostValidationError" }
// Error satisfies the builtin error interface
func (e HostValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sHost.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = HostValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = HostValidationError{}
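Similarly, a sketch of the Host rules above on the main side, assuming the import path d7y.io/api/v2/pkg/apis/common/v1 (package common); the field values are illustrative and chosen to trip the hostname and port constraints.

package main

import (
    "fmt"

    common "d7y.io/api/v2/pkg/apis/common/v1"
)

func main() {
    host := &common.Host{
        Id:           "host-1",
        Ip:           "192.168.0.10",
        Hostname:     "-bad-hostname", // leading hyphen violates the hostname rule
        Port:         80,              // outside the allowed [1024, 65535) range
        DownloadPort: 8001,
    }

    // ValidateAll reports both the hostname and the port violations.
    if err := host.ValidateAll(); err != nil {
        for _, e := range err.(common.HostMultiError).AllErrors() {
            fmt.Println(e)
        }
    }
}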

View File

@ -20,18 +20,17 @@ package common;
import "validate/validate.proto";
option go_package = "d7y.io/api/v2/pkg/apis/common/v1;common";
option go_package = "d7y.io/api/pkg/apis/common/v1";
// Code represents the code of the grpc api.
enum Code {
enum Code{
X_UNSPECIFIED = 0;
// Success code 200-299.
// success code 200-299
Success = 200;
// Framework can not find server node.
// framework can not find server node
ServerUnavailable = 500;
// Common response error 1000-1999.
// Client can be migrated to another scheduler/CDN.
// common response error 1000-1999
// client can be migrated to another scheduler/CDN
ResourceLacked = 1000;
BackToSourceAborted = 1001;
BadRequest = 1400;
@ -39,71 +38,63 @@ enum Code {
UnknownError = 1500;
RequestTimeOut = 1504;
// Client response error 4000-4999.
// client response error 4000-4999
ClientError = 4000;
// Get piece task from other peer error.
ClientPieceRequestFail = 4001;
// Wait scheduler response timeout.
ClientScheduleTimeout = 4002;
ClientPieceRequestFail = 4001; // get piece task from other peer error
ClientScheduleTimeout = 4002; // wait scheduler response timeout
ClientContextCanceled = 4003;
// When target peer downloads from source slowly, should wait.
ClientWaitPieceReady = 4004;
ClientWaitPieceReady = 4004; // when target peer downloads from source slowly, should wait
ClientPieceDownloadFail = 4005;
ClientRequestLimitFail = 4006;
ClientConnectionError = 4007;
ClientBackSourceError = 4008;
ClientPieceNotFound = 4404;
// Scheduler response error 5000-5999.
// scheduler response error 5000-5999
SchedError = 5000;
// Client should try to download from source.
SchedNeedBackSource = 5001;
// Client should disconnect from scheduler.
SchedPeerGone = 5002;
// Peer not found in scheduler.
SchedPeerNotFound = 5004;
// Report piece.
SchedPeerPieceResultReportFail = 5005;
// Task status is fail.
SchedTaskStatusError = 5006;
// Task should be reregister.
SchedReregister = 5007;
// Task should be forbidden.
SchedForbidden = 5008;
SchedNeedBackSource = 5001; // client should try to download from source
SchedPeerGone = 5002; // client should disconnect from scheduler
SchedPeerNotFound = 5004; // peer not found in scheduler
SchedPeerPieceResultReportFail = 5005; // report piece
SchedTaskStatusError = 5006; // task status is fail
// CDN system response error 6000-6999.
// cdnsystem response error 6000-6999
CDNTaskRegistryFail = 6001;
CDNTaskNotFound = 6404;
// Manager response error 7000-7999.
// manager response error 7000-7999
InvalidResourceType = 7001;
}
// PieceStyle represents the style of piece.
enum PieceStyle {
enum PieceStyle{
PLAIN = 0;
}
// SizeScope represents size scope of task.
enum SizeScope {
// NORMAL task has pieces is more than one piece.
enum SizeScope{
// size > one piece size
NORMAL = 0;
// SMALL task's content length is more than 128 byte and has only one piece.
// 128 byte < size <= one piece size and be plain type
SMALL = 1;
// TINY task's content length is less than 128 byte.
// size <= 128 byte and be plain type
TINY = 2;
}
// EMPTY task's content length is equal to zero.
EMPTY = 3;
// UNKNOW task has invalid size scope.
UNKNOW = 4;
}
// Pattern represents pattern of task.
enum Pattern{
// Default pattern, scheduler will use all p2p node
// include dfdaemon and seed peers.
P2P = 0;
// Seed peer pattern, scheduler will use only seed peers.
SEED_PEER = 1;
// Source pattern, scheduler will say back source
// when there is no available peer in p2p.
SOURCE = 2;
}
enum TaskType {
enum TaskType{
// Normal is normal type of task,
// normal task is a normal p2p task.
Normal = 0;
@ -118,131 +109,79 @@ enum TaskType {
DfStore = 2;
}
// Priority represents priority of application.
enum Priority {
// LEVEL0 has no special meaning for scheduler.
LEVEL0 = 0;
// LEVEL1 represents the download task is forbidden,
// and an error code is returned during the registration.
LEVEL1 = 1;
// LEVEL2 represents when the task is downloaded for the first time,
// allow peers to download from the other peers,
// but not back-to-source. When the task is not downloaded for
// the first time, it is scheduled normally.
LEVEL2 = 2;
// LEVEL3 represents when the task is downloaded for the first time,
// the normal peer is first to download back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL3 = 3;
// LEVEL4 represents when the task is downloaded for the first time,
// the weak peer is first triggered to back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL4 = 4;
// LEVEL5 represents when the task is downloaded for the first time,
// the strong peer is first triggered to back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL5 = 5;
// LEVEL6 represents when the task is downloaded for the first time,
// the super peer is first triggered to back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL6 = 6;
}
// GrpcDfError represents error of the grpc.
message GrpcDfError {
Code code = 1;
string message = 2;
}
// UrlMeta describes url meta info.
message UrlMeta {
// Digest checks integrity of url content, for example md5:xxx or sha256:yyy.
string digest = 1 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128})$", ignore_empty:true}];
// URL tag identifies different task for same url, conflict with digest.
message UrlMeta{
// digest checks integrity of url content, for example md5:xxx or sha256:yyy
string digest = 1 [(validate.rules).string = {pattern: "^(md5)|(sha256):[A-Fa-f0-9]+$", ignore_empty:true}];
// url tag identifies different task for same url, conflict with digest
string tag = 2;
// Content range for url.
// content range for url
string range = 3 [(validate.rules).string = {pattern: "^[0-9]+-[0-9]*$", ignore_empty:true}];
// Filter url used to generate task id.
// filter url used to generate task id
string filter = 4;
// Other url header infos.
// other url header infos
map<string, string> header = 5;
// Application.
// application
string application = 6;
// Priority.
Priority priority = 7;
}
// PieceTaskRequest represents request of PieceTask.
message PieceTaskRequest {
message HostLoad{
// cpu usage
float cpu_ratio = 1 [(validate.rules).float = {gte: 0, lte: 1}];
// memory usage
float mem_ratio = 2 [(validate.rules).float = {gte: 0, lte: 1}];
// disk space usage
float disk_ratio = 3 [(validate.rules).float = {gte: 0, lte: 1}];
}
message PieceTaskRequest{
string task_id = 1 [(validate.rules).string.min_len = 1];
string src_pid = 2 [(validate.rules).string.min_len = 1];
string dst_pid = 3 [(validate.rules).string.min_len = 1];
// Piece number.
// piece number
uint32 start_num = 4 [(validate.rules).uint32.gte = 0];
// Expected piece count, limit = 0 represent request pieces as many shards as possible.
// expected piece count, limit = 0 represent request pieces as many shards as possible
uint32 limit = 5 [(validate.rules).uint32.gte = 0];
}
// Piece metadata.
message PieceInfo {
// If piece_num is less than zero, it means to start report piece flag.
message PieceInfo{
// piece_num < 0 represent start report piece flag
int32 piece_num = 1;
uint64 range_start = 2 [(validate.rules).uint64.gte = 0];
uint32 range_size = 3 [(validate.rules).uint32.gte = 0];
string piece_md5 = 4 [(validate.rules).string = {pattern:"([a-f\\d]{32}|[A-F\\d]{32}|[a-f\\d]{16}|[A-F\\d]{16})", ignore_empty:true}];
uint64 piece_offset = 5 [(validate.rules).uint64.gte = 0];
PieceStyle piece_style = 6;
// Cost of the downloading.
// total time(millisecond) consumed
uint64 download_cost = 7 [(validate.rules).uint64.gte = 0];
}
// ExtendAttribute is extend attribute.
message ExtendAttribute {
// Task response header, eg: HTTP Response Header.
message ExtendAttribute{
// task response header, eg: HTTP Response Header
map<string, string> header = 1;
// Task response code, eg: HTTP Status Code.
// task response code, eg: HTTP Status Code
int32 status_code = 2;
// Task response status, eg: HTTP Status.
// task response status, eg: HTTP Status
string status = 3;
}
// Piece metadata.
message PiecePacket {
message PiecePacket{
string task_id = 2 [(validate.rules).string.min_len = 1];
string dst_pid = 3 [(validate.rules).string.min_len = 1];
// Address of the remote peer.
// ip:port
string dst_addr = 4 [(validate.rules).string.min_len = 1];
repeated PieceInfo piece_infos = 5;
// Total piece count for url, total_piece represent total piece is unknown.
// total piece count for url, total_piece represent total piece is unknown
int32 total_piece = 6;
// If content_length is less than zero, it means content length is unknown.
// content_length < 0 represent content length is unknown
int64 content_length = 7;
// Sha256 code of all piece md5.
// sha256 code of all piece md5
string piece_md5_sign = 8;
// Task extend attribute.
// task extend attribute
ExtendAttribute extend_attribute = 9;
}
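To show how PieceTaskRequest and PiecePacket travel over the Seeder service from earlier in this diff, a minimal client-side sketch follows; the seeder address and the task and peer IDs are placeholders, and the import paths follow the main side.

package main

import (
    "context"
    "fmt"

    cdnsystem "d7y.io/api/v2/pkg/apis/cdnsystem/v1"
    common "d7y.io/api/v2/pkg/apis/common/v1"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    conn, err := grpc.Dial("seeder.example.com:8003", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    client := cdnsystem.NewSeederClient(conn)
    // Ask the seeder for up to 4 pieces of a task, starting at piece 0.
    packet, err := client.GetPieceTasks(context.Background(), &common.PieceTaskRequest{
        TaskId:   "example-task-id",
        SrcPid:   "src-peer-id",
        DstPid:   "dst-peer-id",
        StartNum: 0,
        Limit:    4,
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(packet.TotalPiece, packet.ContentLength, len(packet.PieceInfos))
}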
// Host metadata.
message Host {
// Host id.
string id = 1 [(validate.rules).string.min_len = 1];
// Host ip.
string ip = 2 [(validate.rules).string.min_len = 1];
// Peer hostname.
string hostname = 3 [(validate.rules).string.hostname = true];
// Port of grpc service.
int32 port = 4 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Port of download server.
int32 download_port = 5 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Host location, eg: area|country|province|city.
string location = 7 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
// IDC where the peer host is located.
string idc = 8 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,519 +0,0 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package common.v2;
import "validate/validate.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
option go_package = "d7y.io/api/v2/pkg/apis/common/v2;common";
// SizeScope represents size scope of task.
enum SizeScope {
// NORMAL task has pieces is more than one piece.
NORMAL = 0;
// SMALL task's content length is more than 128 byte and has only one piece.
SMALL = 1;
// TINY task's content length is less than 128 byte.
TINY = 2;
// EMPTY task's content length is equal to zero.
EMPTY = 3;
// UNKNOW task has invalid size scope.
UNKNOW = 4;
}
// TaskType represents type of task.
enum TaskType {
// STANDARD is standard type of task, it can download from source, remote peer and
// local peer(local cache). When the standard task is never downloaded in the
// P2P cluster, dfdaemon will download the task from the source. When the standard
// task is downloaded in the P2P cluster, dfdaemon will download the task from
// the remote peer or local peer(local cache).
STANDARD = 0;
// PERSISTENT is persistent type of task, it can import file and export file in P2P cluster.
// When the persistent task is imported into the P2P cluster, dfdaemon will store
// the task in the peer's disk and copy multiple replicas to remote peers to
// prevent data loss.
PERSISTENT = 1;
// PERSISTENT_CACHE is persistent cache type of task, it can import file and export file in P2P cluster.
// When the persistent cache task is imported into the P2P cluster, dfdaemon will store
// the task in the peer's disk and copy multiple replicas to remote peers to prevent data loss.
// When the expiration time is reached, task will be deleted in the P2P cluster.
PERSISTENT_CACHE = 2;
}
// TrafficType represents type of traffic.
enum TrafficType {
// BACK_TO_SOURCE is to download traffic from the source.
BACK_TO_SOURCE = 0;
// REMOTE_PEER is to download traffic from the remote peer.
REMOTE_PEER = 1;
// LOCAL_PEER is to download traffic from the local peer.
LOCAL_PEER = 2;
}
// Priority represents priority of application.
enum Priority {
// LEVEL0 has no special meaning for scheduler.
LEVEL0 = 0;
// LEVEL1 represents the download task is forbidden,
// and an error code is returned during the registration.
LEVEL1 = 1;
// LEVEL2 represents when the task is downloaded for the first time,
// allow peers to download from the other peers,
// but not back-to-source. When the task is not downloaded for
// the first time, it is scheduled normally.
LEVEL2 = 2;
// LEVEL3 represents when the task is downloaded for the first time,
// the normal peer is first to download back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL3 = 3;
// LEVEL4 represents when the task is downloaded for the first time,
// the weak peer is first triggered to back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL4 = 4;
// LEVEL5 represents when the task is downloaded for the first time,
// the strong peer is first triggered to back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL5 = 5;
// LEVEL6 represents when the task is downloaded for the first time,
// the super peer is first triggered to back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL6 = 6;
}
// Peer metadata.
message Peer {
// Peer id.
string id = 1 [(validate.rules).string.min_len = 1];
// Range is url range of request.
optional Range range = 2;
// Peer priority.
Priority priority = 3 [(validate.rules).enum.defined_only = true];
// Pieces of peer.
repeated Piece pieces = 4 [(validate.rules).repeated = {min_items: 1, ignore_empty: true}];
// Peer downloads costs time.
google.protobuf.Duration cost = 5 [(validate.rules).duration.required = true];
// Peer state.
string state = 6 [(validate.rules).string.min_len = 1];
// Task info.
Task task = 7 [(validate.rules).message.required = true];
// Host info.
Host host = 8 [(validate.rules).message.required = true];
// NeedBackToSource needs downloaded from source.
bool need_back_to_source = 9;
// Peer create time.
google.protobuf.Timestamp created_at = 10 [(validate.rules).timestamp.required = true];
// Peer update time.
google.protobuf.Timestamp updated_at = 11 [(validate.rules).timestamp.required = true];
}
// PersistentCachePeer metadata.
message PersistentCachePeer {
// Peer id.
string id = 1 [(validate.rules).string.min_len = 1];
// Persistent represents whether the persistent cache peer is persistent.
// If the persistent cache peer is persistent, the persistent cache peer will
// not be deleted when dfdaemon runs garbage collection. It is only deleted
// when the task is deleted by the user.
bool persistent = 2;
// Peer downloads costs time.
google.protobuf.Duration cost = 3 [(validate.rules).duration.required = true];
// Peer state.
string state = 4 [(validate.rules).string.min_len = 1];
// Task info.
PersistentCacheTask task = 5 [(validate.rules).message.required = true];
// Host info.
Host host = 6 [(validate.rules).message.required = true];
// Peer create time.
google.protobuf.Timestamp created_at = 7 [(validate.rules).timestamp.required = true];
// Peer update time.
google.protobuf.Timestamp updated_at = 8 [(validate.rules).timestamp.required = true];
}
// Task metadata.
message Task {
// Task id.
string id = 1 [(validate.rules).string.min_len = 1];
// Task type.
TaskType type = 2 [(validate.rules).enum.defined_only = true];
// Download url.
string url = 3 [(validate.rules).string.uri = true];
// Verifies task data integrity after download using a digest. Supports CRC32, SHA256, and SHA512 algorithms.
// Format: `<algorithm>:<hash>`, e.g., `crc32:xxx`, `sha256:yyy`, `sha512:zzz`.
// Returns an error if the computed digest mismatches the expected value.
//
// Performance
// Digest calculation increases processing time. Enable only when data integrity verification is critical.
optional string digest = 4 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]+)$", ignore_empty: true}];
// URL tag identifies different task for same url.
optional string tag = 5;
// Application of task.
optional string application = 6;
// Filtered query params to generate the task id.
// When filter is ["Signature", "Expires", "ns"], for example:
// http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
// will generate the same task id.
// Default value includes the filtered query params of s3, gcs, oss, obs, cos.
repeated string filtered_query_params = 7;
// Task request headers.
map<string, string> request_header = 8;
// Task content length.
uint64 content_length = 9;
// Task piece count.
uint32 piece_count = 10;
// Task size scope.
SizeScope size_scope = 11;
// Pieces of task.
repeated Piece pieces = 12 [(validate.rules).repeated = {min_items: 1, ignore_empty: true}];
// Task state.
string state = 13 [(validate.rules).string.min_len = 1];
// Task peer count.
uint32 peer_count = 14;
// Task contains available peer.
bool has_available_peer = 15;
// Task create time.
google.protobuf.Timestamp created_at = 16 [(validate.rules).timestamp.required = true];
// Task update time.
google.protobuf.Timestamp updated_at = 17 [(validate.rules).timestamp.required = true];
}
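The filtered_query_params comment above is easiest to see with a tiny illustration. The sketch below only reproduces the URL-normalization idea, not Dragonfly's actual task-id computation, which takes more inputs than the URL.

package main

import (
    "fmt"
    "net/url"
    "slices"
)

// stripFilteredParams removes the filtered query parameters from a URL, which
// is the normalization step the comment above describes; the real task-id
// calculation in Dragonfly is not reproduced here.
func stripFilteredParams(raw string, filtered []string) string {
    u, _ := url.Parse(raw)
    q := u.Query()
    for key := range q {
        if slices.Contains(filtered, key) {
            q.Del(key)
        }
    }
    u.RawQuery = q.Encode()
    return u.String()
}

func main() {
    filtered := []string{"Signature", "Expires", "ns"}
    a := stripFilteredParams("http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io", filtered)
    b := stripFilteredParams("http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io", filtered)
    fmt.Println(a == b) // true: both normalize to http://example.com/xyz
}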
// PersistentCacheTask metadata.
message PersistentCacheTask {
// Task id.
string id = 1 [(validate.rules).string.min_len = 1];
// Replica count of the persistent cache task. The persistent cache task will
// not be deleted when dfdaemon runs garbage collection. It is only deleted
// when the task is deleted by the user.
uint64 persistent_replica_count = 2 [(validate.rules).uint64.gte = 1];
// Current replica count of the persistent cache task. The persistent cache task
// will not be deleted when dfdaemon runs garbage collection. It is only deleted
// when the task is deleted by the user.
uint64 current_persistent_replica_count = 3;
// Current replica count of the cache task. If cache task is not persistent,
// the persistent cache task will be deleted when dfdaemon runs garbage collection.
uint64 current_replica_count = 4;
// Tag is used to distinguish different persistent cache tasks.
optional string tag = 5;
// Application of task.
optional string application = 6;
// Task piece length.
uint64 piece_length = 7 [(validate.rules).uint64.gte = 4194304];
// Task content length.
uint64 content_length = 8;
// Task piece count.
uint32 piece_count = 9;
// Task state.
string state = 10 [(validate.rules).string.min_len = 1];
// TTL of the persistent cache task.
google.protobuf.Duration ttl = 11 [(validate.rules).duration.required = true];
// Task create time.
google.protobuf.Timestamp created_at = 12 [(validate.rules).timestamp.required = true];
// Task update time.
google.protobuf.Timestamp updated_at = 13 [(validate.rules).timestamp.required = true];
}
// Host metadata.
message Host {
// Host id.
string id = 1 [(validate.rules).string.min_len = 1];
// Host type.
uint32 type = 2 [(validate.rules).uint32.lte = 3];
// Hostname.
string hostname = 3 [(validate.rules).string.min_len = 1];
// Host ip.
string ip = 4 [(validate.rules).string.ip = true];
// Port of grpc service.
int32 port = 5 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Port of download server.
int32 download_port = 6 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Host OS.
string os = 7;
// Host platform.
string platform = 8;
// Host platform family.
string platform_family = 9;
// Host platform version.
string platform_version = 10;
// Host kernel version.
string kernel_version = 11;
// CPU Stat.
optional CPU cpu = 12;
// Memory Stat.
optional Memory memory = 13;
// Network Stat.
optional Network network = 14;
// Disk Stat.
optional Disk disk = 15;
// Build information.
optional Build build = 16;
// ID of the cluster to which the host belongs.
uint64 scheduler_cluster_id = 17;
// Disable shared data for other peers.
bool disable_shared = 18;
}
// CPU Stat.
message CPU {
// Number of logical cores in the system.
uint32 logical_count = 1;
// Number of physical cores in the system
uint32 physical_count = 2;
// Percent calculates the percentage of cpu used.
double percent = 3 [(validate.rules).double.gte = 0];
// Calculates the percentage of cpu used by process.
double process_percent = 4 [(validate.rules).double.gte = 0];
// CPUTimes contains the amounts of time the CPU has spent performing different kinds of work.
optional CPUTimes times = 5;
}
// CPUTimes contains the amounts of time the CPU has spent performing different
// kinds of work. Time units are in seconds.
message CPUTimes {
// CPU time of user.
double user = 1 [(validate.rules).double.gte = 0];
// CPU time of system.
double system = 2 [(validate.rules).double.gte = 0];
// CPU time of idle.
double idle = 3 [(validate.rules).double.gte = 0];
// CPU time of nice.
double nice = 4 [(validate.rules).double.gte = 0];
// CPU time of iowait.
double iowait = 5 [(validate.rules).double.gte = 0];
// CPU time of irq.
double irq = 6 [(validate.rules).double.gte = 0];
// CPU time of softirq.
double softirq = 7 [(validate.rules).double.gte = 0];
// CPU time of steal.
double steal = 8 [(validate.rules).double.gte = 0];
// CPU time of guest.
double guest = 9 [(validate.rules).double.gte = 0];
// CPU time of guest nice.
double guest_nice = 10 [(validate.rules).double.gte = 0];
}
// Memory Stat.
message Memory {
// Total amount of RAM on this system.
uint64 total = 1;
// RAM available for programs to allocate.
uint64 available = 2;
// RAM used by programs.
uint64 used = 3;
// Percentage of RAM used by programs.
double used_percent = 4 [(validate.rules).double = {gte: 0, lte: 100}];
// Calculates the percentage of memory used by process.
double process_used_percent = 5 [(validate.rules).double = {gte: 0, lte: 100}];
// This is the kernel's notion of free memory.
uint64 free = 6;
}
// Network Stat.
message Network {
// Return count of tcp connections opened and status is ESTABLISHED.
uint32 tcp_connection_count = 1;
// Return count of upload tcp connections opened and status is ESTABLISHED.
uint32 upload_tcp_connection_count = 2;
// Location path(area|country|province|city|...).
optional string location = 3;
// IDC where the peer host is located
optional string idc = 4;
// Download rate is received bytes per second.
uint64 download_rate = 5;
// Download rate is the limit of received bytes per second.
uint64 download_rate_limit = 6;
// Upload rate is transmitted bytes per second.
uint64 upload_rate = 7;
// Upload rate is the limit of transmitted bytes per second.
uint64 upload_rate_limit = 8;
}
// Disk Stat.
message Disk {
// Total amount of disk on the data path of dragonfly.
uint64 total = 1;
// Free amount of disk on the data path of dragonfly.
uint64 free = 2;
// Used amount of disk on the data path of dragonfly.
uint64 used = 3;
// Used percent of disk on the data path of dragonfly directory.
double used_percent = 4 [(validate.rules).double = {gte: 0, lte: 100}];
// Total amount of inodes on the data path of dragonfly directory.
uint64 inodes_total = 5;
// Used amount of inodes on the data path of dragonfly directory.
uint64 inodes_used = 6;
// Free amount of inodes on the data path of dragonfly directory.
uint64 inodes_free = 7;
// Used percent of inodes on the data path of dragonfly directory.
double inodes_used_percent = 8 [(validate.rules).double = {gte: 0, lte: 100}];
// Disk read bandwidth, in bytes per second.
uint64 read_bandwidth = 9;
// Disk write bandwidth, in bytes per second.
uint64 write_bandwidth = 10;
}
// Build information.
message Build {
// Git version.
string git_version = 1;
// Git commit.
optional string git_commit = 2;
// Golang version.
optional string go_version = 3;
// Rust version.
optional string rust_version = 4;
// Build platform.
optional string platform = 5;
}
// Download information.
message Download {
// Download url.
string url = 1 [(validate.rules).string.uri = true];
// Digest of the task content, for example blake3:xxx or sha256:yyy.
optional string digest = 2 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]+)$", ignore_empty: true}];
// Range is url range of request. If protocol is http, range
// will set in request header. If protocol is others, range
// will set in range field.
optional Range range = 3;
// Task type.
TaskType type = 4 [(validate.rules).enum.defined_only = true];
// URL tag identifies different task for same url.
optional string tag = 5;
// Application of task.
optional string application = 6;
// Peer priority.
Priority priority = 7 [(validate.rules).enum.defined_only = true];
// Query params filtered out when generating the task id.
// For example, when the filter is ["Signature", "Expires", "ns"]:
// http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
// will generate the same task id.
// The default value includes the filtered query params of s3, gcs, oss, obs, cos.
repeated string filtered_query_params = 8;
// Task request headers.
map<string, string> request_header = 9;
// Task piece length; the value must be greater than or equal to 4194304 (4 MiB).
optional uint64 piece_length = 10 [(validate.rules).uint64 = {gte: 4194304, ignore_empty: true}];
// Output path for the downloaded file. If output_path is set, the downloaded file will be saved to the specified path.
// Dfdaemon will try to create a hard link to the output path before starting the download. If hard link creation fails,
// it will copy the file to the output path after the download is completed.
// For more details refer to https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md.
optional string output_path = 11 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
// Download timeout.
optional google.protobuf.Duration timeout = 12;
// Dfdaemon cannot download the task from the source if disable_back_to_source is true.
bool disable_back_to_source = 13;
// The scheduler schedules the task to download from the source if need_back_to_source is true.
bool need_back_to_source = 14;
// certificate_chain is the client certificate chain in DER format, used by the backend client when downloading back-to-source.
repeated bytes certificate_chain = 15;
// Prefetch pre-downloads all pieces of the task when the download task request is a range request.
bool prefetch = 16;
// Object storage protocol information.
optional ObjectStorage object_storage = 17;
// HDFS protocol information.
optional HDFS hdfs = 18;
// is_prefetch is the flag to indicate whether the request is a prefetch request.
bool is_prefetch = 19;
// need_piece_content is the flag to indicate whether the response needs to return piece content.
bool need_piece_content = 20;
// load_to_cache indicates whether the content downloaded will be stored in the cache storage.
// Cache storage is designed to store downloaded piece content from preheat tasks,
// allowing other peers to access the content from memory instead of disk.
bool load_to_cache = 21;
// force_hard_link is the flag to indicate whether the download file must be hard linked to the output path.
// For more details refer to https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md.
bool force_hard_link = 22;
// content_for_calculating_task_id is the content used to calculate the task id.
// If content_for_calculating_task_id is set, use its value to calculate the task ID.
// Otherwise, calculate the task ID based on url, piece_length, tag, application, and filtered_query_params.
optional string content_for_calculating_task_id = 23;
// remote_ip represents the IP address of the client initiating the download request.
// For proxy requests, it is set to the IP address of the request source.
// For dfget requests, it is set to the IP address of the dfget.
optional string remote_ip = 24 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
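The comments on filtered_query_params and content_for_calculating_task_id describe how the task id is derived. The sketch below illustrates that idea only: the sha256 hash, the field separator, and the sorted re-encoding of the remaining query params are assumptions for illustration, not Dragonfly's actual algorithm.

package main

import (
	"crypto/sha256"
	"fmt"
	"net/url"
	"sort"
	"strings"
)

// taskID sketches how a task id could be derived from url, piece_length, tag,
// application, and filtered_query_params. Field order, separator, and the use
// of sha256 are illustrative assumptions.
func taskID(rawURL string, pieceLength uint64, tag, application string, filtered []string) string {
	u, err := url.Parse(rawURL)
	if err == nil {
		q := u.Query()
		for _, k := range filtered {
			q.Del(k) // filtered query params do not affect the task id
		}
		// Re-encode the remaining params in a stable order.
		keys := make([]string, 0, len(q))
		for k := range q {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		var parts []string
		for _, k := range keys {
			parts = append(parts, k+"="+strings.Join(q[k], ","))
		}
		u.RawQuery = strings.Join(parts, "&")
		rawURL = u.String()
	}
	sum := sha256.Sum256([]byte(fmt.Sprintf("%s|%d|%s|%s", rawURL, pieceLength, tag, application)))
	return fmt.Sprintf("%x", sum)
}

func main() {
	a := taskID("http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io", 4194304, "", "", []string{"Signature", "Expires", "ns"})
	b := taskID("http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io", 4194304, "", "", []string{"Signature", "Expires", "ns"})
	fmt.Println(a == b) // true: filtered params are ignored
}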
// Object Storage related information.
message ObjectStorage {
// Region is the region of the object storage service.
optional string region = 1 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
// Endpoint is the endpoint of the object storage service.
optional string endpoint = 2 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
// Access key ID used to access the object storage service.
optional string access_key_id = 3 [(validate.rules).string.min_len = 1];
// Access key secret used to access the object storage service.
optional string access_key_secret = 4 [(validate.rules).string.min_len = 1];
// Session token used to access the S3 storage service.
optional string session_token = 5 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
// Local path to credential file for Google Cloud Storage service OAuth2 authentication.
optional string credential_path = 6 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
// Predefined ACL used for the Google Cloud Storage service.
optional string predefined_acl = 7 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
// HDFS related information.
message HDFS {
// Delegation token for Web HDFS operator.
optional string delegation_token = 1 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
// Range represents download range.
message Range {
// Start of range.
uint64 start = 1;
// Length of range.
uint64 length = 2;
}
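As noted on the range field of Download, HTTP requests carry the range in a request header. A small stdlib-only sketch of mapping this Range message onto a standard HTTP Range header (the helper name is illustrative):

package main

import (
	"fmt"
	"net/http"
)

// rangeHeader converts a start/length pair, as carried by the Range message,
// into a standard HTTP Range header value, e.g. "bytes=0-4194303".
// It assumes length > 0.
func rangeHeader(start, length uint64) string {
	return fmt.Sprintf("bytes=%d-%d", start, start+length-1)
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "http://example.com/xyz", nil)
	req.Header.Set("Range", rangeHeader(0, 4194304))
	fmt.Println(req.Header.Get("Range")) // bytes=0-4194303
}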
// Piece represents information of piece.
message Piece {
// Piece number.
uint32 number = 1;
// Parent peer id.
optional string parent_id = 2 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
// Piece offset.
uint64 offset = 3;
// Piece length.
uint64 length = 4;
// Digest of the piece data, for example blake3:xxx or sha256:yyy.
string digest = 5 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]+)$", ignore_empty: true}];
// Piece content.
optional bytes content = 6 [(validate.rules).bytes = {min_len: 1, ignore_empty: true}];
// Traffic type.
optional TrafficType traffic_type = 7;
// Time cost of downloading the piece.
google.protobuf.Duration cost = 8 [(validate.rules).duration.required = true];
// Piece create time.
google.protobuf.Timestamp created_at = 9 [(validate.rules).timestamp.required = true];
}
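The digest fields in Download and Piece are validated against the same pattern. A stdlib-only sketch that produces a piece digest in the accepted sha256 form and checks it against that pattern; this is illustrative, not dfdaemon's verification code.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"regexp"
)

// digestPattern is the same pattern the digest fields above are validated
// against (md5, sha1, sha256, sha512, blake3, or crc32 prefixes).
var digestPattern = regexp.MustCompile(`^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]+)$`)

// pieceDigest computes a sha256 digest of piece content in the
// "sha256:<hex>" form accepted by the pattern.
func pieceDigest(content []byte) string {
	sum := sha256.Sum256(content)
	return "sha256:" + hex.EncodeToString(sum[:])
}

func main() {
	d := pieceDigest([]byte("piece content"))
	fmt.Println(d, digestPattern.MatchString(d)) // sha256:... true
}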

File diff suppressed because it is too large

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: pkg/apis/dfdaemon/v1/dfdaemon.proto
package dfdaemon
package v1
import (
"bytes"
@ -17,8 +17,6 @@ import (
"unicode/utf8"
"google.golang.org/protobuf/types/known/anypb"
common "d7y.io/api/v2/pkg/apis/common/v1"
)
// ensure the imports are used
@ -35,10 +33,11 @@ var (
_ = (*mail.Address)(nil)
_ = anypb.Any{}
_ = sort.Sort
_ = common.TaskType(0)
)
// define the regex for a UUID once up-front
var _dfdaemon_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
// Validate checks the field values on DownRequest with the rules defined in
// the proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
@ -61,7 +60,17 @@ func (m *DownRequest) validate(all bool) error {
var errors []error
// no validation rules for Uuid
if err := m._validateUuid(m.GetUuid()); err != nil {
err = DownRequestValidationError{
field: "Uuid",
reason: "value must be a valid UUID",
cause: err,
}
if !all {
return err
}
errors = append(errors, err)
}
if uri, err := url.Parse(m.GetUrl()); err != nil {
err = DownRequestValidationError{
@ -119,17 +128,6 @@ func (m *DownRequest) validate(all bool) error {
// no validation rules for DisableBackSource
if m.GetUrlMeta() == nil {
err := DownRequestValidationError{
field: "UrlMeta",
reason: "value is required",
}
if !all {
return err
}
errors = append(errors, err)
}
if all {
switch v := interface{}(m.GetUrlMeta()).(type) {
case interface{ ValidateAll() error }:
@ -159,6 +157,23 @@ func (m *DownRequest) validate(all bool) error {
}
}
if m.GetPattern() != "" {
if _, ok := _DownRequest_Pattern_InLookup[m.GetPattern()]; !ok {
err := DownRequestValidationError{
field: "Pattern",
reason: "value must be in list [p2p seed-peer source]",
}
if !all {
return err
}
errors = append(errors, err)
}
}
// no validation rules for Callsystem
// no validation rules for Uid
// no validation rules for Gid
@ -174,6 +189,14 @@ func (m *DownRequest) validate(all bool) error {
return nil
}
func (m *DownRequest) _validateUuid(uuid string) error {
if matched := _dfdaemon_uuidPattern.MatchString(uuid); !matched {
return errors.New("invalid uuid format")
}
return nil
}
// DownRequestMultiError is an error wrapping multiple validation errors
// returned by DownRequest.ValidateAll() if the designated constraints aren't met.
type DownRequestMultiError []error
@ -244,6 +267,12 @@ var _ interface {
ErrorName() string
} = DownRequestValidationError{}
var _DownRequest_Pattern_InLookup = map[string]struct{}{
"p2p": {},
"seed-peer": {},
"source": {},
}
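Together with the GetPattern() != "" guard above, this lookup table implements the in + ignore_empty rule added to DownRequest. A standalone sketch of the same check, separate from the generated code:

package main

import "fmt"

// validPattern mirrors the generated check above: an empty pattern is
// accepted (ignore_empty), otherwise the value must be in the lookup set.
func validPattern(p string) bool {
	if p == "" {
		return true
	}
	allowed := map[string]struct{}{
		"p2p":       {},
		"seed-peer": {},
		"source":    {},
	}
	_, ok := allowed[p]
	return ok
}

func main() {
	fmt.Println(validPattern(""), validPattern("p2p"), validPattern("cdn")) // true true false
}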
// Validate checks the field values on DownResult with the rules defined in the
// proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
@ -413,17 +442,6 @@ func (m *StatTaskRequest) validate(all bool) error {
errors = append(errors, err)
}
if m.GetUrlMeta() == nil {
err := StatTaskRequestValidationError{
field: "UrlMeta",
reason: "value is required",
}
if !all {
return err
}
errors = append(errors, err)
}
if all {
switch v := interface{}(m.GetUrlMeta()).(type) {
case interface{ ValidateAll() error }:
@ -566,17 +584,6 @@ func (m *ImportTaskRequest) validate(all bool) error {
errors = append(errors, err)
}
if m.GetUrlMeta() == nil {
err := ImportTaskRequestValidationError{
field: "UrlMeta",
reason: "value is required",
}
if !all {
return err
}
errors = append(errors, err)
}
if all {
switch v := interface{}(m.GetUrlMeta()).(type) {
case interface{ ValidateAll() error }:
@ -765,17 +772,6 @@ func (m *ExportTaskRequest) validate(all bool) error {
errors = append(errors, err)
}
if m.GetUrlMeta() == nil {
err := ExportTaskRequestValidationError{
field: "UrlMeta",
reason: "value is required",
}
if !all {
return err
}
errors = append(errors, err)
}
if all {
switch v := interface{}(m.GetUrlMeta()).(type) {
case interface{ ValidateAll() error }:
@ -805,6 +801,8 @@ func (m *ExportTaskRequest) validate(all bool) error {
}
}
// no validation rules for Callsystem
// no validation rules for Uid
// no validation rules for Gid
@ -924,17 +922,6 @@ func (m *DeleteTaskRequest) validate(all bool) error {
errors = append(errors, err)
}
if m.GetUrlMeta() == nil {
err := DeleteTaskRequestValidationError{
field: "UrlMeta",
reason: "value is required",
}
if !all {
return err
}
errors = append(errors, err)
}
if all {
switch v := interface{}(m.GetUrlMeta()).(type) {
case interface{ ValidateAll() error }:
@ -1043,242 +1030,3 @@ var _ interface {
Cause() error
ErrorName() string
} = DeleteTaskRequestValidationError{}
// Validate checks the field values on PeerMetadata with the rules defined in
// the proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *PeerMetadata) Validate() error {
return m.validate(false)
}
// ValidateAll checks the field values on PeerMetadata with the rules defined
// in the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in PeerMetadataMultiError, or
// nil if none found.
func (m *PeerMetadata) ValidateAll() error {
return m.validate(true)
}
func (m *PeerMetadata) validate(all bool) error {
if m == nil {
return nil
}
var errors []error
// no validation rules for TaskId
// no validation rules for PeerId
// no validation rules for State
if len(errors) > 0 {
return PeerMetadataMultiError(errors)
}
return nil
}
// PeerMetadataMultiError is an error wrapping multiple validation errors
// returned by PeerMetadata.ValidateAll() if the designated constraints aren't met.
type PeerMetadataMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m PeerMetadataMultiError) Error() string {
var msgs []string
for _, err := range m {
msgs = append(msgs, err.Error())
}
return strings.Join(msgs, "; ")
}
// AllErrors returns a list of validation violation errors.
func (m PeerMetadataMultiError) AllErrors() []error { return m }
// PeerMetadataValidationError is the validation error returned by
// PeerMetadata.Validate if the designated constraints aren't met.
type PeerMetadataValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e PeerMetadataValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e PeerMetadataValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e PeerMetadataValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e PeerMetadataValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e PeerMetadataValidationError) ErrorName() string { return "PeerMetadataValidationError" }
// Error satisfies the builtin error interface
func (e PeerMetadataValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sPeerMetadata.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = PeerMetadataValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = PeerMetadataValidationError{}
// Validate checks the field values on PeerExchangeData with the rules defined
// in the proto definition for this message. If any rules are violated, the
// first error encountered is returned, or nil if there are no violations.
func (m *PeerExchangeData) Validate() error {
return m.validate(false)
}
// ValidateAll checks the field values on PeerExchangeData with the rules
// defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// PeerExchangeDataMultiError, or nil if none found.
func (m *PeerExchangeData) ValidateAll() error {
return m.validate(true)
}
func (m *PeerExchangeData) validate(all bool) error {
if m == nil {
return nil
}
var errors []error
for idx, item := range m.GetPeerMetadatas() {
_, _ = idx, item
if all {
switch v := interface{}(item).(type) {
case interface{ ValidateAll() error }:
if err := v.ValidateAll(); err != nil {
errors = append(errors, PeerExchangeDataValidationError{
field: fmt.Sprintf("PeerMetadatas[%v]", idx),
reason: "embedded message failed validation",
cause: err,
})
}
case interface{ Validate() error }:
if err := v.Validate(); err != nil {
errors = append(errors, PeerExchangeDataValidationError{
field: fmt.Sprintf("PeerMetadatas[%v]", idx),
reason: "embedded message failed validation",
cause: err,
})
}
}
} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return PeerExchangeDataValidationError{
field: fmt.Sprintf("PeerMetadatas[%v]", idx),
reason: "embedded message failed validation",
cause: err,
}
}
}
}
if len(errors) > 0 {
return PeerExchangeDataMultiError(errors)
}
return nil
}
// PeerExchangeDataMultiError is an error wrapping multiple validation errors
// returned by PeerExchangeData.ValidateAll() if the designated constraints
// aren't met.
type PeerExchangeDataMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m PeerExchangeDataMultiError) Error() string {
var msgs []string
for _, err := range m {
msgs = append(msgs, err.Error())
}
return strings.Join(msgs, "; ")
}
// AllErrors returns a list of validation violation errors.
func (m PeerExchangeDataMultiError) AllErrors() []error { return m }
// PeerExchangeDataValidationError is the validation error returned by
// PeerExchangeData.Validate if the designated constraints aren't met.
type PeerExchangeDataValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e PeerExchangeDataValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e PeerExchangeDataValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e PeerExchangeDataValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e PeerExchangeDataValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e PeerExchangeDataValidationError) ErrorName() string { return "PeerExchangeDataValidationError" }
// Error satisfies the builtin error interface
func (e PeerExchangeDataValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sPeerExchangeData.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = PeerExchangeDataValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = PeerExchangeDataValidationError{}

View File

@ -22,12 +22,11 @@ import "pkg/apis/common/v1/common.proto";
import "google/protobuf/empty.proto";
import "validate/validate.proto";
option go_package = "d7y.io/api/v2/pkg/apis/dfdaemon/v1;dfdaemon";
option go_package = "d7y.io/api/pkg/apis/dfdaemon/v1";
message DownRequest{
// Identifies one download; the framework will fill it automatically.
// Deprecated
string uuid = 1;
string uuid = 1 [(validate.rules).string.uuid = true];
// Download file from the url, not only for http.
string url = 2 [(validate.rules).string.uri = true];
// Pieces will be written to output path directly,
@ -40,7 +39,11 @@ message DownRequest{
// Disable back-to-source.
bool disable_back_source = 6;
// URL meta info.
common.UrlMeta url_meta = 7 [(validate.rules).message.required = true];
common.UrlMeta url_meta = 7;
// Pattern has p2p/seed-peer/source, default is p2p.
string pattern = 8 [(validate.rules).string = {in:["p2p", "seed-peer", "source"], ignore_empty:true}];
// Call system.
string callsystem = 9;
// User id.
int64 uid = 10;
// Group id.
@ -68,7 +71,7 @@ message StatTaskRequest{
// Download url.
string url = 1 [(validate.rules).string.min_len = 1];
// URL meta info.
common.UrlMeta url_meta = 2 [(validate.rules).message.required = true];
common.UrlMeta url_meta = 2;
// Check local cache only.
bool local_only = 3;
}
@ -77,7 +80,7 @@ message ImportTaskRequest{
// Download url.
string url = 1 [(validate.rules).string.min_len = 1];
// URL meta info.
common.UrlMeta url_meta = 2 [(validate.rules).message.required = true];
common.UrlMeta url_meta = 2;
// File to be imported.
string path = 3 [(validate.rules).string.min_len = 1];
// Task type.
@ -94,7 +97,9 @@ message ExportTaskRequest{
// Rate limit in bytes per second.
double limit = 4 [(validate.rules).double.gte = 0];
// URL meta info.
common.UrlMeta url_meta = 5 [(validate.rules).message.required = true];
common.UrlMeta url_meta = 5;
// Call system.
string callsystem = 6;
// User id.
int64 uid = 7;
// Group id.
@ -107,28 +112,7 @@ message DeleteTaskRequest{
// Download url.
string url = 1 [(validate.rules).string.min_len = 1];
// URL meta info.
common.UrlMeta url_meta = 2 [(validate.rules).message.required = true];
}
// PeerState represents state of peer task.
enum PeerState {
Unknown = 0;
Running = 1;
Success = 2;
Failed = 3;
Deleted = 4;
}
// PeerMetadata represents metadata of a peer task.
message PeerMetadata {
string task_id = 1;
string peer_id = 2;
PeerState state = 3;
}
// PeerExchangeData represents metadata of peer tasks.
message PeerExchangeData {
repeated PeerMetadata PeerMetadatas= 1;
common.UrlMeta url_meta = 2;
}
// Daemon Client RPC Service
@ -149,8 +133,4 @@ service Daemon{
rpc ExportTask(ExportTaskRequest) returns(google.protobuf.Empty);
// Delete file from P2P cache system
rpc DeleteTask(DeleteTaskRequest) returns(google.protobuf.Empty);
// LeaveHost releases host in scheduler.
rpc LeaveHost(google.protobuf.Empty)returns(google.protobuf.Empty);
// Exchange peers between daemons
rpc PeerExchange(stream PeerExchangeData)returns(stream PeerExchangeData);
}
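Given the rules above, a hedged sketch of what the generated validators report for a request that violates the uuid and pattern constraints. The import path follows the go_package option shown above; other fields carry rules of their own, so ValidateAll is used for the second check to collect every violation.

package main

import (
	"fmt"

	dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"
)

func main() {
	req := &dfdaemonv1.DownRequest{
		Uuid:    "not-a-uuid",
		Url:     "http://example.com/xyz",
		Pattern: "cdn",
	}
	// Validate returns the first violation; the uuid rule is checked first,
	// so this is expected to report the invalid Uuid.
	fmt.Println(req.Validate())

	req.Uuid = "b63f1a1c-3c9f-4c2e-9a6d-6f3a1a2b3c4d"
	// ValidateAll collects every violation; the pattern rule
	// ("value must be in list [p2p seed-peer source]") is among them.
	fmt.Println(req.ValidateAll())
}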

View File

@ -1,541 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.21.6
// source: pkg/apis/dfdaemon/v1/dfdaemon.proto
package dfdaemon
import (
context "context"
v1 "d7y.io/api/v2/pkg/apis/common/v1"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// DaemonClient is the client API for Daemon service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type DaemonClient interface {
// Trigger client to download file
Download(ctx context.Context, in *DownRequest, opts ...grpc.CallOption) (Daemon_DownloadClient, error)
// Get piece tasks from other peers
GetPieceTasks(ctx context.Context, in *v1.PieceTaskRequest, opts ...grpc.CallOption) (*v1.PiecePacket, error)
// Check daemon health
CheckHealth(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Sync piece tasks with other peers
SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (Daemon_SyncPieceTasksClient, error)
// Check if given task exists in P2P cache system
StatTask(ctx context.Context, in *StatTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Import the given file into P2P cache system
ImportTask(ctx context.Context, in *ImportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Export or download file from P2P cache system
ExportTask(ctx context.Context, in *ExportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Delete file from P2P cache system
DeleteTask(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// LeaveHost releases host in scheduler.
LeaveHost(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Exchange peers between daemons
PeerExchange(ctx context.Context, opts ...grpc.CallOption) (Daemon_PeerExchangeClient, error)
}
type daemonClient struct {
cc grpc.ClientConnInterface
}
func NewDaemonClient(cc grpc.ClientConnInterface) DaemonClient {
return &daemonClient{cc}
}
func (c *daemonClient) Download(ctx context.Context, in *DownRequest, opts ...grpc.CallOption) (Daemon_DownloadClient, error) {
stream, err := c.cc.NewStream(ctx, &Daemon_ServiceDesc.Streams[0], "/dfdaemon.Daemon/Download", opts...)
if err != nil {
return nil, err
}
x := &daemonDownloadClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type Daemon_DownloadClient interface {
Recv() (*DownResult, error)
grpc.ClientStream
}
type daemonDownloadClient struct {
grpc.ClientStream
}
func (x *daemonDownloadClient) Recv() (*DownResult, error) {
m := new(DownResult)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *daemonClient) GetPieceTasks(ctx context.Context, in *v1.PieceTaskRequest, opts ...grpc.CallOption) (*v1.PiecePacket, error) {
out := new(v1.PiecePacket)
err := c.cc.Invoke(ctx, "/dfdaemon.Daemon/GetPieceTasks", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *daemonClient) CheckHealth(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/dfdaemon.Daemon/CheckHealth", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *daemonClient) SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (Daemon_SyncPieceTasksClient, error) {
stream, err := c.cc.NewStream(ctx, &Daemon_ServiceDesc.Streams[1], "/dfdaemon.Daemon/SyncPieceTasks", opts...)
if err != nil {
return nil, err
}
x := &daemonSyncPieceTasksClient{stream}
return x, nil
}
type Daemon_SyncPieceTasksClient interface {
Send(*v1.PieceTaskRequest) error
Recv() (*v1.PiecePacket, error)
grpc.ClientStream
}
type daemonSyncPieceTasksClient struct {
grpc.ClientStream
}
func (x *daemonSyncPieceTasksClient) Send(m *v1.PieceTaskRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *daemonSyncPieceTasksClient) Recv() (*v1.PiecePacket, error) {
m := new(v1.PiecePacket)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *daemonClient) StatTask(ctx context.Context, in *StatTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/dfdaemon.Daemon/StatTask", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *daemonClient) ImportTask(ctx context.Context, in *ImportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/dfdaemon.Daemon/ImportTask", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *daemonClient) ExportTask(ctx context.Context, in *ExportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/dfdaemon.Daemon/ExportTask", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *daemonClient) DeleteTask(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/dfdaemon.Daemon/DeleteTask", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *daemonClient) LeaveHost(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/dfdaemon.Daemon/LeaveHost", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *daemonClient) PeerExchange(ctx context.Context, opts ...grpc.CallOption) (Daemon_PeerExchangeClient, error) {
stream, err := c.cc.NewStream(ctx, &Daemon_ServiceDesc.Streams[2], "/dfdaemon.Daemon/PeerExchange", opts...)
if err != nil {
return nil, err
}
x := &daemonPeerExchangeClient{stream}
return x, nil
}
type Daemon_PeerExchangeClient interface {
Send(*PeerExchangeData) error
Recv() (*PeerExchangeData, error)
grpc.ClientStream
}
type daemonPeerExchangeClient struct {
grpc.ClientStream
}
func (x *daemonPeerExchangeClient) Send(m *PeerExchangeData) error {
return x.ClientStream.SendMsg(m)
}
func (x *daemonPeerExchangeClient) Recv() (*PeerExchangeData, error) {
m := new(PeerExchangeData)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// DaemonServer is the server API for Daemon service.
// All implementations should embed UnimplementedDaemonServer
// for forward compatibility
type DaemonServer interface {
// Trigger client to download file
Download(*DownRequest, Daemon_DownloadServer) error
// Get piece tasks from other peers
GetPieceTasks(context.Context, *v1.PieceTaskRequest) (*v1.PiecePacket, error)
// Check daemon health
CheckHealth(context.Context, *emptypb.Empty) (*emptypb.Empty, error)
// Sync piece tasks with other peers
SyncPieceTasks(Daemon_SyncPieceTasksServer) error
// Check if given task exists in P2P cache system
StatTask(context.Context, *StatTaskRequest) (*emptypb.Empty, error)
// Import the given file into P2P cache system
ImportTask(context.Context, *ImportTaskRequest) (*emptypb.Empty, error)
// Export or download file from P2P cache system
ExportTask(context.Context, *ExportTaskRequest) (*emptypb.Empty, error)
// Delete file from P2P cache system
DeleteTask(context.Context, *DeleteTaskRequest) (*emptypb.Empty, error)
// LeaveHost releases host in scheduler.
LeaveHost(context.Context, *emptypb.Empty) (*emptypb.Empty, error)
// Exchange peers between daemons
PeerExchange(Daemon_PeerExchangeServer) error
}
// UnimplementedDaemonServer should be embedded to have forward compatible implementations.
type UnimplementedDaemonServer struct {
}
func (UnimplementedDaemonServer) Download(*DownRequest, Daemon_DownloadServer) error {
return status.Errorf(codes.Unimplemented, "method Download not implemented")
}
func (UnimplementedDaemonServer) GetPieceTasks(context.Context, *v1.PieceTaskRequest) (*v1.PiecePacket, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetPieceTasks not implemented")
}
func (UnimplementedDaemonServer) CheckHealth(context.Context, *emptypb.Empty) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method CheckHealth not implemented")
}
func (UnimplementedDaemonServer) SyncPieceTasks(Daemon_SyncPieceTasksServer) error {
return status.Errorf(codes.Unimplemented, "method SyncPieceTasks not implemented")
}
func (UnimplementedDaemonServer) StatTask(context.Context, *StatTaskRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method StatTask not implemented")
}
func (UnimplementedDaemonServer) ImportTask(context.Context, *ImportTaskRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method ImportTask not implemented")
}
func (UnimplementedDaemonServer) ExportTask(context.Context, *ExportTaskRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method ExportTask not implemented")
}
func (UnimplementedDaemonServer) DeleteTask(context.Context, *DeleteTaskRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteTask not implemented")
}
func (UnimplementedDaemonServer) LeaveHost(context.Context, *emptypb.Empty) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method LeaveHost not implemented")
}
func (UnimplementedDaemonServer) PeerExchange(Daemon_PeerExchangeServer) error {
return status.Errorf(codes.Unimplemented, "method PeerExchange not implemented")
}
// UnsafeDaemonServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to DaemonServer will
// result in compilation errors.
type UnsafeDaemonServer interface {
mustEmbedUnimplementedDaemonServer()
}
func RegisterDaemonServer(s grpc.ServiceRegistrar, srv DaemonServer) {
s.RegisterService(&Daemon_ServiceDesc, srv)
}
func _Daemon_Download_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(DownRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(DaemonServer).Download(m, &daemonDownloadServer{stream})
}
type Daemon_DownloadServer interface {
Send(*DownResult) error
grpc.ServerStream
}
type daemonDownloadServer struct {
grpc.ServerStream
}
func (x *daemonDownloadServer) Send(m *DownResult) error {
return x.ServerStream.SendMsg(m)
}
func _Daemon_GetPieceTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(v1.PieceTaskRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DaemonServer).GetPieceTasks(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/dfdaemon.Daemon/GetPieceTasks",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DaemonServer).GetPieceTasks(ctx, req.(*v1.PieceTaskRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Daemon_CheckHealth_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(emptypb.Empty)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DaemonServer).CheckHealth(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/dfdaemon.Daemon/CheckHealth",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DaemonServer).CheckHealth(ctx, req.(*emptypb.Empty))
}
return interceptor(ctx, in, info, handler)
}
func _Daemon_SyncPieceTasks_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(DaemonServer).SyncPieceTasks(&daemonSyncPieceTasksServer{stream})
}
type Daemon_SyncPieceTasksServer interface {
Send(*v1.PiecePacket) error
Recv() (*v1.PieceTaskRequest, error)
grpc.ServerStream
}
type daemonSyncPieceTasksServer struct {
grpc.ServerStream
}
func (x *daemonSyncPieceTasksServer) Send(m *v1.PiecePacket) error {
return x.ServerStream.SendMsg(m)
}
func (x *daemonSyncPieceTasksServer) Recv() (*v1.PieceTaskRequest, error) {
m := new(v1.PieceTaskRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _Daemon_StatTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StatTaskRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DaemonServer).StatTask(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/dfdaemon.Daemon/StatTask",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DaemonServer).StatTask(ctx, req.(*StatTaskRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Daemon_ImportTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ImportTaskRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DaemonServer).ImportTask(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/dfdaemon.Daemon/ImportTask",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DaemonServer).ImportTask(ctx, req.(*ImportTaskRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Daemon_ExportTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ExportTaskRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DaemonServer).ExportTask(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/dfdaemon.Daemon/ExportTask",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DaemonServer).ExportTask(ctx, req.(*ExportTaskRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Daemon_DeleteTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteTaskRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DaemonServer).DeleteTask(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/dfdaemon.Daemon/DeleteTask",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DaemonServer).DeleteTask(ctx, req.(*DeleteTaskRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Daemon_LeaveHost_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(emptypb.Empty)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DaemonServer).LeaveHost(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/dfdaemon.Daemon/LeaveHost",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DaemonServer).LeaveHost(ctx, req.(*emptypb.Empty))
}
return interceptor(ctx, in, info, handler)
}
func _Daemon_PeerExchange_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(DaemonServer).PeerExchange(&daemonPeerExchangeServer{stream})
}
type Daemon_PeerExchangeServer interface {
Send(*PeerExchangeData) error
Recv() (*PeerExchangeData, error)
grpc.ServerStream
}
type daemonPeerExchangeServer struct {
grpc.ServerStream
}
func (x *daemonPeerExchangeServer) Send(m *PeerExchangeData) error {
return x.ServerStream.SendMsg(m)
}
func (x *daemonPeerExchangeServer) Recv() (*PeerExchangeData, error) {
m := new(PeerExchangeData)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Daemon_ServiceDesc is the grpc.ServiceDesc for Daemon service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Daemon_ServiceDesc = grpc.ServiceDesc{
ServiceName: "dfdaemon.Daemon",
HandlerType: (*DaemonServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetPieceTasks",
Handler: _Daemon_GetPieceTasks_Handler,
},
{
MethodName: "CheckHealth",
Handler: _Daemon_CheckHealth_Handler,
},
{
MethodName: "StatTask",
Handler: _Daemon_StatTask_Handler,
},
{
MethodName: "ImportTask",
Handler: _Daemon_ImportTask_Handler,
},
{
MethodName: "ExportTask",
Handler: _Daemon_ExportTask_Handler,
},
{
MethodName: "DeleteTask",
Handler: _Daemon_DeleteTask_Handler,
},
{
MethodName: "LeaveHost",
Handler: _Daemon_LeaveHost_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "Download",
Handler: _Daemon_Download_Handler,
ServerStreams: true,
},
{
StreamName: "SyncPieceTasks",
Handler: _Daemon_SyncPieceTasks_Handler,
ServerStreams: true,
ClientStreams: true,
},
{
StreamName: "PeerExchange",
Handler: _Daemon_PeerExchange_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "pkg/apis/dfdaemon/v1/dfdaemon.proto",
}
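A minimal sketch of consuming the Daemon service above through the generated client from the main branch. The address and dial options are illustrative; NewDaemonClient and CheckHealth are the generated APIs shown in this file.

package main

import (
	"context"
	"log"

	dfdaemon "d7y.io/api/v2/pkg/apis/dfdaemon/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Address of a running dfdaemon; illustrative only.
	conn, err := grpc.Dial("127.0.0.1:65000", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := dfdaemon.NewDaemonClient(conn)
	if _, err := client.CheckHealth(context.Background(), &emptypb.Empty{}); err != nil {
		log.Fatalf("daemon unhealthy: %v", err)
	}
	log.Println("daemon is healthy")
}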

View File

@ -1,10 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: ../dfdaemon_grpc.pb.go
//
// Generated by this command:
//
// mockgen -destination dfdaemon_mock.go -source ../dfdaemon_grpc.pb.go -package mocks
//
// Source: ../dfdaemon.pb.go
// Package mocks is a generated GoMock package.
package mocks
@ -13,9 +8,9 @@ import (
context "context"
reflect "reflect"
common "d7y.io/api/v2/pkg/apis/common/v1"
dfdaemon "d7y.io/api/v2/pkg/apis/dfdaemon/v1"
gomock "go.uber.org/mock/gomock"
v1 "d7y.io/api/pkg/apis/common/v1"
v10 "d7y.io/api/pkg/apis/dfdaemon/v1"
gomock "github.com/golang/mock/gomock"
grpc "google.golang.org/grpc"
metadata "google.golang.org/grpc/metadata"
emptypb "google.golang.org/protobuf/types/known/emptypb"
@ -25,7 +20,6 @@ import (
type MockDaemonClient struct {
ctrl *gomock.Controller
recorder *MockDaemonClientMockRecorder
isgomock struct{}
}
// MockDaemonClientMockRecorder is the mock recorder for MockDaemonClient.
@ -48,7 +42,7 @@ func (m *MockDaemonClient) EXPECT() *MockDaemonClientMockRecorder {
// CheckHealth mocks base method.
func (m *MockDaemonClient) CheckHealth(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
@ -59,16 +53,16 @@ func (m *MockDaemonClient) CheckHealth(ctx context.Context, in *emptypb.Empty, o
}
// CheckHealth indicates an expected call of CheckHealth.
func (mr *MockDaemonClientMockRecorder) CheckHealth(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockDaemonClientMockRecorder) CheckHealth(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckHealth", reflect.TypeOf((*MockDaemonClient)(nil).CheckHealth), varargs...)
}
// DeleteTask mocks base method.
func (m *MockDaemonClient) DeleteTask(ctx context.Context, in *dfdaemon.DeleteTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
func (m *MockDaemonClient) DeleteTask(ctx context.Context, in *v10.DeleteTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
@ -79,36 +73,36 @@ func (m *MockDaemonClient) DeleteTask(ctx context.Context, in *dfdaemon.DeleteTa
}
// DeleteTask indicates an expected call of DeleteTask.
func (mr *MockDaemonClientMockRecorder) DeleteTask(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockDaemonClientMockRecorder) DeleteTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTask", reflect.TypeOf((*MockDaemonClient)(nil).DeleteTask), varargs...)
}
// Download mocks base method.
func (m *MockDaemonClient) Download(ctx context.Context, in *dfdaemon.DownRequest, opts ...grpc.CallOption) (dfdaemon.Daemon_DownloadClient, error) {
func (m *MockDaemonClient) Download(ctx context.Context, in *v10.DownRequest, opts ...grpc.CallOption) (v10.Daemon_DownloadClient, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Download", varargs...)
ret0, _ := ret[0].(dfdaemon.Daemon_DownloadClient)
ret0, _ := ret[0].(v10.Daemon_DownloadClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Download indicates an expected call of Download.
func (mr *MockDaemonClientMockRecorder) Download(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockDaemonClientMockRecorder) Download(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockDaemonClient)(nil).Download), varargs...)
}
// ExportTask mocks base method.
func (m *MockDaemonClient) ExportTask(ctx context.Context, in *dfdaemon.ExportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
func (m *MockDaemonClient) ExportTask(ctx context.Context, in *v10.ExportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
@ -119,36 +113,36 @@ func (m *MockDaemonClient) ExportTask(ctx context.Context, in *dfdaemon.ExportTa
}
// ExportTask indicates an expected call of ExportTask.
func (mr *MockDaemonClientMockRecorder) ExportTask(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockDaemonClientMockRecorder) ExportTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportTask", reflect.TypeOf((*MockDaemonClient)(nil).ExportTask), varargs...)
}
// GetPieceTasks mocks base method.
func (m *MockDaemonClient) GetPieceTasks(ctx context.Context, in *common.PieceTaskRequest, opts ...grpc.CallOption) (*common.PiecePacket, error) {
func (m *MockDaemonClient) GetPieceTasks(ctx context.Context, in *v1.PieceTaskRequest, opts ...grpc.CallOption) (*v1.PiecePacket, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetPieceTasks", varargs...)
ret0, _ := ret[0].(*common.PiecePacket)
ret0, _ := ret[0].(*v1.PiecePacket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPieceTasks indicates an expected call of GetPieceTasks.
func (mr *MockDaemonClientMockRecorder) GetPieceTasks(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockDaemonClientMockRecorder) GetPieceTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockDaemonClient)(nil).GetPieceTasks), varargs...)
}
// ImportTask mocks base method.
func (m *MockDaemonClient) ImportTask(ctx context.Context, in *dfdaemon.ImportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
func (m *MockDaemonClient) ImportTask(ctx context.Context, in *v10.ImportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
@ -159,56 +153,16 @@ func (m *MockDaemonClient) ImportTask(ctx context.Context, in *dfdaemon.ImportTa
}
// ImportTask indicates an expected call of ImportTask.
func (mr *MockDaemonClientMockRecorder) ImportTask(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockDaemonClientMockRecorder) ImportTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportTask", reflect.TypeOf((*MockDaemonClient)(nil).ImportTask), varargs...)
}
// LeaveHost mocks base method.
func (m *MockDaemonClient) LeaveHost(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "LeaveHost", varargs...)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// LeaveHost indicates an expected call of LeaveHost.
func (mr *MockDaemonClientMockRecorder) LeaveHost(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveHost", reflect.TypeOf((*MockDaemonClient)(nil).LeaveHost), varargs...)
}
// PeerExchange mocks base method.
func (m *MockDaemonClient) PeerExchange(ctx context.Context, opts ...grpc.CallOption) (dfdaemon.Daemon_PeerExchangeClient, error) {
m.ctrl.T.Helper()
varargs := []any{ctx}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "PeerExchange", varargs...)
ret0, _ := ret[0].(dfdaemon.Daemon_PeerExchangeClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// PeerExchange indicates an expected call of PeerExchange.
func (mr *MockDaemonClientMockRecorder) PeerExchange(ctx any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerExchange", reflect.TypeOf((*MockDaemonClient)(nil).PeerExchange), varargs...)
}
// StatTask mocks base method.
func (m *MockDaemonClient) StatTask(ctx context.Context, in *dfdaemon.StatTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
func (m *MockDaemonClient) StatTask(ctx context.Context, in *v10.StatTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
@ -219,29 +173,29 @@ func (m *MockDaemonClient) StatTask(ctx context.Context, in *dfdaemon.StatTaskRe
}
// StatTask indicates an expected call of StatTask.
func (mr *MockDaemonClientMockRecorder) StatTask(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockDaemonClientMockRecorder) StatTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatTask", reflect.TypeOf((*MockDaemonClient)(nil).StatTask), varargs...)
}
// SyncPieceTasks mocks base method.
func (m *MockDaemonClient) SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (dfdaemon.Daemon_SyncPieceTasksClient, error) {
func (m *MockDaemonClient) SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (v10.Daemon_SyncPieceTasksClient, error) {
m.ctrl.T.Helper()
varargs := []any{ctx}
varargs := []interface{}{ctx}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "SyncPieceTasks", varargs...)
ret0, _ := ret[0].(dfdaemon.Daemon_SyncPieceTasksClient)
ret0, _ := ret[0].(v10.Daemon_SyncPieceTasksClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SyncPieceTasks indicates an expected call of SyncPieceTasks.
func (mr *MockDaemonClientMockRecorder) SyncPieceTasks(ctx any, opts ...any) *gomock.Call {
func (mr *MockDaemonClientMockRecorder) SyncPieceTasks(ctx interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx}, opts...)
varargs := append([]interface{}{ctx}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPieceTasks", reflect.TypeOf((*MockDaemonClient)(nil).SyncPieceTasks), varargs...)
}
@ -249,7 +203,6 @@ func (mr *MockDaemonClientMockRecorder) SyncPieceTasks(ctx any, opts ...any) *go
type MockDaemon_DownloadClient struct {
ctrl *gomock.Controller
recorder *MockDaemon_DownloadClientMockRecorder
isgomock struct{}
}
// MockDaemon_DownloadClientMockRecorder is the mock recorder for MockDaemon_DownloadClient.
@ -313,10 +266,10 @@ func (mr *MockDaemon_DownloadClientMockRecorder) Header() *gomock.Call {
}
// Recv mocks base method.
func (m *MockDaemon_DownloadClient) Recv() (*dfdaemon.DownResult, error) {
func (m *MockDaemon_DownloadClient) Recv() (*v10.DownResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*dfdaemon.DownResult)
ret0, _ := ret[0].(*v10.DownResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -328,7 +281,7 @@ func (mr *MockDaemon_DownloadClientMockRecorder) Recv() *gomock.Call {
}
// RecvMsg mocks base method.
func (m_2 *MockDaemon_DownloadClient) RecvMsg(m any) error {
func (m_2 *MockDaemon_DownloadClient) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
@ -336,13 +289,13 @@ func (m_2 *MockDaemon_DownloadClient) RecvMsg(m any) error {
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockDaemon_DownloadClientMockRecorder) RecvMsg(m any) *gomock.Call {
func (mr *MockDaemon_DownloadClientMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).RecvMsg), m)
}
// SendMsg mocks base method.
func (m_2 *MockDaemon_DownloadClient) SendMsg(m any) error {
func (m_2 *MockDaemon_DownloadClient) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
@ -350,7 +303,7 @@ func (m_2 *MockDaemon_DownloadClient) SendMsg(m any) error {
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockDaemon_DownloadClientMockRecorder) SendMsg(m any) *gomock.Call {
func (mr *MockDaemon_DownloadClientMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).SendMsg), m)
}
@ -373,7 +326,6 @@ func (mr *MockDaemon_DownloadClientMockRecorder) Trailer() *gomock.Call {
type MockDaemon_SyncPieceTasksClient struct {
ctrl *gomock.Controller
recorder *MockDaemon_SyncPieceTasksClientMockRecorder
isgomock struct{}
}
// MockDaemon_SyncPieceTasksClientMockRecorder is the mock recorder for MockDaemon_SyncPieceTasksClient.
@ -437,10 +389,10 @@ func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) Header() *gomock.Call {
}
// Recv mocks base method.
func (m *MockDaemon_SyncPieceTasksClient) Recv() (*common.PiecePacket, error) {
func (m *MockDaemon_SyncPieceTasksClient) Recv() (*v1.PiecePacket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*common.PiecePacket)
ret0, _ := ret[0].(*v1.PiecePacket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -452,7 +404,7 @@ func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) Recv() *gomock.Call {
}
// RecvMsg mocks base method.
func (m_2 *MockDaemon_SyncPieceTasksClient) RecvMsg(m any) error {
func (m_2 *MockDaemon_SyncPieceTasksClient) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
@ -460,13 +412,13 @@ func (m_2 *MockDaemon_SyncPieceTasksClient) RecvMsg(m any) error {
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) RecvMsg(m any) *gomock.Call {
func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockDaemon_SyncPieceTasksClient) Send(arg0 *common.PieceTaskRequest) error {
func (m *MockDaemon_SyncPieceTasksClient) Send(arg0 *v1.PieceTaskRequest) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
@ -474,13 +426,13 @@ func (m *MockDaemon_SyncPieceTasksClient) Send(arg0 *common.PieceTaskRequest) er
}
// Send indicates an expected call of Send.
func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) Send(arg0 any) *gomock.Call {
func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).Send), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockDaemon_SyncPieceTasksClient) SendMsg(m any) error {
func (m_2 *MockDaemon_SyncPieceTasksClient) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
@ -488,7 +440,7 @@ func (m_2 *MockDaemon_SyncPieceTasksClient) SendMsg(m any) error {
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) SendMsg(m any) *gomock.Call {
func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).SendMsg), m)
}
@ -507,149 +459,10 @@ func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) Trailer() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).Trailer))
}
// MockDaemon_PeerExchangeClient is a mock of Daemon_PeerExchangeClient interface.
type MockDaemon_PeerExchangeClient struct {
ctrl *gomock.Controller
recorder *MockDaemon_PeerExchangeClientMockRecorder
isgomock struct{}
}
// MockDaemon_PeerExchangeClientMockRecorder is the mock recorder for MockDaemon_PeerExchangeClient.
type MockDaemon_PeerExchangeClientMockRecorder struct {
mock *MockDaemon_PeerExchangeClient
}
// NewMockDaemon_PeerExchangeClient creates a new mock instance.
func NewMockDaemon_PeerExchangeClient(ctrl *gomock.Controller) *MockDaemon_PeerExchangeClient {
mock := &MockDaemon_PeerExchangeClient{ctrl: ctrl}
mock.recorder = &MockDaemon_PeerExchangeClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDaemon_PeerExchangeClient) EXPECT() *MockDaemon_PeerExchangeClientMockRecorder {
return m.recorder
}
// CloseSend mocks base method.
func (m *MockDaemon_PeerExchangeClient) CloseSend() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CloseSend")
ret0, _ := ret[0].(error)
return ret0
}
// CloseSend indicates an expected call of CloseSend.
func (mr *MockDaemon_PeerExchangeClientMockRecorder) CloseSend() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockDaemon_PeerExchangeClient)(nil).CloseSend))
}
// Context mocks base method.
func (m *MockDaemon_PeerExchangeClient) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockDaemon_PeerExchangeClientMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockDaemon_PeerExchangeClient)(nil).Context))
}
// Header mocks base method.
func (m *MockDaemon_PeerExchangeClient) Header() (metadata.MD, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Header")
ret0, _ := ret[0].(metadata.MD)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Header indicates an expected call of Header.
func (mr *MockDaemon_PeerExchangeClientMockRecorder) Header() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockDaemon_PeerExchangeClient)(nil).Header))
}
// Recv mocks base method.
func (m *MockDaemon_PeerExchangeClient) Recv() (*dfdaemon.PeerExchangeData, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*dfdaemon.PeerExchangeData)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Recv indicates an expected call of Recv.
func (mr *MockDaemon_PeerExchangeClientMockRecorder) Recv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockDaemon_PeerExchangeClient)(nil).Recv))
}
// RecvMsg mocks base method.
func (m_2 *MockDaemon_PeerExchangeClient) RecvMsg(m any) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockDaemon_PeerExchangeClientMockRecorder) RecvMsg(m any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_PeerExchangeClient)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockDaemon_PeerExchangeClient) Send(arg0 *dfdaemon.PeerExchangeData) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Send indicates an expected call of Send.
func (mr *MockDaemon_PeerExchangeClientMockRecorder) Send(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockDaemon_PeerExchangeClient)(nil).Send), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockDaemon_PeerExchangeClient) SendMsg(m any) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockDaemon_PeerExchangeClientMockRecorder) SendMsg(m any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_PeerExchangeClient)(nil).SendMsg), m)
}
// Trailer mocks base method.
func (m *MockDaemon_PeerExchangeClient) Trailer() metadata.MD {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Trailer")
ret0, _ := ret[0].(metadata.MD)
return ret0
}
// Trailer indicates an expected call of Trailer.
func (mr *MockDaemon_PeerExchangeClientMockRecorder) Trailer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockDaemon_PeerExchangeClient)(nil).Trailer))
}
// MockDaemonServer is a mock of DaemonServer interface.
type MockDaemonServer struct {
ctrl *gomock.Controller
recorder *MockDaemonServerMockRecorder
isgomock struct{}
}
// MockDaemonServerMockRecorder is the mock recorder for MockDaemonServer.
@ -679,13 +492,13 @@ func (m *MockDaemonServer) CheckHealth(arg0 context.Context, arg1 *emptypb.Empty
}
// CheckHealth indicates an expected call of CheckHealth.
func (mr *MockDaemonServerMockRecorder) CheckHealth(arg0, arg1 any) *gomock.Call {
func (mr *MockDaemonServerMockRecorder) CheckHealth(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckHealth", reflect.TypeOf((*MockDaemonServer)(nil).CheckHealth), arg0, arg1)
}
// DeleteTask mocks base method.
func (m *MockDaemonServer) DeleteTask(arg0 context.Context, arg1 *dfdaemon.DeleteTaskRequest) (*emptypb.Empty, error) {
func (m *MockDaemonServer) DeleteTask(arg0 context.Context, arg1 *v10.DeleteTaskRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteTask", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
@ -694,13 +507,13 @@ func (m *MockDaemonServer) DeleteTask(arg0 context.Context, arg1 *dfdaemon.Delet
}
// DeleteTask indicates an expected call of DeleteTask.
func (mr *MockDaemonServerMockRecorder) DeleteTask(arg0, arg1 any) *gomock.Call {
func (mr *MockDaemonServerMockRecorder) DeleteTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTask", reflect.TypeOf((*MockDaemonServer)(nil).DeleteTask), arg0, arg1)
}
// Download mocks base method.
func (m *MockDaemonServer) Download(arg0 *dfdaemon.DownRequest, arg1 dfdaemon.Daemon_DownloadServer) error {
func (m *MockDaemonServer) Download(arg0 *v10.DownRequest, arg1 v10.Daemon_DownloadServer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Download", arg0, arg1)
ret0, _ := ret[0].(error)
@ -708,13 +521,13 @@ func (m *MockDaemonServer) Download(arg0 *dfdaemon.DownRequest, arg1 dfdaemon.Da
}
// Download indicates an expected call of Download.
func (mr *MockDaemonServerMockRecorder) Download(arg0, arg1 any) *gomock.Call {
func (mr *MockDaemonServerMockRecorder) Download(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockDaemonServer)(nil).Download), arg0, arg1)
}
// ExportTask mocks base method.
func (m *MockDaemonServer) ExportTask(arg0 context.Context, arg1 *dfdaemon.ExportTaskRequest) (*emptypb.Empty, error) {
func (m *MockDaemonServer) ExportTask(arg0 context.Context, arg1 *v10.ExportTaskRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ExportTask", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
@ -723,28 +536,28 @@ func (m *MockDaemonServer) ExportTask(arg0 context.Context, arg1 *dfdaemon.Expor
}
// ExportTask indicates an expected call of ExportTask.
func (mr *MockDaemonServerMockRecorder) ExportTask(arg0, arg1 any) *gomock.Call {
func (mr *MockDaemonServerMockRecorder) ExportTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportTask", reflect.TypeOf((*MockDaemonServer)(nil).ExportTask), arg0, arg1)
}
// GetPieceTasks mocks base method.
func (m *MockDaemonServer) GetPieceTasks(arg0 context.Context, arg1 *common.PieceTaskRequest) (*common.PiecePacket, error) {
func (m *MockDaemonServer) GetPieceTasks(arg0 context.Context, arg1 *v1.PieceTaskRequest) (*v1.PiecePacket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPieceTasks", arg0, arg1)
ret0, _ := ret[0].(*common.PiecePacket)
ret0, _ := ret[0].(*v1.PiecePacket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPieceTasks indicates an expected call of GetPieceTasks.
func (mr *MockDaemonServerMockRecorder) GetPieceTasks(arg0, arg1 any) *gomock.Call {
func (mr *MockDaemonServerMockRecorder) GetPieceTasks(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockDaemonServer)(nil).GetPieceTasks), arg0, arg1)
}
// ImportTask mocks base method.
func (m *MockDaemonServer) ImportTask(arg0 context.Context, arg1 *dfdaemon.ImportTaskRequest) (*emptypb.Empty, error) {
func (m *MockDaemonServer) ImportTask(arg0 context.Context, arg1 *v10.ImportTaskRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImportTask", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
@ -753,42 +566,13 @@ func (m *MockDaemonServer) ImportTask(arg0 context.Context, arg1 *dfdaemon.Impor
}
// ImportTask indicates an expected call of ImportTask.
func (mr *MockDaemonServerMockRecorder) ImportTask(arg0, arg1 any) *gomock.Call {
func (mr *MockDaemonServerMockRecorder) ImportTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportTask", reflect.TypeOf((*MockDaemonServer)(nil).ImportTask), arg0, arg1)
}
// LeaveHost mocks base method.
func (m *MockDaemonServer) LeaveHost(arg0 context.Context, arg1 *emptypb.Empty) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LeaveHost", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// LeaveHost indicates an expected call of LeaveHost.
func (mr *MockDaemonServerMockRecorder) LeaveHost(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveHost", reflect.TypeOf((*MockDaemonServer)(nil).LeaveHost), arg0, arg1)
}
// PeerExchange mocks base method.
func (m *MockDaemonServer) PeerExchange(arg0 dfdaemon.Daemon_PeerExchangeServer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PeerExchange", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// PeerExchange indicates an expected call of PeerExchange.
func (mr *MockDaemonServerMockRecorder) PeerExchange(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerExchange", reflect.TypeOf((*MockDaemonServer)(nil).PeerExchange), arg0)
}
// StatTask mocks base method.
func (m *MockDaemonServer) StatTask(arg0 context.Context, arg1 *dfdaemon.StatTaskRequest) (*emptypb.Empty, error) {
func (m *MockDaemonServer) StatTask(arg0 context.Context, arg1 *v10.StatTaskRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StatTask", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
@ -797,13 +581,13 @@ func (m *MockDaemonServer) StatTask(arg0 context.Context, arg1 *dfdaemon.StatTas
}
// StatTask indicates an expected call of StatTask.
func (mr *MockDaemonServerMockRecorder) StatTask(arg0, arg1 any) *gomock.Call {
func (mr *MockDaemonServerMockRecorder) StatTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatTask", reflect.TypeOf((*MockDaemonServer)(nil).StatTask), arg0, arg1)
}
// SyncPieceTasks mocks base method.
func (m *MockDaemonServer) SyncPieceTasks(arg0 dfdaemon.Daemon_SyncPieceTasksServer) error {
func (m *MockDaemonServer) SyncPieceTasks(arg0 v10.Daemon_SyncPieceTasksServer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SyncPieceTasks", arg0)
ret0, _ := ret[0].(error)
@ -811,52 +595,15 @@ func (m *MockDaemonServer) SyncPieceTasks(arg0 dfdaemon.Daemon_SyncPieceTasksSer
}
// SyncPieceTasks indicates an expected call of SyncPieceTasks.
func (mr *MockDaemonServerMockRecorder) SyncPieceTasks(arg0 any) *gomock.Call {
func (mr *MockDaemonServerMockRecorder) SyncPieceTasks(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPieceTasks", reflect.TypeOf((*MockDaemonServer)(nil).SyncPieceTasks), arg0)
}
// MockUnsafeDaemonServer is a mock of UnsafeDaemonServer interface.
type MockUnsafeDaemonServer struct {
ctrl *gomock.Controller
recorder *MockUnsafeDaemonServerMockRecorder
isgomock struct{}
}
// MockUnsafeDaemonServerMockRecorder is the mock recorder for MockUnsafeDaemonServer.
type MockUnsafeDaemonServerMockRecorder struct {
mock *MockUnsafeDaemonServer
}
// NewMockUnsafeDaemonServer creates a new mock instance.
func NewMockUnsafeDaemonServer(ctrl *gomock.Controller) *MockUnsafeDaemonServer {
mock := &MockUnsafeDaemonServer{ctrl: ctrl}
mock.recorder = &MockUnsafeDaemonServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockUnsafeDaemonServer) EXPECT() *MockUnsafeDaemonServerMockRecorder {
return m.recorder
}
// mustEmbedUnimplementedDaemonServer mocks base method.
func (m *MockUnsafeDaemonServer) mustEmbedUnimplementedDaemonServer() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "mustEmbedUnimplementedDaemonServer")
}
// mustEmbedUnimplementedDaemonServer indicates an expected call of mustEmbedUnimplementedDaemonServer.
func (mr *MockUnsafeDaemonServerMockRecorder) mustEmbedUnimplementedDaemonServer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedDaemonServer", reflect.TypeOf((*MockUnsafeDaemonServer)(nil).mustEmbedUnimplementedDaemonServer))
}
// MockDaemon_DownloadServer is a mock of Daemon_DownloadServer interface.
type MockDaemon_DownloadServer struct {
ctrl *gomock.Controller
recorder *MockDaemon_DownloadServerMockRecorder
isgomock struct{}
}
// MockDaemon_DownloadServerMockRecorder is the mock recorder for MockDaemon_DownloadServer.
@ -891,7 +638,7 @@ func (mr *MockDaemon_DownloadServerMockRecorder) Context() *gomock.Call {
}
// RecvMsg mocks base method.
func (m_2 *MockDaemon_DownloadServer) RecvMsg(m any) error {
func (m_2 *MockDaemon_DownloadServer) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
@ -899,13 +646,13 @@ func (m_2 *MockDaemon_DownloadServer) RecvMsg(m any) error {
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockDaemon_DownloadServerMockRecorder) RecvMsg(m any) *gomock.Call {
func (mr *MockDaemon_DownloadServerMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockDaemon_DownloadServer) Send(arg0 *dfdaemon.DownResult) error {
func (m *MockDaemon_DownloadServer) Send(arg0 *v10.DownResult) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
@ -913,7 +660,7 @@ func (m *MockDaemon_DownloadServer) Send(arg0 *dfdaemon.DownResult) error {
}
// Send indicates an expected call of Send.
func (mr *MockDaemon_DownloadServerMockRecorder) Send(arg0 any) *gomock.Call {
func (mr *MockDaemon_DownloadServerMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).Send), arg0)
}
@ -927,13 +674,13 @@ func (m *MockDaemon_DownloadServer) SendHeader(arg0 metadata.MD) error {
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockDaemon_DownloadServerMockRecorder) SendHeader(arg0 any) *gomock.Call {
func (mr *MockDaemon_DownloadServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockDaemon_DownloadServer) SendMsg(m any) error {
func (m_2 *MockDaemon_DownloadServer) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
@ -941,7 +688,7 @@ func (m_2 *MockDaemon_DownloadServer) SendMsg(m any) error {
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockDaemon_DownloadServerMockRecorder) SendMsg(m any) *gomock.Call {
func (mr *MockDaemon_DownloadServerMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SendMsg), m)
}
@ -955,7 +702,7 @@ func (m *MockDaemon_DownloadServer) SetHeader(arg0 metadata.MD) error {
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockDaemon_DownloadServerMockRecorder) SetHeader(arg0 any) *gomock.Call {
func (mr *MockDaemon_DownloadServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SetHeader), arg0)
}
@ -967,7 +714,7 @@ func (m *MockDaemon_DownloadServer) SetTrailer(arg0 metadata.MD) {
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockDaemon_DownloadServerMockRecorder) SetTrailer(arg0 any) *gomock.Call {
func (mr *MockDaemon_DownloadServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SetTrailer), arg0)
}
@ -976,7 +723,6 @@ func (mr *MockDaemon_DownloadServerMockRecorder) SetTrailer(arg0 any) *gomock.Ca
type MockDaemon_SyncPieceTasksServer struct {
ctrl *gomock.Controller
recorder *MockDaemon_SyncPieceTasksServerMockRecorder
isgomock struct{}
}
// MockDaemon_SyncPieceTasksServerMockRecorder is the mock recorder for MockDaemon_SyncPieceTasksServer.
@ -1011,10 +757,10 @@ func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) Context() *gomock.Call {
}
// Recv mocks base method.
func (m *MockDaemon_SyncPieceTasksServer) Recv() (*common.PieceTaskRequest, error) {
func (m *MockDaemon_SyncPieceTasksServer) Recv() (*v1.PieceTaskRequest, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*common.PieceTaskRequest)
ret0, _ := ret[0].(*v1.PieceTaskRequest)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -1026,7 +772,7 @@ func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) Recv() *gomock.Call {
}
// RecvMsg mocks base method.
func (m_2 *MockDaemon_SyncPieceTasksServer) RecvMsg(m any) error {
func (m_2 *MockDaemon_SyncPieceTasksServer) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
@ -1034,13 +780,13 @@ func (m_2 *MockDaemon_SyncPieceTasksServer) RecvMsg(m any) error {
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) RecvMsg(m any) *gomock.Call {
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockDaemon_SyncPieceTasksServer) Send(arg0 *common.PiecePacket) error {
func (m *MockDaemon_SyncPieceTasksServer) Send(arg0 *v1.PiecePacket) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
@ -1048,7 +794,7 @@ func (m *MockDaemon_SyncPieceTasksServer) Send(arg0 *common.PiecePacket) error {
}
// Send indicates an expected call of Send.
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) Send(arg0 any) *gomock.Call {
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).Send), arg0)
}
@ -1062,13 +808,13 @@ func (m *MockDaemon_SyncPieceTasksServer) SendHeader(arg0 metadata.MD) error {
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SendHeader(arg0 any) *gomock.Call {
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockDaemon_SyncPieceTasksServer) SendMsg(m any) error {
func (m_2 *MockDaemon_SyncPieceTasksServer) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
@ -1076,7 +822,7 @@ func (m_2 *MockDaemon_SyncPieceTasksServer) SendMsg(m any) error {
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SendMsg(m any) *gomock.Call {
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).SendMsg), m)
}
@ -1090,7 +836,7 @@ func (m *MockDaemon_SyncPieceTasksServer) SetHeader(arg0 metadata.MD) error {
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SetHeader(arg0 any) *gomock.Call {
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).SetHeader), arg0)
}
@ -1102,142 +848,7 @@ func (m *MockDaemon_SyncPieceTasksServer) SetTrailer(arg0 metadata.MD) {
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SetTrailer(arg0 any) *gomock.Call {
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).SetTrailer), arg0)
}
// MockDaemon_PeerExchangeServer is a mock of Daemon_PeerExchangeServer interface.
type MockDaemon_PeerExchangeServer struct {
ctrl *gomock.Controller
recorder *MockDaemon_PeerExchangeServerMockRecorder
isgomock struct{}
}
// MockDaemon_PeerExchangeServerMockRecorder is the mock recorder for MockDaemon_PeerExchangeServer.
type MockDaemon_PeerExchangeServerMockRecorder struct {
mock *MockDaemon_PeerExchangeServer
}
// NewMockDaemon_PeerExchangeServer creates a new mock instance.
func NewMockDaemon_PeerExchangeServer(ctrl *gomock.Controller) *MockDaemon_PeerExchangeServer {
mock := &MockDaemon_PeerExchangeServer{ctrl: ctrl}
mock.recorder = &MockDaemon_PeerExchangeServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDaemon_PeerExchangeServer) EXPECT() *MockDaemon_PeerExchangeServerMockRecorder {
return m.recorder
}
// Context mocks base method.
func (m *MockDaemon_PeerExchangeServer) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockDaemon_PeerExchangeServerMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockDaemon_PeerExchangeServer)(nil).Context))
}
// Recv mocks base method.
func (m *MockDaemon_PeerExchangeServer) Recv() (*dfdaemon.PeerExchangeData, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*dfdaemon.PeerExchangeData)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Recv indicates an expected call of Recv.
func (mr *MockDaemon_PeerExchangeServerMockRecorder) Recv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockDaemon_PeerExchangeServer)(nil).Recv))
}
// RecvMsg mocks base method.
func (m_2 *MockDaemon_PeerExchangeServer) RecvMsg(m any) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockDaemon_PeerExchangeServerMockRecorder) RecvMsg(m any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_PeerExchangeServer)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockDaemon_PeerExchangeServer) Send(arg0 *dfdaemon.PeerExchangeData) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Send indicates an expected call of Send.
func (mr *MockDaemon_PeerExchangeServerMockRecorder) Send(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockDaemon_PeerExchangeServer)(nil).Send), arg0)
}
// SendHeader mocks base method.
func (m *MockDaemon_PeerExchangeServer) SendHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SendHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockDaemon_PeerExchangeServerMockRecorder) SendHeader(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockDaemon_PeerExchangeServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockDaemon_PeerExchangeServer) SendMsg(m any) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockDaemon_PeerExchangeServerMockRecorder) SendMsg(m any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_PeerExchangeServer)(nil).SendMsg), m)
}
// SetHeader mocks base method.
func (m *MockDaemon_PeerExchangeServer) SetHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockDaemon_PeerExchangeServerMockRecorder) SetHeader(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockDaemon_PeerExchangeServer)(nil).SetHeader), arg0)
}
// SetTrailer mocks base method.
func (m *MockDaemon_PeerExchangeServer) SetTrailer(arg0 metadata.MD) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "SetTrailer", arg0)
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockDaemon_PeerExchangeServerMockRecorder) SetTrailer(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockDaemon_PeerExchangeServer)(nil).SetTrailer), arg0)
}
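For readers unfamiliar with these generated mocks, they are driven through gomock's controller/recorder pattern in unit tests. Below is a minimal, illustrative sketch of that usage against MockDaemonServer, assuming go.uber.org/mock/gomock (the older github.com/golang/mock/gomock API is the same); the test name, expectation, and return values are examples, not code from this repository.

package mocks

import (
    "context"
    "testing"

    "go.uber.org/mock/gomock"
    "google.golang.org/protobuf/types/known/emptypb"
)

// TestCheckHealthSketch records a single expected CheckHealth call on the
// generated mock and then invokes it, which is the typical test workflow.
func TestCheckHealthSketch(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    srv := NewMockDaemonServer(ctrl)
    srv.EXPECT().
        CheckHealth(gomock.Any(), gomock.Any()).
        Return(&emptypb.Empty{}, nil)

    if _, err := srv.CheckHealth(context.Background(), &emptypb.Empty{}); err != nil {
        t.Fatalf("CheckHealth: %v", err)
    }
}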

View File

@ -16,4 +16,4 @@
package mocks
//go:generate mockgen -destination dfdaemon_mock.go -source ../dfdaemon_grpc.pb.go -package mocks
//go:generate mockgen -destination dfdaemon_mock.go -source ../dfdaemon.pb.go -package mocks
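These directives are what drives regeneration: running go generate over the package re-invokes mockgen with the flags shown above. The sketch below is an illustrative doc.go layout under the assumption that a mockgen binary is installed (for example from go.uber.org/mock); it is not taken from this repository.

// Package mocks holds gomock-generated mocks for the dfdaemon gRPC interfaces.
package mocks

// `go generate ./...` from the module root executes the directive below and
// rewrites dfdaemon_mock.go from the gRPC source file named by -source.
//
//go:generate mockgen -destination dfdaemon_mock.go -source ../dfdaemon_grpc.pb.go -package mocks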

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,416 +0,0 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package dfdaemon.v2;
import "pkg/apis/common/v2/common.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/empty.proto";
import "validate/validate.proto";
option go_package = "d7y.io/api/v2/pkg/apis/dfdaemon/v2;dfdaemon";
// DownloadTaskRequest represents request of DownloadTask.
message DownloadTaskRequest {
// Download information.
common.v2.Download download = 1 [(validate.rules).message.required = true];
}
// DownloadTaskStartedResponse represents task download started response of DownloadTaskResponse.
message DownloadTaskStartedResponse {
// Task content length.
uint64 content_length = 1;
// Range is the url range of the request. If the protocol is http, the range
// is parsed from the http header. For other protocols, the range comes
// from the download range field.
optional common.v2.Range range = 2;
// Task response headers.
map<string, string> response_header = 3;
// Need to download pieces.
repeated common.v2.Piece pieces = 4;
}
// DownloadPieceFinishedResponse represents piece download finished response of DownloadTaskResponse.
message DownloadPieceFinishedResponse {
// Finished piece of task.
common.v2.Piece piece = 1 [(validate.rules).message.required = true];
}
// DownloadTaskResponse represents response of DownloadTask.
message DownloadTaskResponse {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
oneof response {
option (validate.required) = true;
DownloadTaskStartedResponse download_task_started_response = 4;
DownloadPieceFinishedResponse download_piece_finished_response = 5;
}
}
// SyncPiecesRequest represents request of SyncPieces.
message SyncPiecesRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Interested piece numbers.
repeated uint32 interested_piece_numbers = 3 [(validate.rules).repeated = {min_items: 1}];
}
// SyncPiecesResponse represents response of SyncPieces.
message SyncPiecesResponse {
// Exist piece number.
uint32 number = 1;
// Piece offset.
uint64 offset = 2;
// Piece length.
uint64 length = 3;
}
// DownloadPieceRequest represents request of DownloadPiece.
message DownloadPieceRequest{
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Piece number.
uint32 piece_number = 3;
}
// DownloadPieceResponse represents response of DownloadPieces.
message DownloadPieceResponse {
// Piece information.
common.v2.Piece piece = 1 [(validate.rules).message.required = true];
// Piece metadata digest, used to verify the integrity of the piece metadata.
optional string digest = 2 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]+)$", ignore_empty: true}];
}
// StatTaskRequest represents request of StatTask.
message StatTaskRequest {
// Task id.
string task_id = 1 [(validate.rules).string.min_len = 1];
// Remote IP represents the IP address of the client initiating the stat request.
optional string remote_ip = 2 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// ListTaskEntriesRequest represents request of ListTaskEntries.
message ListTaskEntriesRequest {
// Task id.
string task_id = 1 [(validate.rules).string.min_len = 1];
// URL to be listed the entries.
string url = 2;
// HTTP header to be sent with the request.
map<string, string> request_header = 3;
// List timeout.
optional google.protobuf.Duration timeout = 4;
// certificate_chain is the client certificate chain in DER format for the backend client to list the entries.
repeated bytes certificate_chain = 5;
// Object storage protocol information.
optional common.v2.ObjectStorage object_storage = 6;
// HDFS protocol information.
optional common.v2.HDFS hdfs = 7;
// Remote IP represents the IP address of the client initiating the list request.
optional string remote_ip = 8 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// ListTaskEntriesResponse represents response of ListTaskEntries.
message ListTaskEntriesResponse {
// Content length is the content length of the response
uint64 content_length = 1;
// HTTP header to be sent with the request.
map<string, string> response_header = 2;
// Backend HTTP status code.
optional int32 status_code = 3 [(validate.rules).int32 = {gte: 100, lt: 599, ignore_empty: true}];
// Entries is the information of the entries in the directory.
repeated Entry entries = 4;
}
// Entry represents an entry in a directory.
message Entry {
// URL of the entry.
string url = 1;
// Size of the entry.
uint64 content_length = 2;
// Is directory or not.
bool is_dir = 3;
}
// DeleteTaskRequest represents request of DeleteTask.
message DeleteTaskRequest {
// Task id.
string task_id = 1 [(validate.rules).string.min_len = 1];
// Remote IP represents the IP address of the client initiating the delete request.
optional string remote_ip = 2 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// DownloadPersistentCacheTaskRequest represents request of DownloadPersistentCacheTask.
message DownloadPersistentCacheTaskRequest {
// Task id.
string task_id = 1 [(validate.rules).string.min_len = 1];
// Persistent represents whether the persistent cache task is persistent.
// If the persistent cache task is persistent, the persistent cache peer will
// not be deleted when dfdaemon runs garbage collection.
bool persistent = 2;
// Tag is used to distinguish different persistent cache tasks.
optional string tag = 3;
// Application of task.
optional string application = 4;
// File path to be exported. If output_path is set, the exported file will be saved to the specified path.
// Dfdaemon will try to create a hard link to the output path before starting the export. If hard link creation fails,
// it will copy the file to the output path after the export is completed.
// For more details refer to https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md.
optional string output_path = 5 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
// Download timeout.
optional google.protobuf.Duration timeout = 6;
// need_piece_content is the flag to indicate whether the response needs to return piece content.
bool need_piece_content = 7;
// force_hard_link is the flag to indicate whether the exported file must be hard linked to the output path.
// For more details refer to https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md.
bool force_hard_link = 8;
// Verifies task data integrity after download using a digest. Supports CRC32, SHA256, and SHA512 algorithms.
// Format: `<algorithm>:<hash>`, e.g., `crc32:xxx`, `sha256:yyy`, `sha512:zzz`.
// Returns an error if the computed digest mismatches the expected value.
//
// Performance
// Digest calculation increases processing time. Enable only when data integrity verification is critical.
optional string digest = 9;
// Remote IP represents the IP address of the client initiating the download request.
optional string remote_ip = 10 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// DownloadPersistentCacheTaskStartedResponse represents task download started response of DownloadPersistentCacheTaskResponse.
message DownloadPersistentCacheTaskStartedResponse {
// Task content length.
uint64 content_length = 1;
}
// DownloadPersistentCacheTaskResponse represents response of DownloadPersistentCacheTask.
message DownloadPersistentCacheTaskResponse {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
oneof response {
option (validate.required) = true;
DownloadPersistentCacheTaskStartedResponse download_persistent_cache_task_started_response = 4;
DownloadPieceFinishedResponse download_piece_finished_response = 5;
}
}
// UploadPersistentCacheTaskRequest represents request of UploadPersistentCacheTask.
message UploadPersistentCacheTaskRequest {
// content_for_calculating_task_id is the content used to calculate the task id.
// If content_for_calculating_task_id is set, use its value to calculate the task ID.
// Otherwise, calculate the task ID based on the file content, tag and application using the crc32 algorithm.
optional string content_for_calculating_task_id = 1;
// Upload file path of persistent cache task.
string path = 2 [(validate.rules).string = {min_len: 1}];
// Replica count of the persistent cache task.
uint64 persistent_replica_count = 3 [(validate.rules).uint64 = {gte: 1, lte: 5}];
// Tag is used to distinguish different persistent cache tasks.
optional string tag = 4;
// Application of the persistent cache task.
optional string application = 5;
// Piece length of the persistent cache task; the value needs to be greater than or equal to 4194304 (4MiB).
optional uint64 piece_length = 6 [(validate.rules).uint64 = {gte: 4194304, ignore_empty: true}];
// TTL of the persistent cache task.
google.protobuf.Duration ttl = 7 [(validate.rules).duration = {gte:{seconds: 60}, lte:{seconds: 604800}}];
// Download timeout.
optional google.protobuf.Duration timeout = 8;
// Remote IP represents the IP address of the client initiating the upload request.
optional string remote_ip = 9 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// UpdatePersistentCacheTaskRequest represents request of UpdatePersistentCacheTask.
message UpdatePersistentCacheTaskRequest {
// Task id.
string task_id = 1 [(validate.rules).string.min_len = 1];
// Persistent represents whether the persistent cache peer is persistent.
// If the persistent cache peer is persistent, the persistent cache peer will
// not be deleted when dfdaemon runs garbage collection. It will only be deleted
// when the task is deleted by the user.
bool persistent = 2;
// Remote IP represents the IP address of the client initiating the update request.
optional string remote_ip = 3 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// StatPersistentCacheTaskRequest represents request of StatPersistentCacheTask.
message StatPersistentCacheTaskRequest {
// Task id.
string task_id = 1 [(validate.rules).string.min_len = 1];
// Remote IP represents the IP address of the client initiating the stat request.
optional string remote_ip = 2 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// DeletePersistentCacheTaskRequest represents request of DeletePersistentCacheTask.
message DeletePersistentCacheTaskRequest {
// Task id.
string task_id = 1 [(validate.rules).string.min_len = 1];
// Remote IP represents the IP address of the client initiating the delete request.
optional string remote_ip = 2 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// SyncPersistentCachePiecesRequest represents request of SyncPersistentCachePieces.
message SyncPersistentCachePiecesRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Interested piece numbers.
repeated uint32 interested_piece_numbers = 3 [(validate.rules).repeated = {min_items: 1}];
}
// SyncPersistentCachePiecesResponse represents response of SyncPersistentCachePieces.
message SyncPersistentCachePiecesResponse {
// Exist piece number.
uint32 number = 1;
// Piece offset.
uint64 offset = 2;
// Piece length.
uint64 length = 3;
}
// DownloadPersistentCachePieceRequest represents request of DownloadPersistentCachePiece.
message DownloadPersistentCachePieceRequest{
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Piece number.
uint32 piece_number = 3;
}
// DownloadPersistentCachePieceResponse represents response of DownloadPersistentCachePieces.
message DownloadPersistentCachePieceResponse {
// Piece information.
common.v2.Piece piece = 1 [(validate.rules).message.required = true];
// Piece metadata digest, used to verify the integrity of the piece metadata.
optional string digest = 2 [(validate.rules).string = {pattern: "^(md5:[a-fA-F0-9]{32}|sha1:[a-fA-F0-9]{40}|sha256:[a-fA-F0-9]{64}|sha512:[a-fA-F0-9]{128}|blake3:[a-fA-F0-9]{64}|crc32:[a-fA-F0-9]+)$", ignore_empty: true}];
}
// SyncHostRequest represents request of SyncHost.
message SyncHostRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 2 [(validate.rules).string.min_len = 1];
}
// IBVerbsQueuePairEndpoint represents queue pair endpoint of IBVerbs.
message IBVerbsQueuePairEndpoint {
// Number of the queue pair.
uint32 num = 1;
// Local identifier of the context.
uint32 lid = 2;
// Global identifier of the context.
bytes gid = 3 [(validate.rules).bytes.len = 16];
}
// ExchangeIBVerbsQueuePairEndpointRequest represents request of ExchangeIBVerbsQueuePairEndpoint.
message ExchangeIBVerbsQueuePairEndpointRequest {
// Information of the source's queue pair endpoint of IBVerbs.
IBVerbsQueuePairEndpoint endpoint = 1 [(validate.rules).message.required = true];
}
// ExchangeIBVerbsQueuePairEndpointResponse represents response of ExchangeIBVerbsQueuePairEndpoint.
message ExchangeIBVerbsQueuePairEndpointResponse {
// Information of the destination's queue pair endpoint of IBVerbs.
IBVerbsQueuePairEndpoint endpoint = 1 [(validate.rules).message.required = true];
}
// DfdaemonUpload represents dfdaemon upload service.
service DfdaemonUpload {
// DownloadTask downloads task from p2p network.
rpc DownloadTask(DownloadTaskRequest) returns(stream DownloadTaskResponse);
// StatTask stats task information.
rpc StatTask(StatTaskRequest) returns(common.v2.Task);
// DeleteTask deletes task from p2p network.
rpc DeleteTask(DeleteTaskRequest) returns(google.protobuf.Empty);
// SyncPieces syncs piece metadata from the remote peer.
rpc SyncPieces(SyncPiecesRequest) returns(stream SyncPiecesResponse);
// DownloadPiece downloads piece from the remote peer.
rpc DownloadPiece(DownloadPieceRequest)returns(DownloadPieceResponse);
// DownloadPersistentCacheTask downloads persistent cache task from p2p network.
rpc DownloadPersistentCacheTask(DownloadPersistentCacheTaskRequest) returns(stream DownloadPersistentCacheTaskResponse);
// UpdatePersistentCacheTask updates metadata of the persistent cache task in p2p network.
rpc UpdatePersistentCacheTask(UpdatePersistentCacheTaskRequest) returns(google.protobuf.Empty);
// StatPersistentCacheTask stats persistent cache task information.
rpc StatPersistentCacheTask(StatPersistentCacheTaskRequest) returns(common.v2.PersistentCacheTask);
// DeletePersistentCacheTask deletes persistent cache task from p2p network.
rpc DeletePersistentCacheTask(DeletePersistentCacheTaskRequest) returns(google.protobuf.Empty);
// SyncPersistentCachePieces syncs persistent cache pieces from remote peer.
rpc SyncPersistentCachePieces(SyncPersistentCachePiecesRequest) returns(stream SyncPersistentCachePiecesResponse);
// DownloadPersistentCachePiece downloads persistent cache piece from the remote peer.
rpc DownloadPersistentCachePiece(DownloadPersistentCachePieceRequest)returns(DownloadPersistentCachePieceResponse);
// SyncHost syncs host info from parents.
rpc SyncHost(SyncHostRequest) returns (stream common.v2.Host);
// ExchangeIBVerbsQueuePairEndpoint exchanges queue pair endpoint of IBVerbs with remote peer.
rpc ExchangeIBVerbsQueuePairEndpoint(ExchangeIBVerbsQueuePairEndpointRequest) returns(ExchangeIBVerbsQueuePairEndpointResponse);
}
// DfdaemonDownload represents dfdaemon download service.
service DfdaemonDownload {
// DownloadTask downloads task from p2p network.
rpc DownloadTask(DownloadTaskRequest) returns(stream DownloadTaskResponse);
// StatTask stats task information.
rpc StatTask(StatTaskRequest) returns(common.v2.Task);
// ListTaskEntries lists task entries for downloading directory.
rpc ListTaskEntries(ListTaskEntriesRequest) returns(ListTaskEntriesResponse);
// DeleteTask deletes task from p2p network.
rpc DeleteTask(DeleteTaskRequest) returns(google.protobuf.Empty);
// DeleteHost releases host in scheduler.
rpc DeleteHost(google.protobuf.Empty)returns(google.protobuf.Empty);
// DownloadPersistentCacheTask downloads persistent cache task from p2p network.
rpc DownloadPersistentCacheTask(DownloadPersistentCacheTaskRequest) returns(stream DownloadPersistentCacheTaskResponse);
// UploadPersistentCacheTask uploads persistent cache task to p2p network.
rpc UploadPersistentCacheTask(UploadPersistentCacheTaskRequest) returns(common.v2.PersistentCacheTask);
// StatPersistentCacheTask stats persistent cache task information.
rpc StatPersistentCacheTask(StatPersistentCacheTaskRequest) returns(common.v2.PersistentCacheTask);
}
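To make the service shape above concrete, here is a minimal Go client sketch for DfdaemonDownload.DownloadTask. It assumes the Go stubs generated from this file with the stock protoc-gen-go and protoc-gen-go-grpc naming (DfdaemonDownloadClient, DownloadTaskRequest, the oneof getters) under the d7y.io/api/v2 module path declared in the go_package options; the address and URL are placeholders.

package main

import (
    "context"
    "errors"
    "io"
    "log"

    commonv2 "d7y.io/api/v2/pkg/apis/common/v2"
    dfdaemonv2 "d7y.io/api/v2/pkg/apis/dfdaemon/v2"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    // Dial the dfdaemon download endpoint (address is a placeholder).
    conn, err := grpc.Dial("127.0.0.1:4000", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()

    client := dfdaemonv2.NewDfdaemonDownloadClient(conn)
    stream, err := client.DownloadTask(context.Background(), &dfdaemonv2.DownloadTaskRequest{
        // Only the URL is set here; the other common.v2.Download fields are omitted.
        Download: &commonv2.Download{Url: "https://example.com/object"},
    })
    if err != nil {
        log.Fatalf("DownloadTask: %v", err)
    }

    // The stream yields one DownloadTaskStartedResponse, then one
    // DownloadPieceFinishedResponse per downloaded piece.
    for {
        resp, err := stream.Recv()
        if errors.Is(err, io.EOF) {
            break
        }
        if err != nil {
            log.Fatalf("recv: %v", err)
        }
        if started := resp.GetDownloadTaskStartedResponse(); started != nil {
            log.Printf("task %s started, content length %d", resp.GetTaskId(), started.GetContentLength())
        }
        if piece := resp.GetDownloadPieceFinishedResponse(); piece != nil {
            log.Printf("piece %d finished", piece.GetPiece().GetNumber())
        }
    }
}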

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,19 +0,0 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mocks
//go:generate mockgen -destination dfdaemon_mock.go -source ../dfdaemon_grpc.pb.go -package mocks

View File

@ -15,14 +15,14 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.21.6
// protoc-gen-go v1.28.0
// protoc v3.19.4
// source: pkg/apis/errordetails/v1/errordetails.proto
package errordetails
package v1
import (
v1 "d7y.io/api/v2/pkg/apis/common/v1"
v1 "d7y.io/api/pkg/apis/common/v1"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
@ -106,11 +106,10 @@ var file_pkg_apis_errordetails_v1_errordetails_proto_rawDesc = []byte{
0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74,
0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f,
0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69,
0x62, 0x75, 0x74, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x35,
0x5a, 0x33, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x32, 0x2f,
0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x64, 0x65,
0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x64, 0x65,
0x74, 0x61, 0x69, 0x6c, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x62, 0x75, 0x74, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x25,
0x5a, 0x23, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x6b, 0x67,
0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69,
0x6c, 0x73, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (

View File

@ -1,7 +1,7 @@
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: pkg/apis/errordetails/v1/errordetails.proto
package errordetails
package v1
import (
"bytes"

View File

@ -20,10 +20,11 @@ package errordetails;
import "pkg/apis/common/v1/common.proto";
option go_package = "d7y.io/api/v2/pkg/apis/errordetails/v1;errordetails";
option go_package = "d7y.io/api/pkg/apis/errordetails/v1";
message SourceError {
bool temporary = 1;
// source response metadata, e.g. HTTP Status Code, HTTP Status, HTTP Header
common.ExtendAttribute metadata = 2;
}
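The go_package change above is the visible part of the module move: on main the generated code lives under the d7y.io/api/v2 module and the explicit ";errordetails" suffix names the Go package, while on v1.0.3 the package name fell back to the last path element, v1 (matching the "package v1" / "package errordetails" lines in the generated file diff). A small sketch of the import-side difference, assuming the generated code as shown:

package main

import (
    // main: directory .../errordetails/v1 under the /v2 module, Go package "errordetails".
    errordetails "d7y.io/api/v2/pkg/apis/errordetails/v1"
    // v1.0.3 equivalent (shown for comparison, not imported here):
    //   v1 "d7y.io/api/pkg/apis/errordetails/v1"
)

func main() {
    // SourceError carries whether the source failure is temporary plus its metadata.
    _ = &errordetails.SourceError{Temporary: true}
}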

View File

@ -0,0 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: ../errordetails.pb.go
// Package mocks is a generated GoMock package.
package mocks

View File

@ -16,4 +16,4 @@
package mocks
//go:generate mockgen -destination manager_mock.go -source ../manager_grpc.pb.go -package mocks
//go:generate mockgen -destination errordetails_mock.go -source ../errordetails.pb.go -package mocks

View File

@ -1,265 +0,0 @@
//
// Copyright 2024 The Dragonfly Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc v3.21.6
// source: pkg/apis/errordetails/v2/errordetails.proto
package errordetails
import (
_ "github.com/envoyproxy/protoc-gen-validate/validate"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Backend is error detail for Backend.
type Backend struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Backend error message.
Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
// Backend HTTP response header.
Header map[string]string `protobuf:"bytes,2,rep,name=header,proto3" json:"header,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Backend HTTP status code.
StatusCode *int32 `protobuf:"varint,3,opt,name=status_code,json=statusCode,proto3,oneof" json:"status_code,omitempty"`
}
func (x *Backend) Reset() {
*x = Backend{}
if protoimpl.UnsafeEnabled {
mi := &file_pkg_apis_errordetails_v2_errordetails_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Backend) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Backend) ProtoMessage() {}
func (x *Backend) ProtoReflect() protoreflect.Message {
mi := &file_pkg_apis_errordetails_v2_errordetails_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Backend.ProtoReflect.Descriptor instead.
func (*Backend) Descriptor() ([]byte, []int) {
return file_pkg_apis_errordetails_v2_errordetails_proto_rawDescGZIP(), []int{0}
}
func (x *Backend) GetMessage() string {
if x != nil {
return x.Message
}
return ""
}
func (x *Backend) GetHeader() map[string]string {
if x != nil {
return x.Header
}
return nil
}
func (x *Backend) GetStatusCode() int32 {
if x != nil && x.StatusCode != nil {
return *x.StatusCode
}
return 0
}
// Unknown is error detail for Unknown.
type Unknown struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Unknown error message.
Message *string `protobuf:"bytes,1,opt,name=message,proto3,oneof" json:"message,omitempty"`
}
func (x *Unknown) Reset() {
*x = Unknown{}
if protoimpl.UnsafeEnabled {
mi := &file_pkg_apis_errordetails_v2_errordetails_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Unknown) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Unknown) ProtoMessage() {}
func (x *Unknown) ProtoReflect() protoreflect.Message {
mi := &file_pkg_apis_errordetails_v2_errordetails_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Unknown.ProtoReflect.Descriptor instead.
func (*Unknown) Descriptor() ([]byte, []int) {
return file_pkg_apis_errordetails_v2_errordetails_proto_rawDescGZIP(), []int{1}
}
func (x *Unknown) GetMessage() string {
if x != nil && x.Message != nil {
return *x.Message
}
return ""
}
var File_pkg_apis_errordetails_v2_errordetails_proto protoreflect.FileDescriptor
var file_pkg_apis_errordetails_v2_errordetails_proto_rawDesc = []byte{
0x0a, 0x2b, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72,
0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x76, 0x32, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72,
0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x65,
0x72, 0x72, 0x6f, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x76, 0x32, 0x1a, 0x17,
0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x01, 0x0a, 0x07, 0x42, 0x61, 0x63, 0x6b,
0x65, 0x6e, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3c, 0x0a,
0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
0x65, 0x72, 0x72, 0x6f, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x76, 0x32, 0x2e,
0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x0b, 0x73,
0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
0x42, 0x0c, 0xfa, 0x42, 0x09, 0x1a, 0x07, 0x10, 0xd7, 0x04, 0x28, 0x64, 0x40, 0x01, 0x48, 0x00,
0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x88, 0x01, 0x01, 0x1a,
0x39, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x73,
0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x34, 0x0a, 0x07, 0x55, 0x6e,
0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, 0x1d, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
0x42, 0x35, 0x5a, 0x33, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72,
0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x76, 0x32, 0x3b, 0x65, 0x72, 0x72, 0x6f, 0x72,
0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_pkg_apis_errordetails_v2_errordetails_proto_rawDescOnce sync.Once
file_pkg_apis_errordetails_v2_errordetails_proto_rawDescData = file_pkg_apis_errordetails_v2_errordetails_proto_rawDesc
)
func file_pkg_apis_errordetails_v2_errordetails_proto_rawDescGZIP() []byte {
file_pkg_apis_errordetails_v2_errordetails_proto_rawDescOnce.Do(func() {
file_pkg_apis_errordetails_v2_errordetails_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_apis_errordetails_v2_errordetails_proto_rawDescData)
})
return file_pkg_apis_errordetails_v2_errordetails_proto_rawDescData
}
var file_pkg_apis_errordetails_v2_errordetails_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_pkg_apis_errordetails_v2_errordetails_proto_goTypes = []interface{}{
(*Backend)(nil), // 0: errordetails.v2.Backend
(*Unknown)(nil), // 1: errordetails.v2.Unknown
nil, // 2: errordetails.v2.Backend.HeaderEntry
}
var file_pkg_apis_errordetails_v2_errordetails_proto_depIdxs = []int32{
2, // 0: errordetails.v2.Backend.header:type_name -> errordetails.v2.Backend.HeaderEntry
1, // [1:1] is the sub-list for method output_type
1, // [1:1] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_pkg_apis_errordetails_v2_errordetails_proto_init() }
func file_pkg_apis_errordetails_v2_errordetails_proto_init() {
if File_pkg_apis_errordetails_v2_errordetails_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_pkg_apis_errordetails_v2_errordetails_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Backend); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkg_apis_errordetails_v2_errordetails_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Unknown); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_pkg_apis_errordetails_v2_errordetails_proto_msgTypes[0].OneofWrappers = []interface{}{}
file_pkg_apis_errordetails_v2_errordetails_proto_msgTypes[1].OneofWrappers = []interface{}{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_pkg_apis_errordetails_v2_errordetails_proto_rawDesc,
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_pkg_apis_errordetails_v2_errordetails_proto_goTypes,
DependencyIndexes: file_pkg_apis_errordetails_v2_errordetails_proto_depIdxs,
MessageInfos: file_pkg_apis_errordetails_v2_errordetails_proto_msgTypes,
}.Build()
File_pkg_apis_errordetails_v2_errordetails_proto = out.File
file_pkg_apis_errordetails_v2_errordetails_proto_rawDesc = nil
file_pkg_apis_errordetails_v2_errordetails_proto_goTypes = nil
file_pkg_apis_errordetails_v2_errordetails_proto_depIdxs = nil
}
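Backend and Unknown are ordinary protobuf messages, so one natural way to transport them is as gRPC status details. The sketch below is illustrative only and assumes the google.golang.org/grpc/status API plus the d7y.io/api/v2 import path from the go_package option; nothing in this file mandates this particular wiring.

package main

import (
    "fmt"
    "log"

    errordetailsv2 "d7y.io/api/v2/pkg/apis/errordetails/v2"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func main() {
    statusCode := int32(503)

    // Server side: attach a Backend detail to a gRPC status.
    st, err := status.New(codes.Internal, "backend request failed").WithDetails(&errordetailsv2.Backend{
        Message:    "origin returned 503",
        Header:     map[string]string{"Retry-After": "5"},
        StatusCode: &statusCode,
    })
    if err != nil {
        log.Fatalf("attach details: %v", err)
    }

    // Client side: recover the typed detail from the returned error.
    for _, detail := range status.Convert(st.Err()).Details() {
        if backend, ok := detail.(*errordetailsv2.Backend); ok {
            fmt.Printf("backend error %d: %s\n", backend.GetStatusCode(), backend.GetMessage())
        }
    }
}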

View File

@ -1,259 +0,0 @@
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: pkg/apis/errordetails/v2/errordetails.proto
package errordetails
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"sort"
"strings"
"time"
"unicode/utf8"
"google.golang.org/protobuf/types/known/anypb"
)
// ensure the imports are used
var (
_ = bytes.MinRead
_ = errors.New("")
_ = fmt.Print
_ = utf8.UTFMax
_ = (*regexp.Regexp)(nil)
_ = (*strings.Reader)(nil)
_ = net.IPv4len
_ = time.Duration(0)
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
_ = sort.Sort
)
// Validate checks the field values on Backend with the rules defined in the
// proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *Backend) Validate() error {
return m.validate(false)
}
// ValidateAll checks the field values on Backend with the rules defined in the
// proto definition for this message. If any rules are violated, the result is
// a list of violation errors wrapped in BackendMultiError, or nil if none found.
func (m *Backend) ValidateAll() error {
return m.validate(true)
}
func (m *Backend) validate(all bool) error {
if m == nil {
return nil
}
var errors []error
// no validation rules for Message
// no validation rules for Header
if m.StatusCode != nil {
if m.GetStatusCode() != 0 {
if val := m.GetStatusCode(); val < 100 || val >= 599 {
err := BackendValidationError{
field: "StatusCode",
reason: "value must be inside range [100, 599)",
}
if !all {
return err
}
errors = append(errors, err)
}
}
}
if len(errors) > 0 {
return BackendMultiError(errors)
}
return nil
}
// BackendMultiError is an error wrapping multiple validation errors returned
// by Backend.ValidateAll() if the designated constraints aren't met.
type BackendMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m BackendMultiError) Error() string {
var msgs []string
for _, err := range m {
msgs = append(msgs, err.Error())
}
return strings.Join(msgs, "; ")
}
// AllErrors returns a list of validation violation errors.
func (m BackendMultiError) AllErrors() []error { return m }
// BackendValidationError is the validation error returned by Backend.Validate
// if the designated constraints aren't met.
type BackendValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e BackendValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e BackendValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e BackendValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e BackendValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e BackendValidationError) ErrorName() string { return "BackendValidationError" }
// Error satisfies the builtin error interface
func (e BackendValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sBackend.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = BackendValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = BackendValidationError{}
// Validate checks the field values on Unknown with the rules defined in the
// proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *Unknown) Validate() error {
return m.validate(false)
}
// ValidateAll checks the field values on Unknown with the rules defined in the
// proto definition for this message. If any rules are violated, the result is
// a list of violation errors wrapped in UnknownMultiError, or nil if none found.
func (m *Unknown) ValidateAll() error {
return m.validate(true)
}
func (m *Unknown) validate(all bool) error {
if m == nil {
return nil
}
var errors []error
if m.Message != nil {
// no validation rules for Message
}
if len(errors) > 0 {
return UnknownMultiError(errors)
}
return nil
}
// UnknownMultiError is an error wrapping multiple validation errors returned
// by Unknown.ValidateAll() if the designated constraints aren't met.
type UnknownMultiError []error
// Error returns a concatenation of all the error messages it wraps.
func (m UnknownMultiError) Error() string {
var msgs []string
for _, err := range m {
msgs = append(msgs, err.Error())
}
return strings.Join(msgs, "; ")
}
// AllErrors returns a list of validation violation errors.
func (m UnknownMultiError) AllErrors() []error { return m }
// UnknownValidationError is the validation error returned by Unknown.Validate
// if the designated constraints aren't met.
type UnknownValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e UnknownValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e UnknownValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e UnknownValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e UnknownValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e UnknownValidationError) ErrorName() string { return "UnknownValidationError" }
// Error satisfies the builtin error interface
func (e UnknownValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sUnknown.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = UnknownValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = UnknownValidationError{}
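
For reference, a minimal sketch of how the generated validators above are typically exercised, assuming the main-branch module path d7y.io/api/v2/pkg/apis/errordetails/v2 and the generated field names Message, Header and StatusCode; it is illustrative only and not part of the generated file:

package main

import (
	"fmt"

	errordetails "d7y.io/api/v2/pkg/apis/errordetails/v2"
	"google.golang.org/protobuf/proto"
)

func main() {
	// StatusCode 42 violates the [100, 599) range declared in the proto.
	backend := &errordetails.Backend{
		Message:    "origin returned an unexpected response",
		Header:     map[string]string{"Content-Type": "text/html"},
		StatusCode: proto.Int32(42),
	}

	// Validate stops at the first violation.
	if err := backend.Validate(); err != nil {
		fmt.Println("first violation:", err)
	}

	// ValidateAll collects every violation into a BackendMultiError.
	if err := backend.ValidateAll(); err != nil {
		if multi, ok := err.(errordetails.BackendMultiError); ok {
			for _, violation := range multi.AllErrors() {
				fmt.Println("violation:", violation)
			}
		}
	}
}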

View File

@ -1,39 +0,0 @@
/*
* Copyright 2024 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package errordetails.v2;
import "validate/validate.proto";
option go_package = "d7y.io/api/v2/pkg/apis/errordetails/v2;errordetails";
// Backend is error detail for Backend.
message Backend {
// Backend error message.
string message = 1;
// Backend HTTP response header.
map<string, string> header = 2;
// Backend HTTP status code.
optional int32 status_code = 3 [(validate.rules).int32 = {gte: 100, lt: 599, ignore_empty: true}];
}
// Unknown is error detail for Unknown.
message Unknown {
// Unknown error message.
optional string message = 1;
}
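
The Backend and Unknown messages above are meant to travel as gRPC status details. Below is a hedged sketch of attaching and reading a Backend detail with google.golang.org/grpc/status; the module path and the backendError/inspect helper names are assumptions for illustration, not part of the API definition:

package main

import (
	"fmt"

	errordetails "d7y.io/api/v2/pkg/apis/errordetails/v2"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/proto"
)

// backendError wraps an upstream HTTP failure into a gRPC status that carries
// an errordetails.Backend detail describing the origin response.
func backendError(httpStatus int, header map[string]string) error {
	st := status.New(codes.Internal, "backend request failed")
	detailed, err := st.WithDetails(&errordetails.Backend{
		Message:    fmt.Sprintf("origin responded with HTTP %d", httpStatus),
		Header:     header,
		StatusCode: proto.Int32(int32(httpStatus)),
	})
	if err != nil {
		// Fall back to the bare status if the detail cannot be attached.
		return st.Err()
	}
	return detailed.Err()
}

// inspect decodes the details back on the caller's side.
func inspect(err error) {
	for _, detail := range status.Convert(err).Details() {
		if backend, ok := detail.(*errordetails.Backend); ok {
			fmt.Println("backend status code:", backend.GetStatusCode())
		}
	}
}

func main() {
	inspect(backendError(503, map[string]string{"Retry-After": "1"}))
}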

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -18,11 +18,10 @@ syntax = "proto3";
package manager;
import "pkg/apis/common/v1/common.proto";
import "google/protobuf/empty.proto";
import "validate/validate.proto";
option go_package = "d7y.io/api/v2/pkg/apis/manager/v1;manager";
option go_package = "d7y.io/api/pkg/apis/manager/v1";
// Request source type.
enum SourceType {
@ -34,6 +33,20 @@ enum SourceType {
SEED_PEER_SOURCE = 2;
}
// SecurityGroup represents security group of cluster.
message SecurityGroup {
// Group id.
uint64 id = 1;
// Group name.
string name = 2;
// Group biography.
string bio = 3;
// Group domain.
string domain = 4;
// Group proxy domain.
string proxy_domain = 5;
}
// SeedPeerCluster represents cluster of seed peer.
message SeedPeerCluster {
// Cluster id.
@ -44,6 +57,10 @@ message SeedPeerCluster {
string bio = 3;
// Cluster configuration.
bytes config = 4;
// Cluster scopes.
bytes scopes = 5;
// Security group to which the seed peer cluster belongs.
SecurityGroup security_group = 6;
}
// SeedPeer represents seed peer for network.
@ -51,11 +68,13 @@ message SeedPeer {
// Seed peer id.
uint64 id = 1;
// Seed peer hostname.
string hostname = 2;
string host_name = 2;
// Seed peer type.
string type = 3;
// Seed peer idc.
string idc = 5;
// Seed peer network topology.
string net_topology = 6;
// Seed peer location.
string location = 7;
// Seed peer ip.
@ -81,43 +100,25 @@ message GetSeedPeerRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Seed peer hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
string host_name = 2 [(validate.rules).string.hostname = true];
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 3 [(validate.rules).uint64 = {gte: 1}];
// Seed peer ip.
string ip = 4 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// ListSeedPeersRequest represents request of ListSeedPeers.
message ListSeedPeersRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Source service hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
// Source service ip.
string ip = 3 [(validate.rules).string.ip = true];
// Dfdaemon version.
string version = 4 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Dfdaemon commit.
string commit = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
}
// ListSeedPeersResponse represents response of ListSeedPeers.
message ListSeedPeersResponse {
// Seed peers to which the source service belongs.
repeated SeedPeer seed_peers = 1;
}
// UpdateSeedPeerRequest represents request of UpdateSeedPeer.
message UpdateSeedPeerRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Seed peer hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
string host_name = 2 [(validate.rules).string.hostname = true];
// Seed peer type.
string type = 3 [(validate.rules).string = {in: ["super", "strong", "weak"]}];
// Seed peer idc.
string idc = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Seed peer network topology.
string net_topology = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Seed peer location.
string location = 7 [(validate.rules).string = {max_len: 1024, ignore_empty: true}];
// Seed peer ip.
@ -146,20 +147,24 @@ message SchedulerCluster {
bytes client_config = 5;
// Cluster scopes.
bytes scopes = 6;
// Security group to which the scheduler cluster belongs.
SecurityGroup security_group = 7;
}
// Scheduler represents scheduler for network.
// SeedPeerCluster represents scheduler for network.
message Scheduler {
// Scheduler id.
uint64 id = 1;
// Scheduler hostname.
string hostname = 2;
string host_name = 2;
// Deprecated: Do not use.
string vips = 3;
// Scheduler idc.
string idc = 4;
// Scheduler location.
string location = 5;
// Deprecated: Use net_topology instead.
bytes net_config = 6;
// Scheduler ip.
string ip = 7;
// Scheduler grpc port.
@ -172,8 +177,8 @@ message Scheduler {
SchedulerCluster scheduler_cluster = 11;
// Seed peers to which the scheduler belongs.
repeated SeedPeer seed_peers = 13;
// Feature flags of scheduler.
bytes features = 14;
// Scheduler network topology.
string net_topology = 14;
}
// GetSchedulerRequest represents request of GetScheduler.
@ -181,7 +186,7 @@ message GetSchedulerRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Scheduler hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
string host_name = 2 [(validate.rules).string.hostname = true];
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 3 [(validate.rules).uint64 = {gte: 1}];
// Scheduler ip.
@ -193,7 +198,7 @@ message UpdateSchedulerRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Scheduler hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
string host_name = 2 [(validate.rules).string.hostname = true];
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 3 [(validate.rules).uint64 = {gte: 1}];
// Deprecated: Do not use.
@ -202,12 +207,14 @@ message UpdateSchedulerRequest {
string idc = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Scheduler location.
string location = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Deprecated: Remove net_config params.
// Deprecated: Use net_topology instead.
bytes net_config = 7 [(validate.rules).bytes = {min_len: 1, ignore_empty: true}];
// Scheduler ip.
string ip = 8 [(validate.rules).string = {ip: true}];
// Scheduler port.
int32 port = 9 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Scheduler network topology.
string net_topology = 10 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
}
// ListSchedulersRequest represents request of ListSchedulers.
@ -215,15 +222,11 @@ message ListSchedulersRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Source service hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
string host_name = 2 [(validate.rules).string.hostname = true];
// Source service ip.
string ip = 3 [(validate.rules).string.ip = true];
// Source service host information.
map<string, string> host_info = 5 [(validate.rules).map.ignore_empty = true];
// Dfdaemon version.
string version = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Dfdaemon commit.
string commit = 7 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
}
// ListSchedulersResponse represents response of ListSchedulers.
@ -234,25 +237,16 @@ message ListSchedulersResponse {
// ObjectStorage represents config of object storage.
message ObjectStorage {
// name is object storage name of type, it can be s3, oss or obs.
// Object storage name of type.
string name = 1 [(validate.rules).string = {min_len: 1, max_len: 1024}];
// Region is storage region.
// Storage region.
string region = 2 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Endpoint is datacenter endpoint.
// Datacenter endpoint.
string endpoint = 3 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// AccessKey is access key ID.
// Access key id.
string access_key = 4 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// SecretKey is access key secret.
// Access key secret.
string secret_key = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// S3ForcePathStyle sets force path style for s3, true by default.
// Set this to `true` to force the request to use path-style addressing,
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
// will use virtual hosted bucket addressing when possible
// (`http://BUCKET.s3.amazonaws.com/KEY`).
// Refer to https://github.com/aws/aws-sdk-go/blob/main/aws/config.go#L118.
bool s3_force_path_style = 6;
// Scheme is the scheme of the http client.
string scheme = 7 [(validate.rules).string = {in: ["http", "https"]}];
}
// GetObjectStorageRequest represents request of GetObjectStorage.
@ -260,7 +254,7 @@ message GetObjectStorageRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Source service hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
string host_name = 2 [(validate.rules).string.hostname = true];
// Source service ip.
string ip = 3 [(validate.rules).string.ip = true];
}
@ -276,7 +270,7 @@ message ListBucketsRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Source service hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
string host_name = 2 [(validate.rules).string.hostname = true];
// Source service ip.
string ip = 3 [(validate.rules).string.ip = true];
}
@ -287,58 +281,12 @@ message ListBucketsResponse {
repeated Bucket buckets = 1;
}
// URLPriority represents config of url priority.
message URLPriority {
// URL regex.
string regex = 1 [(validate.rules).string = {min_len: 1}];
// URL priority value.
common.Priority value = 2;
}
// ApplicationPriority represents config of application priority.
message ApplicationPriority {
// Priority value.
common.Priority value = 1;
// URL priority.
repeated URLPriority urls = 2;
}
// Application represents config of application.
message Application {
// Application id.
uint64 id = 1 [(validate.rules).uint64 = {gte: 1}];
// Application name.
string name = 2 [(validate.rules).string = {min_len: 1, max_len: 1024}];
// Application url.
string url = 3 [(validate.rules).string.uri = true];
// Application biography.
string bio = 4;
// Application priority.
ApplicationPriority priority = 5 [(validate.rules).message.required = true];
}
// ListApplicationsRequest represents request of ListApplications.
message ListApplicationsRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Source service hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
// Source service ip.
string ip = 3 [(validate.rules).string.ip = true];
}
// ListApplicationsResponse represents response of ListApplications.
message ListApplicationsResponse {
// Application configs.
repeated Application applications = 1;
}
// KeepAliveRequest represents request of KeepAlive.
message KeepAliveRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Source service hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
string host_name = 2 [(validate.rules).string.hostname = true];
// ID of the cluster to which the source service belongs.
uint64 cluster_id = 3 [(validate.rules).uint64 = {gte: 1}];
// Source service ip.
@ -350,9 +298,6 @@ service Manager {
// Get SeedPeer and SeedPeer cluster configuration.
rpc GetSeedPeer(GetSeedPeerRequest) returns(SeedPeer);
// List active seed peers configuration.
rpc ListSeedPeers(ListSeedPeersRequest)returns(ListSeedPeersResponse);
// Update SeedPeer configuration.
rpc UpdateSeedPeer(UpdateSeedPeerRequest) returns(SeedPeer);
@ -371,9 +316,6 @@ service Manager {
// List buckets configuration.
rpc ListBuckets(ListBucketsRequest)returns(ListBucketsResponse);
// List applications configuration.
rpc ListApplications(ListApplicationsRequest)returns(ListApplicationsResponse);
// KeepAlive with manager.
rpc KeepAlive(stream KeepAliveRequest)returns(google.protobuf.Empty);
}
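
A hedged client-side sketch of calling the Manager service as defined on the main branch follows; the manager address, the SEED_PEER_SOURCE source type and the generated field names (Hostname, Ip, Schedulers) are assumptions drawn from the proto above, not a definitive integration:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	manager "d7y.io/api/v2/pkg/apis/manager/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Address is illustrative; point it at the manager's gRPC endpoint.
	conn, err := grpc.Dial("dragonfly-manager:65003",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial manager: %v", err)
	}
	defer conn.Close()

	client := manager.NewManagerClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Per the validate rules above, hostname must be a valid hostname and ip a valid IP.
	resp, err := client.ListSchedulers(ctx, &manager.ListSchedulersRequest{
		SourceType: manager.SourceType_SEED_PEER_SOURCE,
		Hostname:   "seed-peer-01",
		Ip:         "10.0.0.2",
	})
	if err != nil {
		log.Fatalf("list schedulers: %v", err)
	}
	for _, s := range resp.Schedulers {
		fmt.Println(s.Hostname, s.Ip)
	}
}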

View File

@ -1,483 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.21.6
// source: pkg/apis/manager/v1/manager.proto
package manager
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// ManagerClient is the client API for Manager service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ManagerClient interface {
// Get SeedPeer and SeedPeer cluster configuration.
GetSeedPeer(ctx context.Context, in *GetSeedPeerRequest, opts ...grpc.CallOption) (*SeedPeer, error)
// List active seed peers configuration.
ListSeedPeers(ctx context.Context, in *ListSeedPeersRequest, opts ...grpc.CallOption) (*ListSeedPeersResponse, error)
// Update SeedPeer configuration.
UpdateSeedPeer(ctx context.Context, in *UpdateSeedPeerRequest, opts ...grpc.CallOption) (*SeedPeer, error)
// Get Scheduler and Scheduler cluster configuration.
GetScheduler(ctx context.Context, in *GetSchedulerRequest, opts ...grpc.CallOption) (*Scheduler, error)
// Update scheduler configuration.
UpdateScheduler(ctx context.Context, in *UpdateSchedulerRequest, opts ...grpc.CallOption) (*Scheduler, error)
// List active schedulers configuration.
ListSchedulers(ctx context.Context, in *ListSchedulersRequest, opts ...grpc.CallOption) (*ListSchedulersResponse, error)
// Get ObjectStorage configuration.
GetObjectStorage(ctx context.Context, in *GetObjectStorageRequest, opts ...grpc.CallOption) (*ObjectStorage, error)
// List buckets configuration.
ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error)
// List applications configuration.
ListApplications(ctx context.Context, in *ListApplicationsRequest, opts ...grpc.CallOption) (*ListApplicationsResponse, error)
// KeepAlive with manager.
KeepAlive(ctx context.Context, opts ...grpc.CallOption) (Manager_KeepAliveClient, error)
}
type managerClient struct {
cc grpc.ClientConnInterface
}
func NewManagerClient(cc grpc.ClientConnInterface) ManagerClient {
return &managerClient{cc}
}
func (c *managerClient) GetSeedPeer(ctx context.Context, in *GetSeedPeerRequest, opts ...grpc.CallOption) (*SeedPeer, error) {
out := new(SeedPeer)
err := c.cc.Invoke(ctx, "/manager.Manager/GetSeedPeer", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) ListSeedPeers(ctx context.Context, in *ListSeedPeersRequest, opts ...grpc.CallOption) (*ListSeedPeersResponse, error) {
out := new(ListSeedPeersResponse)
err := c.cc.Invoke(ctx, "/manager.Manager/ListSeedPeers", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) UpdateSeedPeer(ctx context.Context, in *UpdateSeedPeerRequest, opts ...grpc.CallOption) (*SeedPeer, error) {
out := new(SeedPeer)
err := c.cc.Invoke(ctx, "/manager.Manager/UpdateSeedPeer", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) GetScheduler(ctx context.Context, in *GetSchedulerRequest, opts ...grpc.CallOption) (*Scheduler, error) {
out := new(Scheduler)
err := c.cc.Invoke(ctx, "/manager.Manager/GetScheduler", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) UpdateScheduler(ctx context.Context, in *UpdateSchedulerRequest, opts ...grpc.CallOption) (*Scheduler, error) {
out := new(Scheduler)
err := c.cc.Invoke(ctx, "/manager.Manager/UpdateScheduler", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) ListSchedulers(ctx context.Context, in *ListSchedulersRequest, opts ...grpc.CallOption) (*ListSchedulersResponse, error) {
out := new(ListSchedulersResponse)
err := c.cc.Invoke(ctx, "/manager.Manager/ListSchedulers", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) GetObjectStorage(ctx context.Context, in *GetObjectStorageRequest, opts ...grpc.CallOption) (*ObjectStorage, error) {
out := new(ObjectStorage)
err := c.cc.Invoke(ctx, "/manager.Manager/GetObjectStorage", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) {
out := new(ListBucketsResponse)
err := c.cc.Invoke(ctx, "/manager.Manager/ListBuckets", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) ListApplications(ctx context.Context, in *ListApplicationsRequest, opts ...grpc.CallOption) (*ListApplicationsResponse, error) {
out := new(ListApplicationsResponse)
err := c.cc.Invoke(ctx, "/manager.Manager/ListApplications", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) KeepAlive(ctx context.Context, opts ...grpc.CallOption) (Manager_KeepAliveClient, error) {
stream, err := c.cc.NewStream(ctx, &Manager_ServiceDesc.Streams[0], "/manager.Manager/KeepAlive", opts...)
if err != nil {
return nil, err
}
x := &managerKeepAliveClient{stream}
return x, nil
}
type Manager_KeepAliveClient interface {
Send(*KeepAliveRequest) error
CloseAndRecv() (*emptypb.Empty, error)
grpc.ClientStream
}
type managerKeepAliveClient struct {
grpc.ClientStream
}
func (x *managerKeepAliveClient) Send(m *KeepAliveRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *managerKeepAliveClient) CloseAndRecv() (*emptypb.Empty, error) {
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
m := new(emptypb.Empty)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
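
Manager_KeepAliveClient above is the client half of a client-streaming RPC: the caller sends KeepAliveRequest heartbeats and eventually closes the stream to receive the final Empty. A minimal sketch, assuming main-branch field names and an illustrative heartbeat interval:

package keepalive

import (
	"context"
	"time"

	manager "d7y.io/api/v2/pkg/apis/manager/v1"
)

// Run sends a heartbeat to the manager on every tick until ctx is cancelled,
// then closes the send side and returns the result of CloseAndRecv.
func Run(ctx context.Context, client manager.ManagerClient, interval time.Duration) error {
	stream, err := client.KeepAlive(ctx)
	if err != nil {
		return err
	}

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			_, err := stream.CloseAndRecv()
			return err
		case <-ticker.C:
			if err := stream.Send(&manager.KeepAliveRequest{
				SourceType: manager.SourceType_SEED_PEER_SOURCE,
				Hostname:   "seed-peer-01",
				ClusterId:  1,
				Ip:         "10.0.0.3",
			}); err != nil {
				return err
			}
		}
	}
}
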
// ManagerServer is the server API for Manager service.
// All implementations should embed UnimplementedManagerServer
// for forward compatibility
type ManagerServer interface {
// Get SeedPeer and SeedPeer cluster configuration.
GetSeedPeer(context.Context, *GetSeedPeerRequest) (*SeedPeer, error)
// List active seed peers configuration.
ListSeedPeers(context.Context, *ListSeedPeersRequest) (*ListSeedPeersResponse, error)
// Update SeedPeer configuration.
UpdateSeedPeer(context.Context, *UpdateSeedPeerRequest) (*SeedPeer, error)
// Get Scheduler and Scheduler cluster configuration.
GetScheduler(context.Context, *GetSchedulerRequest) (*Scheduler, error)
// Update scheduler configuration.
UpdateScheduler(context.Context, *UpdateSchedulerRequest) (*Scheduler, error)
// List active schedulers configuration.
ListSchedulers(context.Context, *ListSchedulersRequest) (*ListSchedulersResponse, error)
// Get ObjectStorage configuration.
GetObjectStorage(context.Context, *GetObjectStorageRequest) (*ObjectStorage, error)
// List buckets configuration.
ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error)
// List applications configuration.
ListApplications(context.Context, *ListApplicationsRequest) (*ListApplicationsResponse, error)
// KeepAlive with manager.
KeepAlive(Manager_KeepAliveServer) error
}
// UnimplementedManagerServer should be embedded to have forward compatible implementations.
type UnimplementedManagerServer struct {
}
func (UnimplementedManagerServer) GetSeedPeer(context.Context, *GetSeedPeerRequest) (*SeedPeer, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetSeedPeer not implemented")
}
func (UnimplementedManagerServer) ListSeedPeers(context.Context, *ListSeedPeersRequest) (*ListSeedPeersResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListSeedPeers not implemented")
}
func (UnimplementedManagerServer) UpdateSeedPeer(context.Context, *UpdateSeedPeerRequest) (*SeedPeer, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateSeedPeer not implemented")
}
func (UnimplementedManagerServer) GetScheduler(context.Context, *GetSchedulerRequest) (*Scheduler, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetScheduler not implemented")
}
func (UnimplementedManagerServer) UpdateScheduler(context.Context, *UpdateSchedulerRequest) (*Scheduler, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateScheduler not implemented")
}
func (UnimplementedManagerServer) ListSchedulers(context.Context, *ListSchedulersRequest) (*ListSchedulersResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListSchedulers not implemented")
}
func (UnimplementedManagerServer) GetObjectStorage(context.Context, *GetObjectStorageRequest) (*ObjectStorage, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetObjectStorage not implemented")
}
func (UnimplementedManagerServer) ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListBuckets not implemented")
}
func (UnimplementedManagerServer) ListApplications(context.Context, *ListApplicationsRequest) (*ListApplicationsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListApplications not implemented")
}
func (UnimplementedManagerServer) KeepAlive(Manager_KeepAliveServer) error {
return status.Errorf(codes.Unimplemented, "method KeepAlive not implemented")
}
// UnsafeManagerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ManagerServer will
// result in compilation errors.
type UnsafeManagerServer interface {
mustEmbedUnimplementedManagerServer()
}
func RegisterManagerServer(s grpc.ServiceRegistrar, srv ManagerServer) {
s.RegisterService(&Manager_ServiceDesc, srv)
}
func _Manager_GetSeedPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetSeedPeerRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).GetSeedPeer(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.Manager/GetSeedPeer",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).GetSeedPeer(ctx, req.(*GetSeedPeerRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_ListSeedPeers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListSeedPeersRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).ListSeedPeers(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.Manager/ListSeedPeers",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).ListSeedPeers(ctx, req.(*ListSeedPeersRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_UpdateSeedPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateSeedPeerRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).UpdateSeedPeer(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.Manager/UpdateSeedPeer",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).UpdateSeedPeer(ctx, req.(*UpdateSeedPeerRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_GetScheduler_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetSchedulerRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).GetScheduler(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.Manager/GetScheduler",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).GetScheduler(ctx, req.(*GetSchedulerRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_UpdateScheduler_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateSchedulerRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).UpdateScheduler(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.Manager/UpdateScheduler",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).UpdateScheduler(ctx, req.(*UpdateSchedulerRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_ListSchedulers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListSchedulersRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).ListSchedulers(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.Manager/ListSchedulers",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).ListSchedulers(ctx, req.(*ListSchedulersRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_GetObjectStorage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetObjectStorageRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).GetObjectStorage(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.Manager/GetObjectStorage",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).GetObjectStorage(ctx, req.(*GetObjectStorageRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_ListBuckets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListBucketsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).ListBuckets(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.Manager/ListBuckets",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).ListBuckets(ctx, req.(*ListBucketsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_ListApplications_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListApplicationsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).ListApplications(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.Manager/ListApplications",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).ListApplications(ctx, req.(*ListApplicationsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_KeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(ManagerServer).KeepAlive(&managerKeepAliveServer{stream})
}
type Manager_KeepAliveServer interface {
SendAndClose(*emptypb.Empty) error
Recv() (*KeepAliveRequest, error)
grpc.ServerStream
}
type managerKeepAliveServer struct {
grpc.ServerStream
}
func (x *managerKeepAliveServer) SendAndClose(m *emptypb.Empty) error {
return x.ServerStream.SendMsg(m)
}
func (x *managerKeepAliveServer) Recv() (*KeepAliveRequest, error) {
m := new(KeepAliveRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Manager_ServiceDesc is the grpc.ServiceDesc for Manager service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Manager_ServiceDesc = grpc.ServiceDesc{
ServiceName: "manager.Manager",
HandlerType: (*ManagerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetSeedPeer",
Handler: _Manager_GetSeedPeer_Handler,
},
{
MethodName: "ListSeedPeers",
Handler: _Manager_ListSeedPeers_Handler,
},
{
MethodName: "UpdateSeedPeer",
Handler: _Manager_UpdateSeedPeer_Handler,
},
{
MethodName: "GetScheduler",
Handler: _Manager_GetScheduler_Handler,
},
{
MethodName: "UpdateScheduler",
Handler: _Manager_UpdateScheduler_Handler,
},
{
MethodName: "ListSchedulers",
Handler: _Manager_ListSchedulers_Handler,
},
{
MethodName: "GetObjectStorage",
Handler: _Manager_GetObjectStorage_Handler,
},
{
MethodName: "ListBuckets",
Handler: _Manager_ListBuckets_Handler,
},
{
MethodName: "ListApplications",
Handler: _Manager_ListApplications_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "KeepAlive",
Handler: _Manager_KeepAlive_Handler,
ClientStreams: true,
},
},
Metadata: "pkg/apis/manager/v1/manager.proto",
}
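
On the server side, implementations embed UnimplementedManagerServer and register with RegisterManagerServer. Here is a minimal sketch of a server that only handles the KeepAlive stream, assuming the main-branch module path and field getters; every other RPC falls through to the Unimplemented responses:

package main

import (
	"io"
	"log"
	"net"

	manager "d7y.io/api/v2/pkg/apis/manager/v1"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/emptypb"
)

// managerServer embeds UnimplementedManagerServer so every RPC it does not
// override returns codes.Unimplemented.
type managerServer struct {
	manager.UnimplementedManagerServer
}

// KeepAlive drains the heartbeat stream and acknowledges with Empty once the
// client closes its side of the stream.
func (s *managerServer) KeepAlive(stream manager.Manager_KeepAliveServer) error {
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&emptypb.Empty{})
		}
		if err != nil {
			return err
		}
		log.Printf("keepalive from %s (cluster %d)", req.GetHostname(), req.GetClusterId())
	}
}

func main() {
	lis, err := net.Listen("tcp", ":65003")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	manager.RegisterManagerServer(srv, &managerServer{})
	log.Fatal(srv.Serve(lis))
}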

View File

@ -1,10 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: ../manager_grpc.pb.go
//
// Generated by this command:
//
// mockgen -destination manager_mock.go -source ../manager_grpc.pb.go -package mocks
//
// Source: ../manager.pb.go
// Package mocks is a generated GoMock package.
package mocks
@ -13,8 +8,8 @@ import (
context "context"
reflect "reflect"
manager "d7y.io/api/v2/pkg/apis/manager/v1"
gomock "go.uber.org/mock/gomock"
v1 "d7y.io/api/pkg/apis/manager/v1"
gomock "github.com/golang/mock/gomock"
grpc "google.golang.org/grpc"
metadata "google.golang.org/grpc/metadata"
emptypb "google.golang.org/protobuf/types/known/emptypb"
@ -24,7 +19,6 @@ import (
type MockManagerClient struct {
ctrl *gomock.Controller
recorder *MockManagerClientMockRecorder
isgomock struct{}
}
// MockManagerClientMockRecorder is the mock recorder for MockManagerClient.
@ -45,202 +39,162 @@ func (m *MockManagerClient) EXPECT() *MockManagerClientMockRecorder {
}
// GetObjectStorage mocks base method.
func (m *MockManagerClient) GetObjectStorage(ctx context.Context, in *manager.GetObjectStorageRequest, opts ...grpc.CallOption) (*manager.ObjectStorage, error) {
func (m *MockManagerClient) GetObjectStorage(ctx context.Context, in *v1.GetObjectStorageRequest, opts ...grpc.CallOption) (*v1.ObjectStorage, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetObjectStorage", varargs...)
ret0, _ := ret[0].(*manager.ObjectStorage)
ret0, _ := ret[0].(*v1.ObjectStorage)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetObjectStorage indicates an expected call of GetObjectStorage.
func (mr *MockManagerClientMockRecorder) GetObjectStorage(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockManagerClientMockRecorder) GetObjectStorage(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectStorage", reflect.TypeOf((*MockManagerClient)(nil).GetObjectStorage), varargs...)
}
// GetScheduler mocks base method.
func (m *MockManagerClient) GetScheduler(ctx context.Context, in *manager.GetSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) {
func (m *MockManagerClient) GetScheduler(ctx context.Context, in *v1.GetSchedulerRequest, opts ...grpc.CallOption) (*v1.Scheduler, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetScheduler", varargs...)
ret0, _ := ret[0].(*manager.Scheduler)
ret0, _ := ret[0].(*v1.Scheduler)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetScheduler indicates an expected call of GetScheduler.
func (mr *MockManagerClientMockRecorder) GetScheduler(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockManagerClientMockRecorder) GetScheduler(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScheduler", reflect.TypeOf((*MockManagerClient)(nil).GetScheduler), varargs...)
}
// GetSeedPeer mocks base method.
func (m *MockManagerClient) GetSeedPeer(ctx context.Context, in *manager.GetSeedPeerRequest, opts ...grpc.CallOption) (*manager.SeedPeer, error) {
func (m *MockManagerClient) GetSeedPeer(ctx context.Context, in *v1.GetSeedPeerRequest, opts ...grpc.CallOption) (*v1.SeedPeer, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetSeedPeer", varargs...)
ret0, _ := ret[0].(*manager.SeedPeer)
ret0, _ := ret[0].(*v1.SeedPeer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetSeedPeer indicates an expected call of GetSeedPeer.
func (mr *MockManagerClientMockRecorder) GetSeedPeer(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockManagerClientMockRecorder) GetSeedPeer(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSeedPeer", reflect.TypeOf((*MockManagerClient)(nil).GetSeedPeer), varargs...)
}
// KeepAlive mocks base method.
func (m *MockManagerClient) KeepAlive(ctx context.Context, opts ...grpc.CallOption) (manager.Manager_KeepAliveClient, error) {
func (m *MockManagerClient) KeepAlive(ctx context.Context, opts ...grpc.CallOption) (v1.Manager_KeepAliveClient, error) {
m.ctrl.T.Helper()
varargs := []any{ctx}
varargs := []interface{}{ctx}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "KeepAlive", varargs...)
ret0, _ := ret[0].(manager.Manager_KeepAliveClient)
ret0, _ := ret[0].(v1.Manager_KeepAliveClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// KeepAlive indicates an expected call of KeepAlive.
func (mr *MockManagerClientMockRecorder) KeepAlive(ctx any, opts ...any) *gomock.Call {
func (mr *MockManagerClientMockRecorder) KeepAlive(ctx interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx}, opts...)
varargs := append([]interface{}{ctx}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockManagerClient)(nil).KeepAlive), varargs...)
}
// ListApplications mocks base method.
func (m *MockManagerClient) ListApplications(ctx context.Context, in *manager.ListApplicationsRequest, opts ...grpc.CallOption) (*manager.ListApplicationsResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListApplications", varargs...)
ret0, _ := ret[0].(*manager.ListApplicationsResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListApplications indicates an expected call of ListApplications.
func (mr *MockManagerClientMockRecorder) ListApplications(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListApplications", reflect.TypeOf((*MockManagerClient)(nil).ListApplications), varargs...)
}
// ListBuckets mocks base method.
func (m *MockManagerClient) ListBuckets(ctx context.Context, in *manager.ListBucketsRequest, opts ...grpc.CallOption) (*manager.ListBucketsResponse, error) {
func (m *MockManagerClient) ListBuckets(ctx context.Context, in *v1.ListBucketsRequest, opts ...grpc.CallOption) (*v1.ListBucketsResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListBuckets", varargs...)
ret0, _ := ret[0].(*manager.ListBucketsResponse)
ret0, _ := ret[0].(*v1.ListBucketsResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListBuckets indicates an expected call of ListBuckets.
func (mr *MockManagerClientMockRecorder) ListBuckets(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockManagerClientMockRecorder) ListBuckets(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBuckets", reflect.TypeOf((*MockManagerClient)(nil).ListBuckets), varargs...)
}
// ListSchedulers mocks base method.
func (m *MockManagerClient) ListSchedulers(ctx context.Context, in *manager.ListSchedulersRequest, opts ...grpc.CallOption) (*manager.ListSchedulersResponse, error) {
func (m *MockManagerClient) ListSchedulers(ctx context.Context, in *v1.ListSchedulersRequest, opts ...grpc.CallOption) (*v1.ListSchedulersResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListSchedulers", varargs...)
ret0, _ := ret[0].(*manager.ListSchedulersResponse)
ret0, _ := ret[0].(*v1.ListSchedulersResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListSchedulers indicates an expected call of ListSchedulers.
func (mr *MockManagerClientMockRecorder) ListSchedulers(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockManagerClientMockRecorder) ListSchedulers(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerClient)(nil).ListSchedulers), varargs...)
}
// ListSeedPeers mocks base method.
func (m *MockManagerClient) ListSeedPeers(ctx context.Context, in *manager.ListSeedPeersRequest, opts ...grpc.CallOption) (*manager.ListSeedPeersResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListSeedPeers", varargs...)
ret0, _ := ret[0].(*manager.ListSeedPeersResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListSeedPeers indicates an expected call of ListSeedPeers.
func (mr *MockManagerClientMockRecorder) ListSeedPeers(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSeedPeers", reflect.TypeOf((*MockManagerClient)(nil).ListSeedPeers), varargs...)
}
// UpdateScheduler mocks base method.
func (m *MockManagerClient) UpdateScheduler(ctx context.Context, in *manager.UpdateSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) {
func (m *MockManagerClient) UpdateScheduler(ctx context.Context, in *v1.UpdateSchedulerRequest, opts ...grpc.CallOption) (*v1.Scheduler, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UpdateScheduler", varargs...)
ret0, _ := ret[0].(*manager.Scheduler)
ret0, _ := ret[0].(*v1.Scheduler)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateScheduler indicates an expected call of UpdateScheduler.
func (mr *MockManagerClientMockRecorder) UpdateScheduler(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockManagerClientMockRecorder) UpdateScheduler(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateScheduler", reflect.TypeOf((*MockManagerClient)(nil).UpdateScheduler), varargs...)
}
// UpdateSeedPeer mocks base method.
func (m *MockManagerClient) UpdateSeedPeer(ctx context.Context, in *manager.UpdateSeedPeerRequest, opts ...grpc.CallOption) (*manager.SeedPeer, error) {
func (m *MockManagerClient) UpdateSeedPeer(ctx context.Context, in *v1.UpdateSeedPeerRequest, opts ...grpc.CallOption) (*v1.SeedPeer, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UpdateSeedPeer", varargs...)
ret0, _ := ret[0].(*manager.SeedPeer)
ret0, _ := ret[0].(*v1.SeedPeer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateSeedPeer indicates an expected call of UpdateSeedPeer.
func (mr *MockManagerClientMockRecorder) UpdateSeedPeer(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockManagerClientMockRecorder) UpdateSeedPeer(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSeedPeer", reflect.TypeOf((*MockManagerClient)(nil).UpdateSeedPeer), varargs...)
}
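
These generated mocks are intended for unit tests via gomock. Below is a hedged sketch of stubbing ListSchedulers on MockManagerClient, assuming the main-branch mocks import path d7y.io/api/v2/pkg/apis/manager/v1/mocks, the go.uber.org/mock runtime, and a Schedulers field on ListSchedulersResponse; the package and test names are illustrative:

package client_test

import (
	"context"
	"testing"

	manager "d7y.io/api/v2/pkg/apis/manager/v1"
	"d7y.io/api/v2/pkg/apis/manager/v1/mocks"
	"go.uber.org/mock/gomock"
)

func TestListSchedulersWithMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Expect exactly one ListSchedulers call and return a canned response.
	client := mocks.NewMockManagerClient(ctrl)
	client.EXPECT().
		ListSchedulers(gomock.Any(), gomock.Any()).
		Return(&manager.ListSchedulersResponse{
			Schedulers: []*manager.Scheduler{{Hostname: "scheduler-01"}},
		}, nil).
		Times(1)

	resp, err := client.ListSchedulers(context.Background(), &manager.ListSchedulersRequest{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got := resp.Schedulers[0].Hostname; got != "scheduler-01" {
		t.Fatalf("unexpected scheduler hostname: %s", got)
	}
}
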
@ -248,7 +202,6 @@ func (mr *MockManagerClientMockRecorder) UpdateSeedPeer(ctx, in any, opts ...any
type MockManager_KeepAliveClient struct {
ctrl *gomock.Controller
recorder *MockManager_KeepAliveClientMockRecorder
isgomock struct{}
}
// MockManager_KeepAliveClientMockRecorder is the mock recorder for MockManager_KeepAliveClient.
@ -327,7 +280,7 @@ func (mr *MockManager_KeepAliveClientMockRecorder) Header() *gomock.Call {
}
// RecvMsg mocks base method.
func (m_2 *MockManager_KeepAliveClient) RecvMsg(m any) error {
func (m_2 *MockManager_KeepAliveClient) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
@ -335,13 +288,13 @@ func (m_2 *MockManager_KeepAliveClient) RecvMsg(m any) error {
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockManager_KeepAliveClientMockRecorder) RecvMsg(m any) *gomock.Call {
func (mr *MockManager_KeepAliveClientMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockManager_KeepAliveClient) Send(arg0 *manager.KeepAliveRequest) error {
func (m *MockManager_KeepAliveClient) Send(arg0 *v1.KeepAliveRequest) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
@ -349,13 +302,13 @@ func (m *MockManager_KeepAliveClient) Send(arg0 *manager.KeepAliveRequest) error
}
// Send indicates an expected call of Send.
func (mr *MockManager_KeepAliveClientMockRecorder) Send(arg0 any) *gomock.Call {
func (mr *MockManager_KeepAliveClientMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Send), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockManager_KeepAliveClient) SendMsg(m any) error {
func (m_2 *MockManager_KeepAliveClient) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
@ -363,7 +316,7 @@ func (m_2 *MockManager_KeepAliveClient) SendMsg(m any) error {
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockManager_KeepAliveClientMockRecorder) SendMsg(m any) *gomock.Call {
func (mr *MockManager_KeepAliveClientMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).SendMsg), m)
}
@ -386,7 +339,6 @@ func (mr *MockManager_KeepAliveClientMockRecorder) Trailer() *gomock.Call {
type MockManagerServer struct {
ctrl *gomock.Controller
recorder *MockManagerServerMockRecorder
isgomock struct{}
}
// MockManagerServerMockRecorder is the mock recorder for MockManagerServer.
@ -407,52 +359,52 @@ func (m *MockManagerServer) EXPECT() *MockManagerServerMockRecorder {
}
// GetObjectStorage mocks base method.
func (m *MockManagerServer) GetObjectStorage(arg0 context.Context, arg1 *manager.GetObjectStorageRequest) (*manager.ObjectStorage, error) {
func (m *MockManagerServer) GetObjectStorage(arg0 context.Context, arg1 *v1.GetObjectStorageRequest) (*v1.ObjectStorage, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetObjectStorage", arg0, arg1)
ret0, _ := ret[0].(*manager.ObjectStorage)
ret0, _ := ret[0].(*v1.ObjectStorage)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetObjectStorage indicates an expected call of GetObjectStorage.
func (mr *MockManagerServerMockRecorder) GetObjectStorage(arg0, arg1 any) *gomock.Call {
func (mr *MockManagerServerMockRecorder) GetObjectStorage(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectStorage", reflect.TypeOf((*MockManagerServer)(nil).GetObjectStorage), arg0, arg1)
}
// GetScheduler mocks base method.
func (m *MockManagerServer) GetScheduler(arg0 context.Context, arg1 *manager.GetSchedulerRequest) (*manager.Scheduler, error) {
func (m *MockManagerServer) GetScheduler(arg0 context.Context, arg1 *v1.GetSchedulerRequest) (*v1.Scheduler, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetScheduler", arg0, arg1)
ret0, _ := ret[0].(*manager.Scheduler)
ret0, _ := ret[0].(*v1.Scheduler)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetScheduler indicates an expected call of GetScheduler.
func (mr *MockManagerServerMockRecorder) GetScheduler(arg0, arg1 any) *gomock.Call {
func (mr *MockManagerServerMockRecorder) GetScheduler(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScheduler", reflect.TypeOf((*MockManagerServer)(nil).GetScheduler), arg0, arg1)
}
// GetSeedPeer mocks base method.
func (m *MockManagerServer) GetSeedPeer(arg0 context.Context, arg1 *manager.GetSeedPeerRequest) (*manager.SeedPeer, error) {
func (m *MockManagerServer) GetSeedPeer(arg0 context.Context, arg1 *v1.GetSeedPeerRequest) (*v1.SeedPeer, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetSeedPeer", arg0, arg1)
ret0, _ := ret[0].(*manager.SeedPeer)
ret0, _ := ret[0].(*v1.SeedPeer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetSeedPeer indicates an expected call of GetSeedPeer.
func (mr *MockManagerServerMockRecorder) GetSeedPeer(arg0, arg1 any) *gomock.Call {
func (mr *MockManagerServerMockRecorder) GetSeedPeer(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSeedPeer", reflect.TypeOf((*MockManagerServer)(nil).GetSeedPeer), arg0, arg1)
}
// KeepAlive mocks base method.
func (m *MockManagerServer) KeepAlive(arg0 manager.Manager_KeepAliveServer) error {
func (m *MockManagerServer) KeepAlive(arg0 v1.Manager_KeepAliveServer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "KeepAlive", arg0)
ret0, _ := ret[0].(error)
@ -460,142 +412,75 @@ func (m *MockManagerServer) KeepAlive(arg0 manager.Manager_KeepAliveServer) erro
}
// KeepAlive indicates an expected call of KeepAlive.
func (mr *MockManagerServerMockRecorder) KeepAlive(arg0 any) *gomock.Call {
func (mr *MockManagerServerMockRecorder) KeepAlive(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockManagerServer)(nil).KeepAlive), arg0)
}
// ListApplications mocks base method.
func (m *MockManagerServer) ListApplications(arg0 context.Context, arg1 *manager.ListApplicationsRequest) (*manager.ListApplicationsResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListApplications", arg0, arg1)
ret0, _ := ret[0].(*manager.ListApplicationsResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListApplications indicates an expected call of ListApplications.
func (mr *MockManagerServerMockRecorder) ListApplications(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListApplications", reflect.TypeOf((*MockManagerServer)(nil).ListApplications), arg0, arg1)
}
// ListBuckets mocks base method.
func (m *MockManagerServer) ListBuckets(arg0 context.Context, arg1 *manager.ListBucketsRequest) (*manager.ListBucketsResponse, error) {
func (m *MockManagerServer) ListBuckets(arg0 context.Context, arg1 *v1.ListBucketsRequest) (*v1.ListBucketsResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListBuckets", arg0, arg1)
ret0, _ := ret[0].(*manager.ListBucketsResponse)
ret0, _ := ret[0].(*v1.ListBucketsResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListBuckets indicates an expected call of ListBuckets.
func (mr *MockManagerServerMockRecorder) ListBuckets(arg0, arg1 any) *gomock.Call {
func (mr *MockManagerServerMockRecorder) ListBuckets(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBuckets", reflect.TypeOf((*MockManagerServer)(nil).ListBuckets), arg0, arg1)
}
// ListSchedulers mocks base method.
func (m *MockManagerServer) ListSchedulers(arg0 context.Context, arg1 *manager.ListSchedulersRequest) (*manager.ListSchedulersResponse, error) {
func (m *MockManagerServer) ListSchedulers(arg0 context.Context, arg1 *v1.ListSchedulersRequest) (*v1.ListSchedulersResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListSchedulers", arg0, arg1)
ret0, _ := ret[0].(*manager.ListSchedulersResponse)
ret0, _ := ret[0].(*v1.ListSchedulersResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListSchedulers indicates an expected call of ListSchedulers.
func (mr *MockManagerServerMockRecorder) ListSchedulers(arg0, arg1 any) *gomock.Call {
func (mr *MockManagerServerMockRecorder) ListSchedulers(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerServer)(nil).ListSchedulers), arg0, arg1)
}
// ListSeedPeers mocks base method.
func (m *MockManagerServer) ListSeedPeers(arg0 context.Context, arg1 *manager.ListSeedPeersRequest) (*manager.ListSeedPeersResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListSeedPeers", arg0, arg1)
ret0, _ := ret[0].(*manager.ListSeedPeersResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListSeedPeers indicates an expected call of ListSeedPeers.
func (mr *MockManagerServerMockRecorder) ListSeedPeers(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSeedPeers", reflect.TypeOf((*MockManagerServer)(nil).ListSeedPeers), arg0, arg1)
}
// UpdateScheduler mocks base method.
func (m *MockManagerServer) UpdateScheduler(arg0 context.Context, arg1 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) {
func (m *MockManagerServer) UpdateScheduler(arg0 context.Context, arg1 *v1.UpdateSchedulerRequest) (*v1.Scheduler, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateScheduler", arg0, arg1)
ret0, _ := ret[0].(*manager.Scheduler)
ret0, _ := ret[0].(*v1.Scheduler)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateScheduler indicates an expected call of UpdateScheduler.
func (mr *MockManagerServerMockRecorder) UpdateScheduler(arg0, arg1 any) *gomock.Call {
func (mr *MockManagerServerMockRecorder) UpdateScheduler(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateScheduler", reflect.TypeOf((*MockManagerServer)(nil).UpdateScheduler), arg0, arg1)
}
// UpdateSeedPeer mocks base method.
func (m *MockManagerServer) UpdateSeedPeer(arg0 context.Context, arg1 *manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error) {
func (m *MockManagerServer) UpdateSeedPeer(arg0 context.Context, arg1 *v1.UpdateSeedPeerRequest) (*v1.SeedPeer, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateSeedPeer", arg0, arg1)
ret0, _ := ret[0].(*manager.SeedPeer)
ret0, _ := ret[0].(*v1.SeedPeer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateSeedPeer indicates an expected call of UpdateSeedPeer.
func (mr *MockManagerServerMockRecorder) UpdateSeedPeer(arg0, arg1 any) *gomock.Call {
func (mr *MockManagerServerMockRecorder) UpdateSeedPeer(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSeedPeer", reflect.TypeOf((*MockManagerServer)(nil).UpdateSeedPeer), arg0, arg1)
}
// MockUnsafeManagerServer is a mock of UnsafeManagerServer interface.
type MockUnsafeManagerServer struct {
ctrl *gomock.Controller
recorder *MockUnsafeManagerServerMockRecorder
isgomock struct{}
}
// MockUnsafeManagerServerMockRecorder is the mock recorder for MockUnsafeManagerServer.
type MockUnsafeManagerServerMockRecorder struct {
mock *MockUnsafeManagerServer
}
// NewMockUnsafeManagerServer creates a new mock instance.
func NewMockUnsafeManagerServer(ctrl *gomock.Controller) *MockUnsafeManagerServer {
mock := &MockUnsafeManagerServer{ctrl: ctrl}
mock.recorder = &MockUnsafeManagerServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockUnsafeManagerServer) EXPECT() *MockUnsafeManagerServerMockRecorder {
return m.recorder
}
// mustEmbedUnimplementedManagerServer mocks base method.
func (m *MockUnsafeManagerServer) mustEmbedUnimplementedManagerServer() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "mustEmbedUnimplementedManagerServer")
}
// mustEmbedUnimplementedManagerServer indicates an expected call of mustEmbedUnimplementedManagerServer.
func (mr *MockUnsafeManagerServerMockRecorder) mustEmbedUnimplementedManagerServer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedManagerServer", reflect.TypeOf((*MockUnsafeManagerServer)(nil).mustEmbedUnimplementedManagerServer))
}
// MockManager_KeepAliveServer is a mock of Manager_KeepAliveServer interface.
type MockManager_KeepAliveServer struct {
ctrl *gomock.Controller
recorder *MockManager_KeepAliveServerMockRecorder
isgomock struct{}
}
// MockManager_KeepAliveServerMockRecorder is the mock recorder for MockManager_KeepAliveServer.
@ -630,10 +515,10 @@ func (mr *MockManager_KeepAliveServerMockRecorder) Context() *gomock.Call {
}
// Recv mocks base method.
func (m *MockManager_KeepAliveServer) Recv() (*manager.KeepAliveRequest, error) {
func (m *MockManager_KeepAliveServer) Recv() (*v1.KeepAliveRequest, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*manager.KeepAliveRequest)
ret0, _ := ret[0].(*v1.KeepAliveRequest)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -645,7 +530,7 @@ func (mr *MockManager_KeepAliveServerMockRecorder) Recv() *gomock.Call {
}
// RecvMsg mocks base method.
func (m_2 *MockManager_KeepAliveServer) RecvMsg(m any) error {
func (m_2 *MockManager_KeepAliveServer) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
@ -653,7 +538,7 @@ func (m_2 *MockManager_KeepAliveServer) RecvMsg(m any) error {
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockManager_KeepAliveServerMockRecorder) RecvMsg(m any) *gomock.Call {
func (mr *MockManager_KeepAliveServerMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).RecvMsg), m)
}
@ -667,7 +552,7 @@ func (m *MockManager_KeepAliveServer) SendAndClose(arg0 *emptypb.Empty) error {
}
// SendAndClose indicates an expected call of SendAndClose.
func (mr *MockManager_KeepAliveServerMockRecorder) SendAndClose(arg0 any) *gomock.Call {
func (mr *MockManager_KeepAliveServerMockRecorder) SendAndClose(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAndClose", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SendAndClose), arg0)
}
@ -681,13 +566,13 @@ func (m *MockManager_KeepAliveServer) SendHeader(arg0 metadata.MD) error {
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockManager_KeepAliveServerMockRecorder) SendHeader(arg0 any) *gomock.Call {
func (mr *MockManager_KeepAliveServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockManager_KeepAliveServer) SendMsg(m any) error {
func (m_2 *MockManager_KeepAliveServer) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
@ -695,7 +580,7 @@ func (m_2 *MockManager_KeepAliveServer) SendMsg(m any) error {
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockManager_KeepAliveServerMockRecorder) SendMsg(m any) *gomock.Call {
func (mr *MockManager_KeepAliveServerMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SendMsg), m)
}
@ -709,7 +594,7 @@ func (m *MockManager_KeepAliveServer) SetHeader(arg0 metadata.MD) error {
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockManager_KeepAliveServerMockRecorder) SetHeader(arg0 any) *gomock.Call {
func (mr *MockManager_KeepAliveServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SetHeader), arg0)
}
@ -721,7 +606,7 @@ func (m *MockManager_KeepAliveServer) SetTrailer(arg0 metadata.MD) {
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockManager_KeepAliveServerMockRecorder) SetTrailer(arg0 any) *gomock.Call {
func (mr *MockManager_KeepAliveServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SetTrailer), arg0)
}
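
For readers who have not used these generated mocks before, here is a minimal, hedged sketch of a test that drives a KeepAlive handler through MockManager_KeepAliveServer. The import paths, the mocks package layout, and the handleKeepAlive helper are illustrative assumptions, not part of this diff; only the mock types and the gomock calls come from the file above.

package mocks_test

import (
	"io"
	"testing"

	"go.uber.org/mock/gomock"
	"google.golang.org/protobuf/types/known/emptypb"

	// Assumed import paths; adjust to the module layout actually in use.
	manager "d7y.io/api/v2/pkg/apis/manager/v1"
	"d7y.io/api/v2/pkg/apis/manager/v1/mocks"
)

// handleKeepAlive is a hypothetical handler with the same shape as
// ManagerServer.KeepAlive: drain the stream, then acknowledge on io.EOF.
func handleKeepAlive(stream manager.Manager_KeepAliveServer) error {
	for {
		if _, err := stream.Recv(); err != nil {
			if err == io.EOF {
				return stream.SendAndClose(&emptypb.Empty{})
			}
			return err
		}
	}
}

func TestKeepAliveStream(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Simulate a peer that sends one keepalive message and then closes the stream.
	stream := mocks.NewMockManager_KeepAliveServer(ctrl)
	gomock.InOrder(
		stream.EXPECT().Recv().Return(&manager.KeepAliveRequest{}, nil),
		stream.EXPECT().Recv().Return(nil, io.EOF),
	)
	stream.EXPECT().SendAndClose(gomock.Any()).Return(nil)

	if err := handleKeepAlive(stream); err != nil {
		t.Fatalf("KeepAlive: %v", err)
	}
}
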

View File

@ -16,4 +16,4 @@
package mocks
//go:generate mockgen -destination manager_mock.go -source ../manager_grpc.pb.go -package mocks
//go:generate mockgen -destination manager_mock.go -source ../manager.pb.go -package mocks

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,333 +0,0 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package manager.v2;
import "pkg/apis/common/v2/common.proto";
import "google/protobuf/empty.proto";
import "validate/validate.proto";
option go_package = "d7y.io/api/v2/pkg/apis/manager/v2;manager";
// Request source type.
enum SourceType {
// Scheduler service.
SCHEDULER_SOURCE = 0;
// Peer service.
PEER_SOURCE = 1;
// SeedPeer service.
SEED_PEER_SOURCE = 2;
}
// SeedPeerCluster represents cluster of seed peer.
message SeedPeerCluster {
// Cluster id.
uint64 id = 1;
// Cluster name.
string name = 2;
// Cluster biography.
string bio = 3;
// Cluster configuration.
bytes config = 4;
}
// SeedPeer represents seed peer for network.
message SeedPeer {
// Seed peer id.
uint64 id = 1;
// Seed peer hostname.
string hostname = 2;
// Seed peer type.
string type = 3;
// Seed peer idc.
optional string idc = 4;
// Seed peer location.
optional string location = 5;
// Seed peer ip.
string ip = 6;
// Seed peer grpc port.
int32 port = 7;
// Seed peer download port.
int32 download_port = 8;
// Seed peer state.
string state = 9;
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 10;
// Cluster to which the seed peer belongs.
SeedPeerCluster seed_peer_cluster = 11;
// Schedulers included in seed peer.
repeated Scheduler schedulers = 12;
}
// GetSeedPeerRequest represents request of GetSeedPeer.
message GetSeedPeerRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Seed peer hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 3 [(validate.rules).uint64 = {gte: 1}];
// Seed peer ip.
string ip = 4 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// ListSeedPeersRequest represents request of ListSeedPeers.
message ListSeedPeersRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Source service hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
// Source service ip.
string ip = 3 [(validate.rules).string.ip = true];
// Dfdaemon version.
string version = 4 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Dfdaemon commit.
string commit = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
}
// ListSeedPeersResponse represents response of ListSeedPeers.
message ListSeedPeersResponse {
// Seed peers to which the source service belongs.
repeated SeedPeer seed_peers = 1;
}
// UpdateSeedPeerRequest represents request of UpdateSeedPeer.
message UpdateSeedPeerRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Seed peer hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
// Seed peer type.
string type = 3 [(validate.rules).string = {in: ["super", "strong", "weak"]}];
// Seed peer idc.
optional string idc = 4 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Seed peer location.
optional string location = 5 [(validate.rules).string = {max_len: 1024, ignore_empty: true}];
// Seed peer ip.
string ip = 6 [(validate.rules).string = {ip: true}];
// Seed peer port.
int32 port = 7 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Seed peer download port.
int32 download_port = 8 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 9 [(validate.rules).uint64 = {gte: 1}];
}
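
As a brief illustration of what the rules above accept, the sketch below builds an UpdateSeedPeerRequest that satisfies every constraint and checks it with the Validate method that protoc-gen-validate emits for each message. The import path and the concrete values are assumptions for illustration only.

package main

import (
	"fmt"

	// Assumed import path for the generated v2 manager package.
	managerv2 "d7y.io/api/v2/pkg/apis/manager/v2"
)

func main() {
	// Satisfies the constraints above: type in {super, strong, weak},
	// ports in [1024, 65535), cluster id >= 1, well-formed hostname and ip.
	req := &managerv2.UpdateSeedPeerRequest{
		SourceType:        managerv2.SourceType_SEED_PEER_SOURCE,
		Hostname:          "seed-peer-0",
		Type:              "super",
		Ip:                "192.168.0.10",
		Port:              65002,
		DownloadPort:      65102,
		SeedPeerClusterId: 1,
	}

	if err := req.Validate(); err != nil {
		fmt.Println("invalid request:", err)
		return
	}
	fmt.Println("request passes validation")
}
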
// DeleteSeedPeerRequest represents request of DeleteSeedPeer.
message DeleteSeedPeerRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Seed peer hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 3 [(validate.rules).uint64 = {gte: 1}];
// Seed peer ip.
string ip = 4 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// SchedulerCluster represents cluster of scheduler.
message SchedulerCluster {
// Cluster id.
uint64 id = 1;
// Cluster name.
string name = 2;
// Cluster biography.
string bio = 3;
// Cluster config.
bytes config = 4;
// Cluster client config.
bytes client_config = 5;
// Cluster scopes.
bytes scopes = 6;
}
// Scheduler represents scheduler for network.
message Scheduler {
// Scheduler id.
uint64 id = 1;
// Scheduler hostname.
string hostname = 2;
// Scheduler idc.
optional string idc = 3;
// Scheduler location.
optional string location = 4;
// Scheduler ip.
string ip = 5;
// Scheduler grpc port.
int32 port = 6;
// Scheduler state.
string state = 7;
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 8;
// Cluster to which the scheduler belongs.
SchedulerCluster scheduler_cluster = 9;
// Seed peers to which the scheduler belongs.
repeated SeedPeer seed_peers = 10;
// Feature flags of scheduler.
bytes features = 11;
}
// GetSchedulerRequest represents request of GetScheduler.
message GetSchedulerRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Scheduler hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 3 [(validate.rules).uint64 = {gte: 1}];
// Scheduler ip.
string ip = 4 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// UpdateSchedulerRequest represents request of UpdateScheduler.
message UpdateSchedulerRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Scheduler hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 3 [(validate.rules).uint64 = {gte: 1}];
// Scheduler idc.
optional string idc = 4 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Scheduler location.
optional string location = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Scheduler ip.
string ip = 6 [(validate.rules).string = {ip: true}];
// Scheduler port.
int32 port = 7 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Scheduler features.
repeated string features = 8;
// Scheduler configuration.
bytes config = 9;
}
// ListSchedulersRequest represents request of ListSchedulers.
message ListSchedulersRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Source service hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
// Source service ip.
string ip = 3 [(validate.rules).string.ip = true];
// Source idc.
optional string idc = 4 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Source location.
optional string location = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Dfdaemon version.
string version = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Dfdaemon commit.
string commit = 7 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 8;
}
// ListSchedulersResponse represents response of ListSchedulers.
message ListSchedulersResponse {
// Schedulers to which the source service belongs.
repeated Scheduler schedulers = 1;
}
// URLPriority represents config of url priority.
message URLPriority {
// URL regex.
string regex = 1 [(validate.rules).string = {min_len: 1}];
// URL priority value.
common.v2.Priority value = 2;
}
// ApplicationPriority represents config of application priority.
message ApplicationPriority {
// Priority value.
common.v2.Priority value = 1;
// URL priority.
repeated URLPriority urls = 2;
}
// Application represents config of application.
message Application {
// Application id.
uint64 id = 1 [(validate.rules).uint64 = {gte: 1}];
// Application name.
string name = 2 [(validate.rules).string = {min_len: 1, max_len: 1024}];
// Application url.
string url = 3 [(validate.rules).string.uri = true];
// Application biography.
string bio = 4;
// Application priority.
ApplicationPriority priority = 5 [(validate.rules).message.required = true];
}
// ListApplicationsRequest represents request of ListApplications.
message ListApplicationsRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Source service hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
// Source service ip.
string ip = 3 [(validate.rules).string.ip = true];
}
// ListApplicationsResponse represents response of ListApplications.
message ListApplicationsResponse {
// Application configs.
repeated Application applications = 1;
}
// KeepAliveRequest represents request of KeepAlive.
message KeepAliveRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Source service hostname.
string hostname = 2 [(validate.rules).string.hostname = true];
// ID of the cluster to which the source service belongs.
uint64 cluster_id = 3 [(validate.rules).uint64 = {gte: 1}];
// Source service ip.
string ip = 4 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// Manager RPC Service.
service Manager {
// Get SeedPeer and SeedPeer cluster configuration.
rpc GetSeedPeer(GetSeedPeerRequest) returns(SeedPeer);
// List active seed peers configuration.
rpc ListSeedPeers(ListSeedPeersRequest) returns(ListSeedPeersResponse);
// Update SeedPeer configuration.
rpc UpdateSeedPeer(UpdateSeedPeerRequest) returns(SeedPeer);
// Delete SeedPeer configuration.
rpc DeleteSeedPeer(DeleteSeedPeerRequest) returns(google.protobuf.Empty);
// Get Scheduler and Scheduler cluster configuration.
rpc GetScheduler(GetSchedulerRequest) returns(Scheduler);
// Update scheduler configuration.
rpc UpdateScheduler(UpdateSchedulerRequest) returns(Scheduler);
// List active schedulers configuration.
rpc ListSchedulers(ListSchedulersRequest) returns(ListSchedulersResponse);
// List applications configuration.
rpc ListApplications(ListApplicationsRequest) returns(ListApplicationsResponse);
// KeepAlive with manager.
rpc KeepAlive(stream KeepAliveRequest) returns(google.protobuf.Empty);
}
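
To make the call patterns concrete, here is a hedged client-side sketch of the one streaming RPC above: KeepAlive is client-streaming, so the caller sends requests periodically and closes the stream to receive the final Empty. The manager address, the send interval, and the import path are placeholders assumed for illustration.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed import path for the generated v2 manager package.
	managerv2 "d7y.io/api/v2/pkg/apis/manager/v2"
)

func main() {
	conn, err := grpc.Dial("manager.example.com:65003", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := managerv2.NewManagerClient(conn)
	stream, err := client.KeepAlive(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Send a few keepalive messages, then close the stream for the final reply.
	for i := 0; i < 3; i++ {
		if err := stream.Send(&managerv2.KeepAliveRequest{
			SourceType: managerv2.SourceType_SCHEDULER_SOURCE,
			Hostname:   "scheduler-0",
			ClusterId:  1,
		}); err != nil {
			log.Fatal(err)
		}
		time.Sleep(time.Second)
	}
	if _, err := stream.CloseAndRecv(); err != nil {
		log.Fatal(err)
	}
}
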

View File

@ -1,445 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.21.6
// source: pkg/apis/manager/v2/manager.proto
package manager
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// ManagerClient is the client API for Manager service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ManagerClient interface {
// Get SeedPeer and SeedPeer cluster configuration.
GetSeedPeer(ctx context.Context, in *GetSeedPeerRequest, opts ...grpc.CallOption) (*SeedPeer, error)
// List active seed peers configuration.
ListSeedPeers(ctx context.Context, in *ListSeedPeersRequest, opts ...grpc.CallOption) (*ListSeedPeersResponse, error)
// Update SeedPeer configuration.
UpdateSeedPeer(ctx context.Context, in *UpdateSeedPeerRequest, opts ...grpc.CallOption) (*SeedPeer, error)
// Delete SeedPeer configuration.
DeleteSeedPeer(ctx context.Context, in *DeleteSeedPeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Get Scheduler and Scheduler cluster configuration.
GetScheduler(ctx context.Context, in *GetSchedulerRequest, opts ...grpc.CallOption) (*Scheduler, error)
// Update scheduler configuration.
UpdateScheduler(ctx context.Context, in *UpdateSchedulerRequest, opts ...grpc.CallOption) (*Scheduler, error)
// List active schedulers configuration.
ListSchedulers(ctx context.Context, in *ListSchedulersRequest, opts ...grpc.CallOption) (*ListSchedulersResponse, error)
// List applications configuration.
ListApplications(ctx context.Context, in *ListApplicationsRequest, opts ...grpc.CallOption) (*ListApplicationsResponse, error)
// KeepAlive with manager.
KeepAlive(ctx context.Context, opts ...grpc.CallOption) (Manager_KeepAliveClient, error)
}
type managerClient struct {
cc grpc.ClientConnInterface
}
func NewManagerClient(cc grpc.ClientConnInterface) ManagerClient {
return &managerClient{cc}
}
func (c *managerClient) GetSeedPeer(ctx context.Context, in *GetSeedPeerRequest, opts ...grpc.CallOption) (*SeedPeer, error) {
out := new(SeedPeer)
err := c.cc.Invoke(ctx, "/manager.v2.Manager/GetSeedPeer", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) ListSeedPeers(ctx context.Context, in *ListSeedPeersRequest, opts ...grpc.CallOption) (*ListSeedPeersResponse, error) {
out := new(ListSeedPeersResponse)
err := c.cc.Invoke(ctx, "/manager.v2.Manager/ListSeedPeers", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) UpdateSeedPeer(ctx context.Context, in *UpdateSeedPeerRequest, opts ...grpc.CallOption) (*SeedPeer, error) {
out := new(SeedPeer)
err := c.cc.Invoke(ctx, "/manager.v2.Manager/UpdateSeedPeer", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) DeleteSeedPeer(ctx context.Context, in *DeleteSeedPeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/manager.v2.Manager/DeleteSeedPeer", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) GetScheduler(ctx context.Context, in *GetSchedulerRequest, opts ...grpc.CallOption) (*Scheduler, error) {
out := new(Scheduler)
err := c.cc.Invoke(ctx, "/manager.v2.Manager/GetScheduler", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) UpdateScheduler(ctx context.Context, in *UpdateSchedulerRequest, opts ...grpc.CallOption) (*Scheduler, error) {
out := new(Scheduler)
err := c.cc.Invoke(ctx, "/manager.v2.Manager/UpdateScheduler", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) ListSchedulers(ctx context.Context, in *ListSchedulersRequest, opts ...grpc.CallOption) (*ListSchedulersResponse, error) {
out := new(ListSchedulersResponse)
err := c.cc.Invoke(ctx, "/manager.v2.Manager/ListSchedulers", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) ListApplications(ctx context.Context, in *ListApplicationsRequest, opts ...grpc.CallOption) (*ListApplicationsResponse, error) {
out := new(ListApplicationsResponse)
err := c.cc.Invoke(ctx, "/manager.v2.Manager/ListApplications", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *managerClient) KeepAlive(ctx context.Context, opts ...grpc.CallOption) (Manager_KeepAliveClient, error) {
stream, err := c.cc.NewStream(ctx, &Manager_ServiceDesc.Streams[0], "/manager.v2.Manager/KeepAlive", opts...)
if err != nil {
return nil, err
}
x := &managerKeepAliveClient{stream}
return x, nil
}
type Manager_KeepAliveClient interface {
Send(*KeepAliveRequest) error
CloseAndRecv() (*emptypb.Empty, error)
grpc.ClientStream
}
type managerKeepAliveClient struct {
grpc.ClientStream
}
func (x *managerKeepAliveClient) Send(m *KeepAliveRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *managerKeepAliveClient) CloseAndRecv() (*emptypb.Empty, error) {
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
m := new(emptypb.Empty)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// ManagerServer is the server API for Manager service.
// All implementations should embed UnimplementedManagerServer
// for forward compatibility
type ManagerServer interface {
// Get SeedPeer and SeedPeer cluster configuration.
GetSeedPeer(context.Context, *GetSeedPeerRequest) (*SeedPeer, error)
// List active seed peers configuration.
ListSeedPeers(context.Context, *ListSeedPeersRequest) (*ListSeedPeersResponse, error)
// Update SeedPeer configuration.
UpdateSeedPeer(context.Context, *UpdateSeedPeerRequest) (*SeedPeer, error)
// Delete SeedPeer configuration.
DeleteSeedPeer(context.Context, *DeleteSeedPeerRequest) (*emptypb.Empty, error)
// Get Scheduler and Scheduler cluster configuration.
GetScheduler(context.Context, *GetSchedulerRequest) (*Scheduler, error)
// Update scheduler configuration.
UpdateScheduler(context.Context, *UpdateSchedulerRequest) (*Scheduler, error)
// List active schedulers configuration.
ListSchedulers(context.Context, *ListSchedulersRequest) (*ListSchedulersResponse, error)
// List applications configuration.
ListApplications(context.Context, *ListApplicationsRequest) (*ListApplicationsResponse, error)
// KeepAlive with manager.
KeepAlive(Manager_KeepAliveServer) error
}
// UnimplementedManagerServer should be embedded to have forward compatible implementations.
type UnimplementedManagerServer struct {
}
func (UnimplementedManagerServer) GetSeedPeer(context.Context, *GetSeedPeerRequest) (*SeedPeer, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetSeedPeer not implemented")
}
func (UnimplementedManagerServer) ListSeedPeers(context.Context, *ListSeedPeersRequest) (*ListSeedPeersResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListSeedPeers not implemented")
}
func (UnimplementedManagerServer) UpdateSeedPeer(context.Context, *UpdateSeedPeerRequest) (*SeedPeer, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateSeedPeer not implemented")
}
func (UnimplementedManagerServer) DeleteSeedPeer(context.Context, *DeleteSeedPeerRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteSeedPeer not implemented")
}
func (UnimplementedManagerServer) GetScheduler(context.Context, *GetSchedulerRequest) (*Scheduler, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetScheduler not implemented")
}
func (UnimplementedManagerServer) UpdateScheduler(context.Context, *UpdateSchedulerRequest) (*Scheduler, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateScheduler not implemented")
}
func (UnimplementedManagerServer) ListSchedulers(context.Context, *ListSchedulersRequest) (*ListSchedulersResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListSchedulers not implemented")
}
func (UnimplementedManagerServer) ListApplications(context.Context, *ListApplicationsRequest) (*ListApplicationsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListApplications not implemented")
}
func (UnimplementedManagerServer) KeepAlive(Manager_KeepAliveServer) error {
return status.Errorf(codes.Unimplemented, "method KeepAlive not implemented")
}
// UnsafeManagerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ManagerServer will
// result in compilation errors.
type UnsafeManagerServer interface {
mustEmbedUnimplementedManagerServer()
}
func RegisterManagerServer(s grpc.ServiceRegistrar, srv ManagerServer) {
s.RegisterService(&Manager_ServiceDesc, srv)
}
func _Manager_GetSeedPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetSeedPeerRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).GetSeedPeer(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.v2.Manager/GetSeedPeer",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).GetSeedPeer(ctx, req.(*GetSeedPeerRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_ListSeedPeers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListSeedPeersRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).ListSeedPeers(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.v2.Manager/ListSeedPeers",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).ListSeedPeers(ctx, req.(*ListSeedPeersRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_UpdateSeedPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateSeedPeerRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).UpdateSeedPeer(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.v2.Manager/UpdateSeedPeer",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).UpdateSeedPeer(ctx, req.(*UpdateSeedPeerRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_DeleteSeedPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteSeedPeerRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).DeleteSeedPeer(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.v2.Manager/DeleteSeedPeer",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).DeleteSeedPeer(ctx, req.(*DeleteSeedPeerRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_GetScheduler_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetSchedulerRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).GetScheduler(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.v2.Manager/GetScheduler",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).GetScheduler(ctx, req.(*GetSchedulerRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_UpdateScheduler_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateSchedulerRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).UpdateScheduler(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.v2.Manager/UpdateScheduler",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).UpdateScheduler(ctx, req.(*UpdateSchedulerRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_ListSchedulers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListSchedulersRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).ListSchedulers(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.v2.Manager/ListSchedulers",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).ListSchedulers(ctx, req.(*ListSchedulersRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_ListApplications_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListApplicationsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ManagerServer).ListApplications(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/manager.v2.Manager/ListApplications",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ManagerServer).ListApplications(ctx, req.(*ListApplicationsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Manager_KeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(ManagerServer).KeepAlive(&managerKeepAliveServer{stream})
}
type Manager_KeepAliveServer interface {
SendAndClose(*emptypb.Empty) error
Recv() (*KeepAliveRequest, error)
grpc.ServerStream
}
type managerKeepAliveServer struct {
grpc.ServerStream
}
func (x *managerKeepAliveServer) SendAndClose(m *emptypb.Empty) error {
return x.ServerStream.SendMsg(m)
}
func (x *managerKeepAliveServer) Recv() (*KeepAliveRequest, error) {
m := new(KeepAliveRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Manager_ServiceDesc is the grpc.ServiceDesc for Manager service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Manager_ServiceDesc = grpc.ServiceDesc{
ServiceName: "manager.v2.Manager",
HandlerType: (*ManagerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetSeedPeer",
Handler: _Manager_GetSeedPeer_Handler,
},
{
MethodName: "ListSeedPeers",
Handler: _Manager_ListSeedPeers_Handler,
},
{
MethodName: "UpdateSeedPeer",
Handler: _Manager_UpdateSeedPeer_Handler,
},
{
MethodName: "DeleteSeedPeer",
Handler: _Manager_DeleteSeedPeer_Handler,
},
{
MethodName: "GetScheduler",
Handler: _Manager_GetScheduler_Handler,
},
{
MethodName: "UpdateScheduler",
Handler: _Manager_UpdateScheduler_Handler,
},
{
MethodName: "ListSchedulers",
Handler: _Manager_ListSchedulers_Handler,
},
{
MethodName: "ListApplications",
Handler: _Manager_ListApplications_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "KeepAlive",
Handler: _Manager_KeepAlive_Handler,
ClientStreams: true,
},
},
Metadata: "pkg/apis/manager/v2/manager.proto",
}
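
On the server side, the intended usage of the generated code above is to embed UnimplementedManagerServer, override the RPCs that matter, and register the implementation with RegisterManagerServer. The sketch below is a hypothetical, minimal implementation assumed for illustration; only the generated types and the registration function come from this file.

package main

import (
	"context"
	"io"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/emptypb"

	// Assumed import path for the generated v2 manager package.
	managerv2 "d7y.io/api/v2/pkg/apis/manager/v2"
)

// managerServer embeds UnimplementedManagerServer so every RPC it does not
// override returns codes.Unimplemented instead of breaking compilation.
type managerServer struct {
	managerv2.UnimplementedManagerServer
}

// KeepAlive drains the client stream and acknowledges it on EOF.
func (s *managerServer) KeepAlive(stream managerv2.Manager_KeepAliveServer) error {
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			return stream.SendAndClose(&emptypb.Empty{})
		}
		if err != nil {
			return err
		}
		log.Printf("keepalive from %s (cluster %d)", req.Hostname, req.ClusterId)
	}
}

// GetScheduler returns a stub response; a real implementation would look the
// scheduler up in storage.
func (s *managerServer) GetScheduler(ctx context.Context, req *managerv2.GetSchedulerRequest) (*managerv2.Scheduler, error) {
	return &managerv2.Scheduler{Hostname: req.Hostname}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":65003")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	managerv2.RegisterManagerServer(srv, &managerServer{})
	log.Fatal(srv.Serve(lis))
}
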

View File

@ -1,692 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: ../manager_grpc.pb.go
//
// Generated by this command:
//
// mockgen -destination manager_mock.go -source ../manager_grpc.pb.go -package mocks
//
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
manager "d7y.io/api/v2/pkg/apis/manager/v2"
gomock "go.uber.org/mock/gomock"
grpc "google.golang.org/grpc"
metadata "google.golang.org/grpc/metadata"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// MockManagerClient is a mock of ManagerClient interface.
type MockManagerClient struct {
ctrl *gomock.Controller
recorder *MockManagerClientMockRecorder
isgomock struct{}
}
// MockManagerClientMockRecorder is the mock recorder for MockManagerClient.
type MockManagerClientMockRecorder struct {
mock *MockManagerClient
}
// NewMockManagerClient creates a new mock instance.
func NewMockManagerClient(ctrl *gomock.Controller) *MockManagerClient {
mock := &MockManagerClient{ctrl: ctrl}
mock.recorder = &MockManagerClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManagerClient) EXPECT() *MockManagerClientMockRecorder {
return m.recorder
}
// DeleteSeedPeer mocks base method.
func (m *MockManagerClient) DeleteSeedPeer(ctx context.Context, in *manager.DeleteSeedPeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "DeleteSeedPeer", varargs...)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DeleteSeedPeer indicates an expected call of DeleteSeedPeer.
func (mr *MockManagerClientMockRecorder) DeleteSeedPeer(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSeedPeer", reflect.TypeOf((*MockManagerClient)(nil).DeleteSeedPeer), varargs...)
}
// GetScheduler mocks base method.
func (m *MockManagerClient) GetScheduler(ctx context.Context, in *manager.GetSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetScheduler", varargs...)
ret0, _ := ret[0].(*manager.Scheduler)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetScheduler indicates an expected call of GetScheduler.
func (mr *MockManagerClientMockRecorder) GetScheduler(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScheduler", reflect.TypeOf((*MockManagerClient)(nil).GetScheduler), varargs...)
}
// GetSeedPeer mocks base method.
func (m *MockManagerClient) GetSeedPeer(ctx context.Context, in *manager.GetSeedPeerRequest, opts ...grpc.CallOption) (*manager.SeedPeer, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetSeedPeer", varargs...)
ret0, _ := ret[0].(*manager.SeedPeer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetSeedPeer indicates an expected call of GetSeedPeer.
func (mr *MockManagerClientMockRecorder) GetSeedPeer(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSeedPeer", reflect.TypeOf((*MockManagerClient)(nil).GetSeedPeer), varargs...)
}
// KeepAlive mocks base method.
func (m *MockManagerClient) KeepAlive(ctx context.Context, opts ...grpc.CallOption) (manager.Manager_KeepAliveClient, error) {
m.ctrl.T.Helper()
varargs := []any{ctx}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "KeepAlive", varargs...)
ret0, _ := ret[0].(manager.Manager_KeepAliveClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// KeepAlive indicates an expected call of KeepAlive.
func (mr *MockManagerClientMockRecorder) KeepAlive(ctx any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockManagerClient)(nil).KeepAlive), varargs...)
}
// ListApplications mocks base method.
func (m *MockManagerClient) ListApplications(ctx context.Context, in *manager.ListApplicationsRequest, opts ...grpc.CallOption) (*manager.ListApplicationsResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListApplications", varargs...)
ret0, _ := ret[0].(*manager.ListApplicationsResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListApplications indicates an expected call of ListApplications.
func (mr *MockManagerClientMockRecorder) ListApplications(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListApplications", reflect.TypeOf((*MockManagerClient)(nil).ListApplications), varargs...)
}
// ListSchedulers mocks base method.
func (m *MockManagerClient) ListSchedulers(ctx context.Context, in *manager.ListSchedulersRequest, opts ...grpc.CallOption) (*manager.ListSchedulersResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListSchedulers", varargs...)
ret0, _ := ret[0].(*manager.ListSchedulersResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListSchedulers indicates an expected call of ListSchedulers.
func (mr *MockManagerClientMockRecorder) ListSchedulers(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerClient)(nil).ListSchedulers), varargs...)
}
// ListSeedPeers mocks base method.
func (m *MockManagerClient) ListSeedPeers(ctx context.Context, in *manager.ListSeedPeersRequest, opts ...grpc.CallOption) (*manager.ListSeedPeersResponse, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListSeedPeers", varargs...)
ret0, _ := ret[0].(*manager.ListSeedPeersResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListSeedPeers indicates an expected call of ListSeedPeers.
func (mr *MockManagerClientMockRecorder) ListSeedPeers(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSeedPeers", reflect.TypeOf((*MockManagerClient)(nil).ListSeedPeers), varargs...)
}
// UpdateScheduler mocks base method.
func (m *MockManagerClient) UpdateScheduler(ctx context.Context, in *manager.UpdateSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UpdateScheduler", varargs...)
ret0, _ := ret[0].(*manager.Scheduler)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateScheduler indicates an expected call of UpdateScheduler.
func (mr *MockManagerClientMockRecorder) UpdateScheduler(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateScheduler", reflect.TypeOf((*MockManagerClient)(nil).UpdateScheduler), varargs...)
}
// UpdateSeedPeer mocks base method.
func (m *MockManagerClient) UpdateSeedPeer(ctx context.Context, in *manager.UpdateSeedPeerRequest, opts ...grpc.CallOption) (*manager.SeedPeer, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UpdateSeedPeer", varargs...)
ret0, _ := ret[0].(*manager.SeedPeer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateSeedPeer indicates an expected call of UpdateSeedPeer.
func (mr *MockManagerClientMockRecorder) UpdateSeedPeer(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSeedPeer", reflect.TypeOf((*MockManagerClient)(nil).UpdateSeedPeer), varargs...)
}
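
A typical use of MockManagerClient in a unit test stubs one of the unary calls with a canned response, as in the hedged sketch below. The import paths are assumptions; the mock constructor, the EXPECT recorder, and the message types come from this file and the generated package it mocks.

package mocks_test

import (
	"context"
	"testing"

	"go.uber.org/mock/gomock"

	// Assumed import paths; adjust to the module layout actually in use.
	managerv2 "d7y.io/api/v2/pkg/apis/manager/v2"
	"d7y.io/api/v2/pkg/apis/manager/v2/mocks"
)

func TestListSchedulersWithMockClient(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Stub the unary ListSchedulers call with a canned response.
	client := mocks.NewMockManagerClient(ctrl)
	client.EXPECT().
		ListSchedulers(gomock.Any(), gomock.Any()).
		Return(&managerv2.ListSchedulersResponse{
			Schedulers: []*managerv2.Scheduler{{Hostname: "scheduler-0"}},
		}, nil)

	resp, err := client.ListSchedulers(context.Background(), &managerv2.ListSchedulersRequest{})
	if err != nil || len(resp.Schedulers) != 1 {
		t.Fatalf("unexpected response: %v, %v", resp, err)
	}
}
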
// MockManager_KeepAliveClient is a mock of Manager_KeepAliveClient interface.
type MockManager_KeepAliveClient struct {
ctrl *gomock.Controller
recorder *MockManager_KeepAliveClientMockRecorder
isgomock struct{}
}
// MockManager_KeepAliveClientMockRecorder is the mock recorder for MockManager_KeepAliveClient.
type MockManager_KeepAliveClientMockRecorder struct {
mock *MockManager_KeepAliveClient
}
// NewMockManager_KeepAliveClient creates a new mock instance.
func NewMockManager_KeepAliveClient(ctrl *gomock.Controller) *MockManager_KeepAliveClient {
mock := &MockManager_KeepAliveClient{ctrl: ctrl}
mock.recorder = &MockManager_KeepAliveClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManager_KeepAliveClient) EXPECT() *MockManager_KeepAliveClientMockRecorder {
return m.recorder
}
// CloseAndRecv mocks base method.
func (m *MockManager_KeepAliveClient) CloseAndRecv() (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CloseAndRecv")
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CloseAndRecv indicates an expected call of CloseAndRecv.
func (mr *MockManager_KeepAliveClientMockRecorder) CloseAndRecv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseAndRecv", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).CloseAndRecv))
}
// CloseSend mocks base method.
func (m *MockManager_KeepAliveClient) CloseSend() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CloseSend")
ret0, _ := ret[0].(error)
return ret0
}
// CloseSend indicates an expected call of CloseSend.
func (mr *MockManager_KeepAliveClientMockRecorder) CloseSend() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).CloseSend))
}
// Context mocks base method.
func (m *MockManager_KeepAliveClient) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockManager_KeepAliveClientMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Context))
}
// Header mocks base method.
func (m *MockManager_KeepAliveClient) Header() (metadata.MD, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Header")
ret0, _ := ret[0].(metadata.MD)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Header indicates an expected call of Header.
func (mr *MockManager_KeepAliveClientMockRecorder) Header() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Header))
}
// RecvMsg mocks base method.
func (m_2 *MockManager_KeepAliveClient) RecvMsg(m any) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockManager_KeepAliveClientMockRecorder) RecvMsg(m any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockManager_KeepAliveClient) Send(arg0 *manager.KeepAliveRequest) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Send indicates an expected call of Send.
func (mr *MockManager_KeepAliveClientMockRecorder) Send(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Send), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockManager_KeepAliveClient) SendMsg(m any) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockManager_KeepAliveClientMockRecorder) SendMsg(m any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).SendMsg), m)
}
// Trailer mocks base method.
func (m *MockManager_KeepAliveClient) Trailer() metadata.MD {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Trailer")
ret0, _ := ret[0].(metadata.MD)
return ret0
}
// Trailer indicates an expected call of Trailer.
func (mr *MockManager_KeepAliveClientMockRecorder) Trailer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Trailer))
}
// MockManagerServer is a mock of ManagerServer interface.
type MockManagerServer struct {
ctrl *gomock.Controller
recorder *MockManagerServerMockRecorder
isgomock struct{}
}
// MockManagerServerMockRecorder is the mock recorder for MockManagerServer.
type MockManagerServerMockRecorder struct {
mock *MockManagerServer
}
// NewMockManagerServer creates a new mock instance.
func NewMockManagerServer(ctrl *gomock.Controller) *MockManagerServer {
mock := &MockManagerServer{ctrl: ctrl}
mock.recorder = &MockManagerServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManagerServer) EXPECT() *MockManagerServerMockRecorder {
return m.recorder
}
// DeleteSeedPeer mocks base method.
func (m *MockManagerServer) DeleteSeedPeer(arg0 context.Context, arg1 *manager.DeleteSeedPeerRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteSeedPeer", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DeleteSeedPeer indicates an expected call of DeleteSeedPeer.
func (mr *MockManagerServerMockRecorder) DeleteSeedPeer(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSeedPeer", reflect.TypeOf((*MockManagerServer)(nil).DeleteSeedPeer), arg0, arg1)
}
// GetScheduler mocks base method.
func (m *MockManagerServer) GetScheduler(arg0 context.Context, arg1 *manager.GetSchedulerRequest) (*manager.Scheduler, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetScheduler", arg0, arg1)
ret0, _ := ret[0].(*manager.Scheduler)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetScheduler indicates an expected call of GetScheduler.
func (mr *MockManagerServerMockRecorder) GetScheduler(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScheduler", reflect.TypeOf((*MockManagerServer)(nil).GetScheduler), arg0, arg1)
}
// GetSeedPeer mocks base method.
func (m *MockManagerServer) GetSeedPeer(arg0 context.Context, arg1 *manager.GetSeedPeerRequest) (*manager.SeedPeer, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetSeedPeer", arg0, arg1)
ret0, _ := ret[0].(*manager.SeedPeer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetSeedPeer indicates an expected call of GetSeedPeer.
func (mr *MockManagerServerMockRecorder) GetSeedPeer(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSeedPeer", reflect.TypeOf((*MockManagerServer)(nil).GetSeedPeer), arg0, arg1)
}
// KeepAlive mocks base method.
func (m *MockManagerServer) KeepAlive(arg0 manager.Manager_KeepAliveServer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "KeepAlive", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// KeepAlive indicates an expected call of KeepAlive.
func (mr *MockManagerServerMockRecorder) KeepAlive(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockManagerServer)(nil).KeepAlive), arg0)
}
// ListApplications mocks base method.
func (m *MockManagerServer) ListApplications(arg0 context.Context, arg1 *manager.ListApplicationsRequest) (*manager.ListApplicationsResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListApplications", arg0, arg1)
ret0, _ := ret[0].(*manager.ListApplicationsResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListApplications indicates an expected call of ListApplications.
func (mr *MockManagerServerMockRecorder) ListApplications(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListApplications", reflect.TypeOf((*MockManagerServer)(nil).ListApplications), arg0, arg1)
}
// ListSchedulers mocks base method.
func (m *MockManagerServer) ListSchedulers(arg0 context.Context, arg1 *manager.ListSchedulersRequest) (*manager.ListSchedulersResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListSchedulers", arg0, arg1)
ret0, _ := ret[0].(*manager.ListSchedulersResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListSchedulers indicates an expected call of ListSchedulers.
func (mr *MockManagerServerMockRecorder) ListSchedulers(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerServer)(nil).ListSchedulers), arg0, arg1)
}
// ListSeedPeers mocks base method.
func (m *MockManagerServer) ListSeedPeers(arg0 context.Context, arg1 *manager.ListSeedPeersRequest) (*manager.ListSeedPeersResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListSeedPeers", arg0, arg1)
ret0, _ := ret[0].(*manager.ListSeedPeersResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListSeedPeers indicates an expected call of ListSeedPeers.
func (mr *MockManagerServerMockRecorder) ListSeedPeers(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSeedPeers", reflect.TypeOf((*MockManagerServer)(nil).ListSeedPeers), arg0, arg1)
}
// UpdateScheduler mocks base method.
func (m *MockManagerServer) UpdateScheduler(arg0 context.Context, arg1 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateScheduler", arg0, arg1)
ret0, _ := ret[0].(*manager.Scheduler)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateScheduler indicates an expected call of UpdateScheduler.
func (mr *MockManagerServerMockRecorder) UpdateScheduler(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateScheduler", reflect.TypeOf((*MockManagerServer)(nil).UpdateScheduler), arg0, arg1)
}
// UpdateSeedPeer mocks base method.
func (m *MockManagerServer) UpdateSeedPeer(arg0 context.Context, arg1 *manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateSeedPeer", arg0, arg1)
ret0, _ := ret[0].(*manager.SeedPeer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateSeedPeer indicates an expected call of UpdateSeedPeer.
func (mr *MockManagerServerMockRecorder) UpdateSeedPeer(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSeedPeer", reflect.TypeOf((*MockManagerServer)(nil).UpdateSeedPeer), arg0, arg1)
}
// MockUnsafeManagerServer is a mock of UnsafeManagerServer interface.
type MockUnsafeManagerServer struct {
ctrl *gomock.Controller
recorder *MockUnsafeManagerServerMockRecorder
isgomock struct{}
}
// MockUnsafeManagerServerMockRecorder is the mock recorder for MockUnsafeManagerServer.
type MockUnsafeManagerServerMockRecorder struct {
mock *MockUnsafeManagerServer
}
// NewMockUnsafeManagerServer creates a new mock instance.
func NewMockUnsafeManagerServer(ctrl *gomock.Controller) *MockUnsafeManagerServer {
mock := &MockUnsafeManagerServer{ctrl: ctrl}
mock.recorder = &MockUnsafeManagerServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockUnsafeManagerServer) EXPECT() *MockUnsafeManagerServerMockRecorder {
return m.recorder
}
// mustEmbedUnimplementedManagerServer mocks base method.
func (m *MockUnsafeManagerServer) mustEmbedUnimplementedManagerServer() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "mustEmbedUnimplementedManagerServer")
}
// mustEmbedUnimplementedManagerServer indicates an expected call of mustEmbedUnimplementedManagerServer.
func (mr *MockUnsafeManagerServerMockRecorder) mustEmbedUnimplementedManagerServer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedManagerServer", reflect.TypeOf((*MockUnsafeManagerServer)(nil).mustEmbedUnimplementedManagerServer))
}
// MockManager_KeepAliveServer is a mock of Manager_KeepAliveServer interface.
type MockManager_KeepAliveServer struct {
ctrl *gomock.Controller
recorder *MockManager_KeepAliveServerMockRecorder
isgomock struct{}
}
// MockManager_KeepAliveServerMockRecorder is the mock recorder for MockManager_KeepAliveServer.
type MockManager_KeepAliveServerMockRecorder struct {
mock *MockManager_KeepAliveServer
}
// NewMockManager_KeepAliveServer creates a new mock instance.
func NewMockManager_KeepAliveServer(ctrl *gomock.Controller) *MockManager_KeepAliveServer {
mock := &MockManager_KeepAliveServer{ctrl: ctrl}
mock.recorder = &MockManager_KeepAliveServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManager_KeepAliveServer) EXPECT() *MockManager_KeepAliveServerMockRecorder {
return m.recorder
}
// Context mocks base method.
func (m *MockManager_KeepAliveServer) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockManager_KeepAliveServerMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).Context))
}
// Recv mocks base method.
func (m *MockManager_KeepAliveServer) Recv() (*manager.KeepAliveRequest, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*manager.KeepAliveRequest)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Recv indicates an expected call of Recv.
func (mr *MockManager_KeepAliveServerMockRecorder) Recv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).Recv))
}
// RecvMsg mocks base method.
func (m_2 *MockManager_KeepAliveServer) RecvMsg(m any) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockManager_KeepAliveServerMockRecorder) RecvMsg(m any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).RecvMsg), m)
}
// SendAndClose mocks base method.
func (m *MockManager_KeepAliveServer) SendAndClose(arg0 *emptypb.Empty) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SendAndClose", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SendAndClose indicates an expected call of SendAndClose.
func (mr *MockManager_KeepAliveServerMockRecorder) SendAndClose(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAndClose", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SendAndClose), arg0)
}
// SendHeader mocks base method.
func (m *MockManager_KeepAliveServer) SendHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SendHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockManager_KeepAliveServerMockRecorder) SendHeader(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockManager_KeepAliveServer) SendMsg(m any) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockManager_KeepAliveServerMockRecorder) SendMsg(m any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SendMsg), m)
}
// SetHeader mocks base method.
func (m *MockManager_KeepAliveServer) SetHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockManager_KeepAliveServerMockRecorder) SetHeader(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SetHeader), arg0)
}
// SetTrailer mocks base method.
func (m *MockManager_KeepAliveServer) SetTrailer(arg0 metadata.MD) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "SetTrailer", arg0)
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockManager_KeepAliveServerMockRecorder) SetTrailer(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SetTrailer), arg0)
}
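
Illustrative only (not part of the diff): a minimal test sketch showing how the generated MockManager_KeepAliveServer above might be driven with gomock. The import paths for the manager bindings and their mocks package are assumptions based on the module path used elsewhere in this compare view.

package mocks_test

import (
	"io"
	"testing"

	"go.uber.org/mock/gomock"
	"google.golang.org/protobuf/types/known/emptypb"

	manager "d7y.io/api/v2/pkg/apis/manager/v1"     // assumed import path
	mocks "d7y.io/api/v2/pkg/apis/manager/v1/mocks" // assumed import path
)

func TestKeepAliveServerMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	stream := mocks.NewMockManager_KeepAliveServer(ctrl)

	// Expect one keep-alive message, then end of stream, then the stream being closed.
	gomock.InOrder(
		stream.EXPECT().Recv().Return(&manager.KeepAliveRequest{}, nil),
		stream.EXPECT().Recv().Return(nil, io.EOF),
		stream.EXPECT().SendAndClose(gomock.Any()).Return(nil),
	)

	// Drain the stream the way a KeepAlive handler typically would.
	for {
		if _, err := stream.Recv(); err != nil {
			if err == io.EOF {
				break
			}
			t.Fatal(err)
		}
	}
	if err := stream.SendAndClose(&emptypb.Empty{}); err != nil {
		t.Fatal(err)
	}
}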

View File

@ -16,4 +16,4 @@
package mocks
//go:generate mockgen -destination scheduler_mock.go -source ../scheduler_grpc.pb.go -package mocks
//go:generate mockgen -destination scheduler_mock.go -source ../scheduler.pb.go -package mocks

View File

@ -1,10 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: ../scheduler_grpc.pb.go
//
// Generated by this command:
//
// mockgen -destination scheduler_mock.go -source ../scheduler_grpc.pb.go -package mocks
//
// Source: ../scheduler.pb.go
// Package mocks is a generated GoMock package.
package mocks
@ -13,18 +8,122 @@ import (
context "context"
reflect "reflect"
scheduler "d7y.io/api/v2/pkg/apis/scheduler/v1"
gomock "go.uber.org/mock/gomock"
v1 "d7y.io/api/pkg/apis/scheduler/v1"
gomock "github.com/golang/mock/gomock"
grpc "google.golang.org/grpc"
metadata "google.golang.org/grpc/metadata"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// MockisRegisterResult_DirectPiece is a mock of isRegisterResult_DirectPiece interface.
type MockisRegisterResult_DirectPiece struct {
ctrl *gomock.Controller
recorder *MockisRegisterResult_DirectPieceMockRecorder
}
// MockisRegisterResult_DirectPieceMockRecorder is the mock recorder for MockisRegisterResult_DirectPiece.
type MockisRegisterResult_DirectPieceMockRecorder struct {
mock *MockisRegisterResult_DirectPiece
}
// NewMockisRegisterResult_DirectPiece creates a new mock instance.
func NewMockisRegisterResult_DirectPiece(ctrl *gomock.Controller) *MockisRegisterResult_DirectPiece {
mock := &MockisRegisterResult_DirectPiece{ctrl: ctrl}
mock.recorder = &MockisRegisterResult_DirectPieceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockisRegisterResult_DirectPiece) EXPECT() *MockisRegisterResult_DirectPieceMockRecorder {
return m.recorder
}
// isRegisterResult_DirectPiece mocks base method.
func (m *MockisRegisterResult_DirectPiece) isRegisterResult_DirectPiece() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "isRegisterResult_DirectPiece")
}
// isRegisterResult_DirectPiece indicates an expected call of isRegisterResult_DirectPiece.
func (mr *MockisRegisterResult_DirectPieceMockRecorder) isRegisterResult_DirectPiece() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isRegisterResult_DirectPiece", reflect.TypeOf((*MockisRegisterResult_DirectPiece)(nil).isRegisterResult_DirectPiece))
}
// MockisPeerPacket_Errordetails is a mock of isPeerPacket_Errordetails interface.
type MockisPeerPacket_Errordetails struct {
ctrl *gomock.Controller
recorder *MockisPeerPacket_ErrordetailsMockRecorder
}
// MockisPeerPacket_ErrordetailsMockRecorder is the mock recorder for MockisPeerPacket_Errordetails.
type MockisPeerPacket_ErrordetailsMockRecorder struct {
mock *MockisPeerPacket_Errordetails
}
// NewMockisPeerPacket_Errordetails creates a new mock instance.
func NewMockisPeerPacket_Errordetails(ctrl *gomock.Controller) *MockisPeerPacket_Errordetails {
mock := &MockisPeerPacket_Errordetails{ctrl: ctrl}
mock.recorder = &MockisPeerPacket_ErrordetailsMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockisPeerPacket_Errordetails) EXPECT() *MockisPeerPacket_ErrordetailsMockRecorder {
return m.recorder
}
// isPeerPacket_Errordetails mocks base method.
func (m *MockisPeerPacket_Errordetails) isPeerPacket_Errordetails() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "isPeerPacket_Errordetails")
}
// isPeerPacket_Errordetails indicates an expected call of isPeerPacket_Errordetails.
func (mr *MockisPeerPacket_ErrordetailsMockRecorder) isPeerPacket_Errordetails() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isPeerPacket_Errordetails", reflect.TypeOf((*MockisPeerPacket_Errordetails)(nil).isPeerPacket_Errordetails))
}
// MockisPeerResult_Errordetails is a mock of isPeerResult_Errordetails interface.
type MockisPeerResult_Errordetails struct {
ctrl *gomock.Controller
recorder *MockisPeerResult_ErrordetailsMockRecorder
}
// MockisPeerResult_ErrordetailsMockRecorder is the mock recorder for MockisPeerResult_Errordetails.
type MockisPeerResult_ErrordetailsMockRecorder struct {
mock *MockisPeerResult_Errordetails
}
// NewMockisPeerResult_Errordetails creates a new mock instance.
func NewMockisPeerResult_Errordetails(ctrl *gomock.Controller) *MockisPeerResult_Errordetails {
mock := &MockisPeerResult_Errordetails{ctrl: ctrl}
mock.recorder = &MockisPeerResult_ErrordetailsMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockisPeerResult_Errordetails) EXPECT() *MockisPeerResult_ErrordetailsMockRecorder {
return m.recorder
}
// isPeerResult_Errordetails mocks base method.
func (m *MockisPeerResult_Errordetails) isPeerResult_Errordetails() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "isPeerResult_Errordetails")
}
// isPeerResult_Errordetails indicates an expected call of isPeerResult_Errordetails.
func (mr *MockisPeerResult_ErrordetailsMockRecorder) isPeerResult_Errordetails() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isPeerResult_Errordetails", reflect.TypeOf((*MockisPeerResult_Errordetails)(nil).isPeerResult_Errordetails))
}
// MockSchedulerClient is a mock of SchedulerClient interface.
type MockSchedulerClient struct {
ctrl *gomock.Controller
recorder *MockSchedulerClientMockRecorder
isgomock struct{}
}
// MockSchedulerClientMockRecorder is the mock recorder for MockSchedulerClient.
@ -44,30 +143,10 @@ func (m *MockSchedulerClient) EXPECT() *MockSchedulerClientMockRecorder {
return m.recorder
}
// AnnounceHost mocks base method.
func (m *MockSchedulerClient) AnnounceHost(ctx context.Context, in *scheduler.AnnounceHostRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "AnnounceHost", varargs...)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// AnnounceHost indicates an expected call of AnnounceHost.
func (mr *MockSchedulerClientMockRecorder) AnnounceHost(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AnnounceHost", reflect.TypeOf((*MockSchedulerClient)(nil).AnnounceHost), varargs...)
}
// AnnounceTask mocks base method.
func (m *MockSchedulerClient) AnnounceTask(ctx context.Context, in *scheduler.AnnounceTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
func (m *MockSchedulerClient) AnnounceTask(ctx context.Context, in *v1.AnnounceTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
@ -78,36 +157,16 @@ func (m *MockSchedulerClient) AnnounceTask(ctx context.Context, in *scheduler.An
}
// AnnounceTask indicates an expected call of AnnounceTask.
func (mr *MockSchedulerClientMockRecorder) AnnounceTask(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockSchedulerClientMockRecorder) AnnounceTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AnnounceTask", reflect.TypeOf((*MockSchedulerClient)(nil).AnnounceTask), varargs...)
}
// LeaveHost mocks base method.
func (m *MockSchedulerClient) LeaveHost(ctx context.Context, in *scheduler.LeaveHostRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "LeaveHost", varargs...)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// LeaveHost indicates an expected call of LeaveHost.
func (mr *MockSchedulerClientMockRecorder) LeaveHost(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveHost", reflect.TypeOf((*MockSchedulerClient)(nil).LeaveHost), varargs...)
}
// LeaveTask mocks base method.
func (m *MockSchedulerClient) LeaveTask(ctx context.Context, in *scheduler.PeerTarget, opts ...grpc.CallOption) (*emptypb.Empty, error) {
func (m *MockSchedulerClient) LeaveTask(ctx context.Context, in *v1.PeerTarget, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
@ -118,36 +177,36 @@ func (m *MockSchedulerClient) LeaveTask(ctx context.Context, in *scheduler.PeerT
}
// LeaveTask indicates an expected call of LeaveTask.
func (mr *MockSchedulerClientMockRecorder) LeaveTask(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockSchedulerClientMockRecorder) LeaveTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveTask", reflect.TypeOf((*MockSchedulerClient)(nil).LeaveTask), varargs...)
}
// RegisterPeerTask mocks base method.
func (m *MockSchedulerClient) RegisterPeerTask(ctx context.Context, in *scheduler.PeerTaskRequest, opts ...grpc.CallOption) (*scheduler.RegisterResult, error) {
func (m *MockSchedulerClient) RegisterPeerTask(ctx context.Context, in *v1.PeerTaskRequest, opts ...grpc.CallOption) (*v1.RegisterResult, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "RegisterPeerTask", varargs...)
ret0, _ := ret[0].(*scheduler.RegisterResult)
ret0, _ := ret[0].(*v1.RegisterResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RegisterPeerTask indicates an expected call of RegisterPeerTask.
func (mr *MockSchedulerClientMockRecorder) RegisterPeerTask(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockSchedulerClientMockRecorder) RegisterPeerTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterPeerTask", reflect.TypeOf((*MockSchedulerClient)(nil).RegisterPeerTask), varargs...)
}
// ReportPeerResult mocks base method.
func (m *MockSchedulerClient) ReportPeerResult(ctx context.Context, in *scheduler.PeerResult, opts ...grpc.CallOption) (*emptypb.Empty, error) {
func (m *MockSchedulerClient) ReportPeerResult(ctx context.Context, in *v1.PeerResult, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
@ -158,49 +217,49 @@ func (m *MockSchedulerClient) ReportPeerResult(ctx context.Context, in *schedule
}
// ReportPeerResult indicates an expected call of ReportPeerResult.
func (mr *MockSchedulerClientMockRecorder) ReportPeerResult(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockSchedulerClientMockRecorder) ReportPeerResult(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeerResult", reflect.TypeOf((*MockSchedulerClient)(nil).ReportPeerResult), varargs...)
}
// ReportPieceResult mocks base method.
func (m *MockSchedulerClient) ReportPieceResult(ctx context.Context, opts ...grpc.CallOption) (scheduler.Scheduler_ReportPieceResultClient, error) {
func (m *MockSchedulerClient) ReportPieceResult(ctx context.Context, opts ...grpc.CallOption) (v1.Scheduler_ReportPieceResultClient, error) {
m.ctrl.T.Helper()
varargs := []any{ctx}
varargs := []interface{}{ctx}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ReportPieceResult", varargs...)
ret0, _ := ret[0].(scheduler.Scheduler_ReportPieceResultClient)
ret0, _ := ret[0].(v1.Scheduler_ReportPieceResultClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ReportPieceResult indicates an expected call of ReportPieceResult.
func (mr *MockSchedulerClientMockRecorder) ReportPieceResult(ctx any, opts ...any) *gomock.Call {
func (mr *MockSchedulerClientMockRecorder) ReportPieceResult(ctx interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx}, opts...)
varargs := append([]interface{}{ctx}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPieceResult", reflect.TypeOf((*MockSchedulerClient)(nil).ReportPieceResult), varargs...)
}
// StatTask mocks base method.
func (m *MockSchedulerClient) StatTask(ctx context.Context, in *scheduler.StatTaskRequest, opts ...grpc.CallOption) (*scheduler.Task, error) {
func (m *MockSchedulerClient) StatTask(ctx context.Context, in *v1.StatTaskRequest, opts ...grpc.CallOption) (*v1.Task, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "StatTask", varargs...)
ret0, _ := ret[0].(*scheduler.Task)
ret0, _ := ret[0].(*v1.Task)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StatTask indicates an expected call of StatTask.
func (mr *MockSchedulerClientMockRecorder) StatTask(ctx, in any, opts ...any) *gomock.Call {
func (mr *MockSchedulerClientMockRecorder) StatTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatTask", reflect.TypeOf((*MockSchedulerClient)(nil).StatTask), varargs...)
}
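
Illustrative only (not part of the diff): stubbing the generated MockSchedulerClient above in a unit test. The main-branch import paths are assumed, and the empty request/response values are placeholders.

package mocks_test

import (
	"context"
	"testing"

	"go.uber.org/mock/gomock"

	scheduler "d7y.io/api/v2/pkg/apis/scheduler/v1"   // assumed import path
	mocks "d7y.io/api/v2/pkg/apis/scheduler/v1/mocks" // assumed import path
)

func TestStatTaskStub(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	client := mocks.NewMockSchedulerClient(ctrl)
	// Return a placeholder Task for any StatTask call.
	client.EXPECT().
		StatTask(gomock.Any(), gomock.Any()).
		Return(&scheduler.Task{}, nil)

	if _, err := client.StatTask(context.Background(), &scheduler.StatTaskRequest{}); err != nil {
		t.Fatalf("StatTask: %v", err)
	}
}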
@ -208,7 +267,6 @@ func (mr *MockSchedulerClientMockRecorder) StatTask(ctx, in any, opts ...any) *g
type MockScheduler_ReportPieceResultClient struct {
ctrl *gomock.Controller
recorder *MockScheduler_ReportPieceResultClientMockRecorder
isgomock struct{}
}
// MockScheduler_ReportPieceResultClientMockRecorder is the mock recorder for MockScheduler_ReportPieceResultClient.
@ -272,10 +330,10 @@ func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Header() *gomock.Ca
}
// Recv mocks base method.
func (m *MockScheduler_ReportPieceResultClient) Recv() (*scheduler.PeerPacket, error) {
func (m *MockScheduler_ReportPieceResultClient) Recv() (*v1.PeerPacket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*scheduler.PeerPacket)
ret0, _ := ret[0].(*v1.PeerPacket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -287,7 +345,7 @@ func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Recv() *gomock.Call
}
// RecvMsg mocks base method.
func (m_2 *MockScheduler_ReportPieceResultClient) RecvMsg(m any) error {
func (m_2 *MockScheduler_ReportPieceResultClient) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
@ -295,13 +353,13 @@ func (m_2 *MockScheduler_ReportPieceResultClient) RecvMsg(m any) error {
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockScheduler_ReportPieceResultClientMockRecorder) RecvMsg(m any) *gomock.Call {
func (mr *MockScheduler_ReportPieceResultClientMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockScheduler_ReportPieceResultClient) Send(arg0 *scheduler.PieceResult) error {
func (m *MockScheduler_ReportPieceResultClient) Send(arg0 *v1.PieceResult) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
@ -309,13 +367,13 @@ func (m *MockScheduler_ReportPieceResultClient) Send(arg0 *scheduler.PieceResult
}
// Send indicates an expected call of Send.
func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Send(arg0 any) *gomock.Call {
func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Send), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockScheduler_ReportPieceResultClient) SendMsg(m any) error {
func (m_2 *MockScheduler_ReportPieceResultClient) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
@ -323,7 +381,7 @@ func (m_2 *MockScheduler_ReportPieceResultClient) SendMsg(m any) error {
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockScheduler_ReportPieceResultClientMockRecorder) SendMsg(m any) *gomock.Call {
func (mr *MockScheduler_ReportPieceResultClientMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).SendMsg), m)
}
@ -346,7 +404,6 @@ func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Trailer() *gomock.C
type MockSchedulerServer struct {
ctrl *gomock.Controller
recorder *MockSchedulerServerMockRecorder
isgomock struct{}
}
// MockSchedulerServerMockRecorder is the mock recorder for MockSchedulerServer.
@ -366,23 +423,8 @@ func (m *MockSchedulerServer) EXPECT() *MockSchedulerServerMockRecorder {
return m.recorder
}
// AnnounceHost mocks base method.
func (m *MockSchedulerServer) AnnounceHost(arg0 context.Context, arg1 *scheduler.AnnounceHostRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AnnounceHost", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// AnnounceHost indicates an expected call of AnnounceHost.
func (mr *MockSchedulerServerMockRecorder) AnnounceHost(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AnnounceHost", reflect.TypeOf((*MockSchedulerServer)(nil).AnnounceHost), arg0, arg1)
}
// AnnounceTask mocks base method.
func (m *MockSchedulerServer) AnnounceTask(arg0 context.Context, arg1 *scheduler.AnnounceTaskRequest) (*emptypb.Empty, error) {
func (m *MockSchedulerServer) AnnounceTask(arg0 context.Context, arg1 *v1.AnnounceTaskRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AnnounceTask", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
@ -391,28 +433,13 @@ func (m *MockSchedulerServer) AnnounceTask(arg0 context.Context, arg1 *scheduler
}
// AnnounceTask indicates an expected call of AnnounceTask.
func (mr *MockSchedulerServerMockRecorder) AnnounceTask(arg0, arg1 any) *gomock.Call {
func (mr *MockSchedulerServerMockRecorder) AnnounceTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AnnounceTask", reflect.TypeOf((*MockSchedulerServer)(nil).AnnounceTask), arg0, arg1)
}
// LeaveHost mocks base method.
func (m *MockSchedulerServer) LeaveHost(arg0 context.Context, arg1 *scheduler.LeaveHostRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LeaveHost", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// LeaveHost indicates an expected call of LeaveHost.
func (mr *MockSchedulerServerMockRecorder) LeaveHost(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveHost", reflect.TypeOf((*MockSchedulerServer)(nil).LeaveHost), arg0, arg1)
}
// LeaveTask mocks base method.
func (m *MockSchedulerServer) LeaveTask(arg0 context.Context, arg1 *scheduler.PeerTarget) (*emptypb.Empty, error) {
func (m *MockSchedulerServer) LeaveTask(arg0 context.Context, arg1 *v1.PeerTarget) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LeaveTask", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
@ -421,28 +448,28 @@ func (m *MockSchedulerServer) LeaveTask(arg0 context.Context, arg1 *scheduler.Pe
}
// LeaveTask indicates an expected call of LeaveTask.
func (mr *MockSchedulerServerMockRecorder) LeaveTask(arg0, arg1 any) *gomock.Call {
func (mr *MockSchedulerServerMockRecorder) LeaveTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveTask", reflect.TypeOf((*MockSchedulerServer)(nil).LeaveTask), arg0, arg1)
}
// RegisterPeerTask mocks base method.
func (m *MockSchedulerServer) RegisterPeerTask(arg0 context.Context, arg1 *scheduler.PeerTaskRequest) (*scheduler.RegisterResult, error) {
func (m *MockSchedulerServer) RegisterPeerTask(arg0 context.Context, arg1 *v1.PeerTaskRequest) (*v1.RegisterResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RegisterPeerTask", arg0, arg1)
ret0, _ := ret[0].(*scheduler.RegisterResult)
ret0, _ := ret[0].(*v1.RegisterResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RegisterPeerTask indicates an expected call of RegisterPeerTask.
func (mr *MockSchedulerServerMockRecorder) RegisterPeerTask(arg0, arg1 any) *gomock.Call {
func (mr *MockSchedulerServerMockRecorder) RegisterPeerTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterPeerTask", reflect.TypeOf((*MockSchedulerServer)(nil).RegisterPeerTask), arg0, arg1)
}
// ReportPeerResult mocks base method.
func (m *MockSchedulerServer) ReportPeerResult(arg0 context.Context, arg1 *scheduler.PeerResult) (*emptypb.Empty, error) {
func (m *MockSchedulerServer) ReportPeerResult(arg0 context.Context, arg1 *v1.PeerResult) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReportPeerResult", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
@ -451,13 +478,13 @@ func (m *MockSchedulerServer) ReportPeerResult(arg0 context.Context, arg1 *sched
}
// ReportPeerResult indicates an expected call of ReportPeerResult.
func (mr *MockSchedulerServerMockRecorder) ReportPeerResult(arg0, arg1 any) *gomock.Call {
func (mr *MockSchedulerServerMockRecorder) ReportPeerResult(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeerResult", reflect.TypeOf((*MockSchedulerServer)(nil).ReportPeerResult), arg0, arg1)
}
// ReportPieceResult mocks base method.
func (m *MockSchedulerServer) ReportPieceResult(arg0 scheduler.Scheduler_ReportPieceResultServer) error {
func (m *MockSchedulerServer) ReportPieceResult(arg0 v1.Scheduler_ReportPieceResultServer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReportPieceResult", arg0)
ret0, _ := ret[0].(error)
@ -465,67 +492,30 @@ func (m *MockSchedulerServer) ReportPieceResult(arg0 scheduler.Scheduler_ReportP
}
// ReportPieceResult indicates an expected call of ReportPieceResult.
func (mr *MockSchedulerServerMockRecorder) ReportPieceResult(arg0 any) *gomock.Call {
func (mr *MockSchedulerServerMockRecorder) ReportPieceResult(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPieceResult", reflect.TypeOf((*MockSchedulerServer)(nil).ReportPieceResult), arg0)
}
// StatTask mocks base method.
func (m *MockSchedulerServer) StatTask(arg0 context.Context, arg1 *scheduler.StatTaskRequest) (*scheduler.Task, error) {
func (m *MockSchedulerServer) StatTask(arg0 context.Context, arg1 *v1.StatTaskRequest) (*v1.Task, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StatTask", arg0, arg1)
ret0, _ := ret[0].(*scheduler.Task)
ret0, _ := ret[0].(*v1.Task)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StatTask indicates an expected call of StatTask.
func (mr *MockSchedulerServerMockRecorder) StatTask(arg0, arg1 any) *gomock.Call {
func (mr *MockSchedulerServerMockRecorder) StatTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatTask", reflect.TypeOf((*MockSchedulerServer)(nil).StatTask), arg0, arg1)
}
// MockUnsafeSchedulerServer is a mock of UnsafeSchedulerServer interface.
type MockUnsafeSchedulerServer struct {
ctrl *gomock.Controller
recorder *MockUnsafeSchedulerServerMockRecorder
isgomock struct{}
}
// MockUnsafeSchedulerServerMockRecorder is the mock recorder for MockUnsafeSchedulerServer.
type MockUnsafeSchedulerServerMockRecorder struct {
mock *MockUnsafeSchedulerServer
}
// NewMockUnsafeSchedulerServer creates a new mock instance.
func NewMockUnsafeSchedulerServer(ctrl *gomock.Controller) *MockUnsafeSchedulerServer {
mock := &MockUnsafeSchedulerServer{ctrl: ctrl}
mock.recorder = &MockUnsafeSchedulerServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockUnsafeSchedulerServer) EXPECT() *MockUnsafeSchedulerServerMockRecorder {
return m.recorder
}
// mustEmbedUnimplementedSchedulerServer mocks base method.
func (m *MockUnsafeSchedulerServer) mustEmbedUnimplementedSchedulerServer() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "mustEmbedUnimplementedSchedulerServer")
}
// mustEmbedUnimplementedSchedulerServer indicates an expected call of mustEmbedUnimplementedSchedulerServer.
func (mr *MockUnsafeSchedulerServerMockRecorder) mustEmbedUnimplementedSchedulerServer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedSchedulerServer", reflect.TypeOf((*MockUnsafeSchedulerServer)(nil).mustEmbedUnimplementedSchedulerServer))
}
// MockScheduler_ReportPieceResultServer is a mock of Scheduler_ReportPieceResultServer interface.
type MockScheduler_ReportPieceResultServer struct {
ctrl *gomock.Controller
recorder *MockScheduler_ReportPieceResultServerMockRecorder
isgomock struct{}
}
// MockScheduler_ReportPieceResultServerMockRecorder is the mock recorder for MockScheduler_ReportPieceResultServer.
@ -560,10 +550,10 @@ func (mr *MockScheduler_ReportPieceResultServerMockRecorder) Context() *gomock.C
}
// Recv mocks base method.
func (m *MockScheduler_ReportPieceResultServer) Recv() (*scheduler.PieceResult, error) {
func (m *MockScheduler_ReportPieceResultServer) Recv() (*v1.PieceResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*scheduler.PieceResult)
ret0, _ := ret[0].(*v1.PieceResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@ -575,7 +565,7 @@ func (mr *MockScheduler_ReportPieceResultServerMockRecorder) Recv() *gomock.Call
}
// RecvMsg mocks base method.
func (m_2 *MockScheduler_ReportPieceResultServer) RecvMsg(m any) error {
func (m_2 *MockScheduler_ReportPieceResultServer) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
@ -583,13 +573,13 @@ func (m_2 *MockScheduler_ReportPieceResultServer) RecvMsg(m any) error {
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) RecvMsg(m any) *gomock.Call {
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockScheduler_ReportPieceResultServer) Send(arg0 *scheduler.PeerPacket) error {
func (m *MockScheduler_ReportPieceResultServer) Send(arg0 *v1.PeerPacket) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
@ -597,7 +587,7 @@ func (m *MockScheduler_ReportPieceResultServer) Send(arg0 *scheduler.PeerPacket)
}
// Send indicates an expected call of Send.
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) Send(arg0 any) *gomock.Call {
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).Send), arg0)
}
@ -611,13 +601,13 @@ func (m *MockScheduler_ReportPieceResultServer) SendHeader(arg0 metadata.MD) err
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SendHeader(arg0 any) *gomock.Call {
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockScheduler_ReportPieceResultServer) SendMsg(m any) error {
func (m_2 *MockScheduler_ReportPieceResultServer) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
@ -625,7 +615,7 @@ func (m_2 *MockScheduler_ReportPieceResultServer) SendMsg(m any) error {
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SendMsg(m any) *gomock.Call {
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SendMsg), m)
}
@ -639,7 +629,7 @@ func (m *MockScheduler_ReportPieceResultServer) SetHeader(arg0 metadata.MD) erro
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SetHeader(arg0 any) *gomock.Call {
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SetHeader), arg0)
}
@ -651,7 +641,7 @@ func (m *MockScheduler_ReportPieceResultServer) SetTrailer(arg0 metadata.MD) {
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SetTrailer(arg0 any) *gomock.Call {
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SetTrailer), arg0)
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@ -23,7 +23,7 @@ import "pkg/apis/errordetails/v1/errordetails.proto";
import "validate/validate.proto";
import "google/protobuf/empty.proto";
option go_package = "d7y.io/api/v2/pkg/apis/scheduler/v1;scheduler";
option go_package = "d7y.io/api/pkg/apis/scheduler/v1";
// PeerTaskRequest represents request of RegisterPeerTask.
message PeerTaskRequest{
@ -35,10 +35,12 @@ message PeerTaskRequest{
string peer_id = 3 [(validate.rules).string.min_len = 1];
// Peer host info.
PeerHost peer_host = 4;
// When requesting a range, this triggers the seed peer to download the entire task.
bool prefetch = 5;
// Peer host load.
common.HostLoad host_load = 5;
// Whether this request is caused by migration.
bool is_migrating = 6;
// Pattern includes p2p, seed-peer and source.
common.Pattern pattern = 7;
// Task id.
string task_id = 8;
}
@ -84,11 +86,15 @@ message PeerHost{
// Port of download server.
int32 down_port = 4 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Peer hostname.
string hostname = 5 [(validate.rules).string.hostname = true];
string host_name = 5 [(validate.rules).string.hostname = true];
// Security domain for network.
string security_domain = 6;
// Location path(area|country|province|city|...).
string location = 7;
// IDC where the peer host is located
string idc = 8;
// Network topology(switch|router|...).
string net_topology = 9;
}
// PieceResult represents request of ReportPieceResult.
@ -109,6 +115,8 @@ message PieceResult{
bool success = 7;
// Result code.
common.Code code = 8;
// Peer host load.
common.HostLoad host_load = 9;
// Finished count.
int32 finished_count = 10;
// Task extend attribute,
@ -131,6 +139,8 @@ message PeerPacket{
string task_id = 2 [(validate.rules).string.min_len = 1];
// Source peer id.
string src_pid = 3 [(validate.rules).string.min_len = 1];
// Concurrent downloading count from main peer.
int32 parallel_count = 4 [(validate.rules).int32.gte = 1];
// Main peer.
DestPeer main_peer = 5;
// Candidate peers.
@ -152,6 +162,8 @@ message PeerResult{
string peer_id = 2 [(validate.rules).string.min_len = 1];
// Source host ip.
string src_ip = 3 [(validate.rules).string.ip = true];
// Security domain.
string security_domain = 4;
// IDC where the peer host is located
string idc = 5;
// Download url.
@ -175,20 +187,12 @@ message PeerResult{
}
}
// AnnounceTaskRequest represents request of AnnounceTask.
message AnnounceTaskRequest{
// PeerTarget represents request of LeaveTask.
message PeerTarget{
// Task id.
string task_id = 1 [(validate.rules).string.min_len = 1];
// Download url.
string url = 2 [(validate.rules).string = {uri: true, ignore_empty: true}];
// URL meta info.
common.UrlMeta url_meta = 3 [(validate.rules).message.required = true];
// Peer host info.
PeerHost peer_host = 4;
// Task piece info.
common.PiecePacket piece_packet = 5 [(validate.rules).message.required = true];
// Task type.
common.TaskType task_type = 6;
// Peer id.
string peer_id = 2 [(validate.rules).string.min_len = 1];
}
// StatTaskRequest represents request of StatTask.
@ -215,157 +219,20 @@ message Task{
bool hasAvailablePeer = 7;
}
// PeerTarget represents request of LeaveTask.
message PeerTarget{
// AnnounceTaskRequest represents request of AnnounceTask.
message AnnounceTaskRequest{
// Task id.
string task_id = 1 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 2 [(validate.rules).string.min_len = 1];
}
// LeaveHostRequest represents request of LeaveHost.
message LeaveHostRequest{
// Host id.
string id = 1 [(validate.rules).string.min_len = 1];
}
// AnnounceHostRequest represents request of AnnounceHost.
message AnnounceHostRequest{
// Host id.
string id = 1 [(validate.rules).string.min_len = 1];
// Host type.
string type = 2 [(validate.rules).string = {in: ["normal", "super", "strong", "weak"]}];
// Hostname.
string hostname = 3 [(validate.rules).string.min_len = 1];
// Host ip.
string ip = 4 [(validate.rules).string.ip = true];
// Port of grpc service.
int32 port = 5 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Port of download server.
int32 download_port = 6 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Host OS.
string os = 7;
// Host platform.
string platform = 8;
// Host platform family.
string platform_family = 9;
// Host platform version.
string platform_version = 10;
// Host kernel version.
string kernel_version = 11;
// CPU Stat.
CPU cpu = 12 [(validate.rules).message.required = true];
// Memory Stat.
Memory memory = 13 [(validate.rules).message.required = true];
// Network Stat.
Network network = 14 [(validate.rules).message.required = true];
// Disk Stat.
Disk disk = 15 [(validate.rules).message.required = true];
// Build information.
Build build = 16 [(validate.rules).message.required = true];
// ID of the cluster to which the host belongs.
uint64 scheduler_cluster_id = 17;
// Port of object storage server.
int32 object_storage_port = 18 [(validate.rules).int32 = {gte: 1024, lt: 65535, ignore_empty: true}];
}
// CPU Stat.
message CPU {
// Number of logical cores in the system.
uint32 logical_count = 1;
// Number of physical cores in the system
uint32 physical_count = 2;
// Percent calculates the percentage of cpu used.
double percent = 3 [(validate.rules).double.gte = 0];
// Calculates the percentage of cpu used by process.
double process_percent = 4 [(validate.rules).double.gte = 0];
// CPUTimes contains the amounts of time the CPU has spent performing different kinds of work.
CPUTimes times = 5 [(validate.rules).message.required = true];
}
// CPUTimes contains the amounts of time the CPU has spent performing different
// kinds of work. Time units are in seconds.
message CPUTimes {
// CPU time of user.
double user = 1 [(validate.rules).double.gte = 0];
// CPU time of system.
double system = 2 [(validate.rules).double.gte = 0];
// CPU time of idle.
double idle = 3 [(validate.rules).double.gte = 0];
// CPU time of nice.
double nice = 4 [(validate.rules).double.gte = 0];
// CPU time of iowait.
double iowait = 5 [(validate.rules).double.gte = 0];
// CPU time of irq.
double irq = 6 [(validate.rules).double.gte = 0];
// CPU time of softirq.
double softirq = 7 [(validate.rules).double.gte = 0];
// CPU time of steal.
double steal = 8 [(validate.rules).double.gte = 0];
// CPU time of guest.
double guest = 9 [(validate.rules).double.gte = 0];
// CPU time of guest nice.
double guest_nice = 10 [(validate.rules).double.gte = 0];
}
// Memory Stat.
message Memory {
// Total amount of RAM on this system.
uint64 total = 1;
// RAM available for programs to allocate.
uint64 available = 2;
// RAM used by programs.
uint64 used = 3;
// Percentage of RAM used by programs.
double used_percent = 4;
// Calculates the percentage of memory used by process.
double process_used_percent = 5;
// This is the kernel's notion of free memory.
uint64 free = 6;
}
// Network Stat.
message Network {
// Return count of tcp connections opened and status is ESTABLISHED.
uint32 tcp_connection_count = 1;
// Return count of upload tcp connections opened and status is ESTABLISHED.
uint32 upload_tcp_connection_count = 2;
// Location path(area|country|province|city|...).
string location = 4;
// IDC where the peer host is located
string idc = 5;
}
// Disk Stat.
message Disk {
// Total amount of disk on the data path of dragonfly.
uint64 total = 1;
// Free amount of disk on the data path of dragonfly.
uint64 free = 2;
// Used amount of disk on the data path of dragonfly.
uint64 used = 3;
// Used percent of disk on the data path of dragonfly directory.
double used_percent = 4 [(validate.rules).double = {gte: 0, lte: 100}];
// Total amount of inodes on the data path of dragonfly directory.
uint64 inodes_total = 5;
// Used amount of inodes on the data path of dragonfly directory.
uint64 inodes_used = 6;
// Free amount of inodes on the data path of dragonfly directory.
uint64 inodes_free = 7;
// Used percent of inodes on the data path of dragonfly directory.
double inodes_used_percent = 8 [(validate.rules).double = {gte: 0, lte: 100}];
}
// Build information.
message Build {
// Git version.
string git_version = 1;
// Git commit.
string git_commit = 2;
// Golang version.
string go_version = 3;
// Build platform.
string platform = 4;
// Download url.
string url = 2 [(validate.rules).string = {uri: true, ignore_empty: true}];
// URL meta info.
common.UrlMeta url_meta = 3 [(validate.rules).message.required = true];
// Peer host info.
PeerHost peer_host = 4;
// Task piece info.
common.PiecePacket piece_packet = 5 [(validate.rules).message.required = true];
// Task type.
common.TaskType task_type = 6;
}
// Scheduler RPC Service.
@ -379,18 +246,12 @@ service Scheduler{
// ReportPeerResult reports downloading result for the peer.
rpc ReportPeerResult(PeerResult)returns(google.protobuf.Empty);
// A peer announces that it has the announced task to other peers.
rpc AnnounceTask(AnnounceTaskRequest) returns(google.protobuf.Empty);
// LeaveTask makes the peer leaving from task.
rpc LeaveTask(PeerTarget)returns(google.protobuf.Empty);
// Checks if any peer has the given task.
rpc StatTask(StatTaskRequest)returns(Task);
// LeaveTask makes the peer leaving from task.
rpc LeaveTask(PeerTarget)returns(google.protobuf.Empty);
// AnnounceHost announces host to scheduler.
rpc AnnounceHost(AnnounceHostRequest)returns(google.protobuf.Empty);
// LeaveHost makes the peers leaving from host.
rpc LeaveHost(LeaveHostRequest)returns(google.protobuf.Empty);
// A peer announces that it has the announced task to other peers.
rpc AnnounceTask(AnnounceTaskRequest) returns(google.protobuf.Empty);
}
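
Illustrative only (not part of the diff): a minimal Go client sketch for the v1 Scheduler service defined above, assuming the main-branch package d7y.io/api/v2/pkg/apis/scheduler/v1. The address is a placeholder, and the empty request messages would normally carry real task and peer fields.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	scheduler "d7y.io/api/v2/pkg/apis/scheduler/v1" // assumed import path
)

func main() {
	// Placeholder address of a scheduler instance.
	conn, err := grpc.Dial("127.0.0.1:8002", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := scheduler.NewSchedulerClient(conn)

	// Unary call: check whether any peer already holds the task.
	if _, err := client.StatTask(context.Background(), &scheduler.StatTaskRequest{}); err != nil {
		log.Printf("StatTask: %v", err)
	}

	// Bidirectional stream: send piece results, receive peer packets.
	stream, err := client.ReportPieceResult(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	if err := stream.Send(&scheduler.PieceResult{}); err != nil {
		log.Fatal(err)
	}
	if _, err := stream.Recv(); err != nil {
		log.Printf("Recv: %v", err)
	}
}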

View File

@ -1,405 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.21.6
// source: pkg/apis/scheduler/v1/scheduler.proto
package scheduler
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// SchedulerClient is the client API for Scheduler service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type SchedulerClient interface {
// RegisterPeerTask registers a peer into task.
RegisterPeerTask(ctx context.Context, in *PeerTaskRequest, opts ...grpc.CallOption) (*RegisterResult, error)
// ReportPieceResult reports piece results and receives peer packets.
ReportPieceResult(ctx context.Context, opts ...grpc.CallOption) (Scheduler_ReportPieceResultClient, error)
// ReportPeerResult reports downloading result for the peer.
ReportPeerResult(ctx context.Context, in *PeerResult, opts ...grpc.CallOption) (*emptypb.Empty, error)
// A peer announces that it has the announced task to other peers.
AnnounceTask(ctx context.Context, in *AnnounceTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Checks if any peer has the given task.
StatTask(ctx context.Context, in *StatTaskRequest, opts ...grpc.CallOption) (*Task, error)
// LeaveTask makes the peer leaving from task.
LeaveTask(ctx context.Context, in *PeerTarget, opts ...grpc.CallOption) (*emptypb.Empty, error)
// AnnounceHost announces host to scheduler.
AnnounceHost(ctx context.Context, in *AnnounceHostRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// LeaveHost makes the peers leaving from host.
LeaveHost(ctx context.Context, in *LeaveHostRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}
type schedulerClient struct {
cc grpc.ClientConnInterface
}
func NewSchedulerClient(cc grpc.ClientConnInterface) SchedulerClient {
return &schedulerClient{cc}
}
func (c *schedulerClient) RegisterPeerTask(ctx context.Context, in *PeerTaskRequest, opts ...grpc.CallOption) (*RegisterResult, error) {
out := new(RegisterResult)
err := c.cc.Invoke(ctx, "/scheduler.Scheduler/RegisterPeerTask", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) ReportPieceResult(ctx context.Context, opts ...grpc.CallOption) (Scheduler_ReportPieceResultClient, error) {
stream, err := c.cc.NewStream(ctx, &Scheduler_ServiceDesc.Streams[0], "/scheduler.Scheduler/ReportPieceResult", opts...)
if err != nil {
return nil, err
}
x := &schedulerReportPieceResultClient{stream}
return x, nil
}
type Scheduler_ReportPieceResultClient interface {
Send(*PieceResult) error
Recv() (*PeerPacket, error)
grpc.ClientStream
}
type schedulerReportPieceResultClient struct {
grpc.ClientStream
}
func (x *schedulerReportPieceResultClient) Send(m *PieceResult) error {
return x.ClientStream.SendMsg(m)
}
func (x *schedulerReportPieceResultClient) Recv() (*PeerPacket, error) {
m := new(PeerPacket)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *schedulerClient) ReportPeerResult(ctx context.Context, in *PeerResult, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/scheduler.Scheduler/ReportPeerResult", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) AnnounceTask(ctx context.Context, in *AnnounceTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/scheduler.Scheduler/AnnounceTask", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) StatTask(ctx context.Context, in *StatTaskRequest, opts ...grpc.CallOption) (*Task, error) {
out := new(Task)
err := c.cc.Invoke(ctx, "/scheduler.Scheduler/StatTask", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) LeaveTask(ctx context.Context, in *PeerTarget, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/scheduler.Scheduler/LeaveTask", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) AnnounceHost(ctx context.Context, in *AnnounceHostRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/scheduler.Scheduler/AnnounceHost", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) LeaveHost(ctx context.Context, in *LeaveHostRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/scheduler.Scheduler/LeaveHost", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// SchedulerServer is the server API for Scheduler service.
// All implementations should embed UnimplementedSchedulerServer
// for forward compatibility
type SchedulerServer interface {
// RegisterPeerTask registers a peer into task.
RegisterPeerTask(context.Context, *PeerTaskRequest) (*RegisterResult, error)
// ReportPieceResult reports piece results and receives peer packets.
ReportPieceResult(Scheduler_ReportPieceResultServer) error
// ReportPeerResult reports downloading result for the peer.
ReportPeerResult(context.Context, *PeerResult) (*emptypb.Empty, error)
// A peer announces that it has the announced task to other peers.
AnnounceTask(context.Context, *AnnounceTaskRequest) (*emptypb.Empty, error)
// Checks if any peer has the given task.
StatTask(context.Context, *StatTaskRequest) (*Task, error)
// LeaveTask makes the peer leaving from task.
LeaveTask(context.Context, *PeerTarget) (*emptypb.Empty, error)
// AnnounceHost announces host to scheduler.
AnnounceHost(context.Context, *AnnounceHostRequest) (*emptypb.Empty, error)
// LeaveHost makes the peers leaving from host.
LeaveHost(context.Context, *LeaveHostRequest) (*emptypb.Empty, error)
}
// UnimplementedSchedulerServer should be embedded to have forward compatible implementations.
type UnimplementedSchedulerServer struct {
}
func (UnimplementedSchedulerServer) RegisterPeerTask(context.Context, *PeerTaskRequest) (*RegisterResult, error) {
return nil, status.Errorf(codes.Unimplemented, "method RegisterPeerTask not implemented")
}
func (UnimplementedSchedulerServer) ReportPieceResult(Scheduler_ReportPieceResultServer) error {
return status.Errorf(codes.Unimplemented, "method ReportPieceResult not implemented")
}
func (UnimplementedSchedulerServer) ReportPeerResult(context.Context, *PeerResult) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method ReportPeerResult not implemented")
}
func (UnimplementedSchedulerServer) AnnounceTask(context.Context, *AnnounceTaskRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method AnnounceTask not implemented")
}
func (UnimplementedSchedulerServer) StatTask(context.Context, *StatTaskRequest) (*Task, error) {
return nil, status.Errorf(codes.Unimplemented, "method StatTask not implemented")
}
func (UnimplementedSchedulerServer) LeaveTask(context.Context, *PeerTarget) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method LeaveTask not implemented")
}
func (UnimplementedSchedulerServer) AnnounceHost(context.Context, *AnnounceHostRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method AnnounceHost not implemented")
}
func (UnimplementedSchedulerServer) LeaveHost(context.Context, *LeaveHostRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method LeaveHost not implemented")
}
// UnsafeSchedulerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SchedulerServer will
// result in compilation errors.
type UnsafeSchedulerServer interface {
mustEmbedUnimplementedSchedulerServer()
}
func RegisterSchedulerServer(s grpc.ServiceRegistrar, srv SchedulerServer) {
s.RegisterService(&Scheduler_ServiceDesc, srv)
}
func _Scheduler_RegisterPeerTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PeerTaskRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).RegisterPeerTask(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.Scheduler/RegisterPeerTask",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).RegisterPeerTask(ctx, req.(*PeerTaskRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_ReportPieceResult_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SchedulerServer).ReportPieceResult(&schedulerReportPieceResultServer{stream})
}
type Scheduler_ReportPieceResultServer interface {
Send(*PeerPacket) error
Recv() (*PieceResult, error)
grpc.ServerStream
}
type schedulerReportPieceResultServer struct {
grpc.ServerStream
}
func (x *schedulerReportPieceResultServer) Send(m *PeerPacket) error {
return x.ServerStream.SendMsg(m)
}
func (x *schedulerReportPieceResultServer) Recv() (*PieceResult, error) {
m := new(PieceResult)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _Scheduler_ReportPeerResult_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PeerResult)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).ReportPeerResult(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.Scheduler/ReportPeerResult",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).ReportPeerResult(ctx, req.(*PeerResult))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_AnnounceTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AnnounceTaskRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).AnnounceTask(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.Scheduler/AnnounceTask",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).AnnounceTask(ctx, req.(*AnnounceTaskRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_StatTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StatTaskRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).StatTask(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.Scheduler/StatTask",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).StatTask(ctx, req.(*StatTaskRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_LeaveTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PeerTarget)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).LeaveTask(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.Scheduler/LeaveTask",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).LeaveTask(ctx, req.(*PeerTarget))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_AnnounceHost_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AnnounceHostRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).AnnounceHost(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.Scheduler/AnnounceHost",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).AnnounceHost(ctx, req.(*AnnounceHostRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_LeaveHost_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(LeaveHostRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).LeaveHost(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.Scheduler/LeaveHost",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).LeaveHost(ctx, req.(*LeaveHostRequest))
}
return interceptor(ctx, in, info, handler)
}
// Scheduler_ServiceDesc is the grpc.ServiceDesc for Scheduler service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Scheduler_ServiceDesc = grpc.ServiceDesc{
ServiceName: "scheduler.Scheduler",
HandlerType: (*SchedulerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "RegisterPeerTask",
Handler: _Scheduler_RegisterPeerTask_Handler,
},
{
MethodName: "ReportPeerResult",
Handler: _Scheduler_ReportPeerResult_Handler,
},
{
MethodName: "AnnounceTask",
Handler: _Scheduler_AnnounceTask_Handler,
},
{
MethodName: "StatTask",
Handler: _Scheduler_StatTask_Handler,
},
{
MethodName: "LeaveTask",
Handler: _Scheduler_LeaveTask_Handler,
},
{
MethodName: "AnnounceHost",
Handler: _Scheduler_AnnounceHost_Handler,
},
{
MethodName: "LeaveHost",
Handler: _Scheduler_LeaveHost_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "ReportPieceResult",
Handler: _Scheduler_ReportPieceResult_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "pkg/apis/scheduler/v1/scheduler.proto",
}
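
The comment on Scheduler_ServiceDesc points at the intended wiring: register an implementation either through the generated RegisterSchedulerServer helper or directly with grpc.(*Server).RegisterService. A minimal sketch follows; the import path, listen address, and the embedded UnimplementedSchedulerServer (not shown in this excerpt, but conventionally emitted by protoc-gen-go-grpc alongside the descriptor) are assumptions, not part of this diff.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"

	scheduler "d7y.io/api/pkg/apis/scheduler/v1" // assumed import path for the v1 stubs
)

// schedulerService is a hypothetical stub; embedding the generated
// UnimplementedSchedulerServer makes every RPC return codes.Unimplemented
// until it is overridden.
type schedulerService struct {
	scheduler.UnimplementedSchedulerServer
}

func main() {
	lis, err := net.Listen("tcp", ":8002") // arbitrary port for the sketch
	if err != nil {
		log.Fatal(err)
	}

	gs := grpc.NewServer()
	// Equivalent to the generated scheduler.RegisterSchedulerServer(gs, ...).
	gs.RegisterService(&scheduler.Scheduler_ServiceDesc, &schedulerService{})

	if err := gs.Serve(lis); err != nil {
		log.Fatal(err)
	}
}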

View File

@ -1,19 +0,0 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mocks
//go:generate mockgen -destination scheduler_mock.go -source ../scheduler_grpc.pb.go -package mocks
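
The go:generate directive above regenerates the mock whenever the v1 gRPC stubs change. A hedged sketch of how such a mock is typically consumed in a test follows; the constructor name follows the usual mockgen convention and the import paths are assumptions, since the generated scheduler_mock.go itself is not reproduced in this diff.

package mocks_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock" // assumed mocking library

	scheduler "d7y.io/api/pkg/apis/scheduler/v1" // assumed import path
	"d7y.io/api/pkg/apis/scheduler/v1/mocks"     // assumed import path
)

func TestReportPeerResult(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// NewMockSchedulerClient is the constructor mockgen conventionally emits
	// for the SchedulerClient interface; nil results are enough for a sketch.
	client := mocks.NewMockSchedulerClient(ctrl)
	client.EXPECT().
		ReportPeerResult(gomock.Any(), gomock.Any()).
		Return(nil, nil)

	// Pass the mock wherever a SchedulerClient is expected; the controller
	// verifies the expected call when the test finishes.
	if _, err := client.ReportPeerResult(context.Background(), &scheduler.PeerResult{}); err != nil {
		t.Fatal(err)
	}
}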

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,443 +0,0 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package scheduler.v2;
import "pkg/apis/common/v2/common.proto";
import "pkg/apis/errordetails/v2/errordetails.proto";
import "validate/validate.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/duration.proto";
option go_package = "d7y.io/api/v2/pkg/apis/scheduler/v2;scheduler";
// RegisterPeerRequest represents peer registered request of AnnouncePeerRequest.
message RegisterPeerRequest {
// Download information.
common.v2.Download download = 1 [(validate.rules).message.required = true];
}
// DownloadPeerStartedRequest represents peer download started request of AnnouncePeerRequest.
message DownloadPeerStartedRequest {
}
// DownloadPeerBackToSourceStartedRequest represents peer download back-to-source started request of AnnouncePeerRequest.
message DownloadPeerBackToSourceStartedRequest {
// The description of the back-to-source reason.
optional string description = 1 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
// ReschedulePeerRequest represents reschedule request of AnnouncePeerRequest.
message ReschedulePeerRequest {
// Candidate parent ids.
repeated common.v2.Peer candidate_parents = 1;
// The description of the reschedule reason.
optional string description = 2 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
// DownloadPeerFinishedRequest represents peer download finished request of AnnouncePeerRequest.
message DownloadPeerFinishedRequest {
// Total content length.
uint64 content_length = 1;
// Total piece count.
uint32 piece_count = 2;
}
// DownloadPeerBackToSourceFinishedRequest represents peer download back-to-source finished request of AnnouncePeerRequest.
message DownloadPeerBackToSourceFinishedRequest {
// Total content length.
uint64 content_length = 1;
// Total piece count.
uint32 piece_count = 2;
}
// DownloadPeerFailedRequest represents peer download failed request of AnnouncePeerRequest.
message DownloadPeerFailedRequest {
// The description of the download failed.
optional string description = 1 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
// DownloadPeerBackToSourceFailedRequest represents peer download back-to-source failed request of AnnouncePeerRequest.
message DownloadPeerBackToSourceFailedRequest {
// The description of the download back-to-source failed.
optional string description = 1 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
// DownloadPieceFinishedRequest represents piece download finished request of AnnouncePeerRequest.
message DownloadPieceFinishedRequest {
// Piece info.
common.v2.Piece piece = 1 [(validate.rules).message.required = true];
}
// DownloadPieceBackToSourceFinishedRequest represents piece download back-to-source finished request of AnnouncePeerRequest.
message DownloadPieceBackToSourceFinishedRequest {
// Piece info.
common.v2.Piece piece = 1 [(validate.rules).message.required = true];
}
// DownloadPieceFailedRequest represents piece download failed request of AnnouncePeerRequest.
message DownloadPieceFailedRequest {
// Piece number.
optional uint32 piece_number = 1;
// Parent id.
string parent_id = 2 [(validate.rules).string.min_len = 1];
// Temporary indicates whether the error is temporary.
bool temporary = 3;
}
// DownloadPieceBackToSourceFailedRequest represents piece download back-to-source failed request of AnnouncePeerRequest.
message DownloadPieceBackToSourceFailedRequest {
// Piece number.
optional uint32 piece_number = 1;
oneof response {
option (validate.required) = true;
errordetails.v2.Backend backend = 2;
errordetails.v2.Unknown unknown = 3;
}
}
// AnnouncePeerRequest represents request of AnnouncePeer.
message AnnouncePeerRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
oneof request {
option (validate.required) = true;
RegisterPeerRequest register_peer_request = 4;
DownloadPeerStartedRequest download_peer_started_request = 5;
DownloadPeerBackToSourceStartedRequest download_peer_back_to_source_started_request = 6;
ReschedulePeerRequest reschedule_peer_request = 7;
DownloadPeerFinishedRequest download_peer_finished_request = 8;
DownloadPeerBackToSourceFinishedRequest download_peer_back_to_source_finished_request = 9;
DownloadPeerFailedRequest download_peer_failed_request = 10;
DownloadPeerBackToSourceFailedRequest download_peer_back_to_source_failed_request = 11;
DownloadPieceFinishedRequest download_piece_finished_request = 12;
DownloadPieceBackToSourceFinishedRequest download_piece_back_to_source_finished_request = 13;
DownloadPieceFailedRequest download_piece_failed_request = 14;
DownloadPieceBackToSourceFailedRequest download_piece_back_to_source_failed_request = 15;
}
}
// EmptyTaskResponse represents empty task response of AnnouncePeerResponse.
message EmptyTaskResponse {
}
// NormalTaskResponse represents normal task response of AnnouncePeerResponse.
message NormalTaskResponse {
// Candidate parents.
repeated common.v2.Peer candidate_parents = 1 [(validate.rules).repeated.min_items = 1];
}
// NeedBackToSourceResponse represents need back-to-source response of AnnouncePeerResponse.
message NeedBackToSourceResponse {
// The description of the back-to-source reason.
optional string description = 1 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
// AnnouncePeerResponse represents response of AnnouncePeer.
message AnnouncePeerResponse {
oneof response {
option (validate.required) = true;
EmptyTaskResponse empty_task_response = 1;
NormalTaskResponse normal_task_response = 2;
NeedBackToSourceResponse need_back_to_source_response = 3;
}
}
// StatPeerRequest represents request of StatPeer.
message StatPeerRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
}
// DeletePeerRequest represents request of DeletePeer.
message DeletePeerRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
}
// StatTaskRequest represents request of StatTask.
message StatTaskRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
}
// DeleteTaskRequest represents request of DeleteTask.
message DeleteTaskRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
}
// AnnounceHostRequest represents request of AnnounceHost.
message AnnounceHostRequest {
// Host info.
common.v2.Host host = 1 [(validate.rules).message.required = true];
// The interval between dfdaemon announces to scheduler.
optional google.protobuf.Duration interval = 2;
}
// ListHostsResponse represents response of ListHosts.
message ListHostsResponse {
// Hosts info.
repeated common.v2.Host hosts = 1;
}
// DeleteHostRequest represents request of DeleteHost.
message DeleteHostRequest{
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
}
// RegisterPersistentCachePeerRequest represents persistent cache peer registered request of AnnouncePersistentCachePeerRequest.
message RegisterPersistentCachePeerRequest {
// Persistent represents whether the persistent cache task is persistent.
// If the persistent cache task is persistent, the persistent cache peer will
// not be deleted when dfdaemon runs garbage collection.
bool persistent = 1;
// Tag is used to distinguish different persistent cache tasks.
optional string tag = 2;
// Application of task.
optional string application = 3;
// Task piece length. The value must be greater than or equal to 4194304 (4 MiB).
uint64 piece_length = 4 [(validate.rules).uint64.gte = 4194304];
// File path to be exported.
optional string output_path = 5 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
// Download timeout.
optional google.protobuf.Duration timeout = 6;
}
// DownloadPersistentCachePeerStartedRequest represents persistent cache peer download started request of AnnouncePersistentCachePeerRequest.
message DownloadPersistentCachePeerStartedRequest {
}
// ReschedulePersistentCachePeerRequest represents reschedule request of AnnouncePersistentCachePeerRequest.
message ReschedulePersistentCachePeerRequest {
// Candidate parent ids.
repeated common.v2.PersistentCachePeer candidate_parents = 1;
// The description of the reschedule reason.
optional string description = 2 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
// DownloadPersistentCachePeerFinishedRequest represents persistent cache peer download finished request of AnnouncePersistentCachePeerRequest.
message DownloadPersistentCachePeerFinishedRequest {
// Total piece count.
uint32 piece_count = 1;
}
// DownloadPersistentCachePeerFailedRequest represents persistent cache peer download failed request of AnnouncePersistentCachePeerRequest.
message DownloadPersistentCachePeerFailedRequest {
// The description of the download failed.
optional string description = 1 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
// AnnouncePersistentCachePeerRequest represents request of AnnouncePersistentCachePeer.
message AnnouncePersistentCachePeerRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
oneof request {
option (validate.required) = true;
RegisterPersistentCachePeerRequest register_persistent_cache_peer_request = 4;
DownloadPersistentCachePeerStartedRequest download_persistent_cache_peer_started_request = 5;
ReschedulePersistentCachePeerRequest reschedule_persistent_cache_peer_request = 6;
DownloadPersistentCachePeerFinishedRequest download_persistent_cache_peer_finished_request = 7;
DownloadPersistentCachePeerFailedRequest download_persistent_cache_peer_failed_request = 8;
DownloadPieceFinishedRequest download_piece_finished_request = 9;
DownloadPieceFailedRequest download_piece_failed_request = 10;
}
}
// EmptyPersistentCacheTaskResponse represents empty persistent cache task response of AnnouncePersistentCachePeerResponse.
message EmptyPersistentCacheTaskResponse {
}
// NormalPersistentCacheTaskResponse represents normal persistent cache task response of AnnouncePersistentCachePeerResponse.
message NormalPersistentCacheTaskResponse {
// Candidate parents.
repeated common.v2.PersistentCachePeer candidate_parents = 1 [(validate.rules).repeated.min_items = 1];
}
// AnnouncePersistentCachePeerResponse represents response of AnnouncePersistentCachePeer.
message AnnouncePersistentCachePeerResponse {
oneof response {
option (validate.required) = true;
EmptyPersistentCacheTaskResponse empty_persistent_cache_task_response = 1;
NormalPersistentCacheTaskResponse normal_persistent_cache_task_response = 2;
}
}
// StatPersistentCachePeerRequest represents request of StatPersistentCachePeer.
message StatPersistentCachePeerRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
}
// DeletePersistentCachePeerRequest represents request of DeletePersistentCachePeer.
message DeletePersistentCachePeerRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
}
// UploadPersistentCacheTaskStartedRequest represents request of UploadPersistentCacheTaskStarted.
message UploadPersistentCacheTaskStartedRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
// Replica count of the persistent cache task.
uint64 persistent_replica_count = 4 [(validate.rules).uint64.gte = 1];
// Tag is used to distinguish different persistent cache tasks.
optional string tag = 5;
// Application of task.
optional string application = 6;
// Task piece length. The value must be greater than or equal to 4194304 (4 MiB).
uint64 piece_length = 7 [(validate.rules).uint64.gte = 4194304];
// Task content length.
uint64 content_length = 8;
// Task piece count.
uint32 piece_count = 9;
// TTL of the persistent cache task.
google.protobuf.Duration ttl = 10 [(validate.rules).duration = {gte:{seconds: 300}, lte:{seconds: 604800}}];
}
// UploadPersistentCacheTaskFinishedRequest represents request of UploadPersistentCacheTaskFinished.
message UploadPersistentCacheTaskFinishedRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
}
// UploadPersistentCacheTaskFailedRequest represents request of UploadPersistentCacheTaskFailed.
message UploadPersistentCacheTaskFailedRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
// The description of the upload failed.
optional string description = 4 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
// StatPersistentCacheTaskRequest represents request of StatPersistentCacheTask.
message StatPersistentCacheTaskRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
}
// DeletePersistentCacheTaskRequest represents request of DeletePersistentCacheTask.
message DeletePersistentCacheTaskRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
}
// Scheduler RPC Service.
service Scheduler {
// AnnouncePeer announces peer to scheduler.
rpc AnnouncePeer(stream AnnouncePeerRequest) returns(stream AnnouncePeerResponse);
// Checks information of peer.
rpc StatPeer(StatPeerRequest)returns(common.v2.Peer);
// DeletePeer releases peer in scheduler.
rpc DeletePeer(DeletePeerRequest)returns(google.protobuf.Empty);
// Checks information of task.
rpc StatTask(StatTaskRequest)returns(common.v2.Task);
// DeleteTask releases task in scheduler.
rpc DeleteTask(DeleteTaskRequest)returns(google.protobuf.Empty);
// AnnounceHost announces host to scheduler.
rpc AnnounceHost(AnnounceHostRequest)returns(google.protobuf.Empty);
// ListHosts lists hosts in scheduler.
rpc ListHosts(google.protobuf.Empty)returns(ListHostsResponse);
// DeleteHost releases host in scheduler.
rpc DeleteHost(DeleteHostRequest)returns(google.protobuf.Empty);
// AnnouncePersistentCachePeer announces persistent cache peer to scheduler.
rpc AnnouncePersistentCachePeer(stream AnnouncePersistentCachePeerRequest) returns(stream AnnouncePersistentCachePeerResponse);
// Checks information of persistent cache peer.
rpc StatPersistentCachePeer(StatPersistentCachePeerRequest)returns(common.v2.PersistentCachePeer);
// DeletePersistentCachePeer releases persistent cache peer in scheduler.
rpc DeletePersistentCachePeer(DeletePersistentCachePeerRequest)returns(google.protobuf.Empty);
// UploadPersistentCacheTaskStarted uploads persistent cache task started to scheduler.
rpc UploadPersistentCacheTaskStarted(UploadPersistentCacheTaskStartedRequest)returns(google.protobuf.Empty);
// UploadPersistentCacheTaskFinished uploads persistent cache task finished to scheduler.
rpc UploadPersistentCacheTaskFinished(UploadPersistentCacheTaskFinishedRequest)returns(common.v2.PersistentCacheTask);
// UploadPersistentCacheTaskFailed uploads persistent cache task failed to scheduler.
rpc UploadPersistentCacheTaskFailed(UploadPersistentCacheTaskFailedRequest)returns(google.protobuf.Empty);
// Checks information of persistent cache task.
rpc StatPersistentCacheTask(StatPersistentCacheTaskRequest)returns(common.v2.PersistentCacheTask);
// DeletePersistentCacheTask releases persistent cache task in scheduler.
rpc DeletePersistentCacheTask(DeletePersistentCacheTaskRequest)returns(google.protobuf.Empty);
}
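
For orientation, here is a minimal client-side sketch of the AnnouncePeer bidirectional stream declared above, written against the generated Go stubs that appear later in this diff. The dial target and the Go oneof wrapper name are assumptions (the wrapper follows standard protoc-gen-go naming); the sketch sends the empty DownloadPeerStartedRequest only to stay self-contained, not to prescribe the announce ordering.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	schedulerv2 "d7y.io/api/v2/pkg/apis/scheduler/v2" // go_package declared above
)

func main() {
	conn, err := grpc.Dial("scheduler:8002", grpc.WithTransportCredentials(insecure.NewCredentials())) // assumed address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := schedulerv2.NewSchedulerClient(conn)

	// Open the bidirectional AnnouncePeer stream.
	stream, err := client.AnnouncePeer(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Send one announce request; the oneof wrapper type name is assumed from
	// the standard protoc-gen-go naming convention.
	if err := stream.Send(&schedulerv2.AnnouncePeerRequest{
		HostId: "host-id",
		TaskId: "task-id",
		PeerId: "peer-id",
		Request: &schedulerv2.AnnouncePeerRequest_DownloadPeerStartedRequest{
			DownloadPeerStartedRequest: &schedulerv2.DownloadPeerStartedRequest{},
		},
	}); err != nil {
		log.Fatal(err)
	}

	// Consume scheduling decisions (EmptyTaskResponse, NormalTaskResponse or
	// NeedBackToSourceResponse) until the scheduler closes the stream.
	for {
		resp, err := stream.Recv()
		if err != nil {
			log.Printf("stream closed: %v", err)
			return
		}
		log.Printf("scheduler response: %v", resp)
	}
}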

View File

@ -1,742 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.21.6
// source: pkg/apis/scheduler/v2/scheduler.proto
package scheduler
import (
context "context"
v2 "d7y.io/api/v2/pkg/apis/common/v2"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// SchedulerClient is the client API for Scheduler service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type SchedulerClient interface {
// AnnouncePeer announces peer to scheduler.
AnnouncePeer(ctx context.Context, opts ...grpc.CallOption) (Scheduler_AnnouncePeerClient, error)
// Checks information of peer.
StatPeer(ctx context.Context, in *StatPeerRequest, opts ...grpc.CallOption) (*v2.Peer, error)
// DeletePeer releases peer in scheduler.
DeletePeer(ctx context.Context, in *DeletePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Checks information of task.
StatTask(ctx context.Context, in *StatTaskRequest, opts ...grpc.CallOption) (*v2.Task, error)
// DeleteTask releases task in scheduler.
DeleteTask(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// AnnounceHost announces host to scheduler.
AnnounceHost(ctx context.Context, in *AnnounceHostRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// ListHosts lists hosts in scheduler.
ListHosts(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListHostsResponse, error)
// DeleteHost releases host in scheduler.
DeleteHost(ctx context.Context, in *DeleteHostRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// AnnouncePersistentCachePeer announces persistent cache peer to scheduler.
AnnouncePersistentCachePeer(ctx context.Context, opts ...grpc.CallOption) (Scheduler_AnnouncePersistentCachePeerClient, error)
// Checks information of persistent cache peer.
StatPersistentCachePeer(ctx context.Context, in *StatPersistentCachePeerRequest, opts ...grpc.CallOption) (*v2.PersistentCachePeer, error)
// DeletePersistentCachePeer releases persistent cache peer in scheduler.
DeletePersistentCachePeer(ctx context.Context, in *DeletePersistentCachePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// UploadPersistentCacheTaskStarted uploads persistent cache task started to scheduler.
UploadPersistentCacheTaskStarted(ctx context.Context, in *UploadPersistentCacheTaskStartedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// UploadPersistentCacheTaskFinished uploads persistent cache task finished to scheduler.
UploadPersistentCacheTaskFinished(ctx context.Context, in *UploadPersistentCacheTaskFinishedRequest, opts ...grpc.CallOption) (*v2.PersistentCacheTask, error)
// UploadPersistentCacheTaskFailed uploads persistent cache task failed to scheduler.
UploadPersistentCacheTaskFailed(ctx context.Context, in *UploadPersistentCacheTaskFailedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Checks information of persistent cache task.
StatPersistentCacheTask(ctx context.Context, in *StatPersistentCacheTaskRequest, opts ...grpc.CallOption) (*v2.PersistentCacheTask, error)
// DeletePersistentCacheTask releases persistent cache task in scheduler.
DeletePersistentCacheTask(ctx context.Context, in *DeletePersistentCacheTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}
type schedulerClient struct {
cc grpc.ClientConnInterface
}
func NewSchedulerClient(cc grpc.ClientConnInterface) SchedulerClient {
return &schedulerClient{cc}
}
func (c *schedulerClient) AnnouncePeer(ctx context.Context, opts ...grpc.CallOption) (Scheduler_AnnouncePeerClient, error) {
stream, err := c.cc.NewStream(ctx, &Scheduler_ServiceDesc.Streams[0], "/scheduler.v2.Scheduler/AnnouncePeer", opts...)
if err != nil {
return nil, err
}
x := &schedulerAnnouncePeerClient{stream}
return x, nil
}
type Scheduler_AnnouncePeerClient interface {
Send(*AnnouncePeerRequest) error
Recv() (*AnnouncePeerResponse, error)
grpc.ClientStream
}
type schedulerAnnouncePeerClient struct {
grpc.ClientStream
}
func (x *schedulerAnnouncePeerClient) Send(m *AnnouncePeerRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *schedulerAnnouncePeerClient) Recv() (*AnnouncePeerResponse, error) {
m := new(AnnouncePeerResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *schedulerClient) StatPeer(ctx context.Context, in *StatPeerRequest, opts ...grpc.CallOption) (*v2.Peer, error) {
out := new(v2.Peer)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/StatPeer", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) DeletePeer(ctx context.Context, in *DeletePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/DeletePeer", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) StatTask(ctx context.Context, in *StatTaskRequest, opts ...grpc.CallOption) (*v2.Task, error) {
out := new(v2.Task)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/StatTask", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) DeleteTask(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/DeleteTask", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) AnnounceHost(ctx context.Context, in *AnnounceHostRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/AnnounceHost", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) ListHosts(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListHostsResponse, error) {
out := new(ListHostsResponse)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/ListHosts", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) DeleteHost(ctx context.Context, in *DeleteHostRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/DeleteHost", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) AnnouncePersistentCachePeer(ctx context.Context, opts ...grpc.CallOption) (Scheduler_AnnouncePersistentCachePeerClient, error) {
stream, err := c.cc.NewStream(ctx, &Scheduler_ServiceDesc.Streams[1], "/scheduler.v2.Scheduler/AnnouncePersistentCachePeer", opts...)
if err != nil {
return nil, err
}
x := &schedulerAnnouncePersistentCachePeerClient{stream}
return x, nil
}
type Scheduler_AnnouncePersistentCachePeerClient interface {
Send(*AnnouncePersistentCachePeerRequest) error
Recv() (*AnnouncePersistentCachePeerResponse, error)
grpc.ClientStream
}
type schedulerAnnouncePersistentCachePeerClient struct {
grpc.ClientStream
}
func (x *schedulerAnnouncePersistentCachePeerClient) Send(m *AnnouncePersistentCachePeerRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *schedulerAnnouncePersistentCachePeerClient) Recv() (*AnnouncePersistentCachePeerResponse, error) {
m := new(AnnouncePersistentCachePeerResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *schedulerClient) StatPersistentCachePeer(ctx context.Context, in *StatPersistentCachePeerRequest, opts ...grpc.CallOption) (*v2.PersistentCachePeer, error) {
out := new(v2.PersistentCachePeer)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/StatPersistentCachePeer", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) DeletePersistentCachePeer(ctx context.Context, in *DeletePersistentCachePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/DeletePersistentCachePeer", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) UploadPersistentCacheTaskStarted(ctx context.Context, in *UploadPersistentCacheTaskStartedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/UploadPersistentCacheTaskStarted", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) UploadPersistentCacheTaskFinished(ctx context.Context, in *UploadPersistentCacheTaskFinishedRequest, opts ...grpc.CallOption) (*v2.PersistentCacheTask, error) {
out := new(v2.PersistentCacheTask)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/UploadPersistentCacheTaskFinished", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) UploadPersistentCacheTaskFailed(ctx context.Context, in *UploadPersistentCacheTaskFailedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/UploadPersistentCacheTaskFailed", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) StatPersistentCacheTask(ctx context.Context, in *StatPersistentCacheTaskRequest, opts ...grpc.CallOption) (*v2.PersistentCacheTask, error) {
out := new(v2.PersistentCacheTask)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/StatPersistentCacheTask", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *schedulerClient) DeletePersistentCacheTask(ctx context.Context, in *DeletePersistentCacheTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/DeletePersistentCacheTask", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// SchedulerServer is the server API for Scheduler service.
// All implementations should embed UnimplementedSchedulerServer
// for forward compatibility
type SchedulerServer interface {
// AnnouncePeer announces peer to scheduler.
AnnouncePeer(Scheduler_AnnouncePeerServer) error
// Checks information of peer.
StatPeer(context.Context, *StatPeerRequest) (*v2.Peer, error)
// DeletePeer releases peer in scheduler.
DeletePeer(context.Context, *DeletePeerRequest) (*emptypb.Empty, error)
// Checks information of task.
StatTask(context.Context, *StatTaskRequest) (*v2.Task, error)
// DeleteTask releases task in scheduler.
DeleteTask(context.Context, *DeleteTaskRequest) (*emptypb.Empty, error)
// AnnounceHost announces host to scheduler.
AnnounceHost(context.Context, *AnnounceHostRequest) (*emptypb.Empty, error)
// ListHosts lists hosts in scheduler.
ListHosts(context.Context, *emptypb.Empty) (*ListHostsResponse, error)
// DeleteHost releases host in scheduler.
DeleteHost(context.Context, *DeleteHostRequest) (*emptypb.Empty, error)
// AnnouncePersistentCachePeer announces persistent cache peer to scheduler.
AnnouncePersistentCachePeer(Scheduler_AnnouncePersistentCachePeerServer) error
// Checks information of persistent cache peer.
StatPersistentCachePeer(context.Context, *StatPersistentCachePeerRequest) (*v2.PersistentCachePeer, error)
// DeletePersistentCachePeer releases persistent cache peer in scheduler.
DeletePersistentCachePeer(context.Context, *DeletePersistentCachePeerRequest) (*emptypb.Empty, error)
// UploadPersistentCacheTaskStarted uploads persistent cache task started to scheduler.
UploadPersistentCacheTaskStarted(context.Context, *UploadPersistentCacheTaskStartedRequest) (*emptypb.Empty, error)
// UploadPersistentCacheTaskFinished uploads persistent cache task finished to scheduler.
UploadPersistentCacheTaskFinished(context.Context, *UploadPersistentCacheTaskFinishedRequest) (*v2.PersistentCacheTask, error)
// UploadPersistentCacheTaskFailed uploads persistent cache task failed to scheduler.
UploadPersistentCacheTaskFailed(context.Context, *UploadPersistentCacheTaskFailedRequest) (*emptypb.Empty, error)
// Checks information of persistent cache task.
StatPersistentCacheTask(context.Context, *StatPersistentCacheTaskRequest) (*v2.PersistentCacheTask, error)
// DeletePersistentCacheTask releases persistent cache task in scheduler.
DeletePersistentCacheTask(context.Context, *DeletePersistentCacheTaskRequest) (*emptypb.Empty, error)
}
// UnimplementedSchedulerServer should be embedded to have forward compatible implementations.
type UnimplementedSchedulerServer struct {
}
func (UnimplementedSchedulerServer) AnnouncePeer(Scheduler_AnnouncePeerServer) error {
return status.Errorf(codes.Unimplemented, "method AnnouncePeer not implemented")
}
func (UnimplementedSchedulerServer) StatPeer(context.Context, *StatPeerRequest) (*v2.Peer, error) {
return nil, status.Errorf(codes.Unimplemented, "method StatPeer not implemented")
}
func (UnimplementedSchedulerServer) DeletePeer(context.Context, *DeletePeerRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeletePeer not implemented")
}
func (UnimplementedSchedulerServer) StatTask(context.Context, *StatTaskRequest) (*v2.Task, error) {
return nil, status.Errorf(codes.Unimplemented, "method StatTask not implemented")
}
func (UnimplementedSchedulerServer) DeleteTask(context.Context, *DeleteTaskRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteTask not implemented")
}
func (UnimplementedSchedulerServer) AnnounceHost(context.Context, *AnnounceHostRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method AnnounceHost not implemented")
}
func (UnimplementedSchedulerServer) ListHosts(context.Context, *emptypb.Empty) (*ListHostsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListHosts not implemented")
}
func (UnimplementedSchedulerServer) DeleteHost(context.Context, *DeleteHostRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteHost not implemented")
}
func (UnimplementedSchedulerServer) AnnouncePersistentCachePeer(Scheduler_AnnouncePersistentCachePeerServer) error {
return status.Errorf(codes.Unimplemented, "method AnnouncePersistentCachePeer not implemented")
}
func (UnimplementedSchedulerServer) StatPersistentCachePeer(context.Context, *StatPersistentCachePeerRequest) (*v2.PersistentCachePeer, error) {
return nil, status.Errorf(codes.Unimplemented, "method StatPersistentCachePeer not implemented")
}
func (UnimplementedSchedulerServer) DeletePersistentCachePeer(context.Context, *DeletePersistentCachePeerRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeletePersistentCachePeer not implemented")
}
func (UnimplementedSchedulerServer) UploadPersistentCacheTaskStarted(context.Context, *UploadPersistentCacheTaskStartedRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method UploadPersistentCacheTaskStarted not implemented")
}
func (UnimplementedSchedulerServer) UploadPersistentCacheTaskFinished(context.Context, *UploadPersistentCacheTaskFinishedRequest) (*v2.PersistentCacheTask, error) {
return nil, status.Errorf(codes.Unimplemented, "method UploadPersistentCacheTaskFinished not implemented")
}
func (UnimplementedSchedulerServer) UploadPersistentCacheTaskFailed(context.Context, *UploadPersistentCacheTaskFailedRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method UploadPersistentCacheTaskFailed not implemented")
}
func (UnimplementedSchedulerServer) StatPersistentCacheTask(context.Context, *StatPersistentCacheTaskRequest) (*v2.PersistentCacheTask, error) {
return nil, status.Errorf(codes.Unimplemented, "method StatPersistentCacheTask not implemented")
}
func (UnimplementedSchedulerServer) DeletePersistentCacheTask(context.Context, *DeletePersistentCacheTaskRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeletePersistentCacheTask not implemented")
}
// UnsafeSchedulerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SchedulerServer will
// result in compilation errors.
type UnsafeSchedulerServer interface {
mustEmbedUnimplementedSchedulerServer()
}
func RegisterSchedulerServer(s grpc.ServiceRegistrar, srv SchedulerServer) {
s.RegisterService(&Scheduler_ServiceDesc, srv)
}
func _Scheduler_AnnouncePeer_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SchedulerServer).AnnouncePeer(&schedulerAnnouncePeerServer{stream})
}
type Scheduler_AnnouncePeerServer interface {
Send(*AnnouncePeerResponse) error
Recv() (*AnnouncePeerRequest, error)
grpc.ServerStream
}
type schedulerAnnouncePeerServer struct {
grpc.ServerStream
}
func (x *schedulerAnnouncePeerServer) Send(m *AnnouncePeerResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *schedulerAnnouncePeerServer) Recv() (*AnnouncePeerRequest, error) {
m := new(AnnouncePeerRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _Scheduler_StatPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StatPeerRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).StatPeer(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.v2.Scheduler/StatPeer",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).StatPeer(ctx, req.(*StatPeerRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_DeletePeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeletePeerRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).DeletePeer(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.v2.Scheduler/DeletePeer",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).DeletePeer(ctx, req.(*DeletePeerRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_StatTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StatTaskRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).StatTask(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.v2.Scheduler/StatTask",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).StatTask(ctx, req.(*StatTaskRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_DeleteTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteTaskRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).DeleteTask(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.v2.Scheduler/DeleteTask",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).DeleteTask(ctx, req.(*DeleteTaskRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_AnnounceHost_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AnnounceHostRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).AnnounceHost(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.v2.Scheduler/AnnounceHost",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).AnnounceHost(ctx, req.(*AnnounceHostRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_ListHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(emptypb.Empty)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).ListHosts(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.v2.Scheduler/ListHosts",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).ListHosts(ctx, req.(*emptypb.Empty))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_DeleteHost_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteHostRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).DeleteHost(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.v2.Scheduler/DeleteHost",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).DeleteHost(ctx, req.(*DeleteHostRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_AnnouncePersistentCachePeer_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SchedulerServer).AnnouncePersistentCachePeer(&schedulerAnnouncePersistentCachePeerServer{stream})
}
type Scheduler_AnnouncePersistentCachePeerServer interface {
Send(*AnnouncePersistentCachePeerResponse) error
Recv() (*AnnouncePersistentCachePeerRequest, error)
grpc.ServerStream
}
type schedulerAnnouncePersistentCachePeerServer struct {
grpc.ServerStream
}
func (x *schedulerAnnouncePersistentCachePeerServer) Send(m *AnnouncePersistentCachePeerResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *schedulerAnnouncePersistentCachePeerServer) Recv() (*AnnouncePersistentCachePeerRequest, error) {
m := new(AnnouncePersistentCachePeerRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _Scheduler_StatPersistentCachePeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StatPersistentCachePeerRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).StatPersistentCachePeer(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.v2.Scheduler/StatPersistentCachePeer",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).StatPersistentCachePeer(ctx, req.(*StatPersistentCachePeerRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_DeletePersistentCachePeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeletePersistentCachePeerRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).DeletePersistentCachePeer(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.v2.Scheduler/DeletePersistentCachePeer",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).DeletePersistentCachePeer(ctx, req.(*DeletePersistentCachePeerRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_UploadPersistentCacheTaskStarted_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UploadPersistentCacheTaskStartedRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).UploadPersistentCacheTaskStarted(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.v2.Scheduler/UploadPersistentCacheTaskStarted",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).UploadPersistentCacheTaskStarted(ctx, req.(*UploadPersistentCacheTaskStartedRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_UploadPersistentCacheTaskFinished_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UploadPersistentCacheTaskFinishedRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).UploadPersistentCacheTaskFinished(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.v2.Scheduler/UploadPersistentCacheTaskFinished",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).UploadPersistentCacheTaskFinished(ctx, req.(*UploadPersistentCacheTaskFinishedRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_UploadPersistentCacheTaskFailed_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UploadPersistentCacheTaskFailedRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).UploadPersistentCacheTaskFailed(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.v2.Scheduler/UploadPersistentCacheTaskFailed",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).UploadPersistentCacheTaskFailed(ctx, req.(*UploadPersistentCacheTaskFailedRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_StatPersistentCacheTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StatPersistentCacheTaskRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).StatPersistentCacheTask(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.v2.Scheduler/StatPersistentCacheTask",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).StatPersistentCacheTask(ctx, req.(*StatPersistentCacheTaskRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Scheduler_DeletePersistentCacheTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeletePersistentCacheTaskRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SchedulerServer).DeletePersistentCacheTask(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/scheduler.v2.Scheduler/DeletePersistentCacheTask",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SchedulerServer).DeletePersistentCacheTask(ctx, req.(*DeletePersistentCacheTaskRequest))
}
return interceptor(ctx, in, info, handler)
}
// Scheduler_ServiceDesc is the grpc.ServiceDesc for Scheduler service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Scheduler_ServiceDesc = grpc.ServiceDesc{
ServiceName: "scheduler.v2.Scheduler",
HandlerType: (*SchedulerServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "StatPeer",
Handler: _Scheduler_StatPeer_Handler,
},
{
MethodName: "DeletePeer",
Handler: _Scheduler_DeletePeer_Handler,
},
{
MethodName: "StatTask",
Handler: _Scheduler_StatTask_Handler,
},
{
MethodName: "DeleteTask",
Handler: _Scheduler_DeleteTask_Handler,
},
{
MethodName: "AnnounceHost",
Handler: _Scheduler_AnnounceHost_Handler,
},
{
MethodName: "ListHosts",
Handler: _Scheduler_ListHosts_Handler,
},
{
MethodName: "DeleteHost",
Handler: _Scheduler_DeleteHost_Handler,
},
{
MethodName: "StatPersistentCachePeer",
Handler: _Scheduler_StatPersistentCachePeer_Handler,
},
{
MethodName: "DeletePersistentCachePeer",
Handler: _Scheduler_DeletePersistentCachePeer_Handler,
},
{
MethodName: "UploadPersistentCacheTaskStarted",
Handler: _Scheduler_UploadPersistentCacheTaskStarted_Handler,
},
{
MethodName: "UploadPersistentCacheTaskFinished",
Handler: _Scheduler_UploadPersistentCacheTaskFinished_Handler,
},
{
MethodName: "UploadPersistentCacheTaskFailed",
Handler: _Scheduler_UploadPersistentCacheTaskFailed_Handler,
},
{
MethodName: "StatPersistentCacheTask",
Handler: _Scheduler_StatPersistentCacheTask_Handler,
},
{
MethodName: "DeletePersistentCacheTask",
Handler: _Scheduler_DeletePersistentCacheTask_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "AnnouncePeer",
Handler: _Scheduler_AnnouncePeer_Handler,
ServerStreams: true,
ClientStreams: true,
},
{
StreamName: "AnnouncePersistentCachePeer",
Handler: _Scheduler_AnnouncePersistentCachePeer_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "pkg/apis/scheduler/v2/scheduler.proto",
}
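
To close the loop on the generated code above, a minimal server-side sketch: embed UnimplementedSchedulerServer for forward compatibility (as its comment recommends), override only the RPCs that are needed, and register through RegisterSchedulerServer. The listen address and the StatTask body are illustrative assumptions; the import paths match those used by the generated code above.

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	commonv2 "d7y.io/api/v2/pkg/apis/common/v2"
	schedulerv2 "d7y.io/api/v2/pkg/apis/scheduler/v2"
)

// schedulerService overrides StatTask only; every other RPC falls through to
// UnimplementedSchedulerServer and returns codes.Unimplemented.
type schedulerService struct {
	schedulerv2.UnimplementedSchedulerServer
}

func (s *schedulerService) StatTask(ctx context.Context, req *schedulerv2.StatTaskRequest) (*commonv2.Task, error) {
	// A real scheduler would look the task up in its state; echoing the id
	// back is enough for the sketch.
	return &commonv2.Task{Id: req.TaskId}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":8002") // arbitrary port for the sketch
	if err != nil {
		log.Fatal(err)
	}

	gs := grpc.NewServer()
	schedulerv2.RegisterSchedulerServer(gs, &schedulerService{})

	if err := gs.Serve(lis); err != nil {
		log.Fatal(err)
	}
}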

View File

@ -1,513 +0,0 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package common.v2;
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
// SizeScope represents size scope of task.
enum SizeScope {
// size > one piece size.
NORMAL = 0;
// 128 bytes < size <= one piece size and the content is plain type.
SMALL = 1;
// size <= 128 bytes and the content is plain type.
TINY = 2;
// size == 0 bytes and the content is plain type.
EMPTY = 3;
}
// TaskType represents type of task.
enum TaskType {
// STANDARD is standard type of task, it can download from source, remote peer and
// local peer(local cache). When the standard task is never downloaded in the
// P2P cluster, dfdaemon will download the task from the source. When the standard
// task is downloaded in the P2P cluster, dfdaemon will download the task from
// the remote peer or local peer(local cache).
STANDARD = 0;
// PERSISTENT is persistent type of task, it can import file and export file in P2P cluster.
// When the persistent task is imported into the P2P cluster, dfdaemon will store
// the task in the peer's disk and copy multiple replicas to remote peers to
// prevent data loss.
PERSISTENT = 1;
// PERSISTENT_CACHE is persistent cache type of task, it can import file and export file in P2P cluster.
// When the persistent cache task is imported into the P2P cluster, dfdaemon will store
// the task in the peer's disk and copy multiple replicas to remote peers to prevent data loss.
// When the expiration time is reached, task will be deleted in the P2P cluster.
PERSISTENT_CACHE = 2;
}
// TrafficType represents type of traffic.
enum TrafficType {
// BACK_TO_SOURCE is to download traffic from the source.
BACK_TO_SOURCE = 0;
// REMOTE_PEER is to download traffic from the remote peer.
REMOTE_PEER = 1;
// LOCAL_PEER is to download traffic from the local peer.
LOCAL_PEER = 2;
}
// Priority represents priority of application.
enum Priority {
// LEVEL0 has no special meaning for scheduler.
LEVEL0 = 0;
// LEVEL1 represents the download task is forbidden,
// and an error code is returned during the registration.
LEVEL1 = 1;
// LEVEL2 represents when the task is downloaded for the first time,
// allow peers to download from the other peers,
// but not back-to-source. When the task is not downloaded for
// the first time, it is scheduled normally.
LEVEL2 = 2;
// LEVEL3 represents when the task is downloaded for the first time,
// the normal peer is first to download back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL3 = 3;
// LEVEL4 represents when the task is downloaded for the first time,
// the weak peer is first triggered to back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL4 = 4;
// LEVEL5 represents when the task is downloaded for the first time,
// the strong peer is first triggered to back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL5 = 5;
// LEVEL6 represents when the task is downloaded for the first time,
// the super peer is first triggered to back-to-source.
// When the task is not downloaded for the first time, it is scheduled normally.
LEVEL6 = 6;
}
// Peer metadata.
message Peer {
// Peer id.
string id = 1;
// Range is url range of request.
optional Range range = 2;
// Peer priority.
Priority priority = 3;
// Pieces of peer.
repeated Piece pieces = 4;
// Peer downloads costs time.
google.protobuf.Duration cost = 5;
// Peer state.
string state = 6;
// Task info.
Task task = 7;
// Host info.
Host host = 8;
// NeedBackToSource indicates the peer needs to download from the source.
bool need_back_to_source = 9;
// Peer create time.
google.protobuf.Timestamp created_at = 10;
// Peer update time.
google.protobuf.Timestamp updated_at = 11;
}
// PersistentCachePeer metadata.
message PersistentCachePeer {
// Peer id.
string id = 1;
// Persistent represents whether the persistent cache peer is persistent.
// If the persistent cache peer is persistent, the persistent cache peer will
// not be deleted when dfdaemon runs garbage collection. It will only be deleted
// when the task is deleted by the user.
bool persistent = 2;
// Peer downloads costs time.
google.protobuf.Duration cost = 3;
// Peer state.
string state = 4;
// Persistent task info.
PersistentCacheTask task = 5;
// Host info.
Host host = 6;
// Peer create time.
google.protobuf.Timestamp created_at = 7;
// Peer update time.
google.protobuf.Timestamp updated_at = 8;
}
// Task metadata.
message Task {
// Task id.
string id = 1;
// Task type.
TaskType type = 2;
// Download url.
string url = 3;
// Verifies task data integrity after download using a digest. Supports CRC32, SHA256, and SHA512 algorithms.
// Format: `<algorithm>:<hash>`, e.g., `crc32:xxx`, `sha256:yyy`, `sha512:zzz`.
// Returns an error if the computed digest mismatches the expected value.
//
// Performance
// Digest calculation increases processing time. Enable only when data integrity verification is critical.
optional string digest = 4;
// URL tag identifies different task for same url.
optional string tag = 5;
// Application of task.
optional string application = 6;
// Filtered query params to generate the task id.
// When filter is ["Signature", "Expires", "ns"], for example:
// http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
// will generate the same task id.
// Default value includes the filtered query params of s3, gcs, oss, obs, cos.
repeated string filtered_query_params = 7;
// Task request headers.
map<string, string> request_header = 8;
// Task content length.
uint64 content_length = 9;
// Task piece count.
uint32 piece_count = 10;
// Task size scope.
SizeScope size_scope = 11;
// Pieces of task.
repeated Piece pieces = 12;
// Task state.
string state = 13;
// Task peer count.
uint32 peer_count = 14;
// Task contains available peer.
bool has_available_peer = 15;
// Task create time.
google.protobuf.Timestamp created_at = 16;
// Task update time.
google.protobuf.Timestamp updated_at = 17;
}
// PersistentCacheTask metadata.
message PersistentCacheTask {
// Task id.
string id = 1;
// Replica count of the persistent cache task. The persistent cache task will
// not be deleted when dfdaemon runs garbage collection. It will only be deleted
// when the task is deleted by the user.
uint64 persistent_replica_count = 2;
// Current replica count of the persistent cache task. The persistent cache task
// will not be deleted when dfdaemon runs garbage collection. It will only be deleted
// when the task is deleted by the user.
uint64 current_persistent_replica_count = 3;
// Current replica count of the cache task. If cache task is not persistent,
// the persistent cache task will be deleted when dfdaemon runs garbage collection.
uint64 current_replica_count = 4;
// Tag is used to distinguish different persistent cache tasks.
optional string tag = 5;
// Application of task.
optional string application = 6;
// Task piece length.
uint64 piece_length = 7;
// Task content length.
uint64 content_length = 8;
// Task piece count.
uint32 piece_count = 9;
// Task state.
string state = 10;
// TTL of the persistent cache task.
google.protobuf.Duration ttl = 11;
// Task create time.
google.protobuf.Timestamp created_at = 12;
// Task update time.
google.protobuf.Timestamp updated_at = 13;
}
// Host metadata.
message Host {
// Host id.
string id = 1;
// Host type.
uint32 type = 2;
// Hostname.
string hostname = 3;
// Host ip.
string ip = 4;
// Port of grpc service.
int32 port = 5;
// Port of download server.
int32 download_port = 6;
// Host OS.
string os = 7;
// Host platform.
string platform = 8;
// Host platform family.
string platform_family = 9;
// Host platform version.
string platform_version = 10;
// Host kernel version.
string kernel_version = 11;
// CPU Stat.
optional CPU cpu = 12;
// Memory Stat.
optional Memory memory = 13;
// Network Stat.
optional Network network = 14;
// Disk Stat.
optional Disk disk = 15;
// Build information.
optional Build build = 16;
// ID of the cluster to which the host belongs.
uint64 scheduler_cluster_id = 17;
// Disable shared data for other peers.
bool disable_shared = 18;
}
// CPU Stat.
message CPU {
// Number of logical cores in the system.
uint32 logical_count = 1;
// Number of physical cores in the system.
uint32 physical_count = 2;
// Percent calculates the percentage of cpu used.
double percent = 3;
// Calculates the percentage of cpu used by process.
double process_percent = 4;
// CPUTimes contains the amounts of time the CPU has spent performing different kinds of work.
optional CPUTimes times = 5;
}
// CPUTimes contains the amounts of time the CPU has spent performing different
// kinds of work. Time units are in seconds.
message CPUTimes {
// CPU time of user.
double user = 1;
// CPU time of system.
double system = 2;
// CPU time of idle.
double idle = 3;
// CPU time of nice.
double nice = 4;
// CPU time of iowait.
double iowait = 5;
// CPU time of irq.
double irq = 6;
// CPU time of softirq.
double softirq = 7;
// CPU time of steal.
double steal = 8;
// CPU time of guest.
double guest = 9;
// CPU time of guest nice.
double guest_nice = 10;
}
// Memory Stat.
message Memory {
// Total amount of RAM on this system.
uint64 total = 1;
// RAM available for programs to allocate.
uint64 available = 2;
// RAM used by programs.
uint64 used = 3;
// Percentage of RAM used by programs.
double used_percent = 4;
// Calculates the percentage of memory used by process.
double process_used_percent = 5;
// This is the kernel's notion of free memory.
uint64 free = 6;
}
// Network Stat.
message Network {
// Count of open TCP connections in the ESTABLISHED state.
uint32 tcp_connection_count = 1;
// Count of open upload TCP connections in the ESTABLISHED state.
uint32 upload_tcp_connection_count = 2;
// Location path(area|country|province|city|...).
optional string location = 3;
// IDC where the peer host is located.
optional string idc = 4;
// Download rate, in bytes received per second.
uint64 download_rate = 5;
// Download rate limit, in bytes received per second.
uint64 download_rate_limit = 6;
// Upload rate, in bytes transmitted per second.
uint64 upload_rate = 7;
// Upload rate limit, in bytes transmitted per second.
uint64 upload_rate_limit = 8;
}
// Disk Stat.
message Disk {
// Total amount of disk on the data path of dragonfly.
uint64 total = 1;
// Free amount of disk on the data path of dragonfly.
uint64 free = 2;
// Used amount of disk on the data path of dragonfly.
uint64 used = 3;
// Used percent of disk on the data path of dragonfly directory.
double used_percent = 4;
// Total amount of inodes on the data path of dragonfly directory.
uint64 inodes_total = 5;
// Used amount of inodes on the data path of dragonfly directory.
uint64 inodes_used = 6;
// Free amount of inodes on the data path of dragonfly directory.
uint64 inodes_free = 7;
// Used percent of inodes on the data path of dragonfly directory.
double inodes_used_percent = 8;
// Disk read bandwidth, in bytes per second.
uint64 read_bandwidth = 9;
// Disk write bandwidth, in bytes per second.
uint64 write_bandwidth = 10;
}
// Build information.
message Build {
// Git version.
string git_version = 1;
// Git commit.
optional string git_commit = 2;
// Golang version.
optional string go_version = 3;
// Rust version.
optional string rust_version = 4;
// Build platform.
optional string platform = 5;
}
// Download information.
message Download {
// Download url.
string url = 1;
// Digest of the task data, for example blake3:xxx or sha256:yyy.
optional string digest = 2;
// Range is url range of request. If protocol is http, range
// will set in request header. If protocol is others, range
// will set in range field.
optional Range range = 3;
// Task type.
TaskType type = 4;
// URL tag identifies different task for same url.
optional string tag = 5;
// Application of task.
optional string application = 6;
// Peer priority.
Priority priority = 7;
// Filtered query params to generate the task id.
// When filter is ["Signature", "Expires", "ns"], for example:
// http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io and http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io
// will generate the same task id.
// Default value includes the filtered query params of s3, gcs, oss, obs, cos.
repeated string filtered_query_params = 8;
// Task request headers.
map<string, string> request_header = 9;
// Task piece length.
optional uint64 piece_length = 10;
// File path to be downloaded. If output_path is set, the downloaded file will be saved to the specified path.
// Dfdaemon will try to create hard link to the output path before starting the download. If hard link creation fails,
// it will copy the file to the output path after the download is completed.
// For more details refer to https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md.
optional string output_path = 11;
// Download timeout.
optional google.protobuf.Duration timeout = 12;
// Dfdaemon cannot download the task from the source if disable_back_to_source is true.
bool disable_back_to_source = 13;
// Scheduler needs to schedule the task downloads from the source if need_back_to_source is true.
bool need_back_to_source = 14;
// certificate_chain is the client certificates in DER format for the backend client to download back-to-source.
repeated bytes certificate_chain = 15;
// Prefetch pre-downloads all pieces of the task when the download task request is a range request.
bool prefetch = 16;
// Object storage protocol information.
optional ObjectStorage object_storage = 17;
// HDFS protocol information.
optional HDFS hdfs = 18;
// is_prefetch is the flag to indicate whether the request is a prefetch request.
bool is_prefetch = 19;
// need_piece_content is the flag to indicate whether the response needs to return piece content.
bool need_piece_content = 20;
// load_to_cache indicates whether the content downloaded will be stored in the cache storage.
// Cache storage is designed to store downloaded piece content from preheat tasks,
// allowing other peers to access the content from memory instead of disk.
bool load_to_cache = 21;
// force_hard_link is the flag to indicate whether the download file must be hard linked to the output path.
// For more details refer to https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md.
bool force_hard_link = 22;
// content_for_calculating_task_id is the content used to calculate the task id.
// If content_for_calculating_task_id is set, use its value to calculate the task ID.
// Otherwise, calculate the task ID based on url, piece_length, tag, application, and filtered_query_params.
optional string content_for_calculating_task_id = 23;
// remote_ip represents the IP address of the client initiating the download request.
// For proxy requests, it is set to the IP address of the request source.
// For dfget requests, it is set to the IP address of the dfget.
optional string remote_ip = 24;
}
// Object Storage related information.
message ObjectStorage {
// Region is the region of the object storage service.
optional string region = 1;
// Endpoint is the endpoint of the object storage service.
optional string endpoint = 2;
// Access key that used to access the object storage service.
optional string access_key_id = 3;
// Access secret that used to access the object storage service.
optional string access_key_secret = 4;
// Session token that used to access s3 storage service.
optional string session_token = 5;
// Local path to credential file for Google Cloud Storage service OAuth2 authentication.
optional string credential_path = 6;
// Predefined ACL that used for the Google Cloud Storage service.
optional string predefined_acl = 7;
}
// HDFS related information.
message HDFS {
// Delegation token for Web HDFS operator.
optional string delegation_token = 1;
}
// Range represents download range.
message Range {
// Start of range.
uint64 start = 1;
// Length of range.
uint64 length = 2;
}
// Piece represents information of piece.
message Piece {
// Piece number.
uint32 number = 1;
// Parent peer id.
optional string parent_id = 2;
// Piece offset.
uint64 offset = 3;
// Piece length.
uint64 length = 4;
// Digest of the piece data, for example blake3:xxx or sha256:yyy.
string digest = 5;
// Piece content.
optional bytes content = 6;
// Traffic type.
optional TrafficType traffic_type = 7;
// Downloading piece costs time.
google.protobuf.Duration cost = 8;
// Piece create time.
google.protobuf.Timestamp created_at = 9;
}
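Taken together, the messages above describe everything a client supplies for a download: the source URL, an optional digest in `<algorithm>:<hash>` form, and the query params stripped before the task id is derived. The sketch below builds a `Download` with the prost-generated Rust bindings; the `dragonfly_api::common::v2` module path and the field values are assumptions for illustration, and unset fields keep their prost defaults.

use std::collections::HashMap;

use dragonfly_api::common::v2::Download;

// Build a Download request; fields not set here keep their prost defaults.
fn build_download() -> Download {
    let mut request_header = HashMap::new();
    request_header.insert("User-Agent".to_string(), "dfget".to_string());

    Download {
        url: "http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io".to_string(),
        // `<algorithm>:<hash>`; the download fails if the computed digest mismatches.
        digest: Some("sha256:yyy".to_string()),
        tag: Some("v1".to_string()),
        application: Some("example-app".to_string()),
        // These params are filtered out before the task id is calculated, so the
        // two example URLs in the comments above map to the same task.
        filtered_query_params: vec![
            "Signature".to_string(),
            "Expires".to_string(),
            "ns".to_string(),
        ],
        request_header,
        ..Default::default()
    }
}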

View File

@ -1,409 +0,0 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package dfdaemon.v2;
import "common.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/empty.proto";
// DownloadTaskRequest represents request of DownloadTask.
message DownloadTaskRequest {
// Download information.
common.v2.Download download = 1;
}
// DownloadTaskStartedResponse represents task download started response of DownloadTaskResponse.
message DownloadTaskStartedResponse {
// Task content length.
uint64 content_length = 1;
// Range is url range of request. If protocol is http, range
// is parsed from http header. If other protocol, range comes
// from download range field.
optional common.v2.Range range = 2;
// Task response headers.
map<string, string> response_header = 3;
// Need to download pieces.
repeated common.v2.Piece pieces = 4;
}
// DownloadPieceFinishedResponse represents piece download finished response of DownloadTaskResponse.
message DownloadPieceFinishedResponse {
// Finished piece of task.
common.v2.Piece piece = 1;
}
// DownloadTaskResponse represents response of DownloadTask.
message DownloadTaskResponse {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
// Peer id.
string peer_id = 3;
oneof response {
DownloadTaskStartedResponse download_task_started_response = 4;
DownloadPieceFinishedResponse download_piece_finished_response = 5;
}
}
// SyncPiecesRequest represents request of SyncPieces.
message SyncPiecesRequest {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
// Interested piece numbers.
repeated uint32 interested_piece_numbers = 3;
}
// SyncPiecesResponse represents response of SyncPieces.
message SyncPiecesResponse {
// Exist piece number.
uint32 number = 1;
// Piece offset.
uint64 offset = 2;
// Piece length.
uint64 length = 3;
}
// DownloadPieceRequest represents request of DownloadPiece.
message DownloadPieceRequest{
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
// Piece number.
uint32 piece_number = 3;
}
// DownloadPieceResponse represents response of DownloadPieces.
message DownloadPieceResponse {
// Piece information.
common.v2.Piece piece = 1;
// Piece metadata digest, used to verify the integrity of the piece metadata.
optional string digest = 2;
}
// StatTaskRequest represents request of StatTask.
message StatTaskRequest {
// Task id.
string task_id = 1;
// Remote IP represents the IP address of the client initiating the stat request.
optional string remote_ip = 2;
}
// ListTaskEntriesRequest represents request of ListTaskEntries.
message ListTaskEntriesRequest {
// Task id.
string task_id = 1;
// URL to be listed the entries.
string url = 2;
// HTTP header to be sent with the request.
map<string, string> request_header = 3;
// List timeout.
optional google.protobuf.Duration timeout = 4;
// certificate_chain is the client certificates in DER format for the backend client to list the entries.
repeated bytes certificate_chain = 5;
// Object storage protocol information.
optional common.v2.ObjectStorage object_storage = 6;
// HDFS protocol information.
optional common.v2.HDFS hdfs = 7;
// Remote IP represents the IP address of the client initiating the list request.
optional string remote_ip = 8;
}
// ListTaskEntriesResponse represents response of ListTaskEntries.
message ListTaskEntriesResponse {
// Content length of the response.
uint64 content_length = 1;
// HTTP header to be sent with the request.
map<string, string> response_header = 2;
// Backend HTTP status code.
optional int32 status_code = 3;
// Entries is the information of the entries in the directory.
repeated Entry entries = 4;
}
// Entry represents an entry in a directory.
message Entry {
// URL of the entry.
string url = 1;
// Size of the entry.
uint64 content_length = 2;
// Is directory or not.
bool is_dir = 3;
}
// DeleteTaskRequest represents request of DeleteTask.
message DeleteTaskRequest {
// Task id.
string task_id = 1;
// Remote IP represents the IP address of the client initiating the delete request.
optional string remote_ip = 2;
}
// DownloadPersistentCacheTaskRequest represents request of DownloadPersistentCacheTask.
message DownloadPersistentCacheTaskRequest {
// Task id.
string task_id = 1;
// Persistent represents whether the persistent cache task is persistent.
// If the persistent cache task is persistent, the persistent cache peer will
// not be deleted when dfdaemon runs garbage collection.
bool persistent = 2;
// Tag is used to distinguish different persistent cache tasks.
optional string tag = 3;
// Application of task.
optional string application = 4;
// File path to be exported. If output_path is set, the exported file will be saved to the specified path.
// Dfdaemon will try to create hard link to the output path before starting the export. If hard link creation fails,
// it will copy the file to the output path after the export is completed.
// For more details refer to https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md.
optional string output_path = 5;
// Download timeout.
optional google.protobuf.Duration timeout = 6;
// need_piece_content is the flag to indicate whether the response needs to return piece content.
bool need_piece_content = 7;
// force_hard_link is the flag to indicate whether the exported file must be hard linked to the output path.
// For more details refer to https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md.
bool force_hard_link = 8;
// Verifies task data integrity after download using a digest. Supports CRC32, SHA256, and SHA512 algorithms.
// Format: `<algorithm>:<hash>`, e.g., `crc32:xxx`, `sha256:yyy`, `sha512:zzz`.
// Returns an error if the computed digest mismatches the expected value.
//
// Performance
// Digest calculation increases processing time. Enable only when data integrity verification is critical.
optional string digest = 9;
// Remote IP represents the IP address of the client initiating the download request.
optional string remote_ip = 10;
}
// DownloadPersistentCacheTaskStartedResponse represents task download started response of DownloadPersistentCacheTaskResponse.
message DownloadPersistentCacheTaskStartedResponse {
// Task content length.
uint64 content_length = 1;
}
// DownloadPersistentCacheTaskResponse represents response of DownloadPersistentCacheTask.
message DownloadPersistentCacheTaskResponse {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
// Peer id.
string peer_id = 3;
oneof response {
DownloadPersistentCacheTaskStartedResponse download_persistent_cache_task_started_response = 4;
DownloadPieceFinishedResponse download_piece_finished_response = 5;
}
}
// UploadPersistentCacheTaskRequest represents request of UploadPersistentCacheTask.
message UploadPersistentCacheTaskRequest {
// content_for_calculating_task_id is the content used to calculate the task id.
// If content_for_calculating_task_id is set, use its value to calculate the task ID.
// Otherwise, calculate the task ID based on the file content, tag, and application using the crc32 algorithm.
optional string content_for_calculating_task_id = 1;
// Upload file path of persistent cache task.
string path = 2;
// Replica count of the persistent cache task.
uint64 persistent_replica_count = 3;
// Tag is used to distinguish different persistent cache tasks.
optional string tag = 4;
// Application of the persistent cache task.
optional string application = 5;
// Piece length of the persistent cache task, the value needs to be greater than or equal to 4194304(4MiB).
optional uint64 piece_length = 6;
// TTL of the persistent cache task.
google.protobuf.Duration ttl = 7;
// Download timeout.
optional google.protobuf.Duration timeout = 8;
// Remote IP represents the IP address of the client initiating the upload request.
optional string remote_ip = 9;
}
// UpdatePersistentCacheTaskRequest represents request of UpdatePersistentCacheTask.
message UpdatePersistentCacheTaskRequest {
// Task id.
string task_id = 1;
// Persistent represents whether the persistent cache peer is persistent.
// If the persistent cache peer is persistent, the persistent cache peer will
// not be deleted when dfdaemon runs garbage collection. It will only be deleted
// when the task is deleted by the user.
bool persistent = 2;
// Remote IP represents the IP address of the client initiating the update request.
optional string remote_ip = 3;
}
// StatPersistentCacheTaskRequest represents request of StatPersistentCacheTask.
message StatPersistentCacheTaskRequest {
// Task id.
string task_id = 1;
// Remote IP represents the IP address of the client initiating the stat request.
optional string remote_ip = 2;
}
// DeletePersistentCacheTaskRequest represents request of DeletePersistentCacheTask.
message DeletePersistentCacheTaskRequest {
// Task id.
string task_id = 1;
// Remote IP represents the IP address of the client initiating the delete request.
optional string remote_ip = 2;
}
// SyncPersistentCachePiecesRequest represents request of SyncPersistentCachePieces.
message SyncPersistentCachePiecesRequest {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
// Interested piece numbers.
repeated uint32 interested_piece_numbers = 3;
}
// SyncPersistentCachePiecesResponse represents response of SyncPersistentCachePieces.
message SyncPersistentCachePiecesResponse {
// Exist piece number.
uint32 number = 1;
// Piece offset.
uint64 offset = 2;
// Piece length.
uint64 length = 3;
}
// DownloadPersistentCachePieceRequest represents request of DownloadPersistentCachePiece.
message DownloadPersistentCachePieceRequest{
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
// Piece number.
uint32 piece_number = 3;
}
// DownloadPersistentCachePieceResponse represents response of DownloadPersistentCachePieces.
message DownloadPersistentCachePieceResponse {
// Piece information.
common.v2.Piece piece = 1;
// Piece metadata digest, used to verify the integrity of the piece metadata.
optional string digest = 2;
}
// SyncHostRequest represents request of SyncHost.
message SyncHostRequest {
// Host id.
string host_id = 1;
// Peer id.
string peer_id = 2;
}
// IBVerbsQueuePairEndpoint represents queue pair endpoint of IBVerbs.
message IBVerbsQueuePairEndpoint {
// Number of the queue pair.
uint32 num = 1;
// Local identifier of the context.
uint32 lid = 2;
// Global identifier of the context.
bytes gid = 3;
}
// ExchangeIBVerbsQueuePairEndpointRequest represents request of ExchangeIBVerbsQueuePairEndpoint.
message ExchangeIBVerbsQueuePairEndpointRequest {
// Information of the source's queue pair endpoint of IBVerbs.
IBVerbsQueuePairEndpoint endpoint = 1;
}
// ExchangeIBVerbsQueuePairEndpointResponse represents response of ExchangeIBVerbsQueuePairEndpoint.
message ExchangeIBVerbsQueuePairEndpointResponse {
// Information of the destination's queue pair endpoint of IBVerbs.
IBVerbsQueuePairEndpoint endpoint = 1;
}
// DfdaemonUpload represents upload service of dfdaemon.
service DfdaemonUpload {
// DownloadTask downloads task from p2p network.
rpc DownloadTask(DownloadTaskRequest) returns(stream DownloadTaskResponse);
// StatTask stats task information.
rpc StatTask(StatTaskRequest) returns(common.v2.Task);
// DeleteTask deletes task from p2p network.
rpc DeleteTask(DeleteTaskRequest) returns(google.protobuf.Empty);
// SyncPieces syncs piece metadata from the remote peer.
rpc SyncPieces(SyncPiecesRequest) returns(stream SyncPiecesResponse);
// DownloadPiece downloads piece from the remote peer.
rpc DownloadPiece(DownloadPieceRequest)returns(DownloadPieceResponse);
// DownloadPersistentCacheTask downloads persistent cache task from p2p network.
rpc DownloadPersistentCacheTask(DownloadPersistentCacheTaskRequest) returns(stream DownloadPersistentCacheTaskResponse);
// UpdatePersistentCacheTask updates metadata of the persistent cache task in the p2p network.
rpc UpdatePersistentCacheTask(UpdatePersistentCacheTaskRequest) returns(google.protobuf.Empty);
// StatPersistentCacheTask stats persistent cache task information.
rpc StatPersistentCacheTask(StatPersistentCacheTaskRequest) returns(common.v2.PersistentCacheTask);
// DeletePersistentCacheTask deletes persistent cache task from p2p network.
rpc DeletePersistentCacheTask(DeletePersistentCacheTaskRequest) returns(google.protobuf.Empty);
// SyncPersistentCachePieces syncs persistent cache pieces from remote peer.
rpc SyncPersistentCachePieces(SyncPersistentCachePiecesRequest) returns(stream SyncPersistentCachePiecesResponse);
// DownloadPersistentCachePiece downloads persistent cache piece from p2p network.
rpc DownloadPersistentCachePiece(DownloadPersistentCachePieceRequest)returns(DownloadPersistentCachePieceResponse);
// SyncHost syncs host info from parents.
rpc SyncHost(SyncHostRequest) returns (stream common.v2.Host);
// ExchangeIBVerbsQueuePairEndpoint exchanges queue pair endpoint of IBVerbs with remote peer.
rpc ExchangeIBVerbsQueuePairEndpoint(ExchangeIBVerbsQueuePairEndpointRequest) returns(ExchangeIBVerbsQueuePairEndpointResponse);
}
// DfdaemonDownload represents download service of dfdaemon.
service DfdaemonDownload {
// DownloadTask downloads task from p2p network.
rpc DownloadTask(DownloadTaskRequest) returns(stream DownloadTaskResponse);
// StatTask stats task information.
rpc StatTask(StatTaskRequest) returns(common.v2.Task);
// ListTaskEntries lists task entries for downloading directory.
rpc ListTaskEntries(ListTaskEntriesRequest) returns(ListTaskEntriesResponse);
// DeleteTask deletes task from p2p network.
rpc DeleteTask(DeleteTaskRequest) returns(google.protobuf.Empty);
// DeleteHost releases host in scheduler.
rpc DeleteHost(google.protobuf.Empty)returns(google.protobuf.Empty);
// DownloadPersistentCacheTask downloads persistent cache task from p2p network.
rpc DownloadPersistentCacheTask(DownloadPersistentCacheTaskRequest) returns(stream DownloadPersistentCacheTaskResponse);
// UploadPersistentCacheTask uploads persistent cache task to p2p network.
rpc UploadPersistentCacheTask(UploadPersistentCacheTaskRequest) returns(common.v2.PersistentCacheTask);
// StatPersistentCacheTask stats persistent cache task information.
rpc StatPersistentCacheTask(StatPersistentCacheTaskRequest) returns(common.v2.PersistentCacheTask);
}
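A typical consumer of the DfdaemonDownload service opens the server-streamed DownloadTask call and drains the responses: first a started message with the content length, then one message per finished piece. A minimal sketch, assuming tonic-generated clients under `dragonfly_api::dfdaemon::v2` and an illustrative local endpoint:

use dragonfly_api::common::v2::Download;
use dragonfly_api::dfdaemon::v2::{
    dfdaemon_download_client::DfdaemonDownloadClient, DownloadTaskRequest,
};

async fn download_task(url: &str) -> Result<(), Box<dyn std::error::Error>> {
    // Endpoint is illustrative; dfdaemon commonly serves this API locally.
    let mut client = DfdaemonDownloadClient::connect("http://127.0.0.1:4001").await?;

    let request = DownloadTaskRequest {
        download: Some(Download {
            url: url.to_string(),
            ..Default::default()
        }),
    };

    // Server stream: DownloadTaskStartedResponse first, then piece-finished updates.
    let mut stream = client.download_task(request).await?.into_inner();
    while let Some(response) = stream.message().await? {
        println!("task {}: {:?}", response.task_id, response.response);
    }
    Ok(())
}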

View File

@ -1,35 +0,0 @@
/*
* Copyright 2024 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package errordetails.v2;
// Backend is error detail for Backend.
message Backend {
// Backend error message.
string message = 1;
// Backend HTTP response header.
map<string, string> header = 2;
// Backend HTTP status code.
optional int32 status_code = 3;
}
// Unknown is error detail for Unknown.
message Unknown {
// Unknown error message.
optional string message = 1;
}
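When a back-to-source download fails at the backend, dfdaemon can attach one of these details to the failure it reports (see DownloadPieceBackToSourceFailedRequest in the scheduler definitions below). A small sketch of populating the Backend detail, assuming the generated bindings live under `dragonfly_api::errordetails::v2`:

use std::collections::HashMap;

use dragonfly_api::errordetails::v2::Backend;

// Describe a backend failure with the upstream status code and headers.
fn not_found_detail() -> Backend {
    Backend {
        message: "object not found".to_string(),
        header: HashMap::new(),
        status_code: Some(404),
    }
}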

View File

@ -1,330 +0,0 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package manager.v2;
import "common.proto";
import "google/protobuf/empty.proto";
// Request source type.
enum SourceType {
// Scheduler service.
SCHEDULER_SOURCE = 0;
// Peer service.
PEER_SOURCE = 1;
// SeedPeer service.
SEED_PEER_SOURCE = 2;
}
// SeedPeerCluster represents cluster of seed peer.
message SeedPeerCluster {
// Cluster id.
uint64 id = 1;
// Cluster name.
string name = 2;
// Cluster biography.
string bio = 3;
// Cluster configuration.
bytes config = 4;
}
// SeedPeer represents seed peer for network.
message SeedPeer {
// Seed peer id.
uint64 id = 1;
// Seed peer hostname.
string hostname = 2;
// Seed peer type.
string type = 3;
// Seed peer idc.
optional string idc = 4;
// Seed peer location.
optional string location = 5;
// Seed peer ip.
string ip = 6;
// Seed peer grpc port.
int32 port = 7;
// Seed peer download port.
int32 download_port = 8;
// Seed peer state.
string state = 9;
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 10;
// Cluster to which the seed peer belongs.
SeedPeerCluster seed_peer_cluster = 11;
// Schedulers included in seed peer.
repeated Scheduler schedulers = 12;
}
// GetSeedPeerRequest represents request of GetSeedPeer.
message GetSeedPeerRequest {
// Request source type.
SourceType source_type = 1;
// Seed peer hostname.
string hostname = 2;
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 3;
// Seed peer ip.
string ip = 4;
}
// ListSeedPeersRequest represents request of ListSeedPeers.
message ListSeedPeersRequest {
// Request source type.
SourceType source_type = 1;
// Source service hostname.
string hostname = 2;
// Source service ip.
string ip = 3;
// Dfdaemon version.
string version = 4;
// Dfdaemon commit.
string commit = 5;
}
// ListSeedPeersResponse represents response of ListSeedPeers.
message ListSeedPeersResponse {
// Seed peers to which the source service belongs.
repeated SeedPeer seed_peers = 1;
}
// UpdateSeedPeerRequest represents request of UpdateSeedPeer.
message UpdateSeedPeerRequest {
// Request source type.
SourceType source_type = 1;
// Seed peer hostname.
string hostname = 2;
// Seed peer type.
string type = 3;
// Seed peer idc.
optional string idc = 4;
// Seed peer location.
optional string location = 5;
// Seed peer ip.
string ip = 6;
// Seed peer port.
int32 port = 7;
// Seed peer download port.
int32 download_port = 8;
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 9;
}
// DeleteSeedPeerRequest represents request of DeleteSeedPeer.
message DeleteSeedPeerRequest {
// Request source type.
SourceType source_type = 1;
// Seed peer hostname.
string hostname = 2;
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 3;
// Seed peer ip.
string ip = 4;
}
// SchedulerCluster represents cluster of scheduler.
message SchedulerCluster {
// Cluster id.
uint64 id = 1;
// Cluster name.
string name = 2;
// Cluster biography.
string bio = 3;
// Cluster config.
bytes config = 4;
// Cluster client config.
bytes client_config = 5;
// Cluster scopes.
bytes scopes = 6;
}
// Scheduler represents scheduler for network.
message Scheduler {
// Scheduler id.
uint64 id = 1;
// Scheduler hostname.
string hostname = 2;
// Scheduler idc.
optional string idc = 3;
// Scheduler location.
optional string location = 4;
// Scheduler ip.
string ip = 5;
// Scheduler grpc port.
int32 port = 6;
// Scheduler state.
string state = 7;
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 8;
// Cluster to which the scheduler belongs.
SchedulerCluster scheduler_cluster = 9;
// Seed peers to which the scheduler belongs.
repeated SeedPeer seed_peers = 10;
// Feature flags of scheduler.
bytes features = 11;
}
// GetSchedulerRequest represents request of GetScheduler.
message GetSchedulerRequest {
// Request source type.
SourceType source_type = 1;
// Scheduler hostname.
string hostname = 2;
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 3;
// Scheduler ip.
string ip = 4;
}
// UpdateSchedulerRequest represents request of UpdateScheduler.
message UpdateSchedulerRequest {
// Request source type.
SourceType source_type = 1;
// Scheduler hostname.
string hostname = 2;
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 3;
// Scheduler idc.
optional string idc = 4;
// Scheduler location.
optional string location = 5;
// Scheduler ip.
string ip = 6;
// Scheduler port.
int32 port = 7;
// Scheduler features.
repeated string features = 8;
// Scheduler Configuration.
bytes config = 9;
}
// ListSchedulersRequest represents request of ListSchedulers.
message ListSchedulersRequest {
// Request source type.
SourceType source_type = 1;
// Source service hostname.
string hostname = 2;
// Source service ip.
string ip = 3;
// Source idc.
optional string idc = 4;
// Source location.
optional string location = 5;
// Dfdaemon version.
string version = 6;
// Dfdaemon commit.
string commit = 7;
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 8;
}
// ListSchedulersResponse represents response of ListSchedulers.
message ListSchedulersResponse {
// Schedulers to which the source service belongs.
repeated Scheduler schedulers = 1;
}
// URLPriority represents config of url priority.
message URLPriority {
// URL regex.
string regex = 1;
// URL priority value.
common.v2.Priority value = 2;
}
// ApplicationPriority represents config of application priority.
message ApplicationPriority {
// Priority value.
common.v2.Priority value = 1;
// URL priority.
repeated URLPriority urls = 2;
}
// Application represents config of application.
message Application {
// Application id.
uint64 id = 1;
// Application name.
string name = 2;
// Application url.
string url = 3;
// Application biography.
string bio = 4;
// Application priority.
ApplicationPriority priority = 5;
}
// ListApplicationsRequest represents request of ListApplications.
message ListApplicationsRequest {
// Request source type.
SourceType source_type = 1;
// Source service hostname.
string hostname = 2;
// Source service ip.
string ip = 3;
}
// ListApplicationsResponse represents response of ListApplications.
message ListApplicationsResponse {
// Application configs.
repeated Application applications = 1;
}
// KeepAliveRequest represents request of KeepAlive.
message KeepAliveRequest {
// Request source type.
SourceType source_type = 1;
// Source service hostname.
string hostname = 2;
// ID of the cluster to which the source service belongs.
uint64 cluster_id = 3;
// Source service ip.
string ip = 4;
}
// Manager RPC Service.
service Manager {
// Get SeedPeer and SeedPeer cluster configuration.
rpc GetSeedPeer(GetSeedPeerRequest) returns(SeedPeer);
// List active seed peers configuration.
rpc ListSeedPeers(ListSeedPeersRequest)returns(ListSeedPeersResponse);
// Update SeedPeer configuration.
rpc UpdateSeedPeer(UpdateSeedPeerRequest) returns(SeedPeer);
// Delete SeedPeer configuration.
rpc DeleteSeedPeer(DeleteSeedPeerRequest) returns(google.protobuf.Empty);
// Get Scheduler and Scheduler cluster configuration.
rpc GetScheduler(GetSchedulerRequest)returns(Scheduler);
// Update scheduler configuration.
rpc UpdateScheduler(UpdateSchedulerRequest) returns(Scheduler);
// List active schedulers configuration.
rpc ListSchedulers(ListSchedulersRequest)returns(ListSchedulersResponse);
// List applications configuration.
rpc ListApplications(ListApplicationsRequest)returns(ListApplicationsResponse);
// KeepAlive with manager.
rpc KeepAlive(stream KeepAliveRequest)returns(google.protobuf.Empty);
}
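KeepAlive is the only client-streaming RPC here: a scheduler or seed peer holds the stream open and sends a heartbeat on a fixed interval, and the manager treats a dropped stream as the service going inactive. A rough sketch with a tonic-generated client; the `dragonfly_api::manager::v2` path, the 5-second interval, and the host values are assumptions:

use std::time::Duration;

use dragonfly_api::manager::v2::{manager_client::ManagerClient, KeepAliveRequest, SourceType};

async fn keep_alive(addr: String) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = ManagerClient::connect(addr).await?;

    // Heartbeat stream: one KeepAliveRequest every 5 seconds until the caller stops.
    let outbound = async_stream::stream! {
        let mut ticker = tokio::time::interval(Duration::from_secs(5));
        loop {
            ticker.tick().await;
            yield KeepAliveRequest {
                source_type: SourceType::SchedulerSource as i32,
                hostname: "scheduler-0".to_string(),
                cluster_id: 1,
                ip: "192.0.2.10".to_string(),
            };
        }
    };

    client.keep_alive(tonic::Request::new(outbound)).await?;
    Ok(())
}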

View File

@ -1,430 +0,0 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package scheduler.v2;
import "common.proto";
import "errordetails.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
// RegisterPeerRequest represents peer registered request of AnnouncePeerRequest.
message RegisterPeerRequest {
// Download information.
common.v2.Download download = 1;
}
// DownloadPeerStartedRequest represents peer download started request of AnnouncePeerRequest.
message DownloadPeerStartedRequest {
}
// DownloadPeerBackToSourceStartedRequest represents peer download back-to-source started request of AnnouncePeerRequest.
message DownloadPeerBackToSourceStartedRequest {
// The description of the back-to-source reason.
optional string description = 1;
}
// ReschedulePeerRequest represents reschedule request of AnnouncePeerRequest.
message ReschedulePeerRequest {
// Candidate parent ids.
repeated common.v2.Peer candidate_parents = 1;
// The description of the reschedule reason.
optional string description = 2;
}
// DownloadPeerFinishedRequest represents peer download finished request of AnnouncePeerRequest.
message DownloadPeerFinishedRequest {
// Total content length.
uint64 content_length = 1;
// Total piece count.
uint32 piece_count = 2;
}
// DownloadPeerBackToSourceFinishedRequest represents peer download back-to-source finished request of AnnouncePeerRequest.
message DownloadPeerBackToSourceFinishedRequest {
// Total content length.
uint64 content_length = 1;
// Total piece count.
uint32 piece_count = 2;
}
// DownloadPeerFailedRequest represents peer download failed request of AnnouncePeerRequest.
message DownloadPeerFailedRequest {
// The description of the download failed.
optional string description = 1;
}
// DownloadPeerBackToSourceFailedRequest represents peer download back-to-source failed request of AnnouncePeerRequest.
message DownloadPeerBackToSourceFailedRequest {
// The description of the download back-to-source failed.
optional string description = 1;
}
// DownloadPieceFinishedRequest represents piece download finished request of AnnouncePeerRequest.
message DownloadPieceFinishedRequest {
// Piece info.
common.v2.Piece piece = 1;
}
// DownloadPieceBackToSourceFinishedRequest represents piece download back-to-source finished request of AnnouncePeerRequest.
message DownloadPieceBackToSourceFinishedRequest {
// Piece info.
common.v2.Piece piece = 1;
}
// DownloadPieceFailedRequest downloads piece failed request of AnnouncePeerRequest.
message DownloadPieceFailedRequest {
// Piece number.
optional uint32 piece_number = 1;
// Parent id.
string parent_id = 2;
// Temporary indicates whether the error is temporary.
bool temporary = 3;
}
// DownloadPieceBackToSourceFailedRequest downloads piece back-to-source failed request of AnnouncePeerRequest.
message DownloadPieceBackToSourceFailedRequest {
// Piece number.
optional uint32 piece_number = 1;
oneof response {
errordetails.v2.Backend backend = 2;
errordetails.v2.Unknown unknown = 3;
}
}
// AnnouncePeerRequest represents request of AnnouncePeer.
message AnnouncePeerRequest {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
// Peer id.
string peer_id = 3;
oneof request {
RegisterPeerRequest register_peer_request = 4;
DownloadPeerStartedRequest download_peer_started_request = 5;
DownloadPeerBackToSourceStartedRequest download_peer_back_to_source_started_request = 6;
ReschedulePeerRequest reschedule_peer_request = 7;
DownloadPeerFinishedRequest download_peer_finished_request = 8;
DownloadPeerBackToSourceFinishedRequest download_peer_back_to_source_finished_request = 9;
DownloadPeerFailedRequest download_peer_failed_request = 10;
DownloadPeerBackToSourceFailedRequest download_peer_back_to_source_failed_request = 11;
DownloadPieceFinishedRequest download_piece_finished_request = 12;
DownloadPieceBackToSourceFinishedRequest download_piece_back_to_source_finished_request = 13;
DownloadPieceFailedRequest download_piece_failed_request = 14;
DownloadPieceBackToSourceFailedRequest download_piece_back_to_source_failed_request = 15;
}
}
// EmptyTaskResponse represents empty task response of AnnouncePeerResponse.
message EmptyTaskResponse {
}
// NormalTaskResponse represents normal task response of AnnouncePeerResponse.
message NormalTaskResponse {
// Candidate parents.
repeated common.v2.Peer candidate_parents = 1;
}
// NeedBackToSourceResponse represents need back-to-source response of AnnouncePeerResponse.
message NeedBackToSourceResponse {
// The description of the back-to-source reason.
optional string description = 1;
}
// AnnouncePeerResponse represents response of AnnouncePeer.
message AnnouncePeerResponse {
oneof response {
EmptyTaskResponse empty_task_response = 1;
NormalTaskResponse normal_task_response = 2;
NeedBackToSourceResponse need_back_to_source_response = 3;
}
}
// StatPeerRequest represents request of StatPeer.
message StatPeerRequest {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
// Peer id.
string peer_id = 3;
}
// DeletePeerRequest represents request of DeletePeer.
message DeletePeerRequest {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
// Peer id.
string peer_id = 3;
}
// StatTaskRequest represents request of StatTask.
message StatTaskRequest {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
}
// DeleteTaskRequest represents request of DeleteTask.
message DeleteTaskRequest {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
}
// AnnounceHostRequest represents request of AnnounceHost.
message AnnounceHostRequest {
// Host information.
common.v2.Host host = 1;
// The interval at which dfdaemon announces itself to the scheduler.
optional google.protobuf.Duration interval = 2;
}
// ListHostsResponse represents response of ListHosts.
message ListHostsResponse {
// Hosts info.
repeated common.v2.Host hosts = 1;
}
// DeleteHostRequest represents request of DeleteHost.
message DeleteHostRequest{
// Host id.
string host_id = 1;
}
// RegisterPersistentCachePeerRequest represents persistent cache peer registered request of AnnouncePersistentCachePeerRequest.
message RegisterPersistentCachePeerRequest {
// Persistent represents whether the persistent cache task is persistent.
// If the persistent cache task is persistent, the persistent cache peer will
// not be deleted when dfdaemon runs garbage collection.
bool persistent = 1;
// Tag is used to distinguish different persistent cache tasks.
optional string tag = 2;
// Application of task.
optional string application = 3;
// Task piece length, the value needs to be greater than or equal to 4194304(4MiB).
uint64 piece_length = 4;
// File path to be exported.
optional string output_path = 5;
// Download timeout.
optional google.protobuf.Duration timeout = 6;
}
// DownloadPersistentCachePeerStartedRequest represents persistent cache peer download started request of AnnouncePersistentCachePeerRequest.
message DownloadPersistentCachePeerStartedRequest {
}
// ReschedulePersistentCachePeerRequest represents reschedule request of AnnouncePersistentCachePeerRequest.
message ReschedulePersistentCachePeerRequest {
// Candidate parent ids.
repeated common.v2.PersistentCachePeer candidate_parents = 1;
// The description of the reschedule reason.
optional string description = 2;
}
// DownloadPersistentCachePeerFinishedRequest represents persistent cache peer download finished request of AnnouncePersistentCachePeerRequest.
message DownloadPersistentCachePeerFinishedRequest {
// Total piece count.
uint32 piece_count = 1;
}
// DownloadPersistentCachePeerFailedRequest represents persistent cache peer download failed request of AnnouncePersistentCachePeerRequest.
message DownloadPersistentCachePeerFailedRequest {
// The description of the download failed.
optional string description = 1;
}
// AnnouncePersistentCachePeerRequest represents request of AnnouncePersistentCachePeer.
message AnnouncePersistentCachePeerRequest {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
// Peer id.
string peer_id = 3;
oneof request {
RegisterPersistentCachePeerRequest register_persistent_cache_peer_request = 4;
DownloadPersistentCachePeerStartedRequest download_persistent_cache_peer_started_request = 5;
ReschedulePersistentCachePeerRequest reschedule_persistent_cache_peer_request = 6;
DownloadPersistentCachePeerFinishedRequest download_persistent_cache_peer_finished_request = 7;
DownloadPersistentCachePeerFailedRequest download_persistent_cache_peer_failed_request = 8;
DownloadPieceFinishedRequest download_piece_finished_request = 9;
DownloadPieceFailedRequest download_piece_failed_request = 10;
}
}
// EmptyPersistentCacheTaskResponse represents empty persistent cache task response of AnnouncePersistentCachePeerResponse.
message EmptyPersistentCacheTaskResponse {
}
// NormalPersistentCacheTaskResponse represents normal persistent cache task response of AnnouncePersistentCachePeerResponse.
message NormalPersistentCacheTaskResponse {
// Candidate parents.
repeated common.v2.PersistentCachePeer candidate_cache_parents = 1;
}
// AnnouncePersistentCachePeerResponse represents response of AnnouncePersistentCachePeer.
message AnnouncePersistentCachePeerResponse {
oneof response {
EmptyPersistentCacheTaskResponse empty_persistent_cache_task_response = 1;
NormalPersistentCacheTaskResponse normal_persistent_cache_task_response = 2;
}
}
// StatPersistentCachePeerRequest represents request of StatPersistentCachePeer.
message StatPersistentCachePeerRequest {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
// Peer id.
string peer_id = 3;
}
// DeletePersistentCachePeerRequest represents request of DeletePersistentCachePeer.
message DeletePersistentCachePeerRequest {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
// Peer id.
string peer_id = 3;
}
// UploadPersistentCacheTaskStartedRequest represents upload persistent cache task started request of UploadPersistentCacheTaskStarted.
message UploadPersistentCacheTaskStartedRequest {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
// Peer id.
string peer_id = 3;
// Replica count of the persistent cache task.
uint64 persistent_replica_count = 4;
// Tag is used to distinguish different persistent cache tasks.
optional string tag = 5;
// Application of task.
optional string application = 6;
// Task piece length, the value needs to be greater than or equal to 4194304(4MiB).
uint64 piece_length = 7;
// Task content length.
uint64 content_length = 8;
// Task piece count.
uint32 piece_count = 9;
// TTL of the persistent cache task.
google.protobuf.Duration ttl = 10;
}
// UploadPersistentCacheTaskFinishedRequest represents upload persistent cache task finished request of UploadPersistentCacheTaskFinished.
message UploadPersistentCacheTaskFinishedRequest {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
// Peer id.
string peer_id = 3;
}
// UploadPersistentCacheTaskFailedRequest represents upload persistent cache task failed request of UploadPersistentCacheTaskFailed.
message UploadPersistentCacheTaskFailedRequest {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
// Peer id.
string peer_id = 3;
// The description of the upload failed.
optional string description = 4;
}
// StatPersistentCacheTaskRequest represents request of StatPersistentCacheTask.
message StatPersistentCacheTaskRequest {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
}
// DeletePersistentCacheTaskRequest represents request of DeletePersistentCacheTask.
message DeletePersistentCacheTaskRequest {
// Host id.
string host_id = 1;
// Task id.
string task_id = 2;
}
// Scheduler RPC Service.
service Scheduler {
// AnnouncePeer announces peer to scheduler.
rpc AnnouncePeer(stream AnnouncePeerRequest) returns(stream AnnouncePeerResponse);
// Checks information of peer.
rpc StatPeer(StatPeerRequest)returns(common.v2.Peer);
// DeletePeer releases peer in scheduler.
rpc DeletePeer(DeletePeerRequest)returns(google.protobuf.Empty);
// Checks information of task.
rpc StatTask(StatTaskRequest)returns(common.v2.Task);
// DeleteTask releases task in scheduler.
rpc DeleteTask(DeleteTaskRequest)returns(google.protobuf.Empty);
// AnnounceHost announces host to scheduler.
rpc AnnounceHost(AnnounceHostRequest)returns(google.protobuf.Empty);
// ListHosts lists hosts in scheduler.
rpc ListHosts(google.protobuf.Empty)returns(ListHostsResponse);
// DeleteHost releases host in scheduler.
rpc DeleteHost(DeleteHostRequest)returns(google.protobuf.Empty);
// AnnouncePersistentCachePeer announces persistent cache peer to scheduler.
rpc AnnouncePersistentCachePeer(stream AnnouncePersistentCachePeerRequest) returns(stream AnnouncePersistentCachePeerResponse);
// Checks information of persistent cache peer.
rpc StatPersistentCachePeer(StatPersistentCachePeerRequest)returns(common.v2.PersistentCachePeer);
// DeletePersistentCachePeer releases persistent cache peer in scheduler.
rpc DeletePersistentCachePeer(DeletePersistentCachePeerRequest)returns(google.protobuf.Empty);
// UploadPersistentCacheTaskStarted uploads persistent cache task started to scheduler.
rpc UploadPersistentCacheTaskStarted(UploadPersistentCacheTaskStartedRequest)returns(google.protobuf.Empty);
// UploadPersistentCacheTaskFinished uploads persistent cache task finished to scheduler.
rpc UploadPersistentCacheTaskFinished(UploadPersistentCacheTaskFinishedRequest)returns(common.v2.PersistentCacheTask);
// UploadPersistentCacheTaskFailed uploads persistent cache task failed to scheduler.
rpc UploadPersistentCacheTaskFailed(UploadPersistentCacheTaskFailedRequest)returns(google.protobuf.Empty);
// Checks information of persistent cache task.
rpc StatPersistentCacheTask(StatPersistentCacheTaskRequest)returns(common.v2.PersistentCacheTask);
// DeletePersistentCacheTask releases persistent cache task in scheduler.
rpc DeletePersistentCacheTask(DeletePersistentCacheTaskRequest)returns(google.protobuf.Empty);
}
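Outside the AnnouncePeer stream, the scheduler also exposes plain unary calls that dfdaemon uses for bookkeeping. The sketch below announces a host and then asks for a task's state; it assumes tonic-generated clients under `dragonfly_api::scheduler::v2`, and the host and task values are placeholders:

use dragonfly_api::common::v2::Host;
use dragonfly_api::scheduler::v2::{
    scheduler_client::SchedulerClient, AnnounceHostRequest, StatTaskRequest,
};

async fn announce_and_stat(
    addr: String,
    host: Host,
    task_id: String,
) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = SchedulerClient::connect(addr).await?;

    // Keep the scheduler's view of this host fresh; the announce interval is optional.
    client
        .announce_host(AnnounceHostRequest {
            host: Some(host.clone()),
            interval: None,
        })
        .await?;

    // Ask the scheduler for the task's current metadata.
    let task = client
        .stat_task(StatTaskRequest {
            host_id: host.id,
            task_id,
        })
        .await?
        .into_inner();
    println!("task {} state={} peers={}", task.id, task.state, task.peer_count);
    Ok(())
}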

View File

@ -1,779 +0,0 @@
// This file is @generated by prost-build.
/// Peer metadata.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Peer {
/// Peer id.
#[prost(string, tag = "1")]
pub id: ::prost::alloc::string::String,
/// Range is url range of request.
#[prost(message, optional, tag = "2")]
pub range: ::core::option::Option<Range>,
/// Peer priority.
#[prost(enumeration = "Priority", tag = "3")]
pub priority: i32,
/// Pieces of peer.
#[prost(message, repeated, tag = "4")]
pub pieces: ::prost::alloc::vec::Vec<Piece>,
/// Peer downloads costs time.
#[prost(message, optional, tag = "5")]
pub cost: ::core::option::Option<::prost_wkt_types::Duration>,
/// Peer state.
#[prost(string, tag = "6")]
pub state: ::prost::alloc::string::String,
/// Task info.
#[prost(message, optional, tag = "7")]
pub task: ::core::option::Option<Task>,
/// Host info.
#[prost(message, optional, tag = "8")]
pub host: ::core::option::Option<Host>,
/// NeedBackToSource indicates whether the peer needs to download from the source.
#[prost(bool, tag = "9")]
pub need_back_to_source: bool,
/// Peer create time.
#[prost(message, optional, tag = "10")]
pub created_at: ::core::option::Option<::prost_wkt_types::Timestamp>,
/// Peer update time.
#[prost(message, optional, tag = "11")]
pub updated_at: ::core::option::Option<::prost_wkt_types::Timestamp>,
}
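// Note: because the generated types above derive serde::Serialize/Deserialize,
// peer metadata can be serialized directly, e.g. for logs or debug endpoints.
// A small usage sketch, not part of the generated output; `serde_json` is an
// assumed dependency.
fn peer_to_json(peer: &Peer) -> serde_json::Result<String> {
    serde_json::to_string_pretty(peer)
}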
/// PersistentCachePeer metadata.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PersistentCachePeer {
/// Peer id.
#[prost(string, tag = "1")]
pub id: ::prost::alloc::string::String,
/// Persistent represents whether the persistent cache peer is persistent.
/// If the persistent cache peer is persistent, the persistent cache peer will
/// not be deleted when dfdaemon runs garbage collection. It will only be deleted
/// when the task is deleted by the user.
#[prost(bool, tag = "2")]
pub persistent: bool,
/// Peer downloads costs time.
#[prost(message, optional, tag = "3")]
pub cost: ::core::option::Option<::prost_wkt_types::Duration>,
/// Peer state.
#[prost(string, tag = "4")]
pub state: ::prost::alloc::string::String,
/// Persistent task info.
#[prost(message, optional, tag = "5")]
pub task: ::core::option::Option<PersistentCacheTask>,
/// Host info.
#[prost(message, optional, tag = "6")]
pub host: ::core::option::Option<Host>,
/// Peer create time.
#[prost(message, optional, tag = "7")]
pub created_at: ::core::option::Option<::prost_wkt_types::Timestamp>,
/// Peer update time.
#[prost(message, optional, tag = "8")]
pub updated_at: ::core::option::Option<::prost_wkt_types::Timestamp>,
}
/// Task metadata.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Task {
/// Task id.
#[prost(string, tag = "1")]
pub id: ::prost::alloc::string::String,
/// Task type.
#[prost(enumeration = "TaskType", tag = "2")]
pub r#type: i32,
/// Download url.
#[prost(string, tag = "3")]
pub url: ::prost::alloc::string::String,
/// Verifies task data integrity after download using a digest. Supports CRC32, SHA256, and SHA512 algorithms.
/// Format: `<algorithm>:<hash>`, e.g., `crc32:xxx`, `sha256:yyy`, `sha512:zzz`.
/// Returns an error if the computed digest mismatches the expected value.
///
/// Performance
/// Digest calculation increases processing time. Enable only when data integrity verification is critical.
#[prost(string, optional, tag = "4")]
pub digest: ::core::option::Option<::prost::alloc::string::String>,
/// URL tag identifies different task for same url.
#[prost(string, optional, tag = "5")]
pub tag: ::core::option::Option<::prost::alloc::string::String>,
/// Application of task.
#[prost(string, optional, tag = "6")]
pub application: ::core::option::Option<::prost::alloc::string::String>,
/// Filtered query params to generate the task id.
/// When filter is \["Signature", "Expires", "ns"\], for example:
/// <http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io> and <http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io>
/// will generate the same task id.
/// Default value includes the filtered query params of s3, gcs, oss, obs, cos.
#[prost(string, repeated, tag = "7")]
pub filtered_query_params: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// Task request headers.
#[prost(map = "string, string", tag = "8")]
pub request_header: ::std::collections::HashMap<
::prost::alloc::string::String,
::prost::alloc::string::String,
>,
/// Task content length.
#[prost(uint64, tag = "9")]
pub content_length: u64,
/// Task piece count.
#[prost(uint32, tag = "10")]
pub piece_count: u32,
/// Task size scope.
#[prost(enumeration = "SizeScope", tag = "11")]
pub size_scope: i32,
/// Pieces of task.
#[prost(message, repeated, tag = "12")]
pub pieces: ::prost::alloc::vec::Vec<Piece>,
/// Task state.
#[prost(string, tag = "13")]
pub state: ::prost::alloc::string::String,
/// Task peer count.
#[prost(uint32, tag = "14")]
pub peer_count: u32,
/// Task contains available peer.
#[prost(bool, tag = "15")]
pub has_available_peer: bool,
/// Task create time.
#[prost(message, optional, tag = "16")]
pub created_at: ::core::option::Option<::prost_wkt_types::Timestamp>,
/// Task update time.
#[prost(message, optional, tag = "17")]
pub updated_at: ::core::option::Option<::prost_wkt_types::Timestamp>,
}
/// PersistentCacheTask metadata.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PersistentCacheTask {
/// Task id.
#[prost(string, tag = "1")]
pub id: ::prost::alloc::string::String,
/// Replica count of the persistent cache task. The persistent cache task will
/// not be deleted when dfdaemon runs garbage collection. It will only be deleted
/// when the task is deleted by the user.
#[prost(uint64, tag = "2")]
pub persistent_replica_count: u64,
/// Current replica count of the persistent cache task. The persistent cache task
/// will not be deleted when dfdaemon runs garbage collection. It will only be deleted
/// when the task is deleted by the user.
#[prost(uint64, tag = "3")]
pub current_persistent_replica_count: u64,
/// Current replica count of the cache task. If cache task is not persistent,
/// the persistent cache task will be deleted when dfdaemon runs garbage collection.
#[prost(uint64, tag = "4")]
pub current_replica_count: u64,
/// Tag is used to distinguish different persistent cache tasks.
#[prost(string, optional, tag = "5")]
pub tag: ::core::option::Option<::prost::alloc::string::String>,
/// Application of task.
#[prost(string, optional, tag = "6")]
pub application: ::core::option::Option<::prost::alloc::string::String>,
/// Task piece length.
#[prost(uint64, tag = "7")]
pub piece_length: u64,
/// Task content length.
#[prost(uint64, tag = "8")]
pub content_length: u64,
/// Task piece count.
#[prost(uint32, tag = "9")]
pub piece_count: u32,
/// Task state.
#[prost(string, tag = "10")]
pub state: ::prost::alloc::string::String,
/// TTL of the persistent cache task.
#[prost(message, optional, tag = "11")]
pub ttl: ::core::option::Option<::prost_wkt_types::Duration>,
/// Task create time.
#[prost(message, optional, tag = "12")]
pub created_at: ::core::option::Option<::prost_wkt_types::Timestamp>,
/// Task update time.
#[prost(message, optional, tag = "13")]
pub updated_at: ::core::option::Option<::prost_wkt_types::Timestamp>,
}
/// Host metadata.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Host {
/// Host id.
#[prost(string, tag = "1")]
pub id: ::prost::alloc::string::String,
/// Host type.
#[prost(uint32, tag = "2")]
pub r#type: u32,
/// Hostname.
#[prost(string, tag = "3")]
pub hostname: ::prost::alloc::string::String,
/// Host ip.
#[prost(string, tag = "4")]
pub ip: ::prost::alloc::string::String,
/// Port of grpc service.
#[prost(int32, tag = "5")]
pub port: i32,
/// Port of download server.
#[prost(int32, tag = "6")]
pub download_port: i32,
/// Host OS.
#[prost(string, tag = "7")]
pub os: ::prost::alloc::string::String,
/// Host platform.
#[prost(string, tag = "8")]
pub platform: ::prost::alloc::string::String,
/// Host platform family.
#[prost(string, tag = "9")]
pub platform_family: ::prost::alloc::string::String,
/// Host platform version.
#[prost(string, tag = "10")]
pub platform_version: ::prost::alloc::string::String,
/// Host kernel version.
#[prost(string, tag = "11")]
pub kernel_version: ::prost::alloc::string::String,
/// CPU Stat.
#[prost(message, optional, tag = "12")]
pub cpu: ::core::option::Option<Cpu>,
/// Memory Stat.
#[prost(message, optional, tag = "13")]
pub memory: ::core::option::Option<Memory>,
/// Network Stat.
#[prost(message, optional, tag = "14")]
pub network: ::core::option::Option<Network>,
/// Disk Stat.
#[prost(message, optional, tag = "15")]
pub disk: ::core::option::Option<Disk>,
/// Build information.
#[prost(message, optional, tag = "16")]
pub build: ::core::option::Option<Build>,
/// ID of the cluster to which the host belongs.
#[prost(uint64, tag = "17")]
pub scheduler_cluster_id: u64,
/// Disable shared data for other peers.
#[prost(bool, tag = "18")]
pub disable_shared: bool,
}
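// A minimal sketch (editor's example, not part of the generated file): populating a `Host`
// together with its optional nested stat messages. Hostname, IP, ports and stat values are
// placeholders, not real defaults.
#[cfg(test)]
mod host_example {
    use super::{Cpu, Host, Memory};

    #[test]
    fn build_host() {
        let host = Host {
            id: "example-host-id".to_string(),
            hostname: "worker-01".to_string(),
            ip: "192.168.0.10".to_string(),
            port: 4000,
            download_port: 4001,
            // Nested stats are optional messages and are wrapped in `Some`.
            cpu: Some(Cpu {
                logical_count: 8,
                physical_count: 4,
                percent: 12.5,
                ..Default::default()
            }),
            memory: Some(Memory {
                total: 16 * 1024 * 1024 * 1024,
                used_percent: 42.0,
                ..Default::default()
            }),
            ..Default::default()
        };
        assert!(host.cpu.is_some() && host.memory.is_some());
    }
}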
/// CPU Stat.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Cpu {
/// Number of logical cores in the system.
#[prost(uint32, tag = "1")]
pub logical_count: u32,
/// Number of physical cores in the system.
#[prost(uint32, tag = "2")]
pub physical_count: u32,
/// Percent calculates the percentage of cpu used.
#[prost(double, tag = "3")]
pub percent: f64,
/// Calculates the percentage of cpu used by process.
#[prost(double, tag = "4")]
pub process_percent: f64,
/// CPUTimes contains the amounts of time the CPU has spent performing different kinds of work.
#[prost(message, optional, tag = "5")]
pub times: ::core::option::Option<CpuTimes>,
}
/// CPUTimes contains the amounts of time the CPU has spent performing different
/// kinds of work. Time units are in seconds.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct CpuTimes {
/// CPU time of user.
#[prost(double, tag = "1")]
pub user: f64,
/// CPU time of system.
#[prost(double, tag = "2")]
pub system: f64,
/// CPU time of idle.
#[prost(double, tag = "3")]
pub idle: f64,
/// CPU time of nice.
#[prost(double, tag = "4")]
pub nice: f64,
/// CPU time of iowait.
#[prost(double, tag = "5")]
pub iowait: f64,
/// CPU time of irq.
#[prost(double, tag = "6")]
pub irq: f64,
/// CPU time of softirq.
#[prost(double, tag = "7")]
pub softirq: f64,
/// CPU time of steal.
#[prost(double, tag = "8")]
pub steal: f64,
/// CPU time of guest.
#[prost(double, tag = "9")]
pub guest: f64,
/// CPU time of guest nice.
#[prost(double, tag = "10")]
pub guest_nice: f64,
}
/// Memory Stat.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Memory {
/// Total amount of RAM on this system.
#[prost(uint64, tag = "1")]
pub total: u64,
/// RAM available for programs to allocate.
#[prost(uint64, tag = "2")]
pub available: u64,
/// RAM used by programs.
#[prost(uint64, tag = "3")]
pub used: u64,
/// Percentage of RAM used by programs.
#[prost(double, tag = "4")]
pub used_percent: f64,
/// Calculates the percentage of memory used by process.
#[prost(double, tag = "5")]
pub process_used_percent: f64,
/// This is the kernel's notion of free memory.
#[prost(uint64, tag = "6")]
pub free: u64,
}
/// Network Stat.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Network {
/// Count of open TCP connections whose status is ESTABLISHED.
#[prost(uint32, tag = "1")]
pub tcp_connection_count: u32,
/// Count of open upload TCP connections whose status is ESTABLISHED.
#[prost(uint32, tag = "2")]
pub upload_tcp_connection_count: u32,
/// Location path (area|country|province|city|...).
#[prost(string, optional, tag = "3")]
pub location: ::core::option::Option<::prost::alloc::string::String>,
/// IDC where the peer host is located.
#[prost(string, optional, tag = "4")]
pub idc: ::core::option::Option<::prost::alloc::string::String>,
/// Download rate is received bytes per second.
#[prost(uint64, tag = "5")]
pub download_rate: u64,
/// Download rate limit is the maximum number of received bytes per second.
#[prost(uint64, tag = "6")]
pub download_rate_limit: u64,
/// Upload rate is transmitted bytes per second.
#[prost(uint64, tag = "7")]
pub upload_rate: u64,
/// Upload rate limit is the maximum number of transmitted bytes per second.
#[prost(uint64, tag = "8")]
pub upload_rate_limit: u64,
}
/// Disk Stat.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Disk {
/// Total amount of disk on the data path of dragonfly.
#[prost(uint64, tag = "1")]
pub total: u64,
/// Free amount of disk on the data path of dragonfly.
#[prost(uint64, tag = "2")]
pub free: u64,
/// Used amount of disk on the data path of dragonfly.
#[prost(uint64, tag = "3")]
pub used: u64,
/// Used percent of disk on the data path of dragonfly directory.
#[prost(double, tag = "4")]
pub used_percent: f64,
/// Total amount of inodes on the data path of the dragonfly directory.
#[prost(uint64, tag = "5")]
pub inodes_total: u64,
/// Used amount of inodes on the data path of the dragonfly directory.
#[prost(uint64, tag = "6")]
pub inodes_used: u64,
/// Free amount of inodes on the data path of the dragonfly directory.
#[prost(uint64, tag = "7")]
pub inodes_free: u64,
/// Used percent of inodes on the data path of the dragonfly directory.
#[prost(double, tag = "8")]
pub inodes_used_percent: f64,
/// Disk read bandwidth, in bytes per second.
#[prost(uint64, tag = "9")]
pub read_bandwidth: u64,
/// Disk write bandwidth, in bytes per second.
#[prost(uint64, tag = "10")]
pub write_bandwidth: u64,
}
/// Build information.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Build {
/// Git version.
#[prost(string, tag = "1")]
pub git_version: ::prost::alloc::string::String,
/// Git commit.
#[prost(string, optional, tag = "2")]
pub git_commit: ::core::option::Option<::prost::alloc::string::String>,
/// Golang version.
#[prost(string, optional, tag = "3")]
pub go_version: ::core::option::Option<::prost::alloc::string::String>,
/// Rust version.
#[prost(string, optional, tag = "4")]
pub rust_version: ::core::option::Option<::prost::alloc::string::String>,
/// Build platform.
#[prost(string, optional, tag = "5")]
pub platform: ::core::option::Option<::prost::alloc::string::String>,
}
/// Download information.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Download {
/// Download url.
#[prost(string, tag = "1")]
pub url: ::prost::alloc::string::String,
/// Digest of the task content, for example blake3:xxx or sha256:yyy.
#[prost(string, optional, tag = "2")]
pub digest: ::core::option::Option<::prost::alloc::string::String>,
/// Range is the URL range of the request. If the protocol is HTTP, the range
/// is set in the request header. For other protocols, the range
/// is set in the range field.
#[prost(message, optional, tag = "3")]
pub range: ::core::option::Option<Range>,
/// Task type.
#[prost(enumeration = "TaskType", tag = "4")]
pub r#type: i32,
/// URL tag identifies different tasks for the same URL.
#[prost(string, optional, tag = "5")]
pub tag: ::core::option::Option<::prost::alloc::string::String>,
/// Application of task.
#[prost(string, optional, tag = "6")]
pub application: ::core::option::Option<::prost::alloc::string::String>,
/// Peer priority.
#[prost(enumeration = "Priority", tag = "7")]
pub priority: i32,
/// Filtered query params to generate the task id.
/// When filter is \["Signature", "Expires", "ns"\], for example:
/// <http://example.com/xyz?Expires=e1&Signature=s1&ns=docker.io> and <http://example.com/xyz?Expires=e2&Signature=s2&ns=docker.io>
/// will generate the same task id.
/// Default value includes the filtered query params of s3, gcs, oss, obs, cos.
#[prost(string, repeated, tag = "8")]
pub filtered_query_params: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// Task request headers.
#[prost(map = "string, string", tag = "9")]
pub request_header: ::std::collections::HashMap<
::prost::alloc::string::String,
::prost::alloc::string::String,
>,
/// Task piece length.
#[prost(uint64, optional, tag = "10")]
pub piece_length: ::core::option::Option<u64>,
/// File path to be downloaded. If output_path is set, the downloaded file will be saved to the specified path.
/// Dfdaemon will try to create a hard link to the output path before starting the download. If hard link creation fails,
/// it will copy the file to the output path after the download is completed.
/// For more details refer to <https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md>.
#[prost(string, optional, tag = "11")]
pub output_path: ::core::option::Option<::prost::alloc::string::String>,
/// Download timeout.
#[prost(message, optional, tag = "12")]
pub timeout: ::core::option::Option<::prost_wkt_types::Duration>,
/// Dfdaemon cannot download the task from the source if disable_back_to_source is true.
#[prost(bool, tag = "13")]
pub disable_back_to_source: bool,
/// Scheduler needs to schedule the task downloads from the source if need_back_to_source is true.
#[prost(bool, tag = "14")]
pub need_back_to_source: bool,
/// certificate_chain is the client certificates in DER format used by the backend client to download back-to-source.
#[prost(bytes = "vec", repeated, tag = "15")]
pub certificate_chain: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
/// Prefetch pre-downloads all pieces of the task when the download task request is a range request.
#[prost(bool, tag = "16")]
pub prefetch: bool,
/// Object storage protocol information.
#[prost(message, optional, tag = "17")]
pub object_storage: ::core::option::Option<ObjectStorage>,
/// HDFS protocol information.
#[prost(message, optional, tag = "18")]
pub hdfs: ::core::option::Option<Hdfs>,
/// is_prefetch is the flag to indicate whether the request is a prefetch request.
#[prost(bool, tag = "19")]
pub is_prefetch: bool,
/// need_piece_content is the flag to indicate whether the response needs to return piece content.
#[prost(bool, tag = "20")]
pub need_piece_content: bool,
/// load_to_cache indicates whether the content downloaded will be stored in the cache storage.
/// Cache storage is designed to store downloaded piece content from preheat tasks,
/// allowing other peers to access the content from memory instead of disk.
#[prost(bool, tag = "21")]
pub load_to_cache: bool,
/// force_hard_link is the flag to indicate whether the download file must be hard linked to the output path.
/// For more details refer to <https://github.com/dragonflyoss/design/blob/main/systems-analysis/file-download-workflow-with-hard-link/README.md>.
#[prost(bool, tag = "22")]
pub force_hard_link: bool,
/// content_for_calculating_task_id is the content used to calculate the task id.
/// If content_for_calculating_task_id is set, use its value to calculate the task ID.
/// Otherwise, calculate the task ID based on url, piece_length, tag, application, and filtered_query_params.
#[prost(string, optional, tag = "23")]
pub content_for_calculating_task_id: ::core::option::Option<
::prost::alloc::string::String,
>,
/// remote_ip represents the IP address of the client initiating the download request.
/// For proxy requests, it is set to the IP address of the request source.
/// For dfget requests, it is set to the IP address of the dfget.
#[prost(string, optional, tag = "24")]
pub remote_ip: ::core::option::Option<::prost::alloc::string::String>,
}
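// A minimal sketch (editor's example, not part of the generated file): a range download
// request that filters signing query params out of the task id and sets a timeout. The URL
// and query parameter names are assumptions used only for illustration.
#[cfg(test)]
mod download_example {
    use super::{Download, Range, TaskType};

    #[test]
    fn build_range_download() {
        let download = Download {
            url: "http://example.com/object?Expires=e1&Signature=s1".to_string(),
            r#type: TaskType::Standard as i32,
            // URLs that differ only in these query params map to the same task id.
            filtered_query_params: vec!["Expires".to_string(), "Signature".to_string()],
            // Download bytes [0, 1 MiB); for HTTP this is carried in the Range request header.
            range: Some(Range {
                start: 0,
                length: 1024 * 1024,
            }),
            prefetch: true,
            timeout: Some(::prost_wkt_types::Duration {
                seconds: 300,
                nanos: 0,
            }),
            ..Default::default()
        };
        assert_eq!(download.filtered_query_params.len(), 2);
    }
}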
/// Object Storage related information.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ObjectStorage {
/// Region is the region of the object storage service.
#[prost(string, optional, tag = "1")]
pub region: ::core::option::Option<::prost::alloc::string::String>,
/// Endpoint is the endpoint of the object storage service.
#[prost(string, optional, tag = "2")]
pub endpoint: ::core::option::Option<::prost::alloc::string::String>,
/// Access key ID used to access the object storage service.
#[prost(string, optional, tag = "3")]
pub access_key_id: ::core::option::Option<::prost::alloc::string::String>,
/// Access key secret used to access the object storage service.
#[prost(string, optional, tag = "4")]
pub access_key_secret: ::core::option::Option<::prost::alloc::string::String>,
/// Session token used to access the S3 storage service.
#[prost(string, optional, tag = "5")]
pub session_token: ::core::option::Option<::prost::alloc::string::String>,
/// Local path to credential file for Google Cloud Storage service OAuth2 authentication.
#[prost(string, optional, tag = "6")]
pub credential_path: ::core::option::Option<::prost::alloc::string::String>,
/// Predefined ACL used for the Google Cloud Storage service.
#[prost(string, optional, tag = "7")]
pub predefined_acl: ::core::option::Option<::prost::alloc::string::String>,
}
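// A minimal sketch (editor's example, not part of the generated file): the same message
// carries either S3-style or GCS-style credentials; only the relevant optional fields are
// set. All credential values below are placeholders.
#[cfg(test)]
mod object_storage_example {
    use super::ObjectStorage;

    #[test]
    fn build_s3_and_gcs_credentials() {
        // S3-style: region, access key pair and an optional session token.
        let s3 = ObjectStorage {
            region: Some("us-east-1".to_string()),
            access_key_id: Some("AKIDEXAMPLE".to_string()),
            access_key_secret: Some("example-secret".to_string()),
            session_token: Some("example-session-token".to_string()),
            ..Default::default()
        };
        // GCS-style: OAuth2 credential file and a predefined ACL instead of access keys.
        let gcs = ObjectStorage {
            credential_path: Some("/etc/gcs/credential.json".to_string()),
            predefined_acl: Some("publicRead".to_string()),
            ..Default::default()
        };
        assert!(s3.access_key_id.is_some() && gcs.credential_path.is_some());
    }
}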
/// HDFS related information.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Hdfs {
/// Delegation token for Web HDFS operator.
#[prost(string, optional, tag = "1")]
pub delegation_token: ::core::option::Option<::prost::alloc::string::String>,
}
/// Range represents download range.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Range {
/// Start of range.
#[prost(uint64, tag = "1")]
pub start: u64,
/// Length of range.
#[prost(uint64, tag = "2")]
pub length: u64,
}
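// A minimal sketch (editor's example, not part of the generated file): how a `Range`
// (start + length) maps onto an inclusive HTTP Range header for HTTP downloads, as
// described on `Download.range` above. `to_http_range_header` is a hypothetical helper
// and assumes `length > 0`.
#[cfg(test)]
mod range_example {
    use super::Range;

    /// Formats a `Range` as an HTTP `Range` header value with an inclusive end offset.
    fn to_http_range_header(range: &Range) -> String {
        format!("bytes={}-{}", range.start, range.start + range.length - 1)
    }

    #[test]
    fn format_http_range() {
        let range = Range {
            start: 0,
            length: 1024,
        };
        assert_eq!(to_http_range_header(&range), "bytes=0-1023");
    }
}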
/// Piece represents information of piece.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Piece {
/// Piece number.
#[prost(uint32, tag = "1")]
pub number: u32,
/// Parent peer id.
#[prost(string, optional, tag = "2")]
pub parent_id: ::core::option::Option<::prost::alloc::string::String>,
/// Piece offset.
#[prost(uint64, tag = "3")]
pub offset: u64,
/// Piece length.
#[prost(uint64, tag = "4")]
pub length: u64,
/// Digest of the piece data, for example blake3:xxx or sha256:yyy.
#[prost(string, tag = "5")]
pub digest: ::prost::alloc::string::String,
/// Piece content.
#[prost(bytes = "vec", optional, tag = "6")]
pub content: ::core::option::Option<::prost::alloc::vec::Vec<u8>>,
/// Traffic type.
#[prost(enumeration = "TrafficType", optional, tag = "7")]
pub traffic_type: ::core::option::Option<i32>,
/// Time cost of downloading the piece.
#[prost(message, optional, tag = "8")]
pub cost: ::core::option::Option<::prost_wkt_types::Duration>,
/// Piece create time.
#[prost(message, optional, tag = "9")]
pub created_at: ::core::option::Option<::prost_wkt_types::Timestamp>,
}
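// A minimal sketch (editor's example, not part of the generated file): splitting a piece
// digest of the form "<algorithm>:<encoded hash>" (for example "sha256:yyy") into its two
// parts. `split_digest` is a hypothetical helper, not part of the API.
#[cfg(test)]
mod piece_digest_example {
    /// Splits "algorithm:hash" into (algorithm, hash); returns None if the separator is missing.
    fn split_digest(digest: &str) -> Option<(&str, &str)> {
        digest.split_once(':')
    }

    #[test]
    fn split_piece_digest() {
        assert_eq!(split_digest("sha256:abc123"), Some(("sha256", "abc123")));
        assert_eq!(split_digest("no-separator"), None);
    }
}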
/// SizeScope represents size scope of task.
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum SizeScope {
/// size > one piece size.
Normal = 0,
/// 128 bytes < size <= one piece size and the content is plain type.
Small = 1,
/// size <= 128 bytes and the content is plain type.
Tiny = 2,
/// size == 0 bytes and the content is plain type.
Empty = 3,
}
impl SizeScope {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
SizeScope::Normal => "NORMAL",
SizeScope::Small => "SMALL",
SizeScope::Tiny => "TINY",
SizeScope::Empty => "EMPTY",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"NORMAL" => Some(Self::Normal),
"SMALL" => Some(Self::Small),
"TINY" => Some(Self::Tiny),
"EMPTY" => Some(Self::Empty),
_ => None,
}
}
}
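// A minimal sketch (editor's example, not part of the generated file): round-tripping an
// enum value through the ProtoBuf field names with the generated `as_str_name` and
// `from_str_name` helpers. The same pattern applies to `TaskType`, `TrafficType` and
// `Priority` below.
#[cfg(test)]
mod size_scope_example {
    use super::SizeScope;

    #[test]
    fn round_trip_str_name() {
        let scope = SizeScope::Small;
        let name = scope.as_str_name();
        assert_eq!(name, "SMALL");
        assert_eq!(SizeScope::from_str_name(name), Some(scope));
    }
}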
/// TaskType represents type of task.
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum TaskType {
/// STANDARD is the standard type of task; it can download from the source, remote peers and
/// the local peer (local cache). When the standard task has never been downloaded in the
/// P2P cluster, dfdaemon will download the task from the source. When the standard
/// task has already been downloaded in the P2P cluster, dfdaemon will download the task from
/// a remote peer or the local peer (local cache).
Standard = 0,
/// PERSISTENT is the persistent type of task; it can import and export files in the P2P cluster.
/// When the persistent task is imported into the P2P cluster, dfdaemon will store
/// the task on the peer's disk and copy multiple replicas to remote peers to
/// prevent data loss.
Persistent = 1,
/// PERSISTENT_CACHE is the persistent cache type of task; it can import and export files in the P2P cluster.
/// When the persistent cache task is imported into the P2P cluster, dfdaemon will store
/// the task on the peer's disk and copy multiple replicas to remote peers to prevent data loss.
/// When the expiration time is reached, the task will be deleted from the P2P cluster.
PersistentCache = 2,
}
impl TaskType {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
TaskType::Standard => "STANDARD",
TaskType::Persistent => "PERSISTENT",
TaskType::PersistentCache => "PERSISTENT_CACHE",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"STANDARD" => Some(Self::Standard),
"PERSISTENT" => Some(Self::Persistent),
"PERSISTENT_CACHE" => Some(Self::PersistentCache),
_ => None,
}
}
}
/// TrafficType represents type of traffic.
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum TrafficType {
/// BACK_TO_SOURCE is traffic downloaded from the source.
BackToSource = 0,
/// REMOTE_PEER is traffic downloaded from a remote peer.
RemotePeer = 1,
/// LOCAL_PEER is traffic downloaded from the local peer.
LocalPeer = 2,
}
impl TrafficType {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
TrafficType::BackToSource => "BACK_TO_SOURCE",
TrafficType::RemotePeer => "REMOTE_PEER",
TrafficType::LocalPeer => "LOCAL_PEER",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"BACK_TO_SOURCE" => Some(Self::BackToSource),
"REMOTE_PEER" => Some(Self::RemotePeer),
"LOCAL_PEER" => Some(Self::LocalPeer),
_ => None,
}
}
}
/// Priority represents priority of application.
#[derive(serde::Serialize, serde::Deserialize)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum Priority {
/// LEVEL0 has no special meaning for scheduler.
Level0 = 0,
/// LEVEL1 represents that the download task is forbidden,
/// and an error code is returned during registration.
Level1 = 1,
/// LEVEL2 represents that when the task is downloaded for the first time,
/// peers are allowed to download from other peers,
/// but not back-to-source. When the task is not being downloaded for
/// the first time, it is scheduled normally.
Level2 = 2,
/// LEVEL3 represents that when the task is downloaded for the first time,
/// a normal peer is triggered first to download back-to-source.
/// When the task is not being downloaded for the first time, it is scheduled normally.
Level3 = 3,
/// LEVEL4 represents that when the task is downloaded for the first time,
/// a weak peer is triggered first to download back-to-source.
/// When the task is not being downloaded for the first time, it is scheduled normally.
Level4 = 4,
/// LEVEL5 represents that when the task is downloaded for the first time,
/// a strong peer is triggered first to download back-to-source.
/// When the task is not being downloaded for the first time, it is scheduled normally.
Level5 = 5,
/// LEVEL6 represents that when the task is downloaded for the first time,
/// a super peer is triggered first to download back-to-source.
/// When the task is not being downloaded for the first time, it is scheduled normally.
Level6 = 6,
}
impl Priority {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Priority::Level0 => "LEVEL0",
Priority::Level1 => "LEVEL1",
Priority::Level2 => "LEVEL2",
Priority::Level3 => "LEVEL3",
Priority::Level4 => "LEVEL4",
Priority::Level5 => "LEVEL5",
Priority::Level6 => "LEVEL6",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"LEVEL0" => Some(Self::Level0),
"LEVEL1" => Some(Self::Level1),
"LEVEL2" => Some(Self::Level2),
"LEVEL3" => Some(Self::Level3),
"LEVEL4" => Some(Self::Level4),
"LEVEL5" => Some(Self::Level5),
"LEVEL6" => Some(Self::Level6),
_ => None,
}
}
}
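// A minimal sketch (editor's example, not part of the generated file): enumeration fields
// such as `Download.priority` are plain `i32` on the wire, so enum values are written with
// an `as i32` cast and compared the same way when read back. The URL is a placeholder.
#[cfg(test)]
mod priority_field_example {
    use super::{Download, Priority};

    #[test]
    fn set_priority_on_download() {
        let download = Download {
            url: "http://example.com/file".to_string(),
            priority: Priority::Level3 as i32,
            ..Default::default()
        };
        assert_eq!(download.priority, Priority::Level3 as i32);
    }
}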

Binary file not shown.

File diff suppressed because it is too large

View File

@ -1,28 +0,0 @@
// This file is @generated by prost-build.
/// Backend is error detail for Backend.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Backend {
/// Backend error message.
#[prost(string, tag = "1")]
pub message: ::prost::alloc::string::String,
/// Backend HTTP response header.
#[prost(map = "string, string", tag = "2")]
pub header: ::std::collections::HashMap<
::prost::alloc::string::String,
::prost::alloc::string::String,
>,
/// Backend HTTP status code.
#[prost(int32, optional, tag = "3")]
pub status_code: ::core::option::Option<i32>,
}
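// A minimal sketch (editor's example, not part of the generated file): an HTTP backend
// failure carried as a `Backend` error detail. The header entry and status code are
// illustrative values only.
#[cfg(test)]
mod backend_error_example {
    use super::Backend;

    #[test]
    fn build_backend_error() {
        let mut header = std::collections::HashMap::new();
        header.insert("Content-Type".to_string(), "text/html".to_string());
        let backend = Backend {
            message: "object not found".to_string(),
            header,
            status_code: Some(404),
        };
        assert_eq!(backend.status_code, Some(404));
    }
}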
/// Unknown is error detail for Unknown.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Unknown {
/// Unknown error message.
#[prost(string, optional, tag = "1")]
pub message: ::core::option::Option<::prost::alloc::string::String>,
}

View File

@ -1,32 +0,0 @@
#[path = ""]
pub mod common {
#[path = "common.v2.rs"]
pub mod v2;
}
#[path = ""]
pub mod errordetails {
#[path = "errordetails.v2.rs"]
pub mod v2;
}
#[path = ""]
pub mod dfdaemon {
#[path = "dfdaemon.v2.rs"]
pub mod v2;
}
#[path = ""]
pub mod manager {
#[path = "manager.v2.rs"]
pub mod v2;
}
#[path = ""]
pub mod scheduler {
#[path = "scheduler.v2.rs"]
pub mod v2;
}
// FILE_DESCRIPTOR_SET is the serialized FileDescriptorSet of the proto files.
pub const FILE_DESCRIPTOR_SET: &[u8] = include_bytes!("descriptor.bin");

File diff suppressed because it is too large

File diff suppressed because it is too large