From d79f6405ff79f94a4a0bf97e8324c6914acf71a8 Mon Sep 17 00:00:00 2001 From: Gaius Date: Tue, 2 Aug 2022 17:31:39 +0800 Subject: [PATCH] feat: replace grpc package with https://github.com/dragonflyoss/api (#1515) Signed-off-by: Gaius Co-authored-by: Jim Ma --- .golangci.yml | 1 + Makefile | 6 - client/config/dynconfig.go | 23 +- client/config/dynconfig_test.go | 171 +- client/config/mocks/dynconfig_mock.go | 10 +- client/config/peerhost.go | 13 +- client/daemon/daemon.go | 43 +- client/daemon/daemon_test.go | 21 +- client/daemon/objectstorage/objectstorage.go | 9 +- client/daemon/peer/peertask_conductor.go | 187 +- client/daemon/peer/peertask_dummy.go | 25 +- client/daemon/peer/peertask_file.go | 17 +- client/daemon/peer/peertask_manager.go | 41 +- client/daemon/peer/peertask_manager_mock.go | 12 +- client/daemon/peer/peertask_manager_test.go | 133 +- .../daemon/peer/peertask_piecetask_poller.go | 49 +- .../peer/peertask_piecetask_synchronizer.go | 57 +- .../peertask_piecetask_synchronizer_test.go | 9 +- client/daemon/peer/peertask_reuse.go | 5 +- client/daemon/peer/peertask_reuse_test.go | 67 +- client/daemon/peer/peertask_seed.go | 5 +- client/daemon/peer/peertask_stream.go | 11 +- ...peertask_stream_backsource_partial_test.go | 55 +- client/daemon/peer/piece_downloader.go | 5 +- client/daemon/peer/piece_downloader_test.go | 7 +- client/daemon/peer/piece_manager.go | 35 +- client/daemon/peer/piece_manager_test.go | 9 +- client/daemon/proxy/proxy.go | 15 +- client/daemon/proxy/proxy_manager.go | 7 +- client/daemon/rpcserver/rpcserver.go | 99 +- client/daemon/rpcserver/rpcserver_test.go | 77 +- client/daemon/rpcserver/seeder.go | 33 +- client/daemon/rpcserver/seeder_test.go | 43 +- client/daemon/rpcserver/subscriber.go | 21 +- client/daemon/storage/local_storage.go | 13 +- .../daemon/storage/local_storage_subtask.go | 13 +- client/daemon/storage/local_storage_test.go | 7 +- client/daemon/storage/metadata.go | 13 +- .../storage/mocks/stroage_manager_mock.go | 18 +- 
client/daemon/storage/storage_manager.go | 11 +- client/daemon/transport/transport.go | 13 +- client/dfcache/dfcache.go | 35 +- client/dfget/dfget.go | 13 +- cmd/dfcache/cmd/root.go | 2 +- cmd/dfget/cmd/root.go | 2 +- cmd/dfstore/cmd/root.go | 2 +- cmd/manager/cmd/root.go | 2 +- cmd/scheduler/cmd/root.go | 2 +- go.mod | 11 +- go.sum | 16 +- hack/protoc.sh | 22 - internal/dferrors/error.go | 10 +- manager/middlewares/error.go | 5 +- manager/rpcserver/rpcserver.go | 83 +- manager/searcher/mocks/searcher_mock.go | 4 +- manager/searcher/searcher.go | 7 +- manager/searcher/searcher_test.go | 5 +- manager/searcher/testdata/main.go | 5 +- manager/searcher/testdata/plugin/searcher.go | 5 +- pkg/idgen/task_id.go | 9 +- pkg/idgen/task_id_test.go | 12 +- pkg/rpc/base/base.pb.go | 1255 -------- pkg/rpc/base/base.pb.validate.go | 681 ----- pkg/rpc/base/base.proto | 185 -- pkg/rpc/base/mocks/base_mock.go | 5 - pkg/rpc/cdnsystem/cdnsystem.pb.go | 591 ---- pkg/rpc/cdnsystem/cdnsystem.pb.validate.go | 237 -- pkg/rpc/cdnsystem/cdnsystem.proto | 63 - pkg/rpc/cdnsystem/client/client.go | 27 +- pkg/rpc/cdnsystem/client/mocks/client_mock.go | 14 +- pkg/rpc/cdnsystem/client/piece_seed_stream.go | 25 +- pkg/rpc/cdnsystem/mocks/cdnsystem_mock.go | 678 ----- pkg/rpc/client.go | 5 +- pkg/rpc/client_util.go | 5 +- pkg/rpc/{base => }/common/common.go | 18 +- pkg/rpc/dfdaemon/client/client.go | 43 +- pkg/rpc/dfdaemon/client/down_result_stream.go | 25 +- pkg/rpc/dfdaemon/client/mocks/client_mock.go | 22 +- pkg/rpc/dfdaemon/client/peer.go | 19 +- pkg/rpc/dfdaemon/dfdaemon.pb.go | 1275 -------- pkg/rpc/dfdaemon/dfdaemon.pb.validate.go | 654 ---- pkg/rpc/dfdaemon/dfdaemon.proto | 132 - pkg/rpc/dfdaemon/mocks/dfdaemon_mock.go | 854 ------ pkg/rpc/dfdaemon/server/mocks/server_mock.go | 20 +- pkg/rpc/dfdaemon/server/server.go | 45 +- pkg/rpc/errordetails/error_details.pb.go | 178 -- .../errordetails/error_details.pb.validate.go | 111 - pkg/rpc/errordetails/error_details.proto | 30 - 
pkg/rpc/manager/client/client.go | 37 +- pkg/rpc/manager/client/mocks/client_mock.go | 28 +- pkg/rpc/manager/manager.pb.go | 2639 ----------------- pkg/rpc/manager/manager.pb.validate.go | 1990 ------------- pkg/rpc/manager/manager.proto | 321 -- pkg/rpc/manager/mocks/manager_mock.go | 612 ---- pkg/rpc/scheduler/client/client.go | 47 +- pkg/rpc/scheduler/client/mocks/client_mock.go | 20 +- pkg/rpc/scheduler/mocks/scheduler_mock.go | 647 ---- pkg/rpc/scheduler/scheduler.pb.go | 2094 ------------- pkg/rpc/scheduler/scheduler.pb.validate.go | 1384 --------- pkg/rpc/scheduler/scheduler.proto | 257 -- pkg/rpc/server.go | 13 +- pkg/source/source_client.go | 2 +- scheduler/config/dynconfig.go | 7 +- scheduler/config/dynconfig_test.go | 11 +- scheduler/job/job.go | 9 +- scheduler/resource/host.go | 5 +- scheduler/resource/host_manager_test.go | 5 +- scheduler/resource/host_test.go | 31 +- scheduler/resource/peer.go | 9 +- scheduler/resource/peer_manager_test.go | 13 +- scheduler/resource/peer_test.go | 51 +- scheduler/resource/seed_peer.go | 17 +- scheduler/resource/seed_peer_client.go | 7 +- scheduler/resource/seed_peer_client_mock.go | 14 +- scheduler/resource/seed_peer_mock.go | 6 +- scheduler/resource/seed_peer_test.go | 10 +- scheduler/resource/task.go | 29 +- scheduler/resource/task_manager_test.go | 13 +- scheduler/resource/task_test.go | 135 +- scheduler/rpcserver/rpcserver.go | 19 +- scheduler/scheduler.go | 11 +- .../evaluator/evaluator_base_test.go | 23 +- scheduler/scheduler/scheduler.go | 23 +- scheduler/scheduler/scheduler_test.go | 115 +- scheduler/service/service.go | 151 +- scheduler/service/service_test.go | 564 ++-- test/e2e/manager/preheat.go | 5 +- 127 files changed, 1696 insertions(+), 18536 deletions(-) delete mode 100755 hack/protoc.sh delete mode 100644 pkg/rpc/base/base.pb.go delete mode 100644 pkg/rpc/base/base.pb.validate.go delete mode 100644 pkg/rpc/base/base.proto delete mode 100644 pkg/rpc/base/mocks/base_mock.go delete mode 100644 
pkg/rpc/cdnsystem/cdnsystem.pb.go delete mode 100644 pkg/rpc/cdnsystem/cdnsystem.pb.validate.go delete mode 100644 pkg/rpc/cdnsystem/cdnsystem.proto delete mode 100644 pkg/rpc/cdnsystem/mocks/cdnsystem_mock.go rename pkg/rpc/{base => }/common/common.go (74%) delete mode 100644 pkg/rpc/dfdaemon/dfdaemon.pb.go delete mode 100644 pkg/rpc/dfdaemon/dfdaemon.pb.validate.go delete mode 100644 pkg/rpc/dfdaemon/dfdaemon.proto delete mode 100644 pkg/rpc/dfdaemon/mocks/dfdaemon_mock.go delete mode 100644 pkg/rpc/errordetails/error_details.pb.go delete mode 100644 pkg/rpc/errordetails/error_details.pb.validate.go delete mode 100644 pkg/rpc/errordetails/error_details.proto delete mode 100644 pkg/rpc/manager/manager.pb.go delete mode 100644 pkg/rpc/manager/manager.pb.validate.go delete mode 100644 pkg/rpc/manager/manager.proto delete mode 100644 pkg/rpc/manager/mocks/manager_mock.go delete mode 100644 pkg/rpc/scheduler/mocks/scheduler_mock.go delete mode 100644 pkg/rpc/scheduler/scheduler.pb.go delete mode 100644 pkg/rpc/scheduler/scheduler.pb.validate.go delete mode 100644 pkg/rpc/scheduler/scheduler.proto diff --git a/.golangci.yml b/.golangci.yml index a532c29b1..e130b2d97 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -9,6 +9,7 @@ linters-settings: sections: - standard - default + - prefix(d7y.io/api) - prefix(d7y.io/dragonfly/v2) issues: diff --git a/Makefile b/Makefile index c4ad9b14e..77d031271 100644 --- a/Makefile +++ b/Makefile @@ -375,11 +375,6 @@ generate: @go generate ${PKG_LIST} .PHONY: generate -# Generate grpc protos -protoc: - @./hack/protoc.sh -.PHONY: protoc - # Generate swagger files swag: @swag init --parseDependency --parseInternal -g cmd/manager/main.go -o api/manager @@ -441,7 +436,6 @@ help: @echo "make lint run code lint" @echo "make markdownlint run markdown lint" @echo "make generate run go generate" - @echo "make protoc generate grpc protos" @echo "make swag generate swagger api docs" @echo "make changelog generate CHANGELOG.md" @echo "make clean 
clean" diff --git a/client/config/dynconfig.go b/client/config/dynconfig.go index 0b0c0cc74..89dc0525f 100644 --- a/client/config/dynconfig.go +++ b/client/config/dynconfig.go @@ -26,10 +26,11 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + managerv1 "d7y.io/api/pkg/apis/manager/v1" + logger "d7y.io/dragonfly/v2/internal/dflog" internaldynconfig "d7y.io/dragonfly/v2/internal/dynconfig" "d7y.io/dragonfly/v2/manager/searcher" - "d7y.io/dragonfly/v2/pkg/rpc/manager" managerclient "d7y.io/dragonfly/v2/pkg/rpc/manager/client" ) @@ -42,16 +43,16 @@ var ( ) type DynconfigData struct { - Schedulers []*manager.Scheduler - ObjectStorage *manager.ObjectStorage + Schedulers []*managerv1.Scheduler + ObjectStorage *managerv1.ObjectStorage } type Dynconfig interface { // Get the dynamic schedulers config from manager. - GetSchedulers() ([]*manager.Scheduler, error) + GetSchedulers() ([]*managerv1.Scheduler, error) // Get the dynamic object storage config from manager. - GetObjectStorage() (*manager.ObjectStorage, error) + GetObjectStorage() (*managerv1.ObjectStorage, error) // Get the dynamic config from manager. 
Get() (*DynconfigData, error) @@ -104,7 +105,7 @@ func NewDynconfig(rawManagerClient managerclient.Client, cacheDir string, hostOp }, nil } -func (d *dynconfig) GetSchedulers() ([]*manager.Scheduler, error) { +func (d *dynconfig) GetSchedulers() ([]*managerv1.Scheduler, error) { data, err := d.Get() if err != nil { return nil, err @@ -113,7 +114,7 @@ func (d *dynconfig) GetSchedulers() ([]*manager.Scheduler, error) { return data.Schedulers, nil } -func (d *dynconfig) GetObjectStorage() (*manager.ObjectStorage, error) { +func (d *dynconfig) GetObjectStorage() (*managerv1.ObjectStorage, error) { data, err := d.Get() if err != nil { return nil, err @@ -200,8 +201,8 @@ func newManagerClient(client managerclient.Client, hostOption HostOption) intern } func (mc *managerClient) Get() (any, error) { - listSchedulersResp, err := mc.ListSchedulers(&manager.ListSchedulersRequest{ - SourceType: manager.SourceType_PEER_SOURCE, + listSchedulersResp, err := mc.ListSchedulers(&managerv1.ListSchedulersRequest{ + SourceType: managerv1.SourceType_PEER_SOURCE, HostName: mc.hostOption.Hostname, Ip: mc.hostOption.AdvertiseIP, HostInfo: map[string]string{ @@ -215,8 +216,8 @@ func (mc *managerClient) Get() (any, error) { return nil, err } - getObjectStorageResp, err := mc.GetObjectStorage(&manager.GetObjectStorageRequest{ - SourceType: manager.SourceType_PEER_SOURCE, + getObjectStorageResp, err := mc.GetObjectStorage(&managerv1.GetObjectStorageRequest{ + SourceType: managerv1.SourceType_PEER_SOURCE, HostName: mc.hostOption.Hostname, Ip: mc.hostOption.AdvertiseIP, }) diff --git a/client/config/dynconfig_test.go b/client/config/dynconfig_test.go index 00b46e4c2..e42990e3d 100644 --- a/client/config/dynconfig_test.go +++ b/client/config/dynconfig_test.go @@ -28,7 +28,8 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "d7y.io/dragonfly/v2/pkg/rpc/manager" + managerv1 "d7y.io/api/pkg/apis/manager/v1" + "d7y.io/dragonfly/v2/pkg/rpc/manager/client/mocks" ) @@ -57,8 
+58,8 @@ func TestDynconfigNewDynconfig(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1), ) }, expect: func(t *testing.T, err error) { @@ -77,8 +78,8 @@ func TestDynconfigNewDynconfig(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1), ) }, expect: func(t *testing.T, err error) { @@ -106,7 +107,7 @@ func TestDynconfigNewDynconfig(t *testing.T) { cleanFileCache: func(t *testing.T) {}, mock: func(m *mocks.MockClientMockRecorder) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), m.GetObjectStorage(gomock.Any()).Return(nil, errors.New("foo")).Times(1), ) }, @@ -126,7 +127,7 @@ func TestDynconfigNewDynconfig(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), m.GetObjectStorage(gomock.Any()).Return(nil, status.Error(codes.NotFound, "")).Times(1), ) }, @@ -185,12 +186,12 @@ func TestDynconfigGet(t *testing.T) { Hostname: "foo", }, data: &DynconfigData{ - Schedulers: 
[]*manager.Scheduler{ + Schedulers: []*managerv1.Scheduler{ { HostName: "foo", }, }, - ObjectStorage: &manager.ObjectStorage{ + ObjectStorage: &managerv1.ObjectStorage{ Name: "foo", }, }, @@ -202,14 +203,14 @@ func TestDynconfigGet(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{ - Schedulers: []*manager.Scheduler{ + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{ + Schedulers: []*managerv1.Scheduler{ { HostName: data.Schedulers[0].HostName, }, }, }, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{ + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{ Name: data.ObjectStorage.Name, }, nil).Times(1), ) @@ -228,12 +229,12 @@ func TestDynconfigGet(t *testing.T) { Hostname: "foo", }, data: &DynconfigData{ - Schedulers: []*manager.Scheduler{ + Schedulers: []*managerv1.Scheduler{ { HostName: "foo", }, }, - ObjectStorage: &manager.ObjectStorage{ + ObjectStorage: &managerv1.ObjectStorage{ Name: "foo", }, }, @@ -247,16 +248,16 @@ func TestDynconfigGet(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1), - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{ - Schedulers: []*manager.Scheduler{ + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{ + Schedulers: []*managerv1.Scheduler{ { HostName: data.Schedulers[0].HostName, }, }, }, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{ + 
m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{ Name: data.ObjectStorage.Name, }, nil).Times(1), ) @@ -275,12 +276,12 @@ func TestDynconfigGet(t *testing.T) { Hostname: "foo", }, data: &DynconfigData{ - Schedulers: []*manager.Scheduler{ + Schedulers: []*managerv1.Scheduler{ { HostName: "foo", }, }, - ObjectStorage: &manager.ObjectStorage{ + ObjectStorage: &managerv1.ObjectStorage{ Name: "foo", }, }, @@ -294,14 +295,14 @@ func TestDynconfigGet(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{ - Schedulers: []*manager.Scheduler{ + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{ + Schedulers: []*managerv1.Scheduler{ { HostName: data.Schedulers[0].HostName, }, }, }, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{ + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{ Name: data.ObjectStorage.Name, }, nil).Times(1), m.ListSchedulers(gomock.Any()).Return(nil, errors.New("foo")).Times(1), @@ -321,12 +322,12 @@ func TestDynconfigGet(t *testing.T) { Hostname: "foo", }, data: &DynconfigData{ - Schedulers: []*manager.Scheduler{ + Schedulers: []*managerv1.Scheduler{ { HostName: "foo", }, }, - ObjectStorage: &manager.ObjectStorage{ + ObjectStorage: &managerv1.ObjectStorage{ Name: "foo", }, }, @@ -340,17 +341,17 @@ func TestDynconfigGet(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{ - Schedulers: []*manager.Scheduler{ + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{ + Schedulers: []*managerv1.Scheduler{ { HostName: data.Schedulers[0].HostName, }, }, }, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{ + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{ Name: 
data.ObjectStorage.Name, }, nil).Times(1), - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), m.GetObjectStorage(gomock.Any()).Return(nil, errors.New("foo")).Times(1), ) }, @@ -368,12 +369,12 @@ func TestDynconfigGet(t *testing.T) { Hostname: "foo", }, data: &DynconfigData{ - Schedulers: []*manager.Scheduler{ + Schedulers: []*managerv1.Scheduler{ { HostName: "foo", }, }, - ObjectStorage: &manager.ObjectStorage{ + ObjectStorage: &managerv1.ObjectStorage{ Name: "foo", }, }, @@ -387,18 +388,18 @@ func TestDynconfigGet(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{ - Schedulers: []*manager.Scheduler{ + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{ + Schedulers: []*managerv1.Scheduler{ { HostName: data.Schedulers[0].HostName, }, }, }, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{ + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{ Name: data.ObjectStorage.Name, }, nil).Times(1), - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{ - Schedulers: []*manager.Scheduler{ + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{ + Schedulers: []*managerv1.Scheduler{ { HostName: data.Schedulers[0].HostName, }, @@ -412,7 +413,7 @@ func TestDynconfigGet(t *testing.T) { result, err := dynconfig.Get() assert.NoError(err) assert.EqualValues(result, &DynconfigData{ - Schedulers: []*manager.Scheduler{ + Schedulers: []*managerv1.Scheduler{ { HostName: data.Schedulers[0].HostName, }, @@ -427,8 +428,8 @@ func TestDynconfigGet(t *testing.T) { Hostname: "foo", }, data: &DynconfigData{ - Schedulers: []*manager.Scheduler(nil), - ObjectStorage: &manager.ObjectStorage{}, + Schedulers: []*managerv1.Scheduler(nil), + 
ObjectStorage: &managerv1.ObjectStorage{}, }, sleep: func() { time.Sleep(100 * time.Millisecond) @@ -440,10 +441,10 @@ func TestDynconfigGet(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1), - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1), ) }, expect: func(t *testing.T, dynconfig Dynconfig, data *DynconfigData) { @@ -495,7 +496,7 @@ func TestDynconfigGetSchedulers(t *testing.T) { Hostname: "foo", }, data: &DynconfigData{ - Schedulers: []*manager.Scheduler{ + Schedulers: []*managerv1.Scheduler{ { HostName: "foo", }, @@ -509,14 +510,14 @@ func TestDynconfigGetSchedulers(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{ - Schedulers: []*manager.Scheduler{ + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{ + Schedulers: []*managerv1.Scheduler{ { HostName: data.Schedulers[0].HostName, }, }, }, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1), ) }, expect: func(t *testing.T, dynconfig Dynconfig, data *DynconfigData) { @@ -533,7 +534,7 @@ func TestDynconfigGetSchedulers(t *testing.T) { Hostname: "foo", }, 
data: &DynconfigData{ - Schedulers: []*manager.Scheduler{ + Schedulers: []*managerv1.Scheduler{ { HostName: "foo", }, @@ -549,16 +550,16 @@ func TestDynconfigGetSchedulers(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1), - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{ - Schedulers: []*manager.Scheduler{ + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{ + Schedulers: []*managerv1.Scheduler{ { HostName: data.Schedulers[0].HostName, }, }, }, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1), ) }, expect: func(t *testing.T, dynconfig Dynconfig, data *DynconfigData) { @@ -575,7 +576,7 @@ func TestDynconfigGetSchedulers(t *testing.T) { Hostname: "foo", }, data: &DynconfigData{ - Schedulers: []*manager.Scheduler{ + Schedulers: []*managerv1.Scheduler{ { HostName: "foo", }, @@ -591,14 +592,14 @@ func TestDynconfigGetSchedulers(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{ - Schedulers: []*manager.Scheduler{ + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{ + Schedulers: []*managerv1.Scheduler{ { HostName: data.Schedulers[0].HostName, }, }, }, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1), 
m.ListSchedulers(gomock.Any()).Return(nil, errors.New("foo")).Times(1), ) }, @@ -616,7 +617,7 @@ func TestDynconfigGetSchedulers(t *testing.T) { Hostname: "foo", }, data: &DynconfigData{ - Schedulers: []*manager.Scheduler(nil), + Schedulers: []*managerv1.Scheduler(nil), }, sleep: func() { time.Sleep(100 * time.Millisecond) @@ -628,10 +629,10 @@ func TestDynconfigGetSchedulers(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1), - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1), ) }, expect: func(t *testing.T, dynconfig Dynconfig, data *DynconfigData) { @@ -683,7 +684,7 @@ func TestDynconfigGetObjectStorage(t *testing.T) { Hostname: "foo", }, data: &DynconfigData{ - ObjectStorage: &manager.ObjectStorage{ + ObjectStorage: &managerv1.ObjectStorage{ Name: "foo", }, }, @@ -695,8 +696,8 @@ func TestDynconfigGetObjectStorage(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{ + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{ Name: data.ObjectStorage.Name, }, nil).Times(1), ) @@ 
-715,7 +716,7 @@ func TestDynconfigGetObjectStorage(t *testing.T) { Hostname: "foo", }, data: &DynconfigData{ - ObjectStorage: &manager.ObjectStorage{ + ObjectStorage: &managerv1.ObjectStorage{ Name: "foo", }, }, @@ -729,10 +730,10 @@ func TestDynconfigGetObjectStorage(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1), - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{ + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{ Name: data.ObjectStorage.Name, }, nil).Times(1), ) @@ -751,7 +752,7 @@ func TestDynconfigGetObjectStorage(t *testing.T) { Hostname: "foo", }, data: &DynconfigData{ - ObjectStorage: &manager.ObjectStorage{ + ObjectStorage: &managerv1.ObjectStorage{ Name: "foo", }, }, @@ -765,11 +766,11 @@ func TestDynconfigGetObjectStorage(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{ + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{ Name: data.ObjectStorage.Name, }, nil).Times(1), - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, 
nil).Times(1), m.GetObjectStorage(gomock.Any()).Return(nil, errors.New("foo")).Times(1), ) }, @@ -787,7 +788,7 @@ func TestDynconfigGetObjectStorage(t *testing.T) { Hostname: "foo", }, data: &DynconfigData{ - ObjectStorage: &manager.ObjectStorage{ + ObjectStorage: &managerv1.ObjectStorage{ Name: "foo", }, }, @@ -801,11 +802,11 @@ func TestDynconfigGetObjectStorage(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{ + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{ Name: data.ObjectStorage.Name, }, nil).Times(1), - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), m.GetObjectStorage(gomock.Any()).Return(nil, status.Error(codes.NotFound, "")).Times(1), ) }, @@ -813,7 +814,7 @@ func TestDynconfigGetObjectStorage(t *testing.T) { assert := assert.New(t) result, err := dynconfig.GetObjectStorage() assert.NoError(err) - assert.EqualValues(result, (*manager.ObjectStorage)(nil)) + assert.EqualValues(result, (*managerv1.ObjectStorage)(nil)) }, }, { @@ -823,7 +824,7 @@ func TestDynconfigGetObjectStorage(t *testing.T) { Hostname: "foo", }, data: &DynconfigData{ - ObjectStorage: &manager.ObjectStorage{}, + ObjectStorage: &managerv1.ObjectStorage{}, }, sleep: func() { time.Sleep(100 * time.Millisecond) @@ -835,10 +836,10 @@ func TestDynconfigGetObjectStorage(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) { gomock.InOrder( - m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1), - 
m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1), - m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1), + m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1), + m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1), ) }, expect: func(t *testing.T, dynconfig Dynconfig, data *DynconfigData) { diff --git a/client/config/mocks/dynconfig_mock.go b/client/config/mocks/dynconfig_mock.go index 37a4c8acb..32130b96c 100644 --- a/client/config/mocks/dynconfig_mock.go +++ b/client/config/mocks/dynconfig_mock.go @@ -7,8 +7,8 @@ package mocks import ( reflect "reflect" + v1 "d7y.io/api/pkg/apis/manager/v1" config "d7y.io/dragonfly/v2/client/config" - manager "d7y.io/dragonfly/v2/pkg/rpc/manager" gomock "github.com/golang/mock/gomock" ) @@ -63,10 +63,10 @@ func (mr *MockDynconfigMockRecorder) Get() *gomock.Call { } // GetObjectStorage mocks base method. -func (m *MockDynconfig) GetObjectStorage() (*manager.ObjectStorage, error) { +func (m *MockDynconfig) GetObjectStorage() (*v1.ObjectStorage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetObjectStorage") - ret0, _ := ret[0].(*manager.ObjectStorage) + ret0, _ := ret[0].(*v1.ObjectStorage) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -78,10 +78,10 @@ func (mr *MockDynconfigMockRecorder) GetObjectStorage() *gomock.Call { } // GetSchedulers mocks base method. 
-func (m *MockDynconfig) GetSchedulers() ([]*manager.Scheduler, error) { +func (m *MockDynconfig) GetSchedulers() ([]*v1.Scheduler, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetSchedulers") - ret0, _ := ret[0].([]*manager.Scheduler) + ret0, _ := ret[0].([]*v1.Scheduler) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/client/config/peerhost.go b/client/config/peerhost.go index d4cddb5c0..5834e2cef 100644 --- a/client/config/peerhost.go +++ b/client/config/peerhost.go @@ -33,12 +33,13 @@ import ( "gopkg.in/yaml.v3" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/client/util" "d7y.io/dragonfly/v2/cmd/dependency/base" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/dfnet" netip "d7y.io/dragonfly/v2/pkg/net/ip" - rpcbase "d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/pkg/unit" ) @@ -156,18 +157,18 @@ func (p *DaemonOption) Validate() error { return nil } -func ConvertPattern(p string, defaultPattern rpcbase.Pattern) rpcbase.Pattern { +func ConvertPattern(p string, defaultPattern commonv1.Pattern) commonv1.Pattern { switch p { case PatternP2P: - return rpcbase.Pattern_P2P + return commonv1.Pattern_P2P case PatternSeedPeer: - return rpcbase.Pattern_SEED_PEER + return commonv1.Pattern_SEED_PEER case PatternSource: - return rpcbase.Pattern_SOURCE + return commonv1.Pattern_SOURCE case "": return defaultPattern } - logger.Warnf("unknown pattern, use default pattern: %s", rpcbase.Pattern_name[int32(defaultPattern)]) + logger.Warnf("unknown pattern, use default pattern: %s", commonv1.Pattern_name[int32(defaultPattern)]) return defaultPattern } diff --git a/client/daemon/daemon.go b/client/daemon/daemon.go index a7deb353f..e75675e23 100644 --- a/client/daemon/daemon.go +++ b/client/daemon/daemon.go @@ -37,6 +37,10 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" + commonv1 "d7y.io/api/pkg/apis/common/v1" + managerv1 "d7y.io/api/pkg/apis/manager/v1" + schedulerv1 
"d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/gc" "d7y.io/dragonfly/v2/client/daemon/metrics" @@ -54,10 +58,7 @@ import ( "d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/reachable" "d7y.io/dragonfly/v2/pkg/rpc" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/manager" managerclient "d7y.io/dragonfly/v2/pkg/rpc/manager/client" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client" "d7y.io/dragonfly/v2/pkg/source" ) @@ -68,15 +69,15 @@ type Daemon interface { // ExportTaskManager returns the underlay peer.TaskManager for downloading when embed dragonfly in custom binary ExportTaskManager() peer.TaskManager - // ExportPeerHost returns the underlay scheduler.PeerHost for scheduling - ExportPeerHost() *scheduler.PeerHost + // ExportPeerHost returns the underlay schedulerv1.PeerHost for scheduling + ExportPeerHost() *schedulerv1.PeerHost } type clientDaemon struct { once *sync.Once done chan bool - schedPeerHost *scheduler.PeerHost + schedPeerHost *schedulerv1.PeerHost Option config.DaemonOption @@ -92,7 +93,7 @@ type clientDaemon struct { dynconfig config.Dynconfig dfpath dfpath.Dfpath - schedulers []*manager.Scheduler + schedulers []*managerv1.Scheduler managerClient managerclient.Client schedulerClient schedulerclient.Client } @@ -101,7 +102,7 @@ func New(opt *config.DaemonOption, d dfpath.Dfpath) (Daemon, error) { // update plugin directory source.UpdatePluginDir(d.PluginDir()) - host := &scheduler.PeerHost{ + host := &schedulerv1.PeerHost{ Id: idgen.HostID(opt.Host.Hostname, int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start)), Ip: opt.Host.AdvertiseIP, RpcPort: int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start), @@ -115,10 +116,10 @@ func New(opt *config.DaemonOption, d dfpath.Dfpath) (Daemon, error) { var ( addrs []dfnet.NetAddr - schedulers []*manager.Scheduler + schedulers []*managerv1.Scheduler dynconfig 
config.Dynconfig managerClient managerclient.Client - defaultPattern = config.ConvertPattern(opt.Download.DefaultPattern, base.Pattern_P2P) + defaultPattern = config.ConvertPattern(opt.Download.DefaultPattern, commonv1.Pattern_P2P) ) if opt.Scheduler.Manager.Enable { @@ -162,7 +163,7 @@ func New(opt *config.DaemonOption, d dfpath.Dfpath) (Daemon, error) { // Storage.Option.DataPath is same with Daemon DataDir opt.Storage.DataPath = d.DataDir() gcCallback := func(request storage.CommonTaskRequest) { - er := sched.LeaveTask(context.Background(), &scheduler.PeerTarget{ + er := sched.LeaveTask(context.Background(), &schedulerv1.PeerTarget{ TaskId: request.TaskID, PeerId: request.PeerID, }) @@ -524,8 +525,8 @@ func (cd *clientDaemon) Serve() error { g.Go(func() error { logger.Info("keepalive to manager") - cd.managerClient.KeepAlive(cd.Option.Scheduler.Manager.SeedPeer.KeepAlive.Interval, &manager.KeepAliveRequest{ - SourceType: manager.SourceType_SEED_PEER_SOURCE, + cd.managerClient.KeepAlive(cd.Option.Scheduler.Manager.SeedPeer.KeepAlive.Interval, &managerv1.KeepAliveRequest{ + SourceType: managerv1.SourceType_SEED_PEER_SOURCE, HostName: cd.Option.Host.Hostname, Ip: cd.Option.Host.AdvertiseIP, ClusterId: uint64(cd.Option.Scheduler.Manager.SeedPeer.ClusterID), @@ -564,7 +565,7 @@ func (cd *clientDaemon) Serve() error { } // serve dynconfig service - if cd.dynconfig != nil { + if cd.Option.Scheduler.Manager.Enable { // dynconfig register client daemon cd.dynconfig.Register(cd) @@ -662,7 +663,7 @@ func (cd *clientDaemon) Stop() { cd.StorageManager.CleanUp() } - if cd.dynconfig != nil { + if cd.Option.Scheduler.Manager.Enable { if err := cd.dynconfig.Stop(); err != nil { logger.Errorf("dynconfig client closed failed %s", err) } @@ -697,7 +698,7 @@ func (cd *clientDaemon) OnNotify(data *config.DynconfigData) { } // getSchedulerIPs gets ips by schedulers. 
-func getSchedulerIPs(schedulers []*manager.Scheduler) []string { +func getSchedulerIPs(schedulers []*managerv1.Scheduler) []string { ips := []string{} for _, scheduler := range schedulers { ips = append(ips, scheduler.Ip) @@ -706,8 +707,8 @@ func getSchedulerIPs(schedulers []*manager.Scheduler) []string { return ips } -// schedulersToAvailableNetAddrs coverts []*manager.Scheduler to available []dfnet.NetAddr. -func schedulersToAvailableNetAddrs(schedulers []*manager.Scheduler) []dfnet.NetAddr { +// schedulersToAvailableNetAddrs coverts []*managerv1.Scheduler to available []dfnet.NetAddr. +func schedulersToAvailableNetAddrs(schedulers []*managerv1.Scheduler) []dfnet.NetAddr { var schedulerClusterID uint64 netAddrs := make([]dfnet.NetAddr, 0, len(schedulers)) for _, scheduler := range schedulers { @@ -752,8 +753,8 @@ func (cd *clientDaemon) announceSeedPeer() error { objectStoragePort = int32(cd.Option.ObjectStorage.TCPListen.PortRange.Start) } - if _, err := cd.managerClient.UpdateSeedPeer(&manager.UpdateSeedPeerRequest{ - SourceType: manager.SourceType_SEED_PEER_SOURCE, + if _, err := cd.managerClient.UpdateSeedPeer(&managerv1.UpdateSeedPeerRequest{ + SourceType: managerv1.SourceType_SEED_PEER_SOURCE, HostName: cd.Option.Host.Hostname, Type: cd.Option.Scheduler.Manager.SeedPeer.Type, Idc: cd.Option.Host.IDC, @@ -775,6 +776,6 @@ func (cd *clientDaemon) ExportTaskManager() peer.TaskManager { return cd.PeerTaskManager } -func (cd *clientDaemon) ExportPeerHost() *scheduler.PeerHost { +func (cd *clientDaemon) ExportPeerHost() *schedulerv1.PeerHost { return cd.schedPeerHost } diff --git a/client/daemon/daemon_test.go b/client/daemon/daemon_test.go index 96a40294f..01cdcc1cb 100644 --- a/client/daemon/daemon_test.go +++ b/client/daemon/daemon_test.go @@ -22,8 +22,9 @@ import ( "github.com/stretchr/testify/assert" + managerv1 "d7y.io/api/pkg/apis/manager/v1" + "d7y.io/dragonfly/v2/pkg/dfnet" - "d7y.io/dragonfly/v2/pkg/rpc/manager" ) func 
TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) { @@ -35,12 +36,12 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) { tests := []struct { name string - schedulers []*manager.Scheduler + schedulers []*managerv1.Scheduler expect func(t *testing.T, addrs []dfnet.NetAddr) }{ { name: "available ip", - schedulers: []*manager.Scheduler{ + schedulers: []*managerv1.Scheduler{ { Ip: "127.0.0.1", Port: int32(3000), @@ -59,7 +60,7 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) { }, { name: "available host", - schedulers: []*manager.Scheduler{ + schedulers: []*managerv1.Scheduler{ { Ip: "foo", HostName: "localhost", @@ -80,7 +81,7 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) { }, { name: "available ip and host", - schedulers: []*manager.Scheduler{ + schedulers: []*managerv1.Scheduler{ { Ip: "foo", HostName: "localhost", @@ -122,7 +123,7 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) { }, { name: "unreachable", - schedulers: []*manager.Scheduler{ + schedulers: []*managerv1.Scheduler{ { Ip: "foo", HostName: "localhost", @@ -143,7 +144,7 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) { }, { name: "empty schedulers", - schedulers: []*manager.Scheduler{}, + schedulers: []*managerv1.Scheduler{}, expect: func(t *testing.T, addrs []dfnet.NetAddr) { assert := assert.New(t) assert.EqualValues(addrs, []dfnet.NetAddr{}) @@ -151,7 +152,7 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) { }, { name: "available ip with different scheduler cluster", - schedulers: []*manager.Scheduler{ + schedulers: []*managerv1.Scheduler{ { Ip: "127.0.0.1", HostName: "foo", @@ -181,7 +182,7 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) { }, { name: "available host with different scheduler cluster", - schedulers: []*manager.Scheduler{ + schedulers: []*managerv1.Scheduler{ { Ip: "127.0.0.1", HostName: "foo", @@ -211,7 +212,7 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) { }, { name: 
"available host and ip with different scheduler cluster", - schedulers: []*manager.Scheduler{ + schedulers: []*managerv1.Scheduler{ { Ip: "foo", HostName: "localhost", diff --git a/client/daemon/objectstorage/objectstorage.go b/client/daemon/objectstorage/objectstorage.go index b12cf8cd5..f36ff2787 100644 --- a/client/daemon/objectstorage/objectstorage.go +++ b/client/daemon/objectstorage/objectstorage.go @@ -40,6 +40,8 @@ import ( ginprometheus "github.com/mcuadros/go-gin-prometheus" "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/peer" "d7y.io/dragonfly/v2/client/daemon/storage" @@ -48,7 +50,6 @@ import ( "d7y.io/dragonfly/v2/pkg/digest" "d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/objectstorage" - "d7y.io/dragonfly/v2/pkg/rpc/base" pkgstrings "d7y.io/dragonfly/v2/pkg/strings" ) @@ -251,7 +252,7 @@ func (o *objectStorage) getObject(ctx *gin.Context) { ) // Initialize filter field. - urlMeta := &base.UrlMeta{Filter: o.config.ObjectStorage.Filter} + urlMeta := &commonv1.UrlMeta{Filter: o.config.ObjectStorage.Filter} if filter != "" { urlMeta.Filter = filter } @@ -387,7 +388,7 @@ func (o *objectStorage) putObject(ctx *gin.Context) { } // Initialize url meta. 
- urlMeta := &base.UrlMeta{Filter: o.config.ObjectStorage.Filter} + urlMeta := &commonv1.UrlMeta{Filter: o.config.ObjectStorage.Filter} dgst := o.md5FromFileHeader(fileHeader) urlMeta.Digest = dgst.String() if filter != "" { @@ -419,7 +420,7 @@ func (o *objectStorage) putObject(ctx *gin.Context) { if err := o.peerTaskManager.AnnouncePeerTask(ctx, storage.PeerTaskMetadata{ TaskID: taskID, PeerID: peerID, - }, signURL, base.TaskType_DfStore, urlMeta); err != nil { + }, signURL, commonv1.TaskType_DfStore, urlMeta); err != nil { log.Error(err) ctx.JSON(http.StatusInternalServerError, gin.H{"errors": err.Error()}) return diff --git a/client/daemon/peer/peertask_conductor.go b/client/daemon/peer/peertask_conductor.go index de12b816a..cd987f098 100644 --- a/client/daemon/peer/peertask_conductor.go +++ b/client/daemon/peer/peertask_conductor.go @@ -33,6 +33,10 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + commonv1 "d7y.io/api/pkg/apis/common/v1" + errordetailsv1 "d7y.io/api/pkg/apis/errordetails/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/metrics" "d7y.io/dragonfly/v2/client/daemon/storage" @@ -41,9 +45,6 @@ import ( logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/digest" "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/errordetails" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client" "d7y.io/dragonfly/v2/pkg/source" ) @@ -76,9 +77,9 @@ type peerTaskConductor struct { pieceDownloadCancel context.CancelFunc // host info about current host - host *scheduler.PeerHost + host *schedulerv1.PeerHost // request is the original PeerTaskRequest - request *scheduler.PeerTaskRequest + request *schedulerv1.PeerTaskRequest // needBackSource indicates downloading resource from instead of other peers needBackSource *atomic.Bool @@ -107,15 +108,15 @@ type 
peerTaskConductor struct { broker *pieceBroker - sizeScope base.SizeScope - singlePiece *scheduler.SinglePiece + sizeScope commonv1.SizeScope + singlePiece *schedulerv1.SinglePiece tinyData *TinyData // peerPacketStream stands schedulerclient.PeerPacketStream from scheduler - peerPacketStream scheduler.Scheduler_ReportPieceResultClient + peerPacketStream schedulerv1.Scheduler_ReportPieceResultClient // peerPacket is the latest available peers from peerPacketCh // Deprecated: remove in future release - peerPacket atomic.Value // *scheduler.PeerPacket + peerPacket atomic.Value // *schedulerv1.PeerPacket legacyPeerCount *atomic.Int64 // peerPacketReady will receive a ready signal for peerPacket ready peerPacketReady chan bool @@ -141,7 +142,7 @@ type peerTaskConductor struct { // failedReason will be set when peer task failed failedReason string // failedReason will be set when peer task failed - failedCode base.Code + failedCode commonv1.Code // readyPieces stands all downloaded pieces readyPieces *Bitmap @@ -171,7 +172,7 @@ type peerTaskConductor struct { func (ptm *peerTaskManager) newPeerTaskConductor( ctx context.Context, - request *scheduler.PeerTaskRequest, + request *schedulerv1.PeerTaskRequest, limit rate.Limit, parent *peerTaskConductor, rg *util.Range, @@ -229,7 +230,7 @@ func (ptm *peerTaskManager) newPeerTaskConductor( requestedPieces: NewBitmap(), failedPieceCh: make(chan int32, config.DefaultPieceChanSize), failedReason: failedReasonNotSet, - failedCode: base.Code_UnknownError, + failedCode: commonv1.Code_UnknownError, contentLength: atomic.NewInt64(-1), totalPiece: atomic.NewInt32(-1), digest: atomic.NewString(""), @@ -265,8 +266,8 @@ func (pt *peerTaskConductor) register() error { var ( needBackSource bool - sizeScope base.SizeScope - singlePiece *scheduler.SinglePiece + sizeScope commonv1.SizeScope + singlePiece *schedulerv1.SinglePiece tinyData *TinyData ) @@ -287,35 +288,35 @@ func (pt *peerTaskConductor) register() error { pt.peerPacketStream = 
&dummyPeerPacketStream{} pt.Errorf("register peer task failed: %s, peer id: %s, auto back source disabled", err, pt.request.PeerId) pt.span.RecordError(err) - pt.cancel(base.Code_SchedError, err.Error()) + pt.cancel(commonv1.Code_SchedError, err.Error()) return err } needBackSource = true // can not detect source or scheduler error, create a new dummy scheduler client pt.schedulerClient = &dummySchedulerClient{} - result = &scheduler.RegisterResult{TaskId: pt.taskID} + result = &schedulerv1.RegisterResult{TaskId: pt.taskID} pt.Warnf("register peer task failed: %s, peer id: %s, try to back source", err, pt.request.PeerId) } else { - pt.Infof("register task success, SizeScope: %s", base.SizeScope_name[int32(result.SizeScope)]) + pt.Infof("register task success, SizeScope: %s", commonv1.SizeScope_name[int32(result.SizeScope)]) } var header map[string]string if !needBackSource { sizeScope = result.SizeScope switch result.SizeScope { - case base.SizeScope_NORMAL: + case commonv1.SizeScope_NORMAL: pt.span.SetAttributes(config.AttributePeerTaskSizeScope.String("normal")) - case base.SizeScope_SMALL: + case commonv1.SizeScope_SMALL: pt.span.SetAttributes(config.AttributePeerTaskSizeScope.String("small")) - if piece, ok := result.DirectPiece.(*scheduler.RegisterResult_SinglePiece); ok { + if piece, ok := result.DirectPiece.(*schedulerv1.RegisterResult_SinglePiece); ok { singlePiece = piece.SinglePiece } if result.ExtendAttribute != nil { header = result.ExtendAttribute.Header } - case base.SizeScope_TINY: + case commonv1.SizeScope_TINY: pt.span.SetAttributes(config.AttributePeerTaskSizeScope.String("tiny")) - if piece, ok := result.DirectPiece.(*scheduler.RegisterResult_PieceContent); ok { + if piece, ok := result.DirectPiece.(*schedulerv1.RegisterResult_PieceContent); ok { tinyData = &TinyData{ TaskID: result.TaskId, PeerID: pt.request.PeerId, @@ -327,7 +328,7 @@ func (pt *peerTaskConductor) register() error { pt.peerPacketStream = &dummyPeerPacketStream{} 
pt.span.RecordError(err) pt.Errorf("%s", err) - pt.cancel(base.Code_SchedError, err.Error()) + pt.cancel(commonv1.Code_SchedError, err.Error()) return err } if result.ExtendAttribute != nil { @@ -342,7 +343,7 @@ func (pt *peerTaskConductor) register() error { // when peer register failed, some actions need to do with peerPacketStream pt.peerPacketStream = &dummyPeerPacketStream{} pt.span.RecordError(err) - pt.cancel(base.Code_SchedError, err.Error()) + pt.cancel(commonv1.Code_SchedError, err.Error()) return err } @@ -363,7 +364,7 @@ func (pt *peerTaskConductor) start() error { if pt.seed { pt.peerPacketStream = &dummyPeerPacketStream{} pt.schedulerClient = &dummySchedulerClient{} - pt.sizeScope = base.SizeScope_NORMAL + pt.sizeScope = commonv1.SizeScope_NORMAL pt.needBackSource = atomic.NewBool(true) } else { // register to scheduler @@ -449,7 +450,7 @@ func (pt *peerTaskConductor) UpdateSourceErrorStatus(st *status.Status) { pt.sourceErrorStatus = st } -func (pt *peerTaskConductor) cancel(code base.Code, reason string) { +func (pt *peerTaskConductor) cancel(code commonv1.Code, reason string) { pt.statusOnce.Do(func() { pt.failedCode = code pt.failedReason = reason @@ -463,19 +464,19 @@ func (pt *peerTaskConductor) markBackSource() { // when close peerPacketReady, pullPiecesFromPeers will invoke backSource close(pt.peerPacketReady) // let legacy mode exit - pt.peerPacket.Store(&scheduler.PeerPacket{ + pt.peerPacket.Store(&schedulerv1.PeerPacket{ TaskId: pt.taskID, SrcPid: pt.peerID, ParallelCount: 1, MainPeer: nil, - CandidatePeers: []*scheduler.PeerPacket_DestPeer{ + CandidatePeers: []*schedulerv1.PeerPacket_DestPeer{ { Ip: pt.host.Ip, RpcPort: pt.host.RpcPort, PeerId: pt.peerID, }, }, - Code: base.Code_SchedNeedBackSource, + Code: commonv1.Code_SchedNeedBackSource, }) } @@ -501,9 +502,9 @@ func (pt *peerTaskConductor) backSource() { span.SetAttributes(config.AttributePeerTaskSuccess.Bool(false)) span.RecordError(err) if isBackSourceError(err) { - 
pt.cancel(base.Code_ClientBackSourceError, err.Error()) + pt.cancel(commonv1.Code_ClientBackSourceError, err.Error()) } else { - pt.cancel(base.Code_ClientError, err.Error()) + pt.cancel(commonv1.Code_ClientError, err.Error()) } span.End() return @@ -521,14 +522,14 @@ func (pt *peerTaskConductor) pullPieces() { return } switch pt.sizeScope { - case base.SizeScope_TINY: + case commonv1.SizeScope_TINY: pt.storeTinyPeerTask() - case base.SizeScope_SMALL: + case commonv1.SizeScope_SMALL: pt.pullSinglePiece() - case base.SizeScope_NORMAL: + case commonv1.SizeScope_NORMAL: pt.pullPiecesWithP2P() default: - pt.cancel(base.Code_ClientError, fmt.Sprintf("unknown size scope: %d", pt.sizeScope)) + pt.cancel(commonv1.Code_ClientError, fmt.Sprintf("unknown size scope: %d", pt.sizeScope)) } } @@ -571,7 +572,7 @@ func (pt *peerTaskConductor) storeTinyPeerTask() { pt.storage = storageDriver if err != nil { pt.Errorf("register tiny data storage failed: %s", err) - pt.cancel(base.Code_ClientError, err.Error()) + pt.cancel(commonv1.Code_ClientError, err.Error()) return } n, err := pt.GetStorage().WritePiece(ctx, @@ -598,19 +599,19 @@ func (pt *peerTaskConductor) storeTinyPeerTask() { }) if err != nil { pt.Errorf("write tiny data storage failed: %s", err) - pt.cancel(base.Code_ClientError, err.Error()) + pt.cancel(commonv1.Code_ClientError, err.Error()) return } if n != contentLength { pt.Errorf("write tiny data storage failed, want: %d, wrote: %d", contentLength, n) - pt.cancel(base.Code_ClientError, err.Error()) + pt.cancel(commonv1.Code_ClientError, err.Error()) return } err = pt.UpdateStorage() if err != nil { pt.Errorf("update tiny data storage failed: %s", err) - pt.cancel(base.Code_ClientError, err.Error()) + pt.cancel(commonv1.Code_ClientError, err.Error()) return } @@ -621,7 +622,7 @@ func (pt *peerTaskConductor) storeTinyPeerTask() { func (pt *peerTaskConductor) receivePeerPacket(pieceRequestCh chan *DownloadPieceRequest) { var ( lastNotReadyPiece int32 = 0 - peerPacket 
*scheduler.PeerPacket + peerPacket *schedulerv1.PeerPacket err error firstPacketReceived bool ) @@ -669,8 +670,8 @@ loop: } pt.Debugf("receive peerPacket %v", peerPacket) - if peerPacket.Code != base.Code_Success { - if peerPacket.Code == base.Code_SchedNeedBackSource { + if peerPacket.Code != commonv1.Code_Success { + if peerPacket.Code == commonv1.Code_SchedNeedBackSource { pt.markBackSource() pt.Infof("receive back source code") return @@ -737,8 +738,8 @@ loop: } } -// updateSynchronizer will convert peers to synchronizer, if failed, will update failed peers to scheduler.PeerPacket -func (pt *peerTaskConductor) updateSynchronizer(lastNum int32, p *scheduler.PeerPacket) int32 { +// updateSynchronizer will convert peers to synchronizer, if failed, will update failed peers to schedulerv1.PeerPacket +func (pt *peerTaskConductor) updateSynchronizer(lastNum int32, p *schedulerv1.PeerPacket) int32 { desiredPiece, ok := pt.getNextNotReadyPieceNum(lastNum) if !ok { pt.Infof("all pieces is ready, peer task completed, skip to synchronize") @@ -746,7 +747,7 @@ func (pt *peerTaskConductor) updateSynchronizer(lastNum int32, p *scheduler.Peer p.CandidatePeers = nil return desiredPiece } - var peers = []*scheduler.PeerPacket_DestPeer{p.MainPeer} + var peers = []*schedulerv1.PeerPacket_DestPeer{p.MainPeer} peers = append(peers, p.CandidatePeers...) 
legacyPeers := pt.pieceTaskSyncManager.newMultiPieceTaskSynchronizer(peers, desiredPiece) @@ -765,15 +766,15 @@ func (pt *peerTaskConductor) confirmReceivePeerPacketError(err error) { default: } var ( - failedCode = base.Code_UnknownError + failedCode = commonv1.Code_UnknownError failedReason string ) de, ok := err.(*dferrors.DfError) - if ok && de.Code == base.Code_SchedNeedBackSource { + if ok && de.Code == commonv1.Code_SchedNeedBackSource { pt.markBackSource() pt.Infof("receive back source code") return - } else if ok && de.Code != base.Code_SchedNeedBackSource { + } else if ok && de.Code != commonv1.Code_SchedNeedBackSource { failedCode = de.Code failedReason = de.Message pt.Errorf("receive peer packet failed: %s", pt.failedReason) @@ -784,29 +785,29 @@ func (pt *peerTaskConductor) confirmReceivePeerPacketError(err error) { return } -func (pt *peerTaskConductor) isExitPeerPacketCode(pp *scheduler.PeerPacket) bool { +func (pt *peerTaskConductor) isExitPeerPacketCode(pp *schedulerv1.PeerPacket) bool { switch pp.Code { - case base.Code_ResourceLacked, base.Code_BadRequest, - base.Code_PeerTaskNotFound, base.Code_UnknownError, base.Code_RequestTimeOut: + case commonv1.Code_ResourceLacked, commonv1.Code_BadRequest, + commonv1.Code_PeerTaskNotFound, commonv1.Code_UnknownError, commonv1.Code_RequestTimeOut: // 1xxx pt.failedCode = pp.Code pt.failedReason = fmt.Sprintf("receive exit peer packet with code %d", pp.Code) return true - case base.Code_SchedError, base.Code_SchedTaskStatusError, base.Code_SchedPeerNotFound: + case commonv1.Code_SchedError, commonv1.Code_SchedTaskStatusError, commonv1.Code_SchedPeerNotFound: // 5xxx pt.failedCode = pp.Code pt.failedReason = fmt.Sprintf("receive exit peer packet with code %d", pp.Code) return true - case base.Code_SchedPeerGone: + case commonv1.Code_SchedPeerGone: pt.failedReason = reasonPeerGoneFromScheduler - pt.failedCode = base.Code_SchedPeerGone + pt.failedCode = commonv1.Code_SchedPeerGone return true - case 
base.Code_CDNTaskRegistryFail: + case commonv1.Code_CDNTaskRegistryFail: // 6xxx pt.failedCode = pp.Code pt.failedReason = fmt.Sprintf("receive exit peer packet with code %d", pp.Code) return true - case base.Code_BackToSourceAborted: + case commonv1.Code_BackToSourceAborted: st := status.Newf(codes.Aborted, "response is not valid") st, err := st.WithDetails(pp.GetSourceError()) if err != nil { @@ -877,7 +878,7 @@ func (pt *peerTaskConductor) pullPiecesFromPeers(pieceRequestCh chan *DownloadPi ) // ensure first peer packet is not nil - peerPacket, ok := pt.peerPacket.Load().(*scheduler.PeerPacket) + peerPacket, ok := pt.peerPacket.Load().(*schedulerv1.PeerPacket) if !ok { pt.Warn("pull pieces canceled") return @@ -912,7 +913,7 @@ loop: // 2, try to get pieces pt.Debugf("try to get pieces, number: %d, limit: %d", num, limit) piecePacket, err := pt.pieceTaskPoller.preparePieceTasks( - &base.PieceTaskRequest{ + &commonv1.PieceTaskRequest{ TaskId: pt.taskID, SrcPid: pt.peerID, StartNum: uint32(num), @@ -953,7 +954,7 @@ loop: } } -func (pt *peerTaskConductor) updateMetadata(piecePacket *base.PiecePacket) { +func (pt *peerTaskConductor) updateMetadata(piecePacket *commonv1.PiecePacket) { // update total piece var metadataChanged bool if piecePacket.TotalPiece > pt.GetTotalPieces() { @@ -1013,17 +1014,17 @@ func (pt *peerTaskConductor) waitFirstPeerPacket() (done bool, backSource bool) if ok { // preparePieceTasksByPeer func already send piece result with error pt.Infof("new peer client ready, scheduler time cost: %dus, peer count: %d", - time.Since(pt.startTime).Microseconds(), len(pt.peerPacket.Load().(*scheduler.PeerPacket).CandidatePeers)) + time.Since(pt.startTime).Microseconds(), len(pt.peerPacket.Load().(*schedulerv1.PeerPacket).CandidatePeers)) return true, false } - // when scheduler says base.Code_SchedNeedBackSource, receivePeerPacket will close pt.peerPacketReady - pt.Infof("start download from source due to base.Code_SchedNeedBackSource") + // when scheduler 
says commonv1.Code_SchedNeedBackSource, receivePeerPacket will close pt.peerPacketReady + pt.Infof("start download from source due to commonv1.Code_SchedNeedBackSource") pt.span.AddEvent("back source due to scheduler says need back source") pt.backSource() return false, true case <-time.After(pt.schedulerOption.ScheduleTimeout.Duration): if pt.schedulerOption.DisableAutoBackSource { - pt.cancel(base.Code_ClientScheduleTimeout, reasonBackSourceDisabled) + pt.cancel(commonv1.Code_ClientScheduleTimeout, reasonBackSourceDisabled) err := fmt.Errorf("%s, auto back source disabled", pt.failedReason) pt.span.RecordError(err) pt.Errorf(err.Error()) @@ -1048,12 +1049,12 @@ func (pt *peerTaskConductor) waitAvailablePeerPacket() (int32, bool) { case _, ok := <-pt.peerPacketReady: if ok { // preparePieceTasksByPeer func already send piece result with error - pt.Infof("new peer client ready, peer count: %d", len(pt.peerPacket.Load().(*scheduler.PeerPacket).CandidatePeers)) + pt.Infof("new peer client ready, peer count: %d", len(pt.peerPacket.Load().(*schedulerv1.PeerPacket).CandidatePeers)) // research from piece 0 return 0, true } - // when scheduler says base.Code_SchedNeedBackSource, receivePeerPacket will close pt.peerPacketReady - pt.Infof("start download from source due to base.Code_SchedNeedBackSource") + // when scheduler says commonv1.Code_SchedNeedBackSource, receivePeerPacket will close pt.peerPacketReady + pt.Infof("start download from source due to commonv1.Code_SchedNeedBackSource") pt.span.AddEvent("back source due to scheduler says need back source ") // TODO optimize back source when already downloaded some pieces pt.backSource() @@ -1062,7 +1063,7 @@ func (pt *peerTaskConductor) waitAvailablePeerPacket() (int32, bool) { } // Deprecated -func (pt *peerTaskConductor) dispatchPieceRequest(pieceRequestCh chan *DownloadPieceRequest, piecePacket *base.PiecePacket) { +func (pt *peerTaskConductor) dispatchPieceRequest(pieceRequestCh chan *DownloadPieceRequest, 
piecePacket *commonv1.PiecePacket) { pieceCount := len(piecePacket.PieceInfos) pt.Debugf("dispatch piece request, piece count: %d", pieceCount) // fix cdn return zero piece info, but with total piece count and content length @@ -1122,8 +1123,8 @@ wait: pt.Infof("new peer client ready, but all pieces are already downloading, just wait failed pieces") goto wait } - // when scheduler says base.Code_SchedNeedBackSource, receivePeerPacket will close pt.peerPacketReady - pt.Infof("start download from source due to base.Code_SchedNeedBackSource") + // when scheduler says commonv1.Code_SchedNeedBackSource, receivePeerPacket will close pt.peerPacketReady + pt.Infof("start download from source due to commonv1.Code_SchedNeedBackSource") pt.span.AddEvent("back source due to scheduler says need back source") pt.backSource() return -1, false @@ -1199,7 +1200,7 @@ func (pt *peerTaskConductor) downloadPiece(workerID int32, request *DownloadPiec return } attempt, success := pt.pieceTaskSyncManager.acquire( - &base.PieceTaskRequest{ + &commonv1.PieceTaskRequest{ Limit: 1, TaskId: pt.taskID, SrcPid: pt.peerID, @@ -1249,13 +1250,13 @@ func (pt *peerTaskConductor) waitLimit(ctx context.Context, request *DownloadPie waitSpan.End() // send error piece result - sendError := pt.sendPieceResult(&scheduler.PieceResult{ + sendError := pt.sendPieceResult(&schedulerv1.PieceResult{ TaskId: pt.GetTaskID(), SrcPid: pt.GetPeerID(), DstPid: request.DstPid, PieceInfo: request.piece, Success: false, - Code: base.Code_ClientRequestLimitFail, + Code: commonv1.Code_ClientRequestLimitFail, HostLoad: nil, FinishedCount: 0, // update by peer task }) @@ -1263,7 +1264,7 @@ func (pt *peerTaskConductor) waitLimit(ctx context.Context, request *DownloadPie pt.Errorf("report piece result failed %s", err) } - pt.cancel(base.Code_ClientRequestLimitFail, err.Error()) + pt.cancel(commonv1.Code_ClientRequestLimitFail, err.Error()) return false } @@ -1334,13 +1335,13 @@ func (pt *peerTaskConductor) 
ReportPieceResult(request *DownloadPieceRequest, re pt.reportSuccessResult(request, result) return } - code := base.Code_ClientPieceDownloadFail + code := commonv1.Code_ClientPieceDownloadFail if isConnectionError(err) { - code = base.Code_ClientConnectionError + code = commonv1.Code_ClientConnectionError } else if isPieceNotFound(err) { - code = base.Code_ClientPieceNotFound + code = commonv1.Code_ClientPieceNotFound } else if isBackSourceError(err) { - code = base.Code_ClientBackSourceError + code = commonv1.Code_ClientBackSourceError } pt.reportFailResult(request, result, code) } @@ -1351,7 +1352,7 @@ func (pt *peerTaskConductor) reportSuccessResult(request *DownloadPieceRequest, span.SetAttributes(config.AttributeWritePieceSuccess.Bool(true)) err := pt.sendPieceResult( - &scheduler.PieceResult{ + &schedulerv1.PieceResult{ TaskId: pt.GetTaskID(), SrcPid: pt.GetPeerID(), DstPid: request.DstPid, @@ -1359,7 +1360,7 @@ func (pt *peerTaskConductor) reportSuccessResult(request *DownloadPieceRequest, BeginTime: uint64(result.BeginTime), EndTime: uint64(result.FinishTime), Success: true, - Code: base.Code_Success, + Code: commonv1.Code_Success, HostLoad: nil, // TODO(jim): update host load FinishedCount: pt.readyPieces.Settled(), // TODO range_start, range_size, piece_md5, piece_offset, piece_style @@ -1372,12 +1373,12 @@ func (pt *peerTaskConductor) reportSuccessResult(request *DownloadPieceRequest, span.End() } -func (pt *peerTaskConductor) reportFailResult(request *DownloadPieceRequest, result *DownloadPieceResult, code base.Code) { +func (pt *peerTaskConductor) reportFailResult(request *DownloadPieceRequest, result *DownloadPieceResult, code commonv1.Code) { metrics.PieceTaskFailedCount.Add(1) _, span := tracer.Start(pt.ctx, config.SpanReportPieceResult) span.SetAttributes(config.AttributeWritePieceSuccess.Bool(false)) - err := pt.sendPieceResult(&scheduler.PieceResult{ + err := pt.sendPieceResult(&schedulerv1.PieceResult{ TaskId: pt.GetTaskID(), SrcPid: 
pt.GetPeerID(), DstPid: request.DstPid, @@ -1466,7 +1467,7 @@ func (pt *peerTaskConductor) done() { var ( cost = time.Since(pt.startTime).Milliseconds() success = true - code = base.Code_Success + code = commonv1.Code_Success ) pt.Log().Infof("peer task done, cost: %dms", cost) // TODO merge error handle @@ -1479,8 +1480,8 @@ func (pt *peerTaskConductor) done() { } else { close(pt.failCh) success = false - code = base.Code_ClientError - pt.failedCode = base.Code_ClientError + code = commonv1.Code_ClientError + pt.failedCode = commonv1.Code_ClientError pt.failedReason = err.Error() pt.span.SetAttributes(config.AttributePeerTaskSuccess.Bool(false)) @@ -1492,8 +1493,8 @@ func (pt *peerTaskConductor) done() { } else { close(pt.failCh) success = false - code = base.Code_ClientError - pt.failedCode = base.Code_ClientError + code = commonv1.Code_ClientError + pt.failedCode = commonv1.Code_ClientError pt.failedReason = err.Error() pt.span.SetAttributes(config.AttributePeerTaskSuccess.Bool(false)) @@ -1517,7 +1518,7 @@ func (pt *peerTaskConductor) done() { err = pt.schedulerClient.ReportPeerResult( peerResultCtx, - &scheduler.PeerResult{ + &schedulerv1.PeerResult{ TaskId: pt.GetTaskID(), PeerId: pt.GetPeerID(), SrcIp: pt.host.Ip, @@ -1544,7 +1545,7 @@ func (pt *peerTaskConductor) Fail() { } func (pt *peerTaskConductor) fail() { - if pt.failedCode == base.Code_ClientBackSourceError { + if pt.failedCode == commonv1.Code_ClientBackSourceError { metrics.PeerTaskFailedCount.WithLabelValues(metrics.FailTypeBackSource).Add(1) } else { metrics.PeerTaskFailedCount.WithLabelValues(metrics.FailTypeP2P).Add(1) @@ -1574,16 +1575,16 @@ func (pt *peerTaskConductor) fail() { peerResultCtx, peerResultSpan := tracer.Start(ctx, config.SpanReportPeerResult) defer peerResultSpan.End() - var sourceError *errordetails.SourceError + var sourceError *errordetailsv1.SourceError if pt.sourceErrorStatus != nil { for _, detail := range pt.sourceErrorStatus.Details() { switch d := detail.(type) { - case 
*errordetails.SourceError: + case *errordetailsv1.SourceError: sourceError = d } } } - peerResult := &scheduler.PeerResult{ + peerResult := &schedulerv1.PeerResult{ TaskId: pt.GetTaskID(), PeerId: pt.GetPeerID(), SrcIp: pt.peerTaskManager.host.Ip, @@ -1598,7 +1599,7 @@ func (pt *peerTaskConductor) fail() { Code: pt.failedCode, } if sourceError != nil { - peerResult.ErrorDetail = &scheduler.PeerResult_SourceError{ + peerResult.Errordetails = &schedulerv1.PeerResult_SourceError{ SourceError: sourceError, } } @@ -1672,7 +1673,7 @@ func (pt *peerTaskConductor) PublishPieceInfo(pieceNum int32, size uint32) { }) } -func (pt *peerTaskConductor) sendPieceResult(pr *scheduler.PieceResult) error { +func (pt *peerTaskConductor) sendPieceResult(pr *schedulerv1.PieceResult) error { pt.sendPieceResultLock.Lock() err := pt.peerPacketStream.Send(pr) pt.sendPieceResultLock.Unlock() diff --git a/client/daemon/peer/peertask_dummy.go b/client/daemon/peer/peertask_dummy.go index 6922c25d1..8425df870 100644 --- a/client/daemon/peer/peertask_dummy.go +++ b/client/daemon/peer/peertask_dummy.go @@ -21,37 +21,38 @@ import ( "google.golang.org/grpc" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/internal/dferrors" "d7y.io/dragonfly/v2/pkg/dfnet" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" ) // when scheduler is not available, use dummySchedulerClient to back source type dummySchedulerClient struct { } -func (d *dummySchedulerClient) RegisterPeerTask(ctx context.Context, request *scheduler.PeerTaskRequest, option ...grpc.CallOption) (*scheduler.RegisterResult, error) { +func (d *dummySchedulerClient) RegisterPeerTask(ctx context.Context, request *schedulerv1.PeerTaskRequest, option ...grpc.CallOption) (*schedulerv1.RegisterResult, error) { panic("should not call this function") } -func (d *dummySchedulerClient) ReportPieceResult(ctx context.Context, request *scheduler.PeerTaskRequest, option 
...grpc.CallOption) (scheduler.Scheduler_ReportPieceResultClient, error) { +func (d *dummySchedulerClient) ReportPieceResult(ctx context.Context, request *schedulerv1.PeerTaskRequest, option ...grpc.CallOption) (schedulerv1.Scheduler_ReportPieceResultClient, error) { return &dummyPeerPacketStream{}, nil } -func (d *dummySchedulerClient) ReportPeerResult(ctx context.Context, result *scheduler.PeerResult, option ...grpc.CallOption) error { +func (d *dummySchedulerClient) ReportPeerResult(ctx context.Context, result *schedulerv1.PeerResult, option ...grpc.CallOption) error { return nil } -func (d *dummySchedulerClient) LeaveTask(ctx context.Context, target *scheduler.PeerTarget, option ...grpc.CallOption) error { +func (d *dummySchedulerClient) LeaveTask(ctx context.Context, target *schedulerv1.PeerTarget, option ...grpc.CallOption) error { return nil } -func (d *dummySchedulerClient) StatTask(ctx context.Context, request *scheduler.StatTaskRequest, option ...grpc.CallOption) (*scheduler.Task, error) { +func (d *dummySchedulerClient) StatTask(ctx context.Context, request *schedulerv1.StatTaskRequest, option ...grpc.CallOption) (*schedulerv1.Task, error) { panic("should not call this function") } -func (d *dummySchedulerClient) AnnounceTask(ctx context.Context, request *scheduler.AnnounceTaskRequest, option ...grpc.CallOption) error { +func (d *dummySchedulerClient) AnnounceTask(ctx context.Context, request *schedulerv1.AnnounceTaskRequest, option ...grpc.CallOption) error { panic("should not call this function") } @@ -70,12 +71,12 @@ type dummyPeerPacketStream struct { grpc.ClientStream } -func (d *dummyPeerPacketStream) Recv() (*scheduler.PeerPacket, error) { - // TODO set base.Code_SchedNeedBackSource in *scheduler.PeerPacket instead of error - return nil, dferrors.New(base.Code_SchedNeedBackSource, "") +func (d *dummyPeerPacketStream) Recv() (*schedulerv1.PeerPacket, error) { + // TODO set commonv1.Code_SchedNeedBackSource in *scheduler.PeerPacket instead of error 
+ return nil, dferrors.New(commonv1.Code_SchedNeedBackSource, "") } -func (d *dummyPeerPacketStream) Send(pr *scheduler.PieceResult) error { +func (d *dummyPeerPacketStream) Send(pr *schedulerv1.PieceResult) error { return nil } diff --git a/client/daemon/peer/peertask_file.go b/client/daemon/peer/peertask_file.go index e2d6591fb..111a0d035 100644 --- a/client/daemon/peer/peertask_file.go +++ b/client/daemon/peer/peertask_file.go @@ -23,18 +23,19 @@ import ( "go.opentelemetry.io/otel/trace" "golang.org/x/time/rate" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/metrics" "d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/util" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" ) type FileTaskRequest struct { - scheduler.PeerTaskRequest + schedulerv1.PeerTaskRequest Output string Limit float64 DisableBackSource bool @@ -68,7 +69,7 @@ type fileTask struct { type ProgressState struct { Success bool - Code base.Code + Code commonv1.Code Msg string } @@ -142,7 +143,7 @@ func (f *fileTask) syncProgress() { pg := &FileTaskProgress{ State: &ProgressState{ Success: true, - Code: base.Code_Success, + Code: commonv1.Code_Success, Msg: "downloading", }, TaskID: f.peerTaskConductor.GetTaskID(), @@ -178,7 +179,7 @@ func (f *fileTask) storeToOutput() { OriginalOffset: f.request.KeepOriginalOffset, }) if err != nil { - f.sendFailProgress(base.Code_ClientError, err.Error()) + f.sendFailProgress(commonv1.Code_ClientError, err.Error()) return } f.sendSuccessProgress() @@ -189,7 +190,7 @@ func (f *fileTask) sendSuccessProgress() { pg := &FileTaskProgress{ State: &ProgressState{ Success: true, - Code: base.Code_Success, + Code: commonv1.Code_Success, Msg: "done", }, TaskID: f.peerTaskConductor.GetTaskID(), @@ -223,7 +224,7 @@ func (f *fileTask) 
sendSuccessProgress() { } } -func (f *fileTask) sendFailProgress(code base.Code, msg string) { +func (f *fileTask) sendFailProgress(code commonv1.Code, msg string) { var progressDone bool pg := &FileTaskProgress{ State: &ProgressState{ diff --git a/client/daemon/peer/peertask_manager.go b/client/daemon/peer/peertask_manager.go index 232c16baa..d5db51913 100644 --- a/client/daemon/peer/peertask_manager.go +++ b/client/daemon/peer/peertask_manager.go @@ -32,14 +32,15 @@ import ( "golang.org/x/time/rate" "google.golang.org/grpc/status" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/metrics" "d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/util" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client" ) @@ -57,15 +58,15 @@ type TaskManager interface { StartSeedTask(ctx context.Context, req *SeedTaskRequest) ( seedTaskResult *SeedTaskResponse, reuse bool, err error) - Subscribe(request *base.PieceTaskRequest) (*SubscribeResponse, bool) + Subscribe(request *commonv1.PieceTaskRequest) (*SubscribeResponse, bool) IsPeerTaskRunning(taskID string) (Task, bool) // StatTask checks whether the given task exists in P2P network - StatTask(ctx context.Context, taskID string) (*scheduler.Task, error) + StatTask(ctx context.Context, taskID string) (*schedulerv1.Task, error) // AnnouncePeerTask announces peer task info to P2P network - AnnouncePeerTask(ctx context.Context, meta storage.PeerTaskMetadata, url string, taskType base.TaskType, urlMeta *base.UrlMeta) error + AnnouncePeerTask(ctx context.Context, meta storage.PeerTaskMetadata, url string, taskType commonv1.TaskType, urlMeta *commonv1.UrlMeta) error GetPieceManager() PieceManager @@ -119,7 +120,7 @@ func init() { } type 
peerTaskManager struct { - host *scheduler.PeerHost + host *schedulerv1.PeerHost schedulerClient schedulerclient.Client schedulerOption config.SchedulerOption pieceManager PieceManager @@ -143,7 +144,7 @@ type peerTaskManager struct { } func NewPeerTaskManager( - host *scheduler.PeerHost, + host *schedulerv1.PeerHost, pieceManager PieceManager, storageManager storage.Manager, schedulerClient schedulerclient.Client, @@ -183,7 +184,7 @@ func (ptm *peerTaskManager) findPeerTaskConductor(taskID string) (*peerTaskCondu func (ptm *peerTaskManager) getPeerTaskConductor(ctx context.Context, taskID string, - request *scheduler.PeerTaskRequest, + request *schedulerv1.PeerTaskRequest, limit rate.Limit, parent *peerTaskConductor, rg *util.Range, @@ -206,7 +207,7 @@ func (ptm *peerTaskManager) getPeerTaskConductor(ctx context.Context, func (ptm *peerTaskManager) getOrCreatePeerTaskConductor( ctx context.Context, taskID string, - request *scheduler.PeerTaskRequest, + request *schedulerv1.PeerTaskRequest, limit rate.Limit, parent *peerTaskConductor, rg *util.Range, @@ -238,7 +239,7 @@ func (ptm *peerTaskManager) getOrCreatePeerTaskConductor( err := ptc.initStorage(desiredLocation) if err != nil { ptc.Errorf("init storage error: %s", err) - ptc.cancel(base.Code_ClientError, err.Error()) + ptc.cancel(commonv1.Code_ClientError, err.Error()) return nil, false, err } return ptc, true, nil @@ -248,15 +249,15 @@ func (ptm *peerTaskManager) enabledPrefetch(rg *util.Range) bool { return ptm.enablePrefetch && rg != nil } -func (ptm *peerTaskManager) prefetchParentTask(request *scheduler.PeerTaskRequest, desiredLocation string) *peerTaskConductor { - req := &scheduler.PeerTaskRequest{ +func (ptm *peerTaskManager) prefetchParentTask(request *schedulerv1.PeerTaskRequest, desiredLocation string) *peerTaskConductor { + req := &schedulerv1.PeerTaskRequest{ Url: request.Url, PeerId: request.PeerId, PeerHost: ptm.host, HostLoad: request.HostLoad, IsMigrating: request.IsMigrating, Pattern: 
request.Pattern, - UrlMeta: &base.UrlMeta{ + UrlMeta: &commonv1.UrlMeta{ Digest: request.UrlMeta.Digest, Tag: request.UrlMeta.Tag, Filter: request.UrlMeta.Filter, @@ -320,7 +321,7 @@ func (ptm *peerTaskManager) StartFileTask(ctx context.Context, req *FileTaskRequ } func (ptm *peerTaskManager) StartStreamTask(ctx context.Context, req *StreamTaskRequest) (io.ReadCloser, map[string]string, error) { - peerTaskRequest := &scheduler.PeerTaskRequest{ + peerTaskRequest := &schedulerv1.PeerTaskRequest{ Url: req.URL, UrlMeta: req.URLMeta, PeerId: req.PeerID, @@ -379,7 +380,7 @@ type SubscribeResponse struct { FailReason func() error } -func (ptm *peerTaskManager) Subscribe(request *base.PieceTaskRequest) (*SubscribeResponse, bool) { +func (ptm *peerTaskManager) Subscribe(request *commonv1.PieceTaskRequest) (*SubscribeResponse, bool) { ptc, ok := ptm.findPeerTaskConductor(request.TaskId) if !ok { return nil, false @@ -413,8 +414,8 @@ func (ptm *peerTaskManager) IsPeerTaskRunning(taskID string) (Task, bool) { return nil, ok } -func (ptm *peerTaskManager) StatTask(ctx context.Context, taskID string) (*scheduler.Task, error) { - req := &scheduler.StatTaskRequest{ +func (ptm *peerTaskManager) StatTask(ctx context.Context, taskID string) (*schedulerv1.Task, error) { + req := &schedulerv1.StatTaskRequest{ TaskId: taskID, } @@ -425,7 +426,7 @@ func (ptm *peerTaskManager) GetPieceManager() PieceManager { return ptm.pieceManager } -func (ptm *peerTaskManager) AnnouncePeerTask(ctx context.Context, meta storage.PeerTaskMetadata, url string, taskType base.TaskType, urlMeta *base.UrlMeta) error { +func (ptm *peerTaskManager) AnnouncePeerTask(ctx context.Context, meta storage.PeerTaskMetadata, url string, taskType commonv1.TaskType, urlMeta *commonv1.UrlMeta) error { // Check if the given task is completed in local storageManager. 
if ptm.storageManager.FindCompletedTask(meta.TaskID) == nil { return errors.New("task not found in local storage") @@ -437,7 +438,7 @@ func (ptm *peerTaskManager) AnnouncePeerTask(ctx context.Context, meta storage.P return err } - piecePacket, err := ptm.storageManager.GetPieces(ctx, &base.PieceTaskRequest{ + piecePacket, err := ptm.storageManager.GetPieces(ctx, &commonv1.PieceTaskRequest{ TaskId: meta.TaskID, DstPid: meta.PeerID, StartNum: 0, @@ -449,7 +450,7 @@ func (ptm *peerTaskManager) AnnouncePeerTask(ctx context.Context, meta storage.P piecePacket.DstAddr = fmt.Sprintf("%s:%d", ptm.host.Ip, ptm.host.DownPort) // Announce peer task to scheduler - if err := ptm.schedulerClient.AnnounceTask(ctx, &scheduler.AnnounceTaskRequest{ + if err := ptm.schedulerClient.AnnounceTask(ctx, &schedulerv1.AnnounceTaskRequest{ TaskId: meta.TaskID, TaskType: taskType, Url: url, diff --git a/client/daemon/peer/peertask_manager_mock.go b/client/daemon/peer/peertask_manager_mock.go index 56e6a9b96..ae9ab12ea 100644 --- a/client/daemon/peer/peertask_manager_mock.go +++ b/client/daemon/peer/peertask_manager_mock.go @@ -9,10 +9,10 @@ import ( io "io" reflect "reflect" + v1 "d7y.io/api/pkg/apis/common/v1" + v10 "d7y.io/api/pkg/apis/scheduler/v1" storage "d7y.io/dragonfly/v2/client/daemon/storage" dflog "d7y.io/dragonfly/v2/internal/dflog" - base "d7y.io/dragonfly/v2/pkg/rpc/base" - scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" gomock "github.com/golang/mock/gomock" status "google.golang.org/grpc/status" ) @@ -41,7 +41,7 @@ func (m *MockTaskManager) EXPECT() *MockTaskManagerMockRecorder { } // AnnouncePeerTask mocks base method. 
-func (m *MockTaskManager) AnnouncePeerTask(ctx context.Context, meta storage.PeerTaskMetadata, url string, taskType base.TaskType, urlMeta *base.UrlMeta) error { +func (m *MockTaskManager) AnnouncePeerTask(ctx context.Context, meta storage.PeerTaskMetadata, url string, taskType v1.TaskType, urlMeta *v1.UrlMeta) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AnnouncePeerTask", ctx, meta, url, taskType, urlMeta) ret0, _ := ret[0].(error) @@ -132,10 +132,10 @@ func (mr *MockTaskManagerMockRecorder) StartStreamTask(ctx, req interface{}) *go } // StatTask mocks base method. -func (m *MockTaskManager) StatTask(ctx context.Context, taskID string) (*scheduler.Task, error) { +func (m *MockTaskManager) StatTask(ctx context.Context, taskID string) (*v10.Task, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StatTask", ctx, taskID) - ret0, _ := ret[0].(*scheduler.Task) + ret0, _ := ret[0].(*v10.Task) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -161,7 +161,7 @@ func (mr *MockTaskManagerMockRecorder) Stop(ctx interface{}) *gomock.Call { } // Subscribe mocks base method. 
-func (m *MockTaskManager) Subscribe(request *base.PieceTaskRequest) (*SubscribeResponse, bool) { +func (m *MockTaskManager) Subscribe(request *v1.PieceTaskRequest) (*SubscribeResponse, bool) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Subscribe", request) ret0, _ := ret[0].(*SubscribeResponse) diff --git a/client/daemon/peer/peertask_manager_test.go b/client/daemon/peer/peertask_manager_test.go index d597fc81a..f134b6962 100644 --- a/client/daemon/peer/peertask_manager_test.go +++ b/client/daemon/peer/peertask_manager_test.go @@ -42,6 +42,11 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + commonv1 "d7y.io/api/pkg/apis/common/v1" + dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + schedulerv1mocks "d7y.io/api/pkg/apis/scheduler/v1/mocks" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/test" @@ -52,14 +57,10 @@ import ( "d7y.io/dragonfly/v2/pkg/digest" "d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/rpc" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" daemonserver "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server" servermocks "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server/mocks" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client" - mock_scheduler_client "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client/mocks" - mock_scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler/mocks" + schedulerclientmocks "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client/mocks" "d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source/clients/httpprotocol" sourcemocks "d7y.io/dragonfly/v2/pkg/source/mocks" @@ -79,7 +80,7 @@ type componentsOption struct { sourceClient source.ResourceClient peerPacketDelay []time.Duration backSource bool - scope base.SizeScope + scope commonv1.SizeScope content []byte getPieceTasks bool } @@ -97,8 +98,8 @@ func 
setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio pieces[i] = digest.MD5FromReader(io.LimitReader(r, int64(opt.pieceSize))) } totalDigests := digest.SHA256FromStrings(pieces...) - genPiecePacket := func(request *base.PieceTaskRequest) *base.PiecePacket { - var tasks []*base.PieceInfo + genPiecePacket := func(request *commonv1.PieceTaskRequest) *commonv1.PiecePacket { + var tasks []*commonv1.PieceInfo for i := uint32(0); i < request.Limit; i++ { start := opt.pieceSize * (request.StartNum + i) if int64(start)+1 > opt.contentLength { @@ -109,7 +110,7 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio size = uint32(opt.contentLength) - start } tasks = append(tasks, - &base.PieceInfo{ + &commonv1.PieceInfo{ PieceNum: int32(request.StartNum + i), RangeStart: uint64(start), RangeSize: size, @@ -118,7 +119,7 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio PieceStyle: 0, }) } - return &base.PiecePacket{ + return &commonv1.PiecePacket{ TaskId: request.TaskId, DstPid: "peer-x", PieceInfos: tasks, @@ -129,18 +130,18 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio } if opt.getPieceTasks { daemon.EXPECT().GetPieceTasks(gomock.Any(), gomock.Any()).AnyTimes(). - DoAndReturn(func(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error) { + DoAndReturn(func(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) { return genPiecePacket(request), nil }) - daemon.EXPECT().SyncPieceTasks(gomock.Any()).AnyTimes().DoAndReturn(func(arg0 dfdaemon.Daemon_SyncPieceTasksServer) error { + daemon.EXPECT().SyncPieceTasks(gomock.Any()).AnyTimes().DoAndReturn(func(arg0 dfdaemonv1.Daemon_SyncPieceTasksServer) error { return status.Error(codes.Unimplemented, "TODO") }) } else { daemon.EXPECT().GetPieceTasks(gomock.Any(), gomock.Any()).AnyTimes(). 
- DoAndReturn(func(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error) { + DoAndReturn(func(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) { return nil, status.Error(codes.Unimplemented, "TODO") }) - daemon.EXPECT().SyncPieceTasks(gomock.Any()).AnyTimes().DoAndReturn(func(s dfdaemon.Daemon_SyncPieceTasksServer) error { + daemon.EXPECT().SyncPieceTasks(gomock.Any()).AnyTimes().DoAndReturn(func(s dfdaemonv1.Daemon_SyncPieceTasksServer) error { request, err := s.Recv() if err != nil { return err @@ -177,9 +178,9 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio time.Sleep(100 * time.Millisecond) // 2. setup a scheduler - pps := mock_scheduler.NewMockScheduler_ReportPieceResultClient(ctrl) + pps := schedulerv1mocks.NewMockScheduler_ReportPieceResultClient(ctrl) pps.EXPECT().Send(gomock.Any()).AnyTimes().DoAndReturn( - func(pr *scheduler.PieceResult) error { + func(pr *schedulerv1.PieceResult) error { return nil }) var ( @@ -188,7 +189,7 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio ) sent <- struct{}{} pps.EXPECT().Recv().AnyTimes().DoAndReturn( - func() (*scheduler.PeerPacket, error) { + func() (*schedulerv1.PeerPacket, error) { if len(opt.peerPacketDelay) > delayCount { if delay := opt.peerPacketDelay[delayCount]; delay > 0 { time.Sleep(delay) @@ -197,14 +198,14 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio } <-sent if opt.backSource { - return nil, dferrors.Newf(base.Code_SchedNeedBackSource, "fake back source error") + return nil, dferrors.Newf(commonv1.Code_SchedNeedBackSource, "fake back source error") } - return &scheduler.PeerPacket{ - Code: base.Code_Success, + return &schedulerv1.PeerPacket{ + Code: commonv1.Code_Success, TaskId: opt.taskID, SrcPid: "127.0.0.1", ParallelCount: opt.pieceParallelCount, - MainPeer: &scheduler.PeerPacket_DestPeer{ + MainPeer: 
&schedulerv1.PeerPacket_DestPeer{ Ip: "127.0.0.1", RpcPort: port, PeerId: "peer-x", @@ -214,27 +215,27 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio }) pps.EXPECT().CloseSend().AnyTimes() - sched := mock_scheduler_client.NewMockClient(ctrl) + sched := schedulerclientmocks.NewMockClient(ctrl) sched.EXPECT().RegisterPeerTask(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( - func(ctx context.Context, ptr *scheduler.PeerTaskRequest, opts ...grpc.CallOption) (*scheduler.RegisterResult, error) { + func(ctx context.Context, ptr *schedulerv1.PeerTaskRequest, opts ...grpc.CallOption) (*schedulerv1.RegisterResult, error) { switch opt.scope { - case base.SizeScope_TINY: - return &scheduler.RegisterResult{ + case commonv1.SizeScope_TINY: + return &schedulerv1.RegisterResult{ TaskId: opt.taskID, - SizeScope: base.SizeScope_TINY, - DirectPiece: &scheduler.RegisterResult_PieceContent{ + SizeScope: commonv1.SizeScope_TINY, + DirectPiece: &schedulerv1.RegisterResult_PieceContent{ PieceContent: opt.content, }, }, nil - case base.SizeScope_SMALL: - return &scheduler.RegisterResult{ + case commonv1.SizeScope_SMALL: + return &schedulerv1.RegisterResult{ TaskId: opt.taskID, - SizeScope: base.SizeScope_SMALL, - DirectPiece: &scheduler.RegisterResult_SinglePiece{ - SinglePiece: &scheduler.SinglePiece{ + SizeScope: commonv1.SizeScope_SMALL, + DirectPiece: &schedulerv1.RegisterResult_SinglePiece{ + SinglePiece: &schedulerv1.SinglePiece{ DstPid: "fake-pid", DstAddr: "fake-addr", - PieceInfo: &base.PieceInfo{ + PieceInfo: &commonv1.PieceInfo{ PieceNum: 0, RangeStart: 0, RangeSize: uint32(opt.contentLength), @@ -246,19 +247,19 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio }, }, nil } - return &scheduler.RegisterResult{ + return &schedulerv1.RegisterResult{ TaskId: opt.taskID, - SizeScope: base.SizeScope_NORMAL, + SizeScope: commonv1.SizeScope_NORMAL, DirectPiece: nil, }, nil }) 
sched.EXPECT().ReportPieceResult(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( - func(ctx context.Context, ptr *scheduler.PeerTaskRequest, opts ...grpc.CallOption) ( - scheduler.Scheduler_ReportPieceResultClient, error) { + func(ctx context.Context, ptr *schedulerv1.PeerTaskRequest, opts ...grpc.CallOption) ( + schedulerv1.Scheduler_ReportPieceResultClient, error) { return pps, nil }) sched.EXPECT().ReportPeerResult(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( - func(ctx context.Context, pr *scheduler.PeerResult, opts ...grpc.CallOption) error { + func(ctx context.Context, pr *schedulerv1.PeerResult, opts ...grpc.CallOption) error { return nil }) tempDir, _ := os.MkdirTemp("", "d7y-test-*") @@ -295,7 +296,7 @@ func setupMockManager(ctrl *gomock.Controller, ts *testSpec, opt componentsOptio } ptm := &peerTaskManager{ calculateDigest: true, - host: &scheduler.PeerHost{ + host: &schedulerv1.PeerHost{ Ip: "127.0.0.1", }, conductorLock: &sync.Mutex{}, @@ -336,7 +337,7 @@ type testSpec struct { httpRange *util.Range // only used in back source cases pieceParallelCount int32 pieceSize int - sizeScope base.SizeScope + sizeScope commonv1.SizeScope peerID string url string legacyFeature bool @@ -386,7 +387,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) { pieceSize: 1024, peerID: "normal-size-peer", url: "http://localhost/test/data", - sizeScope: base.SizeScope_NORMAL, + sizeScope: commonv1.SizeScope_NORMAL, mockPieceDownloader: commonPieceDownloader, mockHTTPSourceClient: nil, }, @@ -397,7 +398,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) { pieceSize: 16384, peerID: "small-size-peer", url: "http://localhost/test/data", - sizeScope: base.SizeScope_SMALL, + sizeScope: commonv1.SizeScope_SMALL, mockPieceDownloader: commonPieceDownloader, mockHTTPSourceClient: nil, }, @@ -408,7 +409,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) { pieceSize: 1024, peerID: "tiny-size-peer", url: "http://localhost/test/data", - sizeScope: base.SizeScope_TINY, + 
sizeScope: commonv1.SizeScope_TINY, mockPieceDownloader: nil, mockHTTPSourceClient: nil, }, @@ -421,7 +422,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) { peerID: "normal-size-peer-back-source", backSource: true, url: "http://localhost/test/data", - sizeScope: base.SizeScope_NORMAL, + sizeScope: commonv1.SizeScope_NORMAL, mockPieceDownloader: nil, mockHTTPSourceClient: func(t *testing.T, ctrl *gomock.Controller, rg *util.Range, taskData []byte, url string) source.ResourceClient { sourceClient := sourcemocks.NewMockResourceClient(ctrl) @@ -445,7 +446,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) { peerID: "normal-size-peer-range-back-source", backSource: true, url: "http://localhost/test/data", - sizeScope: base.SizeScope_NORMAL, + sizeScope: commonv1.SizeScope_NORMAL, httpRange: &util.Range{ Start: 0, Length: 4096, @@ -487,7 +488,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) { peerID: "normal-size-peer-back-source-no-length", backSource: true, url: "http://localhost/test/data", - sizeScope: base.SizeScope_NORMAL, + sizeScope: commonv1.SizeScope_NORMAL, mockPieceDownloader: nil, mockHTTPSourceClient: func(t *testing.T, ctrl *gomock.Controller, rg *util.Range, taskData []byte, url string) source.ResourceClient { sourceClient := sourcemocks.NewMockResourceClient(ctrl) @@ -511,7 +512,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) { peerID: "normal-size-peer-back-source-aligning-no-length", backSource: true, url: "http://localhost/test/data", - sizeScope: base.SizeScope_NORMAL, + sizeScope: commonv1.SizeScope_NORMAL, mockPieceDownloader: nil, mockHTTPSourceClient: func(t *testing.T, ctrl *gomock.Controller, rg *util.Range, taskData []byte, url string) source.ResourceClient { sourceClient := sourcemocks.NewMockResourceClient(ctrl) @@ -546,7 +547,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) { }) return server.URL }, - sizeScope: base.SizeScope_NORMAL, + sizeScope: commonv1.SizeScope_NORMAL, mockPieceDownloader: nil, 
mockHTTPSourceClient: nil, }, @@ -573,7 +574,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) { defer ctrl.Finish() mockContentLength := len(tc.taskData) - urlMeta := &base.UrlMeta{ + urlMeta := &commonv1.UrlMeta{ Tag: "d7y-test", } @@ -636,7 +637,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) { } } -func (ts *testSpec) run(assert *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *base.UrlMeta) { +func (ts *testSpec) run(assert *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *commonv1.UrlMeta) { switch ts.taskType { case taskTypeFile: ts.runFileTaskTest(assert, require, mm, urlMeta) @@ -651,7 +652,7 @@ func (ts *testSpec) run(assert *testifyassert.Assertions, require *testifyrequir } } -func (ts *testSpec) runFileTaskTest(assert *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *base.UrlMeta) { +func (ts *testSpec) runFileTaskTest(assert *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *commonv1.UrlMeta) { var output = "../test/testdata/test.output" defer func() { assert.Nil(os.Remove(output)) @@ -659,11 +660,11 @@ func (ts *testSpec) runFileTaskTest(assert *testifyassert.Assertions, require *t progress, _, err := mm.peerTaskManager.StartFileTask( context.Background(), &FileTaskRequest{ - PeerTaskRequest: scheduler.PeerTaskRequest{ + PeerTaskRequest: schedulerv1.PeerTaskRequest{ Url: ts.url, UrlMeta: urlMeta, PeerId: ts.peerID, - PeerHost: &scheduler.PeerHost{}, + PeerHost: &schedulerv1.PeerHost{}, }, Output: output, }) @@ -685,7 +686,7 @@ func (ts *testSpec) runFileTaskTest(assert *testifyassert.Assertions, require *t require.Equal(ts.taskData, outputBytes, "output and desired output must match") } -func (ts *testSpec) runStreamTaskTest(_ *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *base.UrlMeta) { +func (ts *testSpec) runStreamTaskTest(_ 
*testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *commonv1.UrlMeta) { r, _, err := mm.peerTaskManager.StartStreamTask( context.Background(), &StreamTaskRequest{ @@ -700,15 +701,15 @@ func (ts *testSpec) runStreamTaskTest(_ *testifyassert.Assertions, require *test require.Equal(ts.taskData, outputBytes, "output and desired output must match") } -func (ts *testSpec) runSeedTaskTest(_ *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *base.UrlMeta) { +func (ts *testSpec) runSeedTaskTest(_ *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *commonv1.UrlMeta) { r, _, err := mm.peerTaskManager.StartSeedTask( context.Background(), &SeedTaskRequest{ - PeerTaskRequest: scheduler.PeerTaskRequest{ + PeerTaskRequest: schedulerv1.PeerTaskRequest{ Url: ts.url, UrlMeta: urlMeta, PeerId: ts.peerID, - PeerHost: &scheduler.PeerHost{}, + PeerHost: &schedulerv1.PeerHost{}, HostLoad: nil, IsMigrating: false, }, @@ -746,7 +747,7 @@ loop: require.True(success, "seed task should success") } -func (ts *testSpec) runConductorTest(assert *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *base.UrlMeta) { +func (ts *testSpec) runConductorTest(assert *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *commonv1.UrlMeta) { var ( ptm = mm.peerTaskManager pieceSize = ts.pieceSize @@ -757,11 +758,11 @@ func (ts *testSpec) runConductorTest(assert *testifyassert.Assertions, require * assert.Nil(os.Remove(output)) }() - peerTaskRequest := &scheduler.PeerTaskRequest{ + peerTaskRequest := &schedulerv1.PeerTaskRequest{ Url: ts.url, UrlMeta: urlMeta, PeerId: ts.peerID, - PeerHost: &scheduler.PeerHost{}, + PeerHost: &schedulerv1.PeerHost{}, } ptc, created, err := ptm.getOrCreatePeerTaskConductor( @@ -803,11 +804,11 @@ func (ts *testSpec) runConductorTest(assert *testifyassert.Assertions, require * } for i := 0; i < 
ptcCount; i++ { - request := &scheduler.PeerTaskRequest{ + request := &schedulerv1.PeerTaskRequest{ Url: ts.url, UrlMeta: urlMeta, PeerId: fmt.Sprintf("should-not-use-peer-%d", i), - PeerHost: &scheduler.PeerHost{}, + PeerHost: &schedulerv1.PeerHost{}, } p, created, err := ptm.getOrCreatePeerTaskConductor( context.Background(), taskID, request, rate.Limit(pieceSize*3), nil, nil, "", false) @@ -820,9 +821,9 @@ func (ts *testSpec) runConductorTest(assert *testifyassert.Assertions, require * require.Nil(ptc.start(), "peerTaskConductor start should be ok") switch ts.sizeScope { - case base.SizeScope_TINY: + case commonv1.SizeScope_TINY: require.NotNil(ptc.tinyData) - case base.SizeScope_SMALL: + case commonv1.SizeScope_SMALL: require.NotNil(ptc.singlePiece) } @@ -880,11 +881,11 @@ func (ts *testSpec) runConductorTest(assert *testifyassert.Assertions, require * progress, ok := ptm.tryReuseFilePeerTask( context.Background(), &FileTaskRequest{ - PeerTaskRequest: scheduler.PeerTaskRequest{ + PeerTaskRequest: schedulerv1.PeerTaskRequest{ Url: ts.url, UrlMeta: urlMeta, PeerId: ts.peerID, - PeerHost: &scheduler.PeerHost{}, + PeerHost: &schedulerv1.PeerHost{}, }, Output: output, }) diff --git a/client/daemon/peer/peertask_piecetask_poller.go b/client/daemon/peer/peertask_piecetask_poller.go index e2abb61d8..143ef0d1e 100644 --- a/client/daemon/peer/peertask_piecetask_poller.go +++ b/client/daemon/peer/peertask_piecetask_poller.go @@ -25,12 +25,13 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/internal/dferrors" "d7y.io/dragonfly/v2/pkg/retry" - "d7y.io/dragonfly/v2/pkg/rpc/base" dfclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" ) type pieceTaskPoller struct { @@ -39,14 +40,14 @@ type pieceTaskPoller struct { getPiecesMaxRetry int } -func (poller 
*pieceTaskPoller) preparePieceTasks(request *base.PieceTaskRequest) (pp *base.PiecePacket, err error) { +func (poller *pieceTaskPoller) preparePieceTasks(request *commonv1.PieceTaskRequest) (pp *commonv1.PiecePacket, err error) { ptc := poller.peerTaskConductor defer ptc.recoverFromPanic() var retryCount int prepare: retryCount++ poller.peerTaskConductor.Debugf("prepare piece tasks, retry count: %d", retryCount) - peerPacket := ptc.peerPacket.Load().(*scheduler.PeerPacket) + peerPacket := ptc.peerPacket.Load().(*schedulerv1.PeerPacket) if poller.peerTaskConductor.needBackSource.Load() { return nil, fmt.Errorf("need back source") @@ -74,8 +75,8 @@ prepare: } func (poller *pieceTaskPoller) preparePieceTasksByPeer( - curPeerPacket *scheduler.PeerPacket, - peer *scheduler.PeerPacket_DestPeer, request *base.PieceTaskRequest) (*base.PiecePacket, error) { + curPeerPacket *schedulerv1.PeerPacket, + peer *schedulerv1.PeerPacket_DestPeer, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) { ptc := poller.peerTaskConductor if peer == nil { return nil, fmt.Errorf("empty peer") @@ -89,7 +90,7 @@ func (poller *pieceTaskPoller) preparePieceTasksByPeer( defer span.End() var maxRetries = 60 - // when cdn returns base.Code_CDNTaskNotFound, report it to scheduler and wait cdn download it. + // when cdn returns commonv1.Code_CDNTaskNotFound, report it to scheduler and wait cdn download it. 
retry: ptc.Debugf("try get piece task from peer %s, piece num: %d, limit: %d\"", peer.PeerId, request.StartNum, request.Limit) p, err := poller.getPieceTasksByPeer(span, curPeerPacket, peer, request) @@ -115,18 +116,18 @@ retry: return nil, err } } - code := base.Code_ClientPieceRequestFail + code := commonv1.Code_ClientPieceRequestFail // not grpc error if de, ok := err.(*dferrors.DfError); ok && uint32(de.Code) > uint32(codes.Unauthenticated) { ptc.Debugf("get piece task from peer %s with df error, code: %d", peer.PeerId, de.Code) code = de.Code } ptc.Errorf("get piece task from peer %s error: %s, code: %d", peer.PeerId, err, code) - sendError := ptc.sendPieceResult(&scheduler.PieceResult{ + sendError := ptc.sendPieceResult(&schedulerv1.PieceResult{ TaskId: ptc.taskID, SrcPid: ptc.peerID, DstPid: peer.PeerId, - PieceInfo: &base.PieceInfo{}, + PieceInfo: &commonv1.PieceInfo{}, Success: false, Code: code, HostLoad: nil, @@ -134,14 +135,14 @@ retry: }) // error code should be sent to scheduler and the scheduler can schedule a new peer if sendError != nil { - ptc.cancel(base.Code_SchedError, sendError.Error()) + ptc.cancel(commonv1.Code_SchedError, sendError.Error()) span.RecordError(sendError) ptc.Errorf("send piece result error: %s, code to send: %d", sendError, code) return nil, sendError } // currently, before cdn gc tasks, it did not notify scheduler, when cdn complains Code_CDNTaskNotFound, retry - if maxRetries > 0 && code == base.Code_CDNTaskNotFound && curPeerPacket == ptc.peerPacket.Load().(*scheduler.PeerPacket) { + if maxRetries > 0 && code == commonv1.Code_CDNTaskNotFound && curPeerPacket == ptc.peerPacket.Load().(*schedulerv1.PeerPacket) { span.AddEvent("retry for CdnTaskNotFound") time.Sleep(time.Second) maxRetries-- @@ -152,9 +153,9 @@ retry: func (poller *pieceTaskPoller) getPieceTasksByPeer( span trace.Span, - curPeerPacket *scheduler.PeerPacket, - peer *scheduler.PeerPacket_DestPeer, - request *base.PieceTaskRequest) (*base.PiecePacket, error) { + 
curPeerPacket *schedulerv1.PeerPacket, + peer *schedulerv1.PeerPacket_DestPeer, + request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) { var ( peerPacketChanged bool count int @@ -174,7 +175,7 @@ func (poller *pieceTaskPoller) getPieceTasksByPeer( if de, ok := getError.(*dferrors.DfError); ok { ptc.Debugf("get piece task with grpc error, code: %d", de.Code) // bad request, like invalid piece num, just exit - if de.Code == base.Code_BadRequest { + if de.Code == commonv1.Code_BadRequest { span.AddEvent("bad request") ptc.Warnf("get piece task from peer %s canceled: %s", peer.PeerId, getError) return nil, true, getError @@ -182,7 +183,7 @@ func (poller *pieceTaskPoller) getPieceTasksByPeer( } // fast way 2 to exit retry - lastPeerPacket := ptc.peerPacket.Load().(*scheduler.PeerPacket) + lastPeerPacket := ptc.peerPacket.Load().(*schedulerv1.PeerPacket) if curPeerPacket.CandidatePeers[0].PeerId != lastPeerPacket.CandidatePeers[0].PeerId { ptc.Warnf("get piece tasks with error: %s, but peer packet changed, switch to new peer packet, current destPeer %s, new destPeer %s", getError, curPeerPacket.CandidatePeers[0].PeerId, lastPeerPacket.CandidatePeers[0].PeerId) @@ -206,24 +207,24 @@ func (poller *pieceTaskPoller) getPieceTasksByPeer( } // by santong: when peer return empty, retry later - sendError := ptc.sendPieceResult(&scheduler.PieceResult{ + sendError := ptc.sendPieceResult(&schedulerv1.PieceResult{ TaskId: ptc.taskID, SrcPid: ptc.peerID, DstPid: peer.PeerId, - PieceInfo: &base.PieceInfo{}, + PieceInfo: &commonv1.PieceInfo{}, Success: false, - Code: base.Code_ClientWaitPieceReady, + Code: commonv1.Code_ClientWaitPieceReady, HostLoad: nil, FinishedCount: ptc.readyPieces.Settled(), }) if sendError != nil { - ptc.cancel(base.Code_ClientPieceRequestFail, sendError.Error()) + ptc.cancel(commonv1.Code_ClientPieceRequestFail, sendError.Error()) span.RecordError(sendError) - ptc.Errorf("send piece result with base.Code_ClientWaitPieceReady error: %s", sendError) 
+ ptc.Errorf("send piece result with commonv1.Code_ClientWaitPieceReady error: %s", sendError) return nil, true, sendError } // fast way to exit retry - lastPeerPacket := ptc.peerPacket.Load().(*scheduler.PeerPacket) + lastPeerPacket := ptc.peerPacket.Load().(*schedulerv1.PeerPacket) if curPeerPacket.CandidatePeers[0].PeerId != lastPeerPacket.CandidatePeers[0].PeerId { ptc.Warnf("get empty pieces and peer packet changed, switch to new peer packet, current destPeer %s, new destPeer %s", curPeerPacket.CandidatePeers[0].PeerId, lastPeerPacket.CandidatePeers[0].PeerId) @@ -241,7 +242,7 @@ func (poller *pieceTaskPoller) getPieceTasksByPeer( } if err == nil { - return p.(*base.PiecePacket), nil + return p.(*commonv1.PiecePacket), nil } return nil, err } diff --git a/client/daemon/peer/peertask_piecetask_synchronizer.go b/client/daemon/peer/peertask_piecetask_synchronizer.go index 9464403f8..d89fa89ac 100644 --- a/client/daemon/peer/peertask_piecetask_synchronizer.go +++ b/client/daemon/peer/peertask_piecetask_synchronizer.go @@ -29,12 +29,13 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + commonv1 "d7y.io/api/pkg/apis/common/v1" + dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/config" logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" dfclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" ) type pieceTaskSyncManager struct { @@ -50,8 +51,8 @@ type pieceTaskSyncManager struct { type pieceTaskSynchronizer struct { *logger.SugaredLoggerOnWith span trace.Span - client dfdaemon.Daemon_SyncPieceTasksClient - dstPeer *scheduler.PeerPacket_DestPeer + client dfdaemonv1.Daemon_SyncPieceTasksClient + dstPeer *schedulerv1.PeerPacket_DestPeer error atomic.Value peerTaskConductor *peerTaskConductor pieceRequestCh chan *DownloadPieceRequest @@ -59,7 +60,7 @@ type 
pieceTaskSynchronizer struct { type synchronizerWatchdog struct { done chan struct{} - mainPeer atomic.Value // save *scheduler.PeerPacket_DestPeer + mainPeer atomic.Value // save *schedulerv1.PeerPacket_DestPeer syncSuccess *atomic.Bool peerTaskConductor *peerTaskConductor } @@ -69,7 +70,7 @@ type pieceTaskSynchronizerError struct { } // FIXME for compatibility, sync will be called after the dfclient.GetPieceTasks deprecated and the pieceTaskPoller removed -func (s *pieceTaskSyncManager) sync(pp *scheduler.PeerPacket, desiredPiece int32) error { +func (s *pieceTaskSyncManager) sync(pp *schedulerv1.PeerPacket, desiredPiece int32) error { var ( peers = map[string]bool{} errs []error @@ -116,7 +117,7 @@ func (s *pieceTaskSyncManager) sync(pp *scheduler.PeerPacket, desiredPiece int32 return nil } -func (s *pieceTaskSyncManager) cleanStaleWorker(destPeers []*scheduler.PeerPacket_DestPeer) { +func (s *pieceTaskSyncManager) cleanStaleWorker(destPeers []*schedulerv1.PeerPacket_DestPeer) { var ( peers = map[string]bool{} ) @@ -141,9 +142,9 @@ func (s *pieceTaskSyncManager) cleanStaleWorker(destPeers []*scheduler.PeerPacke func (s *pieceTaskSyncManager) newPieceTaskSynchronizer( ctx context.Context, - dstPeer *scheduler.PeerPacket_DestPeer, + dstPeer *schedulerv1.PeerPacket_DestPeer, desiredPiece int32) error { - request := &base.PieceTaskRequest{ + request := &commonv1.PieceTaskRequest{ TaskId: s.peerTaskConductor.taskID, SrcPid: s.peerTaskConductor.peerID, DstPid: dstPeer.PeerId, @@ -197,8 +198,8 @@ func (s *pieceTaskSyncManager) newPieceTaskSynchronizer( } func (s *pieceTaskSyncManager) newMultiPieceTaskSynchronizer( - destPeers []*scheduler.PeerPacket_DestPeer, - desiredPiece int32) (legacyPeers []*scheduler.PeerPacket_DestPeer) { + destPeers []*schedulerv1.PeerPacket_DestPeer, + desiredPiece int32) (legacyPeers []*schedulerv1.PeerPacket_DestPeer) { s.Lock() defer func() { if s.peerTaskConductor.ptm.watchdogTimeout > 0 { @@ -225,11 +226,11 @@ func (s 
*pieceTaskSyncManager) newMultiPieceTaskSynchronizer( // other errors, report to scheduler if errors.Is(err, context.DeadlineExceeded) { // connect timeout error, report to scheduler to get more available peers - s.reportInvalidPeer(peer, base.Code_ClientConnectionError) + s.reportInvalidPeer(peer, commonv1.Code_ClientConnectionError) s.peerTaskConductor.Infof("connect to peer %s with error: %s, peer is invalid, skip legacy grpc", peer.PeerId, err) } else { // other errors, report to scheduler to get more available peers - s.reportInvalidPeer(peer, base.Code_ClientPieceRequestFail) + s.reportInvalidPeer(peer, commonv1.Code_ClientPieceRequestFail) s.peerTaskConductor.Errorf("connect peer %s error: %s, not codes.Unimplemented", peer.PeerId, err) } } @@ -237,7 +238,7 @@ func (s *pieceTaskSyncManager) newMultiPieceTaskSynchronizer( return legacyPeers } -func (s *pieceTaskSyncManager) resetWatchdog(mainPeer *scheduler.PeerPacket_DestPeer) { +func (s *pieceTaskSyncManager) resetWatchdog(mainPeer *schedulerv1.PeerPacket_DestPeer) { if s.watchdog != nil { close(s.watchdog.done) s.peerTaskConductor.Debugf("close old watchdog") @@ -253,12 +254,12 @@ func (s *pieceTaskSyncManager) resetWatchdog(mainPeer *scheduler.PeerPacket_Dest go s.watchdog.watch(s.peerTaskConductor.ptm.watchdogTimeout) } -func compositePieceResult(peerTaskConductor *peerTaskConductor, destPeer *scheduler.PeerPacket_DestPeer, code base.Code) *scheduler.PieceResult { - return &scheduler.PieceResult{ +func compositePieceResult(peerTaskConductor *peerTaskConductor, destPeer *schedulerv1.PeerPacket_DestPeer, code commonv1.Code) *schedulerv1.PieceResult { + return &schedulerv1.PieceResult{ TaskId: peerTaskConductor.taskID, SrcPid: peerTaskConductor.peerID, DstPid: destPeer.PeerId, - PieceInfo: &base.PieceInfo{}, + PieceInfo: &commonv1.PieceInfo{}, Success: false, Code: code, HostLoad: nil, @@ -266,18 +267,18 @@ func compositePieceResult(peerTaskConductor *peerTaskConductor, destPeer *schedu } } -func (s 
*pieceTaskSyncManager) reportInvalidPeer(destPeer *scheduler.PeerPacket_DestPeer, code base.Code) { +func (s *pieceTaskSyncManager) reportInvalidPeer(destPeer *schedulerv1.PeerPacket_DestPeer, code commonv1.Code) { sendError := s.peerTaskConductor.sendPieceResult(compositePieceResult(s.peerTaskConductor, destPeer, code)) if sendError != nil { s.peerTaskConductor.Errorf("connect peer %s failed and send piece result with error: %s", destPeer.PeerId, sendError) - go s.peerTaskConductor.cancel(base.Code_SchedError, sendError.Error()) + go s.peerTaskConductor.cancel(commonv1.Code_SchedError, sendError.Error()) } else { s.peerTaskConductor.Debugf("report invalid peer %s/%d to scheduler", destPeer.PeerId, code) } } // acquire send the target piece to other peers -func (s *pieceTaskSyncManager) acquire(request *base.PieceTaskRequest) (attempt int, success int) { +func (s *pieceTaskSyncManager) acquire(request *commonv1.PieceTaskRequest) (attempt int, success int) { s.RLock() for _, p := range s.workers { attempt++ @@ -308,7 +309,7 @@ func (s *pieceTaskSynchronizer) close() { s.span.End() } -func (s *pieceTaskSynchronizer) dispatchPieceRequest(piecePacket *base.PiecePacket) { +func (s *pieceTaskSynchronizer) dispatchPieceRequest(piecePacket *commonv1.PiecePacket) { s.peerTaskConductor.updateMetadata(piecePacket) pieceCount := len(piecePacket.PieceInfos) @@ -350,7 +351,7 @@ func (s *pieceTaskSynchronizer) dispatchPieceRequest(piecePacket *base.PiecePack } } -func (s *pieceTaskSynchronizer) receive(piecePacket *base.PiecePacket) { +func (s *pieceTaskSynchronizer) receive(piecePacket *commonv1.PiecePacket) { var err error for { s.dispatchPieceRequest(piecePacket) @@ -373,7 +374,7 @@ func (s *pieceTaskSynchronizer) receive(piecePacket *base.PiecePacket) { } } -func (s *pieceTaskSynchronizer) acquire(request *base.PieceTaskRequest) error { +func (s *pieceTaskSynchronizer) acquire(request *commonv1.PieceTaskRequest) error { if s.error.Load() != nil { err := 
s.error.Load().(*pieceTaskSynchronizerError).err s.Debugf("synchronizer already error %s, skip acquire more pieces", err) @@ -393,10 +394,10 @@ func (s *pieceTaskSynchronizer) acquire(request *base.PieceTaskRequest) error { func (s *pieceTaskSynchronizer) reportError(err error) { s.span.RecordError(err) - sendError := s.peerTaskConductor.sendPieceResult(compositePieceResult(s.peerTaskConductor, s.dstPeer, base.Code_ClientPieceRequestFail)) + sendError := s.peerTaskConductor.sendPieceResult(compositePieceResult(s.peerTaskConductor, s.dstPeer, commonv1.Code_ClientPieceRequestFail)) if sendError != nil { s.Errorf("sync piece info failed and send piece result with error: %s", sendError) - go s.peerTaskConductor.cancel(base.Code_SchedError, sendError.Error()) + go s.peerTaskConductor.cancel(commonv1.Code_SchedError, sendError.Error()) } else { s.Debugf("report sync piece error to scheduler") } @@ -438,10 +439,10 @@ func (s *synchronizerWatchdog) watch(timeout time.Duration) { func (s *synchronizerWatchdog) reportWatchFailed() { sendError := s.peerTaskConductor.sendPieceResult(compositePieceResult( - s.peerTaskConductor, s.mainPeer.Load().(*scheduler.PeerPacket_DestPeer), base.Code_ClientPieceRequestFail)) + s.peerTaskConductor, s.mainPeer.Load().(*schedulerv1.PeerPacket_DestPeer), commonv1.Code_ClientPieceRequestFail)) if sendError != nil { s.peerTaskConductor.Errorf("watchdog sync piece info failed and send piece result with error: %s", sendError) - go s.peerTaskConductor.cancel(base.Code_SchedError, sendError.Error()) + go s.peerTaskConductor.cancel(commonv1.Code_SchedError, sendError.Error()) } else { s.peerTaskConductor.Debugf("report watchdog sync piece error to scheduler") } diff --git a/client/daemon/peer/peertask_piecetask_synchronizer_test.go b/client/daemon/peer/peertask_piecetask_synchronizer_test.go index 0a8beb2de..aa7fc6c5c 100644 --- a/client/daemon/peer/peertask_piecetask_synchronizer_test.go +++ 
b/client/daemon/peer/peertask_piecetask_synchronizer_test.go @@ -25,9 +25,10 @@ import ( testifyassert "github.com/stretchr/testify/assert" "go.uber.org/atomic" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/api/pkg/apis/scheduler/v1/mocks" + logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler/mocks" ) func Test_watchdog(t *testing.T) { @@ -53,7 +54,7 @@ func Test_watchdog(t *testing.T) { for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { - peer := &scheduler.PeerPacket_DestPeer{} + peer := &schedulerv1.PeerPacket_DestPeer{} pps := mocks.NewMockScheduler_ReportPieceResultClient(ctrl) watchdog := &synchronizerWatchdog{ done: make(chan struct{}), @@ -71,7 +72,7 @@ func Test_watchdog(t *testing.T) { if tt.ok { watchdog.peerTaskConductor.readyPieces.Set(0) } else { - pps.EXPECT().Send(gomock.Any()).DoAndReturn(func(pr *scheduler.PieceResult) error { + pps.EXPECT().Send(gomock.Any()).DoAndReturn(func(pr *schedulerv1.PieceResult) error { assert.Equal(peer.PeerId, pr.DstPid) return nil }) diff --git a/client/daemon/peer/peertask_reuse.go b/client/daemon/peer/peertask_reuse.go index 5b27b1af1..0ad1ba584 100644 --- a/client/daemon/peer/peertask_reuse.go +++ b/client/daemon/peer/peertask_reuse.go @@ -27,12 +27,13 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.7.0" "go.opentelemetry.io/otel/trace" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/util" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/base" ) var _ *logger.SugaredLoggerOnWith // pin this package for no log code generation @@ -141,7 +142,7 @@ func (ptm *peerTaskManager) tryReuseFilePeerTask(ctx context.Context, pg := &FileTaskProgress{ State: &ProgressState{ Success: true, - Code: base.Code_Success, + Code: commonv1.Code_Success, Msg: 
"Success", }, TaskID: taskID, diff --git a/client/daemon/peer/peertask_reuse_test.go b/client/daemon/peer/peertask_reuse_test.go index 79f119d30..2c52a5f26 100644 --- a/client/daemon/peer/peertask_reuse_test.go +++ b/client/daemon/peer/peertask_reuse_test.go @@ -29,12 +29,13 @@ import ( "github.com/golang/mock/gomock" testifyassert "github.com/stretchr/testify/assert" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/storage/mocks" "d7y.io/dragonfly/v2/client/daemon/test" "d7y.io/dragonfly/v2/client/util" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" ) func TestReuseFilePeerTask(t *testing.T) { @@ -56,10 +57,10 @@ func TestReuseFilePeerTask(t *testing.T) { { name: "normal completed task found", request: &FileTaskRequest{ - PeerTaskRequest: scheduler.PeerTaskRequest{ + PeerTaskRequest: schedulerv1.PeerTaskRequest{ PeerId: "", Url: "http://example.com/1", - UrlMeta: &base.UrlMeta{ + UrlMeta: &commonv1.UrlMeta{ Digest: "", Tag: "", Range: "", @@ -100,10 +101,10 @@ func TestReuseFilePeerTask(t *testing.T) { { name: "normal completed task not found", request: &FileTaskRequest{ - PeerTaskRequest: scheduler.PeerTaskRequest{ + PeerTaskRequest: schedulerv1.PeerTaskRequest{ PeerId: "", Url: "http://example.com/1", - UrlMeta: &base.UrlMeta{ + UrlMeta: &commonv1.UrlMeta{ Digest: "", Tag: "", Range: "", @@ -137,10 +138,10 @@ func TestReuseFilePeerTask(t *testing.T) { { name: "normal completed subtask found", request: &FileTaskRequest{ - PeerTaskRequest: scheduler.PeerTaskRequest{ + PeerTaskRequest: schedulerv1.PeerTaskRequest{ PeerId: "", Url: "http://example.com/1", - UrlMeta: &base.UrlMeta{ + UrlMeta: &commonv1.UrlMeta{ Digest: "", Tag: "", Range: "", @@ -181,10 +182,10 @@ func TestReuseFilePeerTask(t *testing.T) { { name: "normal completed subtask not found", request: &FileTaskRequest{ - PeerTaskRequest: 
scheduler.PeerTaskRequest{ + PeerTaskRequest: schedulerv1.PeerTaskRequest{ PeerId: "", Url: "http://example.com/1", - UrlMeta: &base.UrlMeta{ + UrlMeta: &commonv1.UrlMeta{ Digest: "", Tag: "", Range: "", @@ -214,10 +215,10 @@ func TestReuseFilePeerTask(t *testing.T) { { name: "partial task found", request: &FileTaskRequest{ - PeerTaskRequest: scheduler.PeerTaskRequest{ + PeerTaskRequest: schedulerv1.PeerTaskRequest{ PeerId: "", Url: "http://example.com/1", - UrlMeta: &base.UrlMeta{ + UrlMeta: &commonv1.UrlMeta{ Digest: "", Tag: "", Range: "", @@ -263,10 +264,10 @@ func TestReuseFilePeerTask(t *testing.T) { { name: "partial task found - out of range", request: &FileTaskRequest{ - PeerTaskRequest: scheduler.PeerTaskRequest{ + PeerTaskRequest: schedulerv1.PeerTaskRequest{ PeerId: "", Url: "http://example.com/1", - UrlMeta: &base.UrlMeta{ + UrlMeta: &commonv1.UrlMeta{ Digest: "", Tag: "", Range: "", @@ -317,7 +318,7 @@ func TestReuseFilePeerTask(t *testing.T) { sm := mocks.NewMockManager(ctrl) tc.storageManager(sm) ptm := &peerTaskManager{ - host: &scheduler.PeerHost{}, + host: &schedulerv1.PeerHost{}, enablePrefetch: tc.enablePrefetch, storageManager: sm, } @@ -344,7 +345,7 @@ func TestReuseStreamPeerTask(t *testing.T) { name: "normal completed task found", request: &StreamTaskRequest{ URL: "http://example.com/1", - URLMeta: &base.UrlMeta{ + URLMeta: &commonv1.UrlMeta{ Digest: "", Tag: "", Range: "", @@ -376,8 +377,8 @@ func TestReuseStreamPeerTask(t *testing.T) { }) sm.EXPECT().GetExtendAttribute(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( - func(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { - return &base.ExtendAttribute{ + func(ctx context.Context, req *storage.PeerTaskMetadata) (*commonv1.ExtendAttribute, error) { + return &commonv1.ExtendAttribute{ Header: map[string]string{ "Test": "test", }, @@ -397,7 +398,7 @@ func TestReuseStreamPeerTask(t *testing.T) { name: "normal completed task not found", request: 
&StreamTaskRequest{ URL: "http://example.com/1", - URLMeta: &base.UrlMeta{ + URLMeta: &commonv1.UrlMeta{ Digest: "", Tag: "", Range: "", @@ -432,7 +433,7 @@ func TestReuseStreamPeerTask(t *testing.T) { name: "normal completed subtask found", request: &StreamTaskRequest{ URL: "http://example.com/1", - URLMeta: &base.UrlMeta{ + URLMeta: &commonv1.UrlMeta{ Digest: "", Tag: "", Range: "", @@ -464,8 +465,8 @@ func TestReuseStreamPeerTask(t *testing.T) { }) sm.EXPECT().GetExtendAttribute(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( - func(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { - return &base.ExtendAttribute{ + func(ctx context.Context, req *storage.PeerTaskMetadata) (*commonv1.ExtendAttribute, error) { + return &commonv1.ExtendAttribute{ Header: map[string]string{ "Test": "test", }, @@ -484,7 +485,7 @@ func TestReuseStreamPeerTask(t *testing.T) { name: "normal completed subtask not found", request: &StreamTaskRequest{ URL: "http://example.com/1", - URLMeta: &base.UrlMeta{ + URLMeta: &commonv1.UrlMeta{ Digest: "", Tag: "", Range: "", @@ -515,7 +516,7 @@ func TestReuseStreamPeerTask(t *testing.T) { name: "partial task found", request: &StreamTaskRequest{ URL: "http://example.com/1", - URLMeta: &base.UrlMeta{ + URLMeta: &commonv1.UrlMeta{ Digest: "", Tag: "", Range: "", @@ -551,8 +552,8 @@ func TestReuseStreamPeerTask(t *testing.T) { }) sm.EXPECT().GetExtendAttribute(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( - func(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { - return &base.ExtendAttribute{ + func(ctx context.Context, req *storage.PeerTaskMetadata) (*commonv1.ExtendAttribute, error) { + return &commonv1.ExtendAttribute{ Header: map[string]string{ "Test": "test", }, @@ -571,7 +572,7 @@ func TestReuseStreamPeerTask(t *testing.T) { name: "partial task found - 2", request: &StreamTaskRequest{ URL: "http://example.com/1", - URLMeta: &base.UrlMeta{ + URLMeta: &commonv1.UrlMeta{ 
Digest: "", Tag: "", Range: "", @@ -607,8 +608,8 @@ func TestReuseStreamPeerTask(t *testing.T) { }) sm.EXPECT().GetExtendAttribute(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( - func(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { - return &base.ExtendAttribute{ + func(ctx context.Context, req *storage.PeerTaskMetadata) (*commonv1.ExtendAttribute, error) { + return &commonv1.ExtendAttribute{ Header: map[string]string{ "Test": "test", }, @@ -631,7 +632,7 @@ func TestReuseStreamPeerTask(t *testing.T) { name: "partial task found - out of range", request: &StreamTaskRequest{ URL: "http://example.com/1", - URLMeta: &base.UrlMeta{ + URLMeta: &commonv1.UrlMeta{ Digest: "", Tag: "", Range: "", @@ -667,8 +668,8 @@ func TestReuseStreamPeerTask(t *testing.T) { }) sm.EXPECT().GetExtendAttribute(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( - func(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { - return &base.ExtendAttribute{ + func(ctx context.Context, req *storage.PeerTaskMetadata) (*commonv1.ExtendAttribute, error) { + return &commonv1.ExtendAttribute{ Header: map[string]string{ "Test": "test", }, @@ -694,7 +695,7 @@ func TestReuseStreamPeerTask(t *testing.T) { sm := mocks.NewMockManager(ctrl) tc.storageManager(sm) ptm := &peerTaskManager{ - host: &scheduler.PeerHost{}, + host: &schedulerv1.PeerHost{}, enablePrefetch: tc.enablePrefetch, storageManager: sm, } diff --git a/client/daemon/peer/peertask_seed.go b/client/daemon/peer/peertask_seed.go index e66430bea..95d5fbc8d 100644 --- a/client/daemon/peer/peertask_seed.go +++ b/client/daemon/peer/peertask_seed.go @@ -22,14 +22,15 @@ import ( "go.opentelemetry.io/otel/trace" "golang.org/x/time/rate" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/util" "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" ) type SeedTaskRequest struct { - 
scheduler.PeerTaskRequest + schedulerv1.PeerTaskRequest Limit float64 Callsystem string Range *util.Range diff --git a/client/daemon/peer/peertask_stream.go b/client/daemon/peer/peertask_stream.go index cb5ced209..2c779cd11 100644 --- a/client/daemon/peer/peertask_stream.go +++ b/client/daemon/peer/peertask_stream.go @@ -25,27 +25,28 @@ import ( "go.opentelemetry.io/otel/trace" "golang.org/x/time/rate" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/metrics" "d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/util" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" ) type StreamTaskRequest struct { // universal resource locator for different kind of storage URL string // url meta info - URLMeta *base.UrlMeta + URLMeta *commonv1.UrlMeta // http range Range *util.Range // peer's id and must be global uniqueness PeerID string // Pattern to register to scheduler - Pattern base.Pattern + Pattern commonv1.Pattern } // StreamTask represents a peer task with stream io for reading directly without once more disk io @@ -66,7 +67,7 @@ type streamTask struct { func (ptm *peerTaskManager) newStreamTask( ctx context.Context, - request *scheduler.PeerTaskRequest, + request *schedulerv1.PeerTaskRequest, rg *util.Range) (*streamTask, error) { metrics.StreamTaskCount.Add(1) var limit = rate.Inf diff --git a/client/daemon/peer/peertask_stream_backsource_partial_test.go b/client/daemon/peer/peertask_stream_backsource_partial_test.go index d11fd8996..f5f64d70b 100644 --- a/client/daemon/peer/peertask_stream_backsource_partial_test.go +++ b/client/daemon/peer/peertask_stream_backsource_partial_test.go @@ -38,6 +38,11 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + commonv1 "d7y.io/api/pkg/apis/common/v1" + dfdaemonv1 
"d7y.io/api/pkg/apis/dfdaemon/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + schedulerv1mocks "d7y.io/api/pkg/apis/scheduler/v1/mocks" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/test" @@ -46,14 +51,10 @@ import ( "d7y.io/dragonfly/v2/pkg/dfnet" "d7y.io/dragonfly/v2/pkg/digest" "d7y.io/dragonfly/v2/pkg/rpc" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" daemonserver "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server" servermocks "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server/mocks" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client" - mock_scheduler_client "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client/mocks" - mock_scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler/mocks" + clientmocks "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client/mocks" "d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source/clients/httpprotocol" sourcemocks "d7y.io/dragonfly/v2/pkg/source/mocks" @@ -74,12 +75,12 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte, piecesMd5 = append(piecesMd5, digest.MD5FromBytes(testBytes[int(i)*int(opt.pieceSize):int(i+1)*int(opt.pieceSize)])) } } - daemon.EXPECT().GetPieceTasks(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(func(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error) { - var tasks []*base.PieceInfo + daemon.EXPECT().GetPieceTasks(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(func(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) { + var tasks []*commonv1.PieceInfo // only return first piece if request.StartNum == 0 { tasks = append(tasks, - &base.PieceInfo{ + &commonv1.PieceInfo{ PieceNum: int32(request.StartNum), RangeStart: uint64(0), RangeSize: opt.pieceSize, @@ -88,7 +89,7 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte, PieceStyle: 0, }) } - 
return &base.PiecePacket{ + return &commonv1.PiecePacket{ PieceMd5Sign: digest.SHA256FromStrings(piecesMd5...), TaskId: request.TaskId, DstPid: "peer-x", @@ -97,7 +98,7 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte, TotalPiece: pieceCount, }, nil }) - daemon.EXPECT().SyncPieceTasks(gomock.Any()).AnyTimes().DoAndReturn(func(arg0 dfdaemon.Daemon_SyncPieceTasksServer) error { + daemon.EXPECT().SyncPieceTasks(gomock.Any()).AnyTimes().DoAndReturn(func(arg0 dfdaemonv1.Daemon_SyncPieceTasksServer) error { return status.Error(codes.Unimplemented, "TODO") }) ln, _ := rpc.Listen(dfnet.NetAddr{ @@ -112,7 +113,7 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte, time.Sleep(100 * time.Millisecond) // 2. setup a scheduler - pps := mock_scheduler.NewMockScheduler_ReportPieceResultClient(ctrl) + pps := schedulerv1mocks.NewMockScheduler_ReportPieceResultClient(ctrl) var ( wg = sync.WaitGroup{} backSourceSent = atomic.Bool{} @@ -120,7 +121,7 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte, wg.Add(1) pps.EXPECT().Send(gomock.Any()).AnyTimes().DoAndReturn( - func(pr *scheduler.PieceResult) error { + func(pr *schedulerv1.PieceResult) error { if pr.PieceInfo.PieceNum == 0 && pr.Success { if !backSourceSent.Load() { wg.Done() @@ -134,7 +135,7 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte, schedPeerPacket bool ) pps.EXPECT().Recv().AnyTimes().DoAndReturn( - func() (*scheduler.PeerPacket, error) { + func() (*schedulerv1.PeerPacket, error) { if len(opt.peerPacketDelay) > delayCount { if delay := opt.peerPacketDelay[delayCount]; delay > 0 { time.Sleep(delay) @@ -144,15 +145,15 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte, if schedPeerPacket { // send back source after piece 0 is done wg.Wait() - return nil, dferrors.New(base.Code_SchedNeedBackSource, "") + return nil, dferrors.New(commonv1.Code_SchedNeedBackSource, 
"") } schedPeerPacket = true - return &scheduler.PeerPacket{ - Code: base.Code_Success, + return &schedulerv1.PeerPacket{ + Code: commonv1.Code_Success, TaskId: opt.taskID, SrcPid: "127.0.0.1", ParallelCount: opt.pieceParallelCount, - MainPeer: &scheduler.PeerPacket_DestPeer{ + MainPeer: &schedulerv1.PeerPacket_DestPeer{ Ip: "127.0.0.1", RpcPort: port, PeerId: "peer-x", @@ -161,21 +162,21 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte, }, nil }) pps.EXPECT().CloseSend().AnyTimes() - sched := mock_scheduler_client.NewMockClient(ctrl) + sched := clientmocks.NewMockClient(ctrl) sched.EXPECT().RegisterPeerTask(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( - func(ctx context.Context, ptr *scheduler.PeerTaskRequest, opts ...grpc.CallOption) (*scheduler.RegisterResult, error) { - return &scheduler.RegisterResult{ + func(ctx context.Context, ptr *schedulerv1.PeerTaskRequest, opts ...grpc.CallOption) (*schedulerv1.RegisterResult, error) { + return &schedulerv1.RegisterResult{ TaskId: opt.taskID, - SizeScope: base.SizeScope_NORMAL, + SizeScope: commonv1.SizeScope_NORMAL, DirectPiece: nil, }, nil }) sched.EXPECT().ReportPieceResult(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( - func(ctx context.Context, ptr *scheduler.PeerTaskRequest, opts ...grpc.CallOption) (scheduler.Scheduler_ReportPieceResultClient, error) { + func(ctx context.Context, ptr *schedulerv1.PeerTaskRequest, opts ...grpc.CallOption) (schedulerv1.Scheduler_ReportPieceResultClient, error) { return pps, nil }) sched.EXPECT().ReportPeerResult(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( - func(ctx context.Context, pr *scheduler.PeerResult, opts ...grpc.CallOption) error { + func(ctx context.Context, pr *schedulerv1.PeerResult, opts ...grpc.CallOption) error { return nil }) tempDir, _ := os.MkdirTemp("", "d7y-test-*") @@ -250,7 +251,7 @@ func TestStreamPeerTask_BackSource_Partial_WithContentLength(t *testing.T) { } ptm := &peerTaskManager{ 
calculateDigest: true, - host: &scheduler.PeerHost{ + host: &schedulerv1.PeerHost{ Ip: "127.0.0.1", }, conductorLock: &sync.Mutex{}, @@ -262,13 +263,13 @@ func TestStreamPeerTask_BackSource_Partial_WithContentLength(t *testing.T) { ScheduleTimeout: util.Duration{Duration: 10 * time.Minute}, }, } - req := &scheduler.PeerTaskRequest{ + req := &schedulerv1.PeerTaskRequest{ Url: url, - UrlMeta: &base.UrlMeta{ + UrlMeta: &commonv1.UrlMeta{ Tag: "d7y-test", }, PeerId: peerID, - PeerHost: &scheduler.PeerHost{}, + PeerHost: &schedulerv1.PeerHost{}, } ctx := context.Background() pt, err := ptm.newStreamTask(ctx, req, nil) diff --git a/client/daemon/peer/piece_downloader.go b/client/daemon/peer/piece_downloader.go index 0a7ffb418..a50ef004a 100644 --- a/client/daemon/peer/piece_downloader.go +++ b/client/daemon/peer/piece_downloader.go @@ -29,15 +29,16 @@ import ( "google.golang.org/grpc/status" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/client/daemon/storage" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/digest" - "d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/pkg/source" ) type DownloadPieceRequest struct { - piece *base.PieceInfo + piece *commonv1.PieceInfo log *logger.SugaredLoggerOnWith storage storage.TaskStorageDriver TaskID string diff --git a/client/daemon/peer/piece_downloader_test.go b/client/daemon/peer/piece_downloader_test.go index 5d5caf001..d784cc45f 100644 --- a/client/daemon/peer/piece_downloader_test.go +++ b/client/daemon/peer/piece_downloader_test.go @@ -34,10 +34,11 @@ import ( testifyassert "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/client/daemon/test" "d7y.io/dragonfly/v2/client/util" logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source/clients/httpprotocol" ) @@ -138,13 +139,13 @@ func 
TestPieceDownloader_DownloadPiece(t *testing.T) { DstPid: "", DstAddr: addr.Host, CalcDigest: true, - piece: &base.PieceInfo{ + piece: &commonv1.PieceInfo{ PieceNum: 0, RangeStart: tt.rangeStart, RangeSize: tt.rangeSize, PieceMd5: digest, PieceOffset: tt.rangeStart, - PieceStyle: base.PieceStyle_PLAIN, + PieceStyle: commonv1.PieceStyle_PLAIN, }, log: logger.With("test", "test"), }) diff --git a/client/daemon/peer/piece_manager.go b/client/daemon/peer/piece_manager.go index 1ff48597c..06dc825ab 100644 --- a/client/daemon/peer/piece_manager.go +++ b/client/daemon/peer/piece_manager.go @@ -34,6 +34,11 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + commonv1 "d7y.io/api/pkg/apis/common/v1" + dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1" + errordetailsv1 "d7y.io/api/pkg/apis/errordetails/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/storage" clientutil "d7y.io/dragonfly/v2/client/util" @@ -41,17 +46,13 @@ import ( "d7y.io/dragonfly/v2/internal/util" "d7y.io/dragonfly/v2/pkg/digest" "d7y.io/dragonfly/v2/pkg/retry" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" - "d7y.io/dragonfly/v2/pkg/rpc/errordetails" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" "d7y.io/dragonfly/v2/pkg/source" ) type PieceManager interface { - DownloadSource(ctx context.Context, pt Task, request *scheduler.PeerTaskRequest, parsedRange *clientutil.Range) error + DownloadSource(ctx context.Context, pt Task, request *schedulerv1.PeerTaskRequest, parsedRange *clientutil.Range) error DownloadPiece(ctx context.Context, request *DownloadPieceRequest) (*DownloadPieceResult, error) - ImportFile(ctx context.Context, ptm storage.PeerTaskMetadata, tsd storage.TaskStorageDriver, req *dfdaemon.ImportTaskRequest) error + ImportFile(ctx context.Context, ptm storage.PeerTaskMetadata, tsd storage.TaskStorageDriver, req *dfdaemonv1.ImportTaskRequest) error Import(ctx context.Context, 
ptm storage.PeerTaskMetadata, tsd storage.TaskStorageDriver, contentLength int64, reader io.Reader) error } @@ -271,9 +272,9 @@ func (pm *pieceManager) processPieceFromSource(pt Task, return } -func (pm *pieceManager) DownloadSource(ctx context.Context, pt Task, peerTaskRequest *scheduler.PeerTaskRequest, parsedRange *clientutil.Range) error { +func (pm *pieceManager) DownloadSource(ctx context.Context, pt Task, peerTaskRequest *schedulerv1.PeerTaskRequest, parsedRange *clientutil.Range) error { if peerTaskRequest.UrlMeta == nil { - peerTaskRequest.UrlMeta = &base.UrlMeta{ + peerTaskRequest.UrlMeta = &commonv1.UrlMeta{ Header: map[string]string{}, } } else if peerTaskRequest.UrlMeta.Header == nil { @@ -362,9 +363,9 @@ singleDownload: hdr[k] = response.Header.Get(k) } } - srcErr := &errordetails.SourceError{ + srcErr := &errordetailsv1.SourceError{ Temporary: response.Temporary != nil && response.Temporary(), - Metadata: &base.ExtendAttribute{ + Metadata: &commonv1.ExtendAttribute{ Header: hdr, StatusCode: int32(response.StatusCode), Status: response.Status, @@ -422,7 +423,7 @@ singleDownload: return pm.downloadKnownLengthSource(ctx, pt, contentLength, pieceSize, reader, response, peerTaskRequest, parsedRange, metadata, supportConcurrent, targetContentLength) } -func (pm *pieceManager) downloadKnownLengthSource(ctx context.Context, pt Task, contentLength int64, pieceSize uint32, reader io.Reader, response *source.Response, peerTaskRequest *scheduler.PeerTaskRequest, parsedRange *clientutil.Range, metadata *source.Metadata, supportConcurrent bool, targetContentLength int64) error { +func (pm *pieceManager) downloadKnownLengthSource(ctx context.Context, pt Task, contentLength int64, pieceSize uint32, reader io.Reader, response *source.Response, peerTaskRequest *schedulerv1.PeerTaskRequest, parsedRange *clientutil.Range, metadata *source.Metadata, supportConcurrent bool, targetContentLength int64) error { log := pt.Log() maxPieceNum := 
util.ComputePieceCount(contentLength, pieceSize) pt.SetContentLength(contentLength) @@ -445,7 +446,7 @@ func (pm *pieceManager) downloadKnownLengthSource(ctx context.Context, pt Task, request := &DownloadPieceRequest{ TaskID: pt.GetTaskID(), PeerID: pt.GetPeerID(), - piece: &base.PieceInfo{ + piece: &commonv1.PieceInfo{ PieceNum: pieceNum, RangeStart: offset, RangeSize: uint32(result.Size), @@ -525,7 +526,7 @@ func (pm *pieceManager) downloadUnknownLengthSource(pt Task, pieceSize uint32, r request := &DownloadPieceRequest{ TaskID: pt.GetTaskID(), PeerID: pt.GetPeerID(), - piece: &base.PieceInfo{ + piece: &commonv1.PieceInfo{ PieceNum: pieceNum, RangeStart: offset, RangeSize: uint32(result.Size), @@ -617,7 +618,7 @@ func (pm *pieceManager) processPieceFromFile(ctx context.Context, ptm storage.Pe return n, nil } -func (pm *pieceManager) ImportFile(ctx context.Context, ptm storage.PeerTaskMetadata, tsd storage.TaskStorageDriver, req *dfdaemon.ImportTaskRequest) error { +func (pm *pieceManager) ImportFile(ctx context.Context, ptm storage.PeerTaskMetadata, tsd storage.TaskStorageDriver, req *dfdaemonv1.ImportTaskRequest) error { log := logger.With("function", "ImportFile", "URL", req.Url, "taskID", ptm.TaskID) // get file size and compute piece size and piece count stat, err := os.Stat(req.Path) @@ -747,7 +748,7 @@ func (pm *pieceManager) Import(ctx context.Context, ptm storage.PeerTaskMetadata return nil } -func (pm *pieceManager) concurrentDownloadSource(ctx context.Context, pt Task, peerTaskRequest *scheduler.PeerTaskRequest, parsedRange *clientutil.Range, metadata *source.Metadata, startPieceNum int32) error { +func (pm *pieceManager) concurrentDownloadSource(ctx context.Context, pt Task, peerTaskRequest *schedulerv1.PeerTaskRequest, parsedRange *clientutil.Range, metadata *source.Metadata, startPieceNum int32) error { // parsedRange is always exist pieceSize := pm.computePieceSize(parsedRange.Length) pieceCount := util.ComputePieceCount(parsedRange.Length, 
pieceSize) @@ -836,7 +837,7 @@ func (pm *pieceManager) concurrentDownloadSource(ctx context.Context, pt Task, p func (pm *pieceManager) downloadPieceFromSource(ctx context.Context, pt Task, log *logger.SugaredLoggerOnWith, - peerTaskRequest *scheduler.PeerTaskRequest, + peerTaskRequest *schedulerv1.PeerTaskRequest, pieceSize uint32, num int32, parsedRange *clientutil.Range, pieceCount int32, @@ -881,7 +882,7 @@ func (pm *pieceManager) downloadPieceFromSource(ctx context.Context, request := &DownloadPieceRequest{ TaskID: pt.GetTaskID(), PeerID: pt.GetPeerID(), - piece: &base.PieceInfo{ + piece: &commonv1.PieceInfo{ PieceNum: num, RangeStart: offset, RangeSize: uint32(result.Size), diff --git a/client/daemon/peer/piece_manager_test.go b/client/daemon/peer/piece_manager_test.go index d33dc40df..8f5a4c3e3 100644 --- a/client/daemon/peer/piece_manager_test.go +++ b/client/daemon/peer/piece_manager_test.go @@ -36,15 +36,16 @@ import ( "go.uber.org/atomic" "golang.org/x/time/rate" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/test" clientutil "d7y.io/dragonfly/v2/client/util" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/internal/util" - "d7y.io/dragonfly/v2/pkg/rpc/base" _ "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" "d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source/clients/httpprotocol" ) @@ -443,9 +444,9 @@ func TestPieceManager_DownloadSource(t *testing.T) { return tc.pieceSize } - request := &scheduler.PeerTaskRequest{ + request := &schedulerv1.PeerTaskRequest{ Url: ts.URL, - UrlMeta: &base.UrlMeta{ + UrlMeta: &commonv1.UrlMeta{ Digest: "", Range: "", Header: nil, diff --git a/client/daemon/proxy/proxy.go b/client/daemon/proxy/proxy.go index d008faae2..5a1d474e5 100644 --- a/client/daemon/proxy/proxy.go +++ 
b/client/daemon/proxy/proxy.go @@ -39,13 +39,14 @@ import ( "go.uber.org/atomic" "golang.org/x/sync/semaphore" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/metrics" "d7y.io/dragonfly/v2/client/daemon/peer" "d7y.io/dragonfly/v2/client/daemon/transport" logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" pkgstrings "d7y.io/dragonfly/v2/pkg/strings" ) @@ -85,7 +86,7 @@ type Proxy struct { peerTaskManager peer.TaskManager // peerHost is the peer host info - peerHost *scheduler.PeerHost + peerHost *schedulerv1.PeerHost // whiteList is the proxy white list whiteList []*config.WhiteList @@ -100,7 +101,7 @@ type Proxy struct { defaultTag string // defaultFilter is used for registering steam task - defaultPattern base.Pattern + defaultPattern commonv1.Pattern // tracer is used for telemetry tracer trace.Tracer @@ -116,8 +117,8 @@ type Proxy struct { // Option is a functional option for configuring the proxy type Option func(p *Proxy) *Proxy -// WithPeerHost sets the *scheduler.PeerHost -func WithPeerHost(peerHost *scheduler.PeerHost) Option { +// WithPeerHost sets the *schedulerv1.PeerHost +func WithPeerHost(peerHost *schedulerv1.PeerHost) Option { return func(p *Proxy) *Proxy { p.peerHost = peerHost return p @@ -226,7 +227,7 @@ func WithDefaultTag(t string) Option { } // WithDefaultPattern sets default pattern for downloading -func WithDefaultPattern(pattern base.Pattern) Option { +func WithDefaultPattern(pattern commonv1.Pattern) Option { return func(p *Proxy) *Proxy { p.defaultPattern = pattern return p diff --git a/client/daemon/proxy/proxy_manager.go b/client/daemon/proxy/proxy_manager.go index ea4f030f0..4528a3f51 100644 --- a/client/daemon/proxy/proxy_manager.go +++ b/client/daemon/proxy/proxy_manager.go @@ -32,11 +32,12 @@ import ( "github.com/spf13/viper" "gopkg.in/yaml.v3" + 
commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/peer" logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" ) type Manager interface { @@ -59,7 +60,7 @@ type proxyManager struct { var _ Manager = (*proxyManager)(nil) -func NewProxyManager(peerHost *scheduler.PeerHost, peerTaskManager peer.TaskManager, defaultPattern base.Pattern, proxyOption *config.ProxyOption) (Manager, error) { +func NewProxyManager(peerHost *schedulerv1.PeerHost, peerTaskManager peer.TaskManager, defaultPattern commonv1.Pattern, proxyOption *config.ProxyOption) (Manager, error) { // proxy is option, when nil, just disable it if proxyOption == nil { logger.Infof("proxy config is empty, disabled") diff --git a/client/daemon/rpcserver/rpcserver.go b/client/daemon/rpcserver/rpcserver.go index c5b092650..bdf728f10 100644 --- a/client/daemon/rpcserver/rpcserver.go +++ b/client/daemon/rpcserver/rpcserver.go @@ -35,6 +35,11 @@ import ( healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/status" + cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1" + commonv1 "d7y.io/api/pkg/apis/common/v1" + dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/peer" "d7y.io/dragonfly/v2/client/daemon/storage" @@ -43,11 +48,7 @@ import ( logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/net/http" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" - dfdaemongrpc "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" dfdaemonserver "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" "d7y.io/dragonfly/v2/pkg/safe" "d7y.io/dragonfly/v2/scheduler/resource" ) @@ -61,18 +62,18 @@ type Server interface { type server 
struct { util.KeepAlive - peerHost *scheduler.PeerHost + peerHost *schedulerv1.PeerHost peerTaskManager peer.TaskManager storageManager storage.Manager - defaultPattern base.Pattern + defaultPattern commonv1.Pattern downloadServer *grpc.Server peerServer *grpc.Server uploadAddr string } -func New(peerHost *scheduler.PeerHost, peerTaskManager peer.TaskManager, - storageManager storage.Manager, defaultPattern base.Pattern, +func New(peerHost *schedulerv1.PeerHost, peerTaskManager peer.TaskManager, + storageManager storage.Manager, defaultPattern commonv1.Pattern, downloadOpts []grpc.ServerOption, peerOpts []grpc.ServerOption) (Server, error) { s := &server{ KeepAlive: util.NewKeepAlive("rpc server"), @@ -92,7 +93,7 @@ func New(peerHost *scheduler.PeerHost, peerTaskManager peer.TaskManager, s.peerServer = dfdaemonserver.New(s, peerOpts...) healthpb.RegisterHealthServer(s.peerServer, health.NewServer()) - cdnsystem.RegisterSeederServer(s.peerServer, sd) + cdnsystemv1.RegisterSeederServer(s.peerServer, sd) return s, nil } @@ -110,13 +111,13 @@ func (s *server) Stop() { s.downloadServer.GracefulStop() } -func (s *server) GetPieceTasks(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error) { +func (s *server) GetPieceTasks(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) { s.Keep() p, err := s.storageManager.GetPieces(ctx, request) if err != nil { - code := base.Code_UnknownError + code := commonv1.Code_UnknownError if err == dferrors.ErrInvalidArgument { - code = base.Code_BadRequest + code = commonv1.Code_BadRequest } if err != storage.ErrTaskNotFound { logger.Errorf("get piece tasks error: %s, task id: %s, src peer: %s, dst peer: %s, piece num: %d, limit: %d", @@ -126,7 +127,7 @@ func (s *server) GetPieceTasks(ctx context.Context, request *base.PieceTaskReque // dst peer is not running task, ok := s.peerTaskManager.IsPeerTaskRunning(request.TaskId) if !ok { - code = base.Code_PeerTaskNotFound + code = 
commonv1.Code_PeerTaskNotFound logger.Errorf("get piece tasks error: target peer task not found, task id: %s, src peer: %s, dst peer: %s, piece num: %d, limit: %d", request.TaskId, request.SrcPid, request.DstPid, request.StartNum, request.Limit) return nil, dferrors.New(code, err.Error()) @@ -134,7 +135,7 @@ func (s *server) GetPieceTasks(ctx context.Context, request *base.PieceTaskReque if task.GetPeerID() != request.GetDstPid() { // there is only one running task in same time, redirect request to running peer task - r := base.PieceTaskRequest{ + r := commonv1.PieceTaskRequest{ TaskId: request.TaskId, SrcPid: request.SrcPid, DstPid: task.GetPeerID(), // replace to running task peer id @@ -148,7 +149,7 @@ func (s *server) GetPieceTasks(ctx context.Context, request *base.PieceTaskReque p.DstAddr = s.uploadAddr return p, nil } - code = base.Code_PeerTaskNotFound + code = commonv1.Code_PeerTaskNotFound logger.Errorf("get piece tasks error: target peer task and replaced peer task storage not found wit error: %s, task id: %s, src peer: %s, dst peer: %s, piece num: %d, limit: %d", err, request.TaskId, request.SrcPid, request.DstPid, request.StartNum, request.Limit) return nil, dferrors.New(code, err.Error()) @@ -160,7 +161,7 @@ func (s *server) GetPieceTasks(ctx context.Context, request *base.PieceTaskReque "task id: %s, src peer: %s, dst peer: %s, piece num: %d, limit: %d", request.TaskId, request.SrcPid, request.DstPid, request.StartNum, request.Limit) // dst peer is running, send empty result, src peer will retry later - return &base.PiecePacket{ + return &commonv1.PiecePacket{ TaskId: request.TaskId, DstPid: request.DstPid, DstAddr: s.uploadAddr, @@ -180,9 +181,9 @@ func (s *server) GetPieceTasks(ctx context.Context, request *base.PieceTaskReque // sendExistPieces will send as much as possible pieces func (s *server) sendExistPieces( log *logger.SugaredLoggerOnWith, - request *base.PieceTaskRequest, - sync dfdaemongrpc.Daemon_SyncPieceTasksServer, - get func(ctx 
context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error), + request *commonv1.PieceTaskRequest, + sync dfdaemonv1.Daemon_SyncPieceTasksServer, + get func(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error), sentMap map[int32]struct{}) (total int32, err error) { return sendExistPieces(sync.Context(), log, get, request, sync, sentMap, true) } @@ -190,14 +191,14 @@ func (s *server) sendExistPieces( // sendFirstPieceTasks will send as much as possible pieces, even if no available pieces func (s *server) sendFirstPieceTasks( log *logger.SugaredLoggerOnWith, - request *base.PieceTaskRequest, - sync dfdaemongrpc.Daemon_SyncPieceTasksServer, - get func(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error), + request *commonv1.PieceTaskRequest, + sync dfdaemonv1.Daemon_SyncPieceTasksServer, + get func(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error), sentMap map[int32]struct{}) (total int32, err error) { return sendExistPieces(sync.Context(), log, get, request, sync, sentMap, false) } -func (s *server) SyncPieceTasks(sync dfdaemongrpc.Daemon_SyncPieceTasksServer) error { +func (s *server) SyncPieceTasks(sync dfdaemonv1.Daemon_SyncPieceTasksServer) error { request, err := sync.Recv() if err != nil { logger.Errorf("receive first sync piece tasks request error: %s", err.Error()) @@ -212,7 +213,7 @@ func (s *server) SyncPieceTasks(sync dfdaemongrpc.Daemon_SyncPieceTasksServer) e attributeSent bool ) - getPieces := func(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error) { + getPieces := func(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) { p, e := s.GetPieceTasks(ctx, request) if e != nil { return nil, e @@ -305,15 +306,15 @@ func (s *server) CheckHealth(context.Context) error { } func (s *server) Download(ctx context.Context, - req *dfdaemongrpc.DownRequest, results chan<- 
*dfdaemongrpc.DownResult) error { + req *dfdaemonv1.DownRequest, results chan<- *dfdaemonv1.DownResult) error { s.Keep() return s.doDownload(ctx, req, results, "") } -func (s *server) doDownload(ctx context.Context, req *dfdaemongrpc.DownRequest, - results chan<- *dfdaemongrpc.DownResult, peerID string) error { +func (s *server) doDownload(ctx context.Context, req *dfdaemonv1.DownRequest, + results chan<- *dfdaemonv1.DownResult, peerID string) error { if req.UrlMeta == nil { - req.UrlMeta = &base.UrlMeta{} + req.UrlMeta = &commonv1.UrlMeta{} } // init peer task request, peer uses different peer id to generate every request @@ -322,7 +323,7 @@ func (s *server) doDownload(ctx context.Context, req *dfdaemongrpc.DownRequest, peerID = idgen.PeerID(s.peerHost.Ip) } peerTask := &peer.FileTaskRequest{ - PeerTaskRequest: scheduler.PeerTaskRequest{ + PeerTaskRequest: schedulerv1.PeerTaskRequest{ Url: req.Url, UrlMeta: req.UrlMeta, PeerId: peerID, @@ -350,10 +351,10 @@ func (s *server) doDownload(ctx context.Context, req *dfdaemongrpc.DownRequest, peerTaskProgress, tiny, err := s.peerTaskManager.StartFileTask(ctx, peerTask) if err != nil { - return dferrors.New(base.Code_UnknownError, fmt.Sprintf("%s", err)) + return dferrors.New(commonv1.Code_UnknownError, fmt.Sprintf("%s", err)) } if tiny != nil { - results <- &dfdaemongrpc.DownResult{ + results <- &dfdaemonv1.DownResult{ TaskId: tiny.TaskID, PeerId: tiny.PeerID, CompletedLength: uint64(len(tiny.Content)), @@ -375,13 +376,13 @@ func (s *server) doDownload(ctx context.Context, req *dfdaemongrpc.DownRequest, if !ok { err = errors.New("progress closed unexpected") log.Errorf(err.Error()) - return dferrors.New(base.Code_UnknownError, err.Error()) + return dferrors.New(commonv1.Code_UnknownError, err.Error()) } if !p.State.Success { log.Errorf("task %s/%s failed: %d/%s", p.PeerID, p.TaskID, p.State.Code, p.State.Msg) return dferrors.New(p.State.Code, p.State.Msg) } - results <- &dfdaemongrpc.DownResult{ + results <- 
&dfdaemonv1.DownResult{ TaskId: p.TaskID, PeerId: p.PeerID, CompletedLength: uint64(p.CompletedLength), @@ -401,7 +402,7 @@ func (s *server) doDownload(ctx context.Context, req *dfdaemongrpc.DownRequest, return nil } case <-ctx.Done(): - results <- &dfdaemongrpc.DownResult{ + results <- &dfdaemonv1.DownResult{ CompletedLength: 0, Done: true, } @@ -411,7 +412,7 @@ func (s *server) doDownload(ctx context.Context, req *dfdaemongrpc.DownRequest, } } -func (s *server) StatTask(ctx context.Context, req *dfdaemongrpc.StatTaskRequest) error { +func (s *server) StatTask(ctx context.Context, req *dfdaemonv1.StatTaskRequest) error { s.Keep() taskID := idgen.TaskID(req.Url, req.UrlMeta) log := logger.With("function", "StatTask", "URL", req.Url, "Tag", req.UrlMeta.Tag, "taskID", taskID, "LocalOnly", req.LocalOnly) @@ -426,7 +427,7 @@ func (s *server) StatTask(ctx context.Context, req *dfdaemongrpc.StatTaskRequest if req.LocalOnly { msg := "task not found in local cache" log.Info(msg) - return dferrors.New(base.Code_PeerTaskNotFound, msg) + return dferrors.New(commonv1.Code_PeerTaskNotFound, msg) } // Check scheduler if other peers hold the task @@ -440,14 +441,14 @@ func (s *server) StatTask(ctx context.Context, req *dfdaemongrpc.StatTaskRequest } msg := fmt.Sprintf("task found but not available for download, state %s, has available peer %t", task.State, task.HasAvailablePeer) log.Info(msg) - return dferrors.New(base.Code_PeerTaskNotFound, msg) + return dferrors.New(commonv1.Code_PeerTaskNotFound, msg) } func (s *server) isTaskCompleted(taskID string) bool { return s.storageManager.FindCompletedTask(taskID) != nil } -func (s *server) ImportTask(ctx context.Context, req *dfdaemongrpc.ImportTaskRequest) error { +func (s *server) ImportTask(ctx context.Context, req *dfdaemonv1.ImportTaskRequest) error { s.Keep() peerID := idgen.PeerID(s.peerHost.Ip) taskID := idgen.TaskID(req.Url, req.UrlMeta) @@ -509,7 +510,7 @@ func (s *server) ImportTask(ctx context.Context, req 
*dfdaemongrpc.ImportTaskReq return nil } -func (s *server) ExportTask(ctx context.Context, req *dfdaemongrpc.ExportTaskRequest) error { +func (s *server) ExportTask(ctx context.Context, req *dfdaemonv1.ExportTaskRequest) error { s.Keep() taskID := idgen.TaskID(req.Url, req.UrlMeta) log := logger.With("function", "ExportTask", "URL", req.Url, "Tag", req.UrlMeta.Tag, "taskID", taskID, "destination", req.Output) @@ -521,7 +522,7 @@ func (s *server) ExportTask(ctx context.Context, req *dfdaemongrpc.ExportTaskReq if req.LocalOnly { msg := fmt.Sprintf("task not found in local storage") log.Info(msg) - return dferrors.New(base.Code_PeerTaskNotFound, msg) + return dferrors.New(commonv1.Code_PeerTaskNotFound, msg) } log.Info("task not found, try from peers") return s.exportFromPeers(ctx, log, req) @@ -534,7 +535,7 @@ func (s *server) ExportTask(ctx context.Context, req *dfdaemongrpc.ExportTaskReq return nil } -func (s *server) exportFromLocal(ctx context.Context, req *dfdaemongrpc.ExportTaskRequest, peerID string) error { +func (s *server) exportFromLocal(ctx context.Context, req *dfdaemonv1.ExportTaskRequest, peerID string) error { return s.storageManager.Store(ctx, &storage.StoreRequest{ CommonTaskRequest: storage.CommonTaskRequest{ PeerID: peerID, @@ -545,13 +546,13 @@ func (s *server) exportFromLocal(ctx context.Context, req *dfdaemongrpc.ExportTa }) } -func (s *server) exportFromPeers(ctx context.Context, log *logger.SugaredLoggerOnWith, req *dfdaemongrpc.ExportTaskRequest) error { +func (s *server) exportFromPeers(ctx context.Context, log *logger.SugaredLoggerOnWith, req *dfdaemonv1.ExportTaskRequest) error { peerID := idgen.PeerID(s.peerHost.Ip) taskID := idgen.TaskID(req.Url, req.UrlMeta) task, err := s.peerTaskManager.StatTask(ctx, taskID) if err != nil { - if dferrors.CheckError(err, base.Code_PeerTaskNotFound) { + if dferrors.CheckError(err, commonv1.Code_PeerTaskNotFound) { log.Info("task not found in P2P network") } else { msg := fmt.Sprintf("failed to StatTask 
from peers: %s", err) @@ -562,18 +563,18 @@ func (s *server) exportFromPeers(ctx context.Context, log *logger.SugaredLoggerO if task.State != resource.TaskStateSucceeded || !task.HasAvailablePeer { msg := fmt.Sprintf("task found but not available for download, state %s, has available peer %t", task.State, task.HasAvailablePeer) log.Info(msg) - return dferrors.New(base.Code_PeerTaskNotFound, msg) + return dferrors.New(commonv1.Code_PeerTaskNotFound, msg) } // Task exists in peers var ( start = time.Now() - drc = make(chan *dfdaemongrpc.DownResult, 1) + drc = make(chan *dfdaemonv1.DownResult, 1) errChan = make(chan error, 3) - result *dfdaemongrpc.DownResult + result *dfdaemonv1.DownResult downError error ) - downRequest := &dfdaemongrpc.DownRequest{ + downRequest := &dfdaemonv1.DownRequest{ Url: req.Url, Output: req.Output, Timeout: req.Timeout, @@ -609,7 +610,7 @@ func (s *server) exportFromPeers(ctx context.Context, log *logger.SugaredLoggerO return nil } -func call(ctx context.Context, peerID string, drc chan *dfdaemongrpc.DownResult, s *server, req *dfdaemongrpc.DownRequest, errChan chan error) { +func call(ctx context.Context, peerID string, drc chan *dfdaemonv1.DownResult, s *server, req *dfdaemonv1.DownRequest, errChan chan error) { err := safe.Call(func() { if err := s.doDownload(ctx, req, drc, peerID); err != nil { errChan <- err @@ -621,7 +622,7 @@ func call(ctx context.Context, peerID string, drc chan *dfdaemongrpc.DownResult, } } -func (s *server) DeleteTask(ctx context.Context, req *dfdaemongrpc.DeleteTaskRequest) error { +func (s *server) DeleteTask(ctx context.Context, req *dfdaemonv1.DeleteTaskRequest) error { s.Keep() taskID := idgen.TaskID(req.Url, req.UrlMeta) log := logger.With("function", "DeleteTask", "URL", req.Url, "Tag", req.UrlMeta.Tag, "taskID", taskID) diff --git a/client/daemon/rpcserver/rpcserver_test.go b/client/daemon/rpcserver/rpcserver_test.go index 1dc936907..532038aa7 100644 --- a/client/daemon/rpcserver/rpcserver_test.go +++ 
b/client/daemon/rpcserver/rpcserver_test.go @@ -30,6 +30,10 @@ import ( "github.com/phayes/freeport" testifyassert "github.com/stretchr/testify/assert" + commonv1 "d7y.io/api/pkg/apis/common/v1" + dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/daemon/peer" "d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/storage/mocks" @@ -37,11 +41,8 @@ import ( "d7y.io/dragonfly/v2/pkg/dfnet" "d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/net/ip" - "d7y.io/dragonfly/v2/pkg/rpc/base" - dfdaemongrpc "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" dfclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client" dfdaemonserver "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" ) func TestMain(m *testing.M) { @@ -77,17 +78,17 @@ func Test_ServeDownload(t *testing.T) { }) m := &server{ KeepAlive: util.NewKeepAlive("test"), - peerHost: &scheduler.PeerHost{}, + peerHost: &schedulerv1.PeerHost{}, peerTaskManager: mockPeerTaskManager, } m.downloadServer = dfdaemonserver.New(m) _, client := setupPeerServerAndClient(t, m, assert, m.ServeDownload) - request := &dfdaemongrpc.DownRequest{ + request := &dfdaemonv1.DownRequest{ Uuid: uuid.Generate().String(), Url: "http://localhost/test", Output: "./testdata/file1", DisableBackSource: false, - UrlMeta: &base.UrlMeta{ + UrlMeta: &commonv1.UrlMeta{ Tag: "unit test", }, Pattern: "p2p", @@ -97,8 +98,8 @@ func Test_ServeDownload(t *testing.T) { assert.Nil(err, "client download grpc call should be ok") var ( - lastResult *dfdaemongrpc.DownResult - curResult *dfdaemongrpc.DownResult + lastResult *dfdaemonv1.DownResult + curResult *dfdaemonv1.DownResult ) for { curResult, err = down.Recv() @@ -119,22 +120,22 @@ func Test_ServePeer(t *testing.T) { var maxPieceNum uint32 = 10 mockStorageManger := mocks.NewMockManager(ctrl) - mockStorageManger.EXPECT().GetPieces(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(func(ctx 
context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { + mockStorageManger.EXPECT().GetPieces(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(func(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) { var ( - pieces []*base.PieceInfo + pieces []*commonv1.PieceInfo pieceSize = uint32(1024) ) for i := req.StartNum; i < req.Limit+req.StartNum && i < maxPieceNum; i++ { - pieces = append(pieces, &base.PieceInfo{ + pieces = append(pieces, &commonv1.PieceInfo{ PieceNum: int32(i), RangeStart: uint64(i * pieceSize), RangeSize: pieceSize, PieceMd5: "", PieceOffset: uint64(i * pieceSize), - PieceStyle: base.PieceStyle_PLAIN, + PieceStyle: commonv1.PieceStyle_PLAIN, }) } - return &base.PiecePacket{ + return &commonv1.PiecePacket{ TaskId: "", DstPid: "", DstAddr: "", @@ -146,7 +147,7 @@ func Test_ServePeer(t *testing.T) { }) s := &server{ KeepAlive: util.NewKeepAlive("test"), - peerHost: &scheduler.PeerHost{}, + peerHost: &schedulerv1.PeerHost{}, storageManager: mockStorageManger, } s.peerServer = dfdaemonserver.New(s) @@ -154,12 +155,12 @@ func Test_ServePeer(t *testing.T) { defer s.peerServer.GracefulStop() var tests = []struct { - request *base.PieceTaskRequest + request *commonv1.PieceTaskRequest responsePieceSize int }{ { - request: &base.PieceTaskRequest{ - TaskId: idgen.TaskID("http://www.test.com", &base.UrlMeta{}), + request: &commonv1.PieceTaskRequest{ + TaskId: idgen.TaskID("http://www.test.com", &commonv1.UrlMeta{}), SrcPid: idgen.PeerID(ip.IPv4), DstPid: idgen.PeerID(ip.IPv4), StartNum: 0, @@ -169,8 +170,8 @@ func Test_ServePeer(t *testing.T) { responsePieceSize: 1, }, { - request: &base.PieceTaskRequest{ - TaskId: idgen.TaskID("http://www.test.com", &base.UrlMeta{}), + request: &commonv1.PieceTaskRequest{ + TaskId: idgen.TaskID("http://www.test.com", &commonv1.UrlMeta{}), SrcPid: idgen.PeerID(ip.IPv4), DstPid: idgen.PeerID(ip.IPv4), StartNum: 0, @@ -180,8 +181,8 @@ func Test_ServePeer(t *testing.T) { 
responsePieceSize: 4, }, { - request: &base.PieceTaskRequest{ - TaskId: idgen.TaskID("http://www.test.com", &base.UrlMeta{}), + request: &commonv1.PieceTaskRequest{ + TaskId: idgen.TaskID("http://www.test.com", &commonv1.UrlMeta{}), SrcPid: idgen.PeerID(ip.IPv4), DstPid: idgen.PeerID(ip.IPv4), StartNum: 8, @@ -191,8 +192,8 @@ func Test_ServePeer(t *testing.T) { responsePieceSize: 1, }, { - request: &base.PieceTaskRequest{ - TaskId: idgen.TaskID("http://www.test.com", &base.UrlMeta{}), + request: &commonv1.PieceTaskRequest{ + TaskId: idgen.TaskID("http://www.test.com", &commonv1.UrlMeta{}), SrcPid: idgen.PeerID(ip.IPv4), DstPid: idgen.PeerID(ip.IPv4), StartNum: 8, @@ -373,11 +374,11 @@ func Test_SyncPieceTasks(t *testing.T) { } var ( - totalPieces []*base.PieceInfo + totalPieces []*commonv1.PieceInfo lock sync.Mutex ) - var addedPieces = make(map[uint32]*base.PieceInfo) + var addedPieces = make(map[uint32]*commonv1.PieceInfo) for _, p := range tc.existPieces { if p.end == 0 { p.end = p.start @@ -386,12 +387,12 @@ func Test_SyncPieceTasks(t *testing.T) { if _, ok := addedPieces[uint32(i)]; ok { continue } - piece := &base.PieceInfo{ + piece := &commonv1.PieceInfo{ PieceNum: int32(i), RangeStart: uint64(i) * uint64(pieceSize), RangeSize: pieceSize, PieceOffset: uint64(i) * uint64(pieceSize), - PieceStyle: base.PieceStyle_PLAIN, + PieceStyle: commonv1.PieceStyle_PLAIN, } totalPieces = append(totalPieces, piece) addedPieces[uint32(i)] = piece @@ -400,8 +401,8 @@ func Test_SyncPieceTasks(t *testing.T) { mockStorageManger.EXPECT().GetPieces(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( - func(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { - var pieces []*base.PieceInfo + func(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) { + var pieces []*commonv1.PieceInfo lock.Lock() for i := req.StartNum; i < tc.totalPieces; i++ { if piece, ok := addedPieces[i]; ok { @@ -411,7 +412,7 @@ func 
Test_SyncPieceTasks(t *testing.T) { } } lock.Unlock() - return &base.PiecePacket{ + return &commonv1.PiecePacket{ TaskId: req.TaskId, DstPid: req.DstPid, DstAddr: "", @@ -423,8 +424,8 @@ func Test_SyncPieceTasks(t *testing.T) { }) mockStorageManger.EXPECT().GetExtendAttribute(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( - func(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { - return &base.ExtendAttribute{ + func(ctx context.Context, req *storage.PeerTaskMetadata) (*commonv1.ExtendAttribute, error) { + return &commonv1.ExtendAttribute{ Header: map[string]string{ "Test": "test", }, @@ -432,7 +433,7 @@ func Test_SyncPieceTasks(t *testing.T) { }) mockTaskManager := peer.NewMockTaskManager(ctrl) mockTaskManager.EXPECT().Subscribe(gomock.Any()).AnyTimes().DoAndReturn( - func(request *base.PieceTaskRequest) (*peer.SubscribeResponse, bool) { + func(request *commonv1.PieceTaskRequest) (*peer.SubscribeResponse, bool) { ch := make(chan *peer.PieceInfo) success := make(chan struct{}) fail := make(chan struct{}) @@ -447,12 +448,12 @@ func Test_SyncPieceTasks(t *testing.T) { if _, ok := addedPieces[uint32(j)]; ok { continue } - piece := &base.PieceInfo{ + piece := &commonv1.PieceInfo{ PieceNum: int32(j), RangeStart: uint64(j) * uint64(pieceSize), RangeSize: pieceSize, PieceOffset: uint64(j) * uint64(pieceSize), - PieceStyle: base.PieceStyle_PLAIN, + PieceStyle: commonv1.PieceStyle_PLAIN, } totalPieces = append(totalPieces, piece) addedPieces[uint32(j)] = piece @@ -483,7 +484,7 @@ func Test_SyncPieceTasks(t *testing.T) { s := &server{ KeepAlive: util.NewKeepAlive("test"), - peerHost: &scheduler.PeerHost{}, + peerHost: &schedulerv1.PeerHost{}, storageManager: mockStorageManger, peerTaskManager: mockTaskManager, } @@ -496,7 +497,7 @@ func Test_SyncPieceTasks(t *testing.T) { Type: dfnet.TCP, Addr: fmt.Sprintf("127.0.0.1:%d", port), }, - &base.PieceTaskRequest{ + &commonv1.PieceTaskRequest{ TaskId: tc.name, SrcPid: idgen.PeerID(ip.IPv4), 
DstPid: idgen.PeerID(ip.IPv4), @@ -515,7 +516,7 @@ func Test_SyncPieceTasks(t *testing.T) { } else { go func() { for _, n := range tc.requestPieces { - request := &base.PieceTaskRequest{ + request := &commonv1.PieceTaskRequest{ TaskId: tc.name, SrcPid: idgen.PeerID(ip.IPv4), DstPid: idgen.PeerID(ip.IPv4), diff --git a/client/daemon/rpcserver/seeder.go b/client/daemon/rpcserver/seeder.go index 3b01fc86c..7a18ded6c 100644 --- a/client/daemon/rpcserver/seeder.go +++ b/client/daemon/rpcserver/seeder.go @@ -25,6 +25,10 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/metrics" "d7y.io/dragonfly/v2/client/daemon/peer" @@ -32,36 +36,33 @@ import ( logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/net/http" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/base/common" - "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + "d7y.io/dragonfly/v2/pkg/rpc/common" ) type seeder struct { server *server } -func (s *seeder) GetPieceTasks(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error) { +func (s *seeder) GetPieceTasks(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) { return s.server.GetPieceTasks(ctx, request) } -func (s *seeder) SyncPieceTasks(tasksServer cdnsystem.Seeder_SyncPieceTasksServer) error { +func (s *seeder) SyncPieceTasks(tasksServer cdnsystemv1.Seeder_SyncPieceTasksServer) error { return s.server.SyncPieceTasks(tasksServer) } -func (s *seeder) ObtainSeeds(seedRequest *cdnsystem.SeedRequest, seedsServer cdnsystem.Seeder_ObtainSeedsServer) error { +func (s *seeder) ObtainSeeds(seedRequest *cdnsystemv1.SeedRequest, seedsServer cdnsystemv1.Seeder_ObtainSeedsServer) 
error { metrics.SeedPeerConcurrentDownloadGauge.Inc() defer metrics.SeedPeerConcurrentDownloadGauge.Dec() metrics.SeedPeerDownloadCount.Add(1) s.server.Keep() if seedRequest.UrlMeta == nil { - seedRequest.UrlMeta = &base.UrlMeta{} + seedRequest.UrlMeta = &commonv1.UrlMeta{} } req := peer.SeedTaskRequest{ - PeerTaskRequest: scheduler.PeerTaskRequest{ + PeerTaskRequest: schedulerv1.PeerTaskRequest{ Url: seedRequest.Url, UrlMeta: seedRequest.UrlMeta, PeerId: idgen.SeedPeerID(s.server.peerHost.Ip), // when reuse peer task, peer id will be replaced. @@ -114,10 +115,10 @@ func (s *seeder) ObtainSeeds(seedRequest *cdnsystem.SeedRequest, seedsServer cdn log.Infof("start seed task") err = seedsServer.Send( - &cdnsystem.PieceSeed{ + &cdnsystemv1.PieceSeed{ PeerId: resp.PeerID, HostId: req.PeerHost.Id, - PieceInfo: &base.PieceInfo{ + PieceInfo: &commonv1.PieceInfo{ PieceNum: common.BeginOfPiece, }, Done: false, @@ -149,7 +150,7 @@ func (s *seeder) ObtainSeeds(seedRequest *cdnsystem.SeedRequest, seedsServer cdn type seedSynchronizer struct { *peer.SeedTaskResponse *logger.SugaredLoggerOnWith - seedsServer cdnsystem.Seeder_ObtainSeedsServer + seedsServer cdnsystemv1.Seeder_ObtainSeedsServer seedTaskRequest *peer.SeedTaskRequest startNanoSecond int64 attributeSent bool @@ -210,7 +211,7 @@ func (s *seedSynchronizer) sendPieceSeeds(reuse bool) (err error) { func (s *seedSynchronizer) sendRemindingPieceSeeds(desired int32, reuse bool) error { for { pp, err := s.Storage.GetPieces(s.Context, - &base.PieceTaskRequest{ + &commonv1.PieceTaskRequest{ TaskId: s.TaskID, StartNum: uint32(desired), Limit: 16, @@ -274,7 +275,7 @@ func (s *seedSynchronizer) sendOrderedPieceSeeds(desired, orderedNum int32, fini var contentLength int64 = -1 for ; cur <= orderedNum; cur++ { pp, err := s.Storage.GetPieces(s.Context, - &base.PieceTaskRequest{ + &commonv1.PieceTaskRequest{ TaskId: s.TaskID, StartNum: uint32(cur), Limit: 1, @@ -314,8 +315,8 @@ func (s *seedSynchronizer) sendOrderedPieceSeeds(desired, 
orderedNum int32, fini return contentLength, cur, nil } -func (s *seedSynchronizer) compositePieceSeed(pp *base.PiecePacket, piece *base.PieceInfo) cdnsystem.PieceSeed { - return cdnsystem.PieceSeed{ +func (s *seedSynchronizer) compositePieceSeed(pp *commonv1.PiecePacket, piece *commonv1.PieceInfo) cdnsystemv1.PieceSeed { + return cdnsystemv1.PieceSeed{ PeerId: s.seedTaskRequest.PeerId, HostId: s.seedTaskRequest.PeerHost.Id, PieceInfo: piece, diff --git a/client/daemon/rpcserver/seeder_test.go b/client/daemon/rpcserver/seeder_test.go index cfe6e086d..b1d9959af 100644 --- a/client/daemon/rpcserver/seeder_test.go +++ b/client/daemon/rpcserver/seeder_test.go @@ -30,18 +30,19 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/trace" + cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/peer" "d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/storage/mocks" "d7y.io/dragonfly/v2/client/util" "d7y.io/dragonfly/v2/pkg/dfnet" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/base/common" - "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" - cdnclient "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client" + "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client" + "d7y.io/dragonfly/v2/pkg/rpc/common" dfdaemonserver "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" ) func Test_ObtainSeeds(t *testing.T) { @@ -196,11 +197,11 @@ func Test_ObtainSeeds(t *testing.T) { } var ( - totalPieces []*base.PieceInfo + totalPieces []*commonv1.PieceInfo lock sync.Mutex ) - var addedPieces = make(map[uint32]*base.PieceInfo) + var addedPieces = make(map[uint32]*commonv1.PieceInfo) for _, p := range tc.existPieces { if p.end == 0 { p.end = p.start @@ -209,12 +210,12 @@ func Test_ObtainSeeds(t *testing.T) { if _, ok := addedPieces[uint32(i)]; ok { continue } - piece := 
&base.PieceInfo{ + piece := &commonv1.PieceInfo{ PieceNum: int32(i), RangeStart: uint64(i) * uint64(pieceSize), RangeSize: pieceSize, PieceOffset: uint64(i) * uint64(pieceSize), - PieceStyle: base.PieceStyle_PLAIN, + PieceStyle: commonv1.PieceStyle_PLAIN, } totalPieces = append(totalPieces, piece) addedPieces[uint32(i)] = piece @@ -223,8 +224,8 @@ func Test_ObtainSeeds(t *testing.T) { mockStorageManger.EXPECT().GetPieces(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( - func(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { - var pieces []*base.PieceInfo + func(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) { + var pieces []*commonv1.PieceInfo lock.Lock() for i := req.StartNum; i < tc.totalPieces; i++ { if piece, ok := addedPieces[i]; ok { @@ -234,7 +235,7 @@ func Test_ObtainSeeds(t *testing.T) { } } lock.Unlock() - return &base.PiecePacket{ + return &commonv1.PiecePacket{ TaskId: req.TaskId, DstPid: req.DstPid, DstAddr: "", @@ -246,8 +247,8 @@ func Test_ObtainSeeds(t *testing.T) { }) mockStorageManger.EXPECT().GetExtendAttribute(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( - func(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { - return &base.ExtendAttribute{ + func(ctx context.Context, req *storage.PeerTaskMetadata) (*commonv1.ExtendAttribute, error) { + return &commonv1.ExtendAttribute{ Header: map[string]string{ "Test": "test", }, @@ -270,12 +271,12 @@ func Test_ObtainSeeds(t *testing.T) { if _, ok := addedPieces[uint32(j)]; ok { continue } - piece := &base.PieceInfo{ + piece := &commonv1.PieceInfo{ PieceNum: int32(j), RangeStart: uint64(j) * uint64(pieceSize), RangeSize: pieceSize, PieceOffset: uint64(j) * uint64(pieceSize), - PieceStyle: base.PieceStyle_PLAIN, + PieceStyle: commonv1.PieceStyle_PLAIN, } totalPieces = append(totalPieces, piece) addedPieces[uint32(j)] = piece @@ -313,7 +314,7 @@ func Test_ObtainSeeds(t *testing.T) { s := &server{ 
KeepAlive: util.NewKeepAlive("test"), - peerHost: &scheduler.PeerHost{}, + peerHost: &schedulerv1.PeerHost{}, storageManager: mockStorageManger, peerTaskManager: mockTaskManager, } @@ -323,7 +324,7 @@ func Test_ObtainSeeds(t *testing.T) { pps, err := client.ObtainSeeds( context.Background(), - &cdnsystem.SeedRequest{ + &cdnsystemv1.SeedRequest{ TaskId: "fake-task-id", Url: "http://localhost/path/to/file", UrlMeta: nil, @@ -361,9 +362,9 @@ func Test_ObtainSeeds(t *testing.T) { } } -func setupSeederServerAndClient(t *testing.T, srv *server, sd *seeder, assert *testifyassert.Assertions, serveFunc func(listener net.Listener) error) (int, cdnclient.CdnClient) { +func setupSeederServerAndClient(t *testing.T, srv *server, sd *seeder, assert *testifyassert.Assertions, serveFunc func(listener net.Listener) error) (int, client.CdnClient) { srv.peerServer = dfdaemonserver.New(srv) - cdnsystem.RegisterSeederServer(srv.peerServer, sd) + cdnsystemv1.RegisterSeederServer(srv.peerServer, sd) port, err := freeport.GetFreePort() if err != nil { @@ -378,7 +379,7 @@ func setupSeederServerAndClient(t *testing.T, srv *server, sd *seeder, assert *t } }() - client := cdnclient.GetClientByAddr([]dfnet.NetAddr{ + client := client.GetClientByAddr([]dfnet.NetAddr{ { Type: dfnet.TCP, Addr: fmt.Sprintf(":%d", port), diff --git a/client/daemon/rpcserver/subscriber.go b/client/daemon/rpcserver/subscriber.go index 28f41e22e..7dfdabbed 100644 --- a/client/daemon/rpcserver/subscriber.go +++ b/client/daemon/rpcserver/subscriber.go @@ -25,19 +25,20 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + commonv1 "d7y.io/api/pkg/apis/common/v1" + dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1" + "d7y.io/dragonfly/v2/client/daemon/peer" "d7y.io/dragonfly/v2/internal/dferrors" logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" ) type subscriber struct { sync.Mutex // lock for sent map and grpc Send 
*logger.SugaredLoggerOnWith *peer.SubscribeResponse - sync dfdaemon.Daemon_SyncPieceTasksServer - request *base.PieceTaskRequest + sync dfdaemonv1.Daemon_SyncPieceTasksServer + request *commonv1.PieceTaskRequest skipPieceCount uint32 totalPieces int32 sentMap map[int32]struct{} @@ -46,7 +47,7 @@ type subscriber struct { attributeSent *atomic.Bool } -func (s *subscriber) getPieces(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error) { +func (s *subscriber) getPieces(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) { p, err := s.Storage.GetPieces(ctx, request) if err != nil { return nil, err @@ -67,15 +68,15 @@ func (s *subscriber) getPieces(ctx context.Context, request *base.PieceTaskReque func sendExistPieces( ctx context.Context, log *logger.SugaredLoggerOnWith, - get func(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error), - request *base.PieceTaskRequest, - sync dfdaemon.Daemon_SyncPieceTasksServer, + get func(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error), + request *commonv1.PieceTaskRequest, + sync dfdaemonv1.Daemon_SyncPieceTasksServer, sentMap map[int32]struct{}, skipSendZeroPiece bool) (total int32, err error) { if request.Limit <= 0 { request.Limit = 16 } - var pp *base.PiecePacket + var pp *commonv1.PiecePacket for { pp, err = get(ctx, request) if err != nil { @@ -225,7 +226,7 @@ loop: s.Unlock() msg := "peer task success, but can not send all pieces" s.Errorf(msg) - return dferrors.Newf(base.Code_ClientError, msg) + return dferrors.Newf(commonv1.Code_ClientError, msg) } s.Unlock() break loop diff --git a/client/daemon/storage/local_storage.go b/client/daemon/storage/local_storage.go index 31b98e51f..f37e66a59 100644 --- a/client/daemon/storage/local_storage.go +++ b/client/daemon/storage/local_storage.go @@ -30,11 +30,12 @@ import ( "go.uber.org/atomic" + commonv1 "d7y.io/api/pkg/apis/common/v1" + clientutil 
"d7y.io/dragonfly/v2/client/util" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/internal/util" "d7y.io/dragonfly/v2/pkg/digest" - "d7y.io/dragonfly/v2/pkg/rpc/base" ) type localTaskStore struct { @@ -396,7 +397,7 @@ func (t *localTaskStore) Store(ctx context.Context, req *StoreRequest) error { return err } -func (t *localTaskStore) GetPieces(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { +func (t *localTaskStore) GetPieces(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) { if req == nil { return nil, ErrBadRequest } @@ -408,7 +409,7 @@ func (t *localTaskStore) GetPieces(ctx context.Context, req *base.PieceTaskReque t.RLock() defer t.RUnlock() t.touch() - piecePacket := &base.PiecePacket{ + piecePacket := &commonv1.PiecePacket{ TaskId: req.TaskId, DstPid: t.PeerID, TotalPiece: t.TotalPieces, @@ -425,7 +426,7 @@ func (t *localTaskStore) GetPieces(ctx context.Context, req *base.PieceTaskReque } if piece, ok := t.Pieces[num]; ok { piecePacket.PieceInfos = append(piecePacket.PieceInfos, - &base.PieceInfo{ + &commonv1.PieceInfo{ PieceNum: piece.Num, RangeStart: uint64(piece.Range.Start), RangeSize: uint32(piece.Range.Length), @@ -449,7 +450,7 @@ func (t *localTaskStore) GetTotalPieces(ctx context.Context, req *PeerTaskMetada return t.TotalPieces, nil } -func (t *localTaskStore) GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*base.ExtendAttribute, error) { +func (t *localTaskStore) GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*commonv1.ExtendAttribute, error) { if t.invalid.Load() { t.Errorf("invalid digest, refuse to get total pieces") return nil, ErrInvalidDigest @@ -463,7 +464,7 @@ func (t *localTaskStore) GetExtendAttribute(ctx context.Context, req *PeerTaskMe hdr[k] = t.Header.Get(k) } } - return &base.ExtendAttribute{Header: hdr}, nil + return &commonv1.ExtendAttribute{Header: hdr}, nil } func (t *localTaskStore) CanReclaim() bool { diff --git 
a/client/daemon/storage/local_storage_subtask.go b/client/daemon/storage/local_storage_subtask.go index f552a68e9..2c6314752 100644 --- a/client/daemon/storage/local_storage_subtask.go +++ b/client/daemon/storage/local_storage_subtask.go @@ -24,10 +24,11 @@ import ( "go.uber.org/atomic" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/client/util" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/digest" - "d7y.io/dragonfly/v2/pkg/rpc/base" ) // TODO need refactor with localTaskStore, currently, localSubTaskStore code copies from localTaskStore @@ -204,7 +205,7 @@ func (t *localSubTaskStore) ReadAllPieces(ctx context.Context, req *ReadAllPiece }, nil } -func (t *localSubTaskStore) GetPieces(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { +func (t *localSubTaskStore) GetPieces(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) { if t.invalid.Load() { t.Errorf("invalid digest, refuse to get pieces") return nil, ErrInvalidDigest @@ -213,7 +214,7 @@ func (t *localSubTaskStore) GetPieces(ctx context.Context, req *base.PieceTaskRe t.RLock() defer t.RUnlock() t.parent.touch() - piecePacket := &base.PiecePacket{ + piecePacket := &commonv1.PiecePacket{ TaskId: req.TaskId, DstPid: t.PeerID, TotalPiece: t.TotalPieces, @@ -227,7 +228,7 @@ func (t *localSubTaskStore) GetPieces(ctx context.Context, req *base.PieceTaskRe for i := int32(0); i < int32(req.Limit); i++ { if piece, ok := t.Pieces[int32(req.StartNum)+i]; ok { - piecePacket.PieceInfos = append(piecePacket.PieceInfos, &base.PieceInfo{ + piecePacket.PieceInfos = append(piecePacket.PieceInfos, &commonv1.PieceInfo{ PieceNum: piece.Num, RangeStart: uint64(piece.Range.Start), RangeSize: uint32(piece.Range.Length), @@ -396,7 +397,7 @@ func (t *localSubTaskStore) Reclaim() error { return nil } -func (t *localSubTaskStore) GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*base.ExtendAttribute, error) { +func (t 
*localSubTaskStore) GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*commonv1.ExtendAttribute, error) { if t.invalid.Load() { t.Errorf("invalid digest, refuse to get total pieces") return nil, ErrInvalidDigest @@ -410,5 +411,5 @@ func (t *localSubTaskStore) GetExtendAttribute(ctx context.Context, req *PeerTas hdr[k] = t.Header.Get(k) } } - return &base.ExtendAttribute{Header: hdr}, nil + return &commonv1.ExtendAttribute{Header: hdr}, nil } diff --git a/client/daemon/storage/local_storage_test.go b/client/daemon/storage/local_storage_test.go index 738f31373..20f1874ae 100644 --- a/client/daemon/storage/local_storage_test.go +++ b/client/daemon/storage/local_storage_test.go @@ -31,13 +31,14 @@ import ( testifyassert "github.com/stretchr/testify/assert" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/test" clientutil "d7y.io/dragonfly/v2/client/util" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/internal/util" "d7y.io/dragonfly/v2/pkg/digest" - "d7y.io/dragonfly/v2/pkg/rpc/base" _ "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server" ) @@ -195,7 +196,7 @@ func TestLocalTaskStore_PutAndGetPiece(t *testing.T) { Start: int64(p.start), Length: int64(p.end - p.start), }, - Style: base.PieceStyle_PLAIN, + Style: commonv1.PieceStyle_PLAIN, }, Reader: bytes.NewBuffer(testBytes[p.start:p.end]), }) @@ -225,7 +226,7 @@ func TestLocalTaskStore_PutAndGetPiece(t *testing.T) { Start: int64(p.start), Length: int64(p.end - p.start), }, - Style: base.PieceStyle_PLAIN, + Style: commonv1.PieceStyle_PLAIN, }, }) assert.Nil(err, "get piece reader should be ok") diff --git a/client/daemon/storage/metadata.go b/client/daemon/storage/metadata.go index f89327de0..e55999811 100644 --- a/client/daemon/storage/metadata.go +++ b/client/daemon/storage/metadata.go @@ -19,8 +19,9 @@ package storage import ( "io" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/client/util" - 
"d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/pkg/source" ) @@ -44,11 +45,11 @@ type PeerTaskMetadata struct { } type PieceMetadata struct { - Num int32 `json:"num,omitempty"` - Md5 string `json:"md5,omitempty"` - Offset uint64 `json:"offset,omitempty"` - Range util.Range `json:"range,omitempty"` - Style base.PieceStyle `json:"style,omitempty"` + Num int32 `json:"num,omitempty"` + Md5 string `json:"md5,omitempty"` + Offset uint64 `json:"offset,omitempty"` + Range util.Range `json:"range,omitempty"` + Style commonv1.PieceStyle `json:"style,omitempty"` // time(nanosecond) consumed Cost uint64 `json:"cost,omitempty"` } diff --git a/client/daemon/storage/mocks/stroage_manager_mock.go b/client/daemon/storage/mocks/stroage_manager_mock.go index 92c9de65e..fb2d7957a 100644 --- a/client/daemon/storage/mocks/stroage_manager_mock.go +++ b/client/daemon/storage/mocks/stroage_manager_mock.go @@ -10,9 +10,9 @@ import ( reflect "reflect" time "time" + v1 "d7y.io/api/pkg/apis/common/v1" storage "d7y.io/dragonfly/v2/client/daemon/storage" util "d7y.io/dragonfly/v2/client/util" - base "d7y.io/dragonfly/v2/pkg/rpc/base" gomock "github.com/golang/mock/gomock" ) @@ -40,10 +40,10 @@ func (m *MockTaskStorageDriver) EXPECT() *MockTaskStorageDriverMockRecorder { } // GetExtendAttribute mocks base method. -func (m *MockTaskStorageDriver) GetExtendAttribute(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { +func (m *MockTaskStorageDriver) GetExtendAttribute(ctx context.Context, req *storage.PeerTaskMetadata) (*v1.ExtendAttribute, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetExtendAttribute", ctx, req) - ret0, _ := ret[0].(*base.ExtendAttribute) + ret0, _ := ret[0].(*v1.ExtendAttribute) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -55,10 +55,10 @@ func (mr *MockTaskStorageDriverMockRecorder) GetExtendAttribute(ctx, req interfa } // GetPieces mocks base method. 
-func (m *MockTaskStorageDriver) GetPieces(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { +func (m *MockTaskStorageDriver) GetPieces(ctx context.Context, req *v1.PieceTaskRequest) (*v1.PiecePacket, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetPieces", ctx, req) - ret0, _ := ret[0].(*base.PiecePacket) + ret0, _ := ret[0].(*v1.PiecePacket) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -342,10 +342,10 @@ func (mr *MockManagerMockRecorder) FindPartialCompletedTask(taskID, rg interface } // GetExtendAttribute mocks base method. -func (m *MockManager) GetExtendAttribute(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { +func (m *MockManager) GetExtendAttribute(ctx context.Context, req *storage.PeerTaskMetadata) (*v1.ExtendAttribute, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetExtendAttribute", ctx, req) - ret0, _ := ret[0].(*base.ExtendAttribute) + ret0, _ := ret[0].(*v1.ExtendAttribute) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -357,10 +357,10 @@ func (mr *MockManagerMockRecorder) GetExtendAttribute(ctx, req interface{}) *gom } // GetPieces mocks base method. 
-func (m *MockManager) GetPieces(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { +func (m *MockManager) GetPieces(ctx context.Context, req *v1.PieceTaskRequest) (*v1.PiecePacket, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetPieces", ctx, req) - ret0, _ := ret[0].(*base.PiecePacket) + ret0, _ := ret[0].(*v1.PiecePacket) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/client/daemon/storage/storage_manager.go b/client/daemon/storage/storage_manager.go index 6b012dac6..e8c6f4e19 100644 --- a/client/daemon/storage/storage_manager.go +++ b/client/daemon/storage/storage_manager.go @@ -38,11 +38,12 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/trace" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/gc" "d7y.io/dragonfly/v2/client/util" logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/pkg/rpc/base" ) type TaskStorageDriver interface { @@ -56,11 +57,11 @@ type TaskStorageDriver interface { ReadAllPieces(ctx context.Context, req *ReadAllPiecesRequest) (io.ReadCloser, error) - GetPieces(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) + GetPieces(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) GetTotalPieces(ctx context.Context, req *PeerTaskMetadata) (int32, error) - GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*base.ExtendAttribute, error) + GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*commonv1.ExtendAttribute, error) UpdateTask(ctx context.Context, req *UpdateTaskRequest) error @@ -316,7 +317,7 @@ func (s *storageManager) Store(ctx context.Context, req *StoreRequest) error { return t.Store(ctx, req) } -func (s *storageManager) GetPieces(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { +func (s *storageManager) GetPieces(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, 
error) { t, ok := s.LoadTask( PeerTaskMetadata{ TaskID: req.TaskId, @@ -340,7 +341,7 @@ func (s *storageManager) GetTotalPieces(ctx context.Context, req *PeerTaskMetada return t.(TaskStorageDriver).GetTotalPieces(ctx, req) } -func (s *storageManager) GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*base.ExtendAttribute, error) { +func (s *storageManager) GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*commonv1.ExtendAttribute, error) { t, ok := s.LoadTask( PeerTaskMetadata{ TaskID: req.TaskID, diff --git a/client/daemon/transport/transport.go b/client/daemon/transport/transport.go index 5288a82d4..b2df58965 100644 --- a/client/daemon/transport/transport.go +++ b/client/daemon/transport/transport.go @@ -35,14 +35,15 @@ import ( "go.opentelemetry.io/otel/propagation" "google.golang.org/grpc/status" + commonv1 "d7y.io/api/pkg/apis/common/v1" + errordetailsv1 "d7y.io/api/pkg/apis/errordetails/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/daemon/metrics" "d7y.io/dragonfly/v2/client/daemon/peer" "d7y.io/dragonfly/v2/client/util" logger "d7y.io/dragonfly/v2/internal/dflog" nethttp "d7y.io/dragonfly/v2/pkg/net/http" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/errordetails" ) var _ *logger.SugaredLoggerOnWith // pin this package for no log code generation @@ -70,7 +71,7 @@ type transport struct { defaultFilter string // defaultFilter is used for registering steam task - defaultPattern base.Pattern + defaultPattern commonv1.Pattern // defaultTag is used when http request without X-Dragonfly-Tag Header defaultTag string @@ -125,7 +126,7 @@ func WithDefaultFilter(f string) Option { } // WithDefaultPattern sets default pattern -func WithDefaultPattern(pattern base.Pattern) Option { +func WithDefaultPattern(pattern commonv1.Pattern) Option { return func(rt *transport) *transport { rt.defaultPattern = pattern return rt @@ -215,7 +216,7 @@ func (rt *transport) download(ctx context.Context, req 
*http.Request) (*http.Res log.Infof("start download with url: %s", url) // Init meta value - meta := &base.UrlMeta{Header: map[string]string{}} + meta := &commonv1.UrlMeta{Header: map[string]string{}} var rg *util.Range // Set meta range's value @@ -261,7 +262,7 @@ func (rt *transport) download(ctx context.Context, req *http.Request) (*http.Res if st, ok := status.FromError(err); ok { for _, detail := range st.Details() { switch d := detail.(type) { - case *errordetails.SourceError: + case *errordetailsv1.SourceError: hdr := nethttp.MapToHeader(attr) for k, v := range d.Metadata.Header { hdr.Set(k, v) diff --git a/client/dfcache/dfcache.go b/client/dfcache/dfcache.go index b4a77be59..3e7f37acd 100644 --- a/client/dfcache/dfcache.go +++ b/client/dfcache/dfcache.go @@ -24,12 +24,13 @@ import ( "os" "time" + commonv1 "d7y.io/api/pkg/apis/common/v1" + dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1" + "d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/internal/dferrors" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/basic" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" daemonclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client" ) @@ -89,7 +90,7 @@ func statTask(ctx context.Context, client daemonclient.DaemonClient, cfg *config } // Task not found, return os.ErrNotExist - if dferrors.CheckError(statError, base.Code_PeerTaskNotFound) { + if dferrors.CheckError(statError, commonv1.Code_PeerTaskNotFound) { return os.ErrNotExist } @@ -98,10 +99,10 @@ func statTask(ctx context.Context, client daemonclient.DaemonClient, cfg *config return statError } -func newStatRequest(cfg *config.DfcacheConfig) *dfdaemon.StatTaskRequest { - return &dfdaemon.StatTaskRequest{ +func newStatRequest(cfg *config.DfcacheConfig) *dfdaemonv1.StatTaskRequest { + return &dfdaemonv1.StatTaskRequest{ Url: newCid(cfg.Cid), - UrlMeta: &base.UrlMeta{ + UrlMeta: &commonv1.UrlMeta{ Tag: cfg.Tag, }, LocalOnly: cfg.LocalOnly, @@ -158,12 +159,12 @@ func 
importTask(ctx context.Context, client daemonclient.DaemonClient, cfg *conf return nil } -func newImportRequest(cfg *config.DfcacheConfig) *dfdaemon.ImportTaskRequest { - return &dfdaemon.ImportTaskRequest{ - Type: base.TaskType_DfCache, +func newImportRequest(cfg *config.DfcacheConfig) *dfdaemonv1.ImportTaskRequest { + return &dfdaemonv1.ImportTaskRequest{ + Type: commonv1.TaskType_DfCache, Url: newCid(cfg.Cid), Path: cfg.Path, - UrlMeta: &base.UrlMeta{ + UrlMeta: &commonv1.UrlMeta{ Tag: cfg.Tag, }, } @@ -217,7 +218,7 @@ func exportTask(ctx context.Context, client daemonclient.DaemonClient, cfg *conf } // Task not found, return os.ErrNotExist - if dferrors.CheckError(exportError, base.Code_PeerTaskNotFound) { + if dferrors.CheckError(exportError, commonv1.Code_PeerTaskNotFound) { return os.ErrNotExist } @@ -226,13 +227,13 @@ func exportTask(ctx context.Context, client daemonclient.DaemonClient, cfg *conf return exportError } -func newExportRequest(cfg *config.DfcacheConfig) *dfdaemon.ExportTaskRequest { - return &dfdaemon.ExportTaskRequest{ +func newExportRequest(cfg *config.DfcacheConfig) *dfdaemonv1.ExportTaskRequest { + return &dfdaemonv1.ExportTaskRequest{ Url: newCid(cfg.Cid), Output: cfg.Output, Timeout: uint64(cfg.Timeout), Limit: float64(cfg.RateLimit), - UrlMeta: &base.UrlMeta{ + UrlMeta: &commonv1.UrlMeta{ Tag: cfg.Tag, }, Uid: int64(basic.UserID), @@ -290,10 +291,10 @@ func deleteTask(ctx context.Context, client daemonclient.DaemonClient, cfg *conf return nil } -func newDeleteRequest(cfg *config.DfcacheConfig) *dfdaemon.DeleteTaskRequest { - return &dfdaemon.DeleteTaskRequest{ +func newDeleteRequest(cfg *config.DfcacheConfig) *dfdaemonv1.DeleteTaskRequest { + return &dfdaemonv1.DeleteTaskRequest{ Url: newCid(cfg.Cid), - UrlMeta: &base.UrlMeta{ + UrlMeta: &commonv1.UrlMeta{ Tag: cfg.Tag, }, } diff --git a/client/dfget/dfget.go b/client/dfget/dfget.go index ca34b1bbb..bd5b505e6 100644 --- a/client/dfget/dfget.go +++ b/client/dfget/dfget.go @@ -33,12 
+33,13 @@ import ( "github.com/go-http-utils/headers" "github.com/schollz/progressbar/v3" + commonv1 "d7y.io/api/pkg/apis/common/v1" + dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1" + "d7y.io/dragonfly/v2/client/config" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/basic" "d7y.io/dragonfly/v2/pkg/digest" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" daemonclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client" "d7y.io/dragonfly/v2/pkg/source" pkgstrings "d7y.io/dragonfly/v2/pkg/strings" @@ -91,7 +92,7 @@ func singleDownload(ctx context.Context, client daemonclient.DaemonClient, cfg * var ( start = time.Now() stream *daemonclient.DownResultStream - result *dfdaemon.DownResult + result *dfdaemonv1.DownResult pb *progressbar.ProgressBar request = newDownRequest(cfg, hdr) downError error @@ -220,20 +221,20 @@ func parseHeader(s []string) map[string]string { return hdr } -func newDownRequest(cfg *config.DfgetConfig, hdr map[string]string) *dfdaemon.DownRequest { +func newDownRequest(cfg *config.DfgetConfig, hdr map[string]string) *dfdaemonv1.DownRequest { var rg string if r, ok := hdr[headers.Range]; ok { rg = strings.TrimLeft(r, "bytes=") } else { rg = cfg.Range } - return &dfdaemon.DownRequest{ + return &dfdaemonv1.DownRequest{ Url: cfg.URL, Output: cfg.Output, Timeout: uint64(cfg.Timeout), Limit: float64(cfg.RateLimit.Limit), DisableBackSource: cfg.DisableBackSource, - UrlMeta: &base.UrlMeta{ + UrlMeta: &commonv1.UrlMeta{ Digest: cfg.Digest, Tag: cfg.Tag, Range: rg, diff --git a/cmd/dfcache/cmd/root.go b/cmd/dfcache/cmd/root.go index 307a7c509..87fbd39dc 100644 --- a/cmd/dfcache/cmd/root.go +++ b/cmd/dfcache/cmd/root.go @@ -50,7 +50,7 @@ file that has been imported or added into P2P network by other peer, it's the us responsibility to go back to source and add file into P2P network. 
` -// rootCmd represents the base command when called without any subcommands +// rootCmd represents the commonv1 command when called without any subcommands var rootCmd = &cobra.Command{ Use: "dfcache [flags]", Short: "the P2P cache client of dragonfly", diff --git a/cmd/dfget/cmd/root.go b/cmd/dfget/cmd/root.go index 56c7956aa..d86e8dd0b 100644 --- a/cmd/dfget/cmd/root.go +++ b/cmd/dfget/cmd/root.go @@ -57,7 +57,7 @@ peers to download pieces from it if it owns them. In addition, dfget has the abilities to provide more advanced functionality, such as network bandwidth limit, transmission encryption and so on.` -// rootCmd represents the base command when called without any subcommands +// rootCmd represents the commonv1 command when called without any subcommands var rootCmd = &cobra.Command{ Use: "dfget url -O path", Short: "the P2P client of dragonfly", diff --git a/cmd/dfstore/cmd/root.go b/cmd/dfstore/cmd/root.go index 884f0c27e..a243e3fdb 100644 --- a/cmd/dfstore/cmd/root.go +++ b/cmd/dfstore/cmd/root.go @@ -43,7 +43,7 @@ Rely on S3 or OSS as the backend to ensure storage reliability. In the process of object storage, P2P Cache is effectively used for fast read and write storage. 
` -// rootCmd represents the base command when called without any subcommands +// rootCmd represents the commonv1 command when called without any subcommands var rootCmd = &cobra.Command{ Use: "dfstore [flags]", Short: "object storage client of dragonfly.", diff --git a/cmd/manager/cmd/root.go b/cmd/manager/cmd/root.go index 91825d182..a2a2cdef7 100644 --- a/cmd/manager/cmd/root.go +++ b/cmd/manager/cmd/root.go @@ -36,7 +36,7 @@ var ( cfg *config.Config ) -// rootCmd represents the base command when called without any subcommands +// rootCmd represents the commonv1 command when called without any subcommands var rootCmd = &cobra.Command{ Use: "manager", Short: "The central manager of dragonfly.", diff --git a/cmd/scheduler/cmd/root.go b/cmd/scheduler/cmd/root.go index 13c52c662..c3550ea9c 100644 --- a/cmd/scheduler/cmd/root.go +++ b/cmd/scheduler/cmd/root.go @@ -37,7 +37,7 @@ var ( cfg *config.Config ) -// rootCmd represents the base command when called without any subcommands +// rootCmd represents the commonv1 command when called without any subcommands var rootCmd = &cobra.Command{ Use: "scheduler", Short: "the scheduler of dragonfly", diff --git a/go.mod b/go.mod index c23e0652d..21d61a53c 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module d7y.io/dragonfly/v2 go 1.18 require ( + d7y.io/api v1.0.1 github.com/RichardKnop/machinery v1.10.6 github.com/Showmax/go-fqdn v1.0.0 github.com/VividCortex/mysqlerr v1.0.0 @@ -16,7 +17,6 @@ require ( github.com/distribution/distribution/v3 v3.0.0-20220620080156-3e4f8a0ab147 github.com/docker/go-connections v0.4.0 github.com/docker/go-units v0.4.0 - github.com/envoyproxy/protoc-gen-validate v0.6.7 github.com/gammazero/deque v0.2.0 github.com/gin-contrib/cors v1.3.1 github.com/gin-contrib/static v0.0.1 @@ -70,11 +70,11 @@ require ( golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26 golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f - golang.org/x/sys 
v0.0.0-20220627191245-f75cf1eec38b + golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 golang.org/x/time v0.0.0-20220609170525-579cf78fd858 google.golang.org/api v0.90.0 google.golang.org/grpc v1.48.0 - google.golang.org/protobuf v1.28.0 + google.golang.org/protobuf v1.28.1 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/yaml.v3 v3.0.1 gorm.io/driver/mysql v1.3.4 @@ -102,6 +102,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/denisenkom/go-mssqldb v0.12.2 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/envoyproxy/protoc-gen-validate v0.6.7 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-echarts/go-echarts/v2 v2.2.4 // indirect @@ -195,12 +196,12 @@ require ( go.mongodb.org/mongo-driver v1.9.1 // indirect go.opencensus.io v0.23.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect + golang.org/x/net v0.0.0-20220728211354-c7608f3a8462 // indirect golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/tools v0.1.11 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03 // indirect + google.golang.org/genproto v0.0.0-20220728213248-dd149ef739b9 // indirect gopkg.in/ini.v1 v1.66.6 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gorm.io/driver/sqlserver v1.3.2 // indirect diff --git a/go.sum b/go.sum index 05ffe3a1c..19a93c67d 100644 --- a/go.sum +++ b/go.sum @@ -69,6 +69,8 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= 
+d7y.io/api v1.0.1 h1:FCtyOacd7hBk3H6TFyVBLW9cAlFaS8YyQ7LPcYdrBBY= +d7y.io/api v1.0.1/go.mod h1:GFnWPZFe4DUW70aOQikRZF0pvXpbUwAsGSCAZFFitPo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= @@ -1262,8 +1264,9 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220728211354-c7608f3a8462 h1:UreQrH7DbFXSi9ZFox6FNT3WBooWmdANpU+IfkT1T4I= +golang.org/x/net v0.0.0-20220728211354-c7608f3a8462/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1403,8 +1406,8 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b h1:2n253B2r0pYSmEV+UNCQoPfU/FiaizQEK5Gu4Bq4JE8= -golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1656,8 +1659,8 @@ google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljW google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03 h1:W70HjnmXFJm+8RNjOpIDYW2nKsSi/af0VvIZUtYkwuU= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220728213248-dd149ef739b9 h1:d3fKQZK+1rWQMg3xLKQbPMirUCo29I/NRdI2WarSzTg= +google.golang.org/genproto v0.0.0-20220728213248-dd149ef739b9/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod 
h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1710,8 +1713,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/hack/protoc.sh b/hack/protoc.sh deleted file mode 100755 index 7f955d50a..000000000 --- a/hack/protoc.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -PROTOC_ALL_IMAGE=${PROTOC_ALL_IMAGE:-"namely/protoc-all:1.47_0"} -PROTO_PATH=pkg/rpc -LANGUAGE=go - -proto_modules="base cdnsystem dfdaemon manager scheduler errordetails" - -echo "generate protos..." - -for module in ${proto_modules}; do - if docker run --rm -v $PWD:/defs ${PROTOC_ALL_IMAGE} \ - -d ${PROTO_PATH}/$module -i . \ - -l ${LANGUAGE} -o . 
\ - --go-source-relative \ - --with-validator \ - --validator-source-relative; then - echo "generate protos ${module} successfully" - else - echo "generate protos ${module} failed" - fi -done diff --git a/internal/dferrors/error.go b/internal/dferrors/error.go index 79b9dbe32..6a1c27d61 100644 --- a/internal/dferrors/error.go +++ b/internal/dferrors/error.go @@ -20,7 +20,7 @@ import ( "errors" "fmt" - "d7y.io/dragonfly/v2/pkg/rpc/base" + commonv1 "d7y.io/api/pkg/apis/common/v1" ) // common and framework errors @@ -39,7 +39,7 @@ func IsEndOfStream(err error) bool { } type DfError struct { - Code base.Code + Code commonv1.Code Message string } @@ -47,21 +47,21 @@ func (s *DfError) Error() string { return fmt.Sprintf("[%d]%s", s.Code, s.Message) } -func New(code base.Code, msg string) *DfError { +func New(code commonv1.Code, msg string) *DfError { return &DfError{ Code: code, Message: msg, } } -func Newf(code base.Code, format string, a ...any) *DfError { +func Newf(code commonv1.Code, format string, a ...any) *DfError { return &DfError{ Code: code, Message: fmt.Sprintf(format, a...), } } -func CheckError(err error, code base.Code) bool { +func CheckError(err error, code commonv1.Code) bool { if err == nil { return false } diff --git a/manager/middlewares/error.go b/manager/middlewares/error.go index f5c0ae212..297fd89f2 100644 --- a/manager/middlewares/error.go +++ b/manager/middlewares/error.go @@ -27,8 +27,9 @@ import ( "golang.org/x/crypto/bcrypt" "gorm.io/gorm" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/internal/dferrors" - "d7y.io/dragonfly/v2/pkg/rpc/base" ) type ErrorResponse struct { @@ -58,7 +59,7 @@ func Error() gin.HandlerFunc { var dferr *dferrors.DfError if errors.As(err.Err, &dferr) { switch dferr.Code { - case base.Code_InvalidResourceType: + case commonv1.Code_InvalidResourceType: c.JSON(http.StatusBadRequest, ErrorResponse{ Message: http.StatusText(http.StatusBadRequest), }) diff --git a/manager/rpcserver/rpcserver.go 
b/manager/rpcserver/rpcserver.go index b8ba1b036..da4d85cc2 100644 --- a/manager/rpcserver/rpcserver.go +++ b/manager/rpcserver/rpcserver.go @@ -35,6 +35,8 @@ import ( "google.golang.org/grpc/status" "gorm.io/gorm" + managerv1 "d7y.io/api/pkg/apis/manager/v1" + logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/manager/cache" "d7y.io/dragonfly/v2/manager/config" @@ -44,7 +46,6 @@ import ( "d7y.io/dragonfly/v2/manager/searcher" "d7y.io/dragonfly/v2/manager/types" "d7y.io/dragonfly/v2/pkg/objectstorage" - "d7y.io/dragonfly/v2/pkg/rpc/manager" ) // Default middlewares for stream. @@ -80,7 +81,7 @@ type Server struct { // Searcher interface. searcher searcher.Searcher // Manager grpc interface. - manager.UnimplementedManagerServer + managerv1.UnimplementedManagerServer // Object storage interface. objectStorage objectstorage.ObjectStorage // Object storage configuration. @@ -108,14 +109,14 @@ func New( }, opts...)...) // Register servers on grpc server. - manager.RegisterManagerServer(grpcServer, server) + managerv1.RegisterManagerServer(grpcServer, server) healthpb.RegisterHealthServer(grpcServer, health.NewServer()) return grpcServer } // Get SeedPeer and SeedPeer cluster configuration. -func (s *Server) GetSeedPeer(ctx context.Context, req *manager.GetSeedPeerRequest) (*manager.SeedPeer, error) { - var pbSeedPeer manager.SeedPeer +func (s *Server) GetSeedPeer(ctx context.Context, req *managerv1.GetSeedPeerRequest) (*managerv1.SeedPeer, error) { + var pbSeedPeer managerv1.SeedPeer cacheKey := cache.MakeSeedPeerCacheKey(uint(req.SeedPeerClusterId), req.HostName, req.Ip) // Cache hit. @@ -143,10 +144,10 @@ func (s *Server) GetSeedPeer(ctx context.Context, req *manager.GetSeedPeerReques } // Construct schedulers. 
- var pbSchedulers []*manager.Scheduler + var pbSchedulers []*managerv1.Scheduler for _, schedulerCluster := range seedPeer.SeedPeerCluster.SchedulerClusters { for _, scheduler := range schedulerCluster.Schedulers { - pbSchedulers = append(pbSchedulers, &manager.Scheduler{ + pbSchedulers = append(pbSchedulers, &managerv1.Scheduler{ Id: uint64(scheduler.ID), HostName: scheduler.HostName, Idc: scheduler.IDC, @@ -160,7 +161,7 @@ func (s *Server) GetSeedPeer(ctx context.Context, req *manager.GetSeedPeerReques } // Construct seed peer. - pbSeedPeer = manager.SeedPeer{ + pbSeedPeer = managerv1.SeedPeer{ Id: uint64(seedPeer.ID), Type: seedPeer.Type, HostName: seedPeer.HostName, @@ -173,7 +174,7 @@ func (s *Server) GetSeedPeer(ctx context.Context, req *manager.GetSeedPeerReques ObjectStoragePort: seedPeer.ObjectStoragePort, State: seedPeer.State, SeedPeerClusterId: uint64(seedPeer.SeedPeerClusterID), - SeedPeerCluster: &manager.SeedPeerCluster{ + SeedPeerCluster: &managerv1.SeedPeerCluster{ Id: uint64(seedPeer.SeedPeerCluster.ID), Name: seedPeer.SeedPeerCluster.Name, Bio: seedPeer.SeedPeerCluster.BIO, @@ -196,7 +197,7 @@ func (s *Server) GetSeedPeer(ctx context.Context, req *manager.GetSeedPeerReques } // Update SeedPeer configuration. 
-func (s *Server) UpdateSeedPeer(ctx context.Context, req *manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error) { +func (s *Server) UpdateSeedPeer(ctx context.Context, req *managerv1.UpdateSeedPeerRequest) (*managerv1.SeedPeer, error) { seedPeer := model.SeedPeer{} if err := s.db.WithContext(ctx).First(&seedPeer, model.SeedPeer{ HostName: req.HostName, @@ -229,7 +230,7 @@ func (s *Server) UpdateSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer logger.Warnf("%s refresh keepalive status failed in seed peer cluster %d", seedPeer.HostName, seedPeer.SeedPeerClusterID) } - return &manager.SeedPeer{ + return &managerv1.SeedPeer{ Id: uint64(seedPeer.ID), HostName: seedPeer.HostName, Type: seedPeer.Type, @@ -246,7 +247,7 @@ func (s *Server) UpdateSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer } // Create SeedPeer and associate cluster. -func (s *Server) createSeedPeer(ctx context.Context, req *manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error) { +func (s *Server) createSeedPeer(ctx context.Context, req *managerv1.UpdateSeedPeerRequest) (*managerv1.SeedPeer, error) { seedPeer := model.SeedPeer{ HostName: req.HostName, Type: req.Type, @@ -264,7 +265,7 @@ func (s *Server) createSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer return nil, status.Error(codes.Unknown, err.Error()) } - return &manager.SeedPeer{ + return &managerv1.SeedPeer{ Id: uint64(seedPeer.ID), HostName: seedPeer.HostName, Type: seedPeer.Type, @@ -281,8 +282,8 @@ func (s *Server) createSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer } // Get Scheduler and Scheduler cluster configuration. 
-func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequest) (*manager.Scheduler, error) { - var pbScheduler manager.Scheduler +func (s *Server) GetScheduler(ctx context.Context, req *managerv1.GetSchedulerRequest) (*managerv1.Scheduler, error) { + var pbScheduler managerv1.Scheduler cacheKey := cache.MakeSchedulerCacheKey(uint(req.SchedulerClusterId), req.HostName, req.Ip) // Cache hit. @@ -316,7 +317,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ } // Construct seed peers. - var pbSeedPeers []*manager.SeedPeer + var pbSeedPeers []*managerv1.SeedPeer for _, seedPeerCluster := range scheduler.SchedulerCluster.SeedPeerClusters { seedPeerClusterConfig, err := seedPeerCluster.Config.MarshalJSON() if err != nil { @@ -324,7 +325,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ } for _, seedPeer := range seedPeerCluster.SeedPeers { - pbSeedPeers = append(pbSeedPeers, &manager.SeedPeer{ + pbSeedPeers = append(pbSeedPeers, &managerv1.SeedPeer{ Id: uint64(seedPeer.ID), HostName: seedPeer.HostName, Type: seedPeer.Type, @@ -337,7 +338,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ ObjectStoragePort: seedPeer.ObjectStoragePort, State: seedPeer.State, SeedPeerClusterId: uint64(seedPeer.SeedPeerClusterID), - SeedPeerCluster: &manager.SeedPeerCluster{ + SeedPeerCluster: &managerv1.SeedPeerCluster{ Id: uint64(seedPeerCluster.ID), Name: seedPeerCluster.Name, Bio: seedPeerCluster.BIO, @@ -348,7 +349,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ } // Construct scheduler. 
- pbScheduler = manager.Scheduler{ + pbScheduler = managerv1.Scheduler{ Id: uint64(scheduler.ID), HostName: scheduler.HostName, Idc: scheduler.IDC, @@ -358,7 +359,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ Port: scheduler.Port, State: scheduler.State, SchedulerClusterId: uint64(scheduler.SchedulerClusterID), - SchedulerCluster: &manager.SchedulerCluster{ + SchedulerCluster: &managerv1.SchedulerCluster{ Id: uint64(scheduler.SchedulerCluster.ID), Name: scheduler.SchedulerCluster.Name, Bio: scheduler.SchedulerCluster.BIO, @@ -382,7 +383,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ } // Update scheduler configuration. -func (s *Server) UpdateScheduler(ctx context.Context, req *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) { +func (s *Server) UpdateScheduler(ctx context.Context, req *managerv1.UpdateSchedulerRequest) (*managerv1.Scheduler, error) { scheduler := model.Scheduler{} if err := s.db.WithContext(ctx).First(&scheduler, model.Scheduler{ HostName: req.HostName, @@ -412,7 +413,7 @@ func (s *Server) UpdateScheduler(ctx context.Context, req *manager.UpdateSchedul logger.Warnf("%s refresh keepalive status failed in scheduler cluster %d", scheduler.HostName, scheduler.SchedulerClusterID) } - return &manager.Scheduler{ + return &managerv1.Scheduler{ Id: uint64(scheduler.ID), HostName: scheduler.HostName, Idc: scheduler.IDC, @@ -426,7 +427,7 @@ func (s *Server) UpdateScheduler(ctx context.Context, req *manager.UpdateSchedul } // Create scheduler and associate cluster. 
-func (s *Server) createScheduler(ctx context.Context, req *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) { +func (s *Server) createScheduler(ctx context.Context, req *managerv1.UpdateSchedulerRequest) (*managerv1.Scheduler, error) { scheduler := model.Scheduler{ HostName: req.HostName, IDC: req.Idc, @@ -441,7 +442,7 @@ func (s *Server) createScheduler(ctx context.Context, req *manager.UpdateSchedul return nil, status.Error(codes.Unknown, err.Error()) } - return &manager.Scheduler{ + return &managerv1.Scheduler{ Id: uint64(scheduler.ID), HostName: scheduler.HostName, Idc: scheduler.IDC, @@ -455,11 +456,11 @@ func (s *Server) createScheduler(ctx context.Context, req *manager.UpdateSchedul } // List acitve schedulers configuration. -func (s *Server) ListSchedulers(ctx context.Context, req *manager.ListSchedulersRequest) (*manager.ListSchedulersResponse, error) { +func (s *Server) ListSchedulers(ctx context.Context, req *managerv1.ListSchedulersRequest) (*managerv1.ListSchedulersResponse, error) { log := logger.WithHostnameAndIP(req.HostName, req.Ip) // Count the number of the active peer. - if s.config.Metrics.EnablePeerGauge && req.SourceType == manager.SourceType_PEER_SOURCE { + if s.config.Metrics.EnablePeerGauge && req.SourceType == managerv1.SourceType_PEER_SOURCE { count, err := s.getPeerCount(ctx, req) if err != nil { log.Warnf("get peer count failed: %s", err.Error()) @@ -468,7 +469,7 @@ func (s *Server) ListSchedulers(ctx context.Context, req *manager.ListSchedulers } } - var pbListSchedulersResponse manager.ListSchedulersResponse + var pbListSchedulersResponse managerv1.ListSchedulersResponse cacheKey := cache.MakeSchedulersCacheKeyForPeer(req.HostName, req.Ip) // Cache hit. @@ -503,10 +504,10 @@ func (s *Server) ListSchedulers(ctx context.Context, req *manager.ListSchedulers // Construct schedulers. 
for _, scheduler := range schedulers { - seedPeers := []*manager.SeedPeer{} + seedPeers := []*managerv1.SeedPeer{} for _, seedPeerCluster := range scheduler.SchedulerCluster.SeedPeerClusters { for _, seedPeer := range seedPeerCluster.SeedPeers { - seedPeers = append(seedPeers, &manager.SeedPeer{ + seedPeers = append(seedPeers, &managerv1.SeedPeer{ Id: uint64(seedPeer.ID), HostName: seedPeer.HostName, Type: seedPeer.Type, @@ -523,7 +524,7 @@ func (s *Server) ListSchedulers(ctx context.Context, req *manager.ListSchedulers } } - pbListSchedulersResponse.Schedulers = append(pbListSchedulersResponse.Schedulers, &manager.Scheduler{ + pbListSchedulersResponse.Schedulers = append(pbListSchedulersResponse.Schedulers, &managerv1.Scheduler{ Id: uint64(scheduler.ID), HostName: scheduler.HostName, Idc: scheduler.IDC, @@ -551,7 +552,7 @@ func (s *Server) ListSchedulers(ctx context.Context, req *manager.ListSchedulers } // Get the number of active peers -func (s *Server) getPeerCount(ctx context.Context, req *manager.ListSchedulersRequest) (int, error) { +func (s *Server) getPeerCount(ctx context.Context, req *managerv1.ListSchedulersRequest) (int, error) { cacheKey := cache.MakePeerCacheKey(req.HostName, req.Ip) if err := s.rdb.Set(ctx, cacheKey, types.Peer{ ID: cacheKey, @@ -570,7 +571,7 @@ func (s *Server) getPeerCount(ctx context.Context, req *manager.ListSchedulersRe } // Get object storage configuration. 
-func (s *Server) GetObjectStorage(ctx context.Context, req *manager.GetObjectStorageRequest) (*manager.ObjectStorage, error) { +func (s *Server) GetObjectStorage(ctx context.Context, req *managerv1.GetObjectStorageRequest) (*managerv1.ObjectStorage, error) { log := logger.WithHostnameAndIP(req.HostName, req.Ip) if !s.objectStorageConfig.Enable { @@ -579,7 +580,7 @@ func (s *Server) GetObjectStorage(ctx context.Context, req *manager.GetObjectSto return nil, status.Error(codes.NotFound, msg) } - return &manager.ObjectStorage{ + return &managerv1.ObjectStorage{ Name: s.objectStorageConfig.Name, Region: s.objectStorageConfig.Region, Endpoint: s.objectStorageConfig.Endpoint, @@ -589,7 +590,7 @@ func (s *Server) GetObjectStorage(ctx context.Context, req *manager.GetObjectSto } // List buckets configuration. -func (s *Server) ListBuckets(ctx context.Context, req *manager.ListBucketsRequest) (*manager.ListBucketsResponse, error) { +func (s *Server) ListBuckets(ctx context.Context, req *managerv1.ListBucketsRequest) (*managerv1.ListBucketsResponse, error) { log := logger.WithHostnameAndIP(req.HostName, req.Ip) if !s.objectStorageConfig.Enable { @@ -598,7 +599,7 @@ func (s *Server) ListBuckets(ctx context.Context, req *manager.ListBucketsReques return nil, status.Error(codes.NotFound, msg) } - var pbListBucketsResponse manager.ListBucketsResponse + var pbListBucketsResponse managerv1.ListBucketsResponse cacheKey := cache.MakeBucketsCacheKey(s.objectStorageConfig.Name) // Cache hit. @@ -617,7 +618,7 @@ func (s *Server) ListBuckets(ctx context.Context, req *manager.ListBucketsReques // Construct schedulers. for _, bucket := range buckets { - pbListBucketsResponse.Buckets = append(pbListBucketsResponse.Buckets, &manager.Bucket{ + pbListBucketsResponse.Buckets = append(pbListBucketsResponse.Buckets, &managerv1.Bucket{ Name: bucket.Name, }) } @@ -636,7 +637,7 @@ func (s *Server) ListBuckets(ctx context.Context, req *manager.ListBucketsReques } // KeepAlive with manager. 
-func (s *Server) KeepAlive(stream manager.Manager_KeepAliveServer) error { +func (s *Server) KeepAlive(stream managerv1.Manager_KeepAliveServer) error { req, err := stream.Recv() if err != nil { logger.Errorf("keepalive failed for the first time: %v", err) @@ -649,7 +650,7 @@ func (s *Server) KeepAlive(stream manager.Manager_KeepAliveServer) error { logger.Infof("%s keepalive successfully for the first time in cluster %d", hostName, clusterID) // Initialize active scheduler. - if sourceType == manager.SourceType_SCHEDULER_SOURCE { + if sourceType == managerv1.SourceType_SCHEDULER_SOURCE { scheduler := model.Scheduler{} if err := s.db.First(&scheduler, model.Scheduler{ HostName: hostName, @@ -669,7 +670,7 @@ func (s *Server) KeepAlive(stream manager.Manager_KeepAliveServer) error { } // Initialize active seed peer. - if sourceType == manager.SourceType_SEED_PEER_SOURCE { + if sourceType == managerv1.SourceType_SEED_PEER_SOURCE { seedPeer := model.SeedPeer{} if err := s.db.First(&seedPeer, model.SeedPeer{ HostName: hostName, @@ -692,7 +693,7 @@ func (s *Server) KeepAlive(stream manager.Manager_KeepAliveServer) error { _, err := stream.Recv() if err != nil { // Inactive scheduler. - if sourceType == manager.SourceType_SCHEDULER_SOURCE { + if sourceType == managerv1.SourceType_SCHEDULER_SOURCE { scheduler := model.Scheduler{} if err := s.db.First(&scheduler, model.Scheduler{ HostName: hostName, @@ -712,7 +713,7 @@ func (s *Server) KeepAlive(stream manager.Manager_KeepAliveServer) error { } // Inactive seed peer. 
- if sourceType == manager.SourceType_SEED_PEER_SOURCE { + if sourceType == managerv1.SourceType_SEED_PEER_SOURCE { seedPeer := model.SeedPeer{} if err := s.db.First(&seedPeer, model.SeedPeer{ HostName: hostName, diff --git a/manager/searcher/mocks/searcher_mock.go b/manager/searcher/mocks/searcher_mock.go index 47a9df07d..c6c9abc7f 100644 --- a/manager/searcher/mocks/searcher_mock.go +++ b/manager/searcher/mocks/searcher_mock.go @@ -8,8 +8,8 @@ import ( context "context" reflect "reflect" + v1 "d7y.io/api/pkg/apis/manager/v1" model "d7y.io/dragonfly/v2/manager/model" - manager "d7y.io/dragonfly/v2/pkg/rpc/manager" gomock "github.com/golang/mock/gomock" ) @@ -37,7 +37,7 @@ func (m *MockSearcher) EXPECT() *MockSearcherMockRecorder { } // FindSchedulerClusters mocks base method. -func (m *MockSearcher) FindSchedulerClusters(arg0 context.Context, arg1 []model.SchedulerCluster, arg2 *manager.ListSchedulersRequest) ([]model.SchedulerCluster, error) { +func (m *MockSearcher) FindSchedulerClusters(arg0 context.Context, arg1 []model.SchedulerCluster, arg2 *v1.ListSchedulersRequest) ([]model.SchedulerCluster, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FindSchedulerClusters", arg0, arg1, arg2) ret0, _ := ret[0].([]model.SchedulerCluster) diff --git a/manager/searcher/searcher.go b/manager/searcher/searcher.go index 828dff943..9b22dedab 100644 --- a/manager/searcher/searcher.go +++ b/manager/searcher/searcher.go @@ -27,10 +27,11 @@ import ( "github.com/mitchellh/mapstructure" + managerv1 "d7y.io/api/pkg/apis/manager/v1" + logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/manager/model" "d7y.io/dragonfly/v2/pkg/math" - "d7y.io/dragonfly/v2/pkg/rpc/manager" ) const ( @@ -83,7 +84,7 @@ type Scopes struct { type Searcher interface { // FindSchedulerClusters finds scheduler clusters that best matches the evaluation - FindSchedulerClusters(context.Context, []model.SchedulerCluster, *manager.ListSchedulersRequest) ([]model.SchedulerCluster, error) + 
FindSchedulerClusters(context.Context, []model.SchedulerCluster, *managerv1.ListSchedulersRequest) ([]model.SchedulerCluster, error) } type searcher struct{} @@ -100,7 +101,7 @@ func New(pluginDir string) Searcher { } // FindSchedulerClusters finds scheduler clusters that best matches the evaluation -func (s *searcher) FindSchedulerClusters(ctx context.Context, schedulerClusters []model.SchedulerCluster, client *manager.ListSchedulersRequest) ([]model.SchedulerCluster, error) { +func (s *searcher) FindSchedulerClusters(ctx context.Context, schedulerClusters []model.SchedulerCluster, client *managerv1.ListSchedulersRequest) ([]model.SchedulerCluster, error) { conditions := client.HostInfo if len(conditions) <= 0 { return nil, errors.New("empty conditions") diff --git a/manager/searcher/searcher_test.go b/manager/searcher/searcher_test.go index a6d3f5e0f..83ffa99d4 100644 --- a/manager/searcher/searcher_test.go +++ b/manager/searcher/searcher_test.go @@ -22,8 +22,9 @@ import ( "github.com/stretchr/testify/assert" + managerv1 "d7y.io/api/pkg/apis/manager/v1" + "d7y.io/dragonfly/v2/manager/model" - "d7y.io/dragonfly/v2/pkg/rpc/manager" ) func TestSchedulerCluster(t *testing.T) { @@ -730,7 +731,7 @@ func TestSchedulerCluster(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { searcher := New(pluginDir) - clusters, ok := searcher.FindSchedulerClusters(context.Background(), tc.schedulerClusters, &manager.ListSchedulersRequest{ + clusters, ok := searcher.FindSchedulerClusters(context.Background(), tc.schedulerClusters, &managerv1.ListSchedulersRequest{ HostName: "foo", Ip: "127.0.0.1", HostInfo: tc.conditions, diff --git a/manager/searcher/testdata/main.go b/manager/searcher/testdata/main.go index 5d0bc1179..e70735252 100644 --- a/manager/searcher/testdata/main.go +++ b/manager/searcher/testdata/main.go @@ -21,9 +21,10 @@ import ( "fmt" "os" + managerv1 "d7y.io/api/pkg/apis/manager/v1" + "d7y.io/dragonfly/v2/manager/model" 
"d7y.io/dragonfly/v2/manager/searcher" - "d7y.io/dragonfly/v2/pkg/rpc/manager" ) func main() { @@ -33,7 +34,7 @@ func main() { os.Exit(1) } - clusters, err := s.FindSchedulerClusters(context.Background(), []model.SchedulerCluster{}, &manager.ListSchedulersRequest{}) + clusters, err := s.FindSchedulerClusters(context.Background(), []model.SchedulerCluster{}, &managerv1.ListSchedulersRequest{}) if err != nil { fmt.Println("scheduler cluster not found") os.Exit(1) diff --git a/manager/searcher/testdata/plugin/searcher.go b/manager/searcher/testdata/plugin/searcher.go index a1fca3ed0..b4247fc6a 100644 --- a/manager/searcher/testdata/plugin/searcher.go +++ b/manager/searcher/testdata/plugin/searcher.go @@ -19,13 +19,14 @@ package main import ( "context" + managerv1 "d7y.io/api/pkg/apis/manager/v1" + "d7y.io/dragonfly/v2/manager/model" - "d7y.io/dragonfly/v2/pkg/rpc/manager" ) type searcher struct{} -func (s *searcher) FindSchedulerClusters(ctx context.Context, schedulerClusters []model.SchedulerCluster, client *manager.ListSchedulersRequest) ([]model.SchedulerCluster, error) { +func (s *searcher) FindSchedulerClusters(ctx context.Context, schedulerClusters []model.SchedulerCluster, client *managerv1.ListSchedulersRequest) ([]model.SchedulerCluster, error) { return []model.SchedulerCluster{{Name: "foo"}}, nil } diff --git a/pkg/idgen/task_id.go b/pkg/idgen/task_id.go index 4191837d2..7e39f5a8f 100644 --- a/pkg/idgen/task_id.go +++ b/pkg/idgen/task_id.go @@ -19,9 +19,10 @@ package idgen import ( "strings" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/pkg/digest" neturl "d7y.io/dragonfly/v2/pkg/net/url" - "d7y.io/dragonfly/v2/pkg/rpc/base" pkgstrings "d7y.io/dragonfly/v2/pkg/strings" ) @@ -31,19 +32,19 @@ const ( // TaskID generates a task id. // filter is separated by & character. 
-func TaskID(url string, meta *base.UrlMeta) string { +func TaskID(url string, meta *commonv1.UrlMeta) string { return taskID(url, meta, false) } // ParentTaskID generates a task id like TaskID, but without range. // this func is used to check the parent tasks for ranged requests -func ParentTaskID(url string, meta *base.UrlMeta) string { +func ParentTaskID(url string, meta *commonv1.UrlMeta) string { return taskID(url, meta, true) } // taskID generates a task id. // filter is separated by & character. -func taskID(url string, meta *base.UrlMeta, ignoreRange bool) string { +func taskID(url string, meta *commonv1.UrlMeta, ignoreRange bool) string { if meta == nil { return digest.SHA256FromStrings(url) } diff --git a/pkg/idgen/task_id_test.go b/pkg/idgen/task_id_test.go index 207f6c9d0..3c7788277 100644 --- a/pkg/idgen/task_id_test.go +++ b/pkg/idgen/task_id_test.go @@ -21,14 +21,14 @@ import ( "github.com/stretchr/testify/assert" - "d7y.io/dragonfly/v2/pkg/rpc/base" + commonv1 "d7y.io/api/pkg/apis/common/v1" ) func TestTaskID(t *testing.T) { tests := []struct { name string url string - meta *base.UrlMeta + meta *commonv1.UrlMeta ignoreRange bool expect func(t *testing.T, d any) }{ @@ -44,7 +44,7 @@ func TestTaskID(t *testing.T) { { name: "generate taskID with meta", url: "https://example.com", - meta: &base.UrlMeta{ + meta: &commonv1.UrlMeta{ Range: "foo", Digest: "bar", Tag: "", @@ -57,7 +57,7 @@ func TestTaskID(t *testing.T) { { name: "generate taskID with meta", url: "https://example.com", - meta: &base.UrlMeta{ + meta: &commonv1.UrlMeta{ Range: "foo", Digest: "bar", Tag: "", @@ -71,7 +71,7 @@ func TestTaskID(t *testing.T) { { name: "generate taskID with filter", url: "https://example.com?foo=foo&bar=bar", - meta: &base.UrlMeta{ + meta: &commonv1.UrlMeta{ Tag: "foo", Filter: "foo&bar", }, @@ -83,7 +83,7 @@ func TestTaskID(t *testing.T) { { name: "generate taskID with tag", url: "https://example.com", - meta: &base.UrlMeta{ + meta: &commonv1.UrlMeta{ Tag: "foo", 
}, expect: func(t *testing.T, d any) { diff --git a/pkg/rpc/base/base.pb.go b/pkg/rpc/base/base.pb.go deleted file mode 100644 index b8d5937a9..000000000 --- a/pkg/rpc/base/base.pb.go +++ /dev/null @@ -1,1255 +0,0 @@ -// -// Copyright 2020 The Dragonfly Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: pkg/rpc/base/base.proto - -package base - -import ( - _ "github.com/envoyproxy/protoc-gen-validate/validate" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Code int32 - -const ( - Code_X_UNSPECIFIED Code = 0 - // success code 200-299 - Code_Success Code = 200 - // framework can not find server node - Code_ServerUnavailable Code = 500 - // common response error 1000-1999 - // client can be migrated to another scheduler/CDN - Code_ResourceLacked Code = 1000 - Code_BackToSourceAborted Code = 1001 - Code_BadRequest Code = 1400 - Code_PeerTaskNotFound Code = 1404 - Code_UnknownError Code = 1500 - Code_RequestTimeOut Code = 1504 - // client response error 4000-4999 - Code_ClientError Code = 4000 - Code_ClientPieceRequestFail Code = 4001 // get piece task from other peer error - Code_ClientScheduleTimeout Code = 4002 // wait scheduler response timeout - Code_ClientContextCanceled Code = 4003 - Code_ClientWaitPieceReady Code = 4004 // when target peer downloads from source slowly, should wait - Code_ClientPieceDownloadFail Code = 4005 - Code_ClientRequestLimitFail Code = 4006 - Code_ClientConnectionError Code = 4007 - Code_ClientBackSourceError Code = 4008 - Code_ClientPieceNotFound Code = 4404 - // scheduler response error 5000-5999 - Code_SchedError Code = 5000 - Code_SchedNeedBackSource Code = 5001 // client should try to download from source - Code_SchedPeerGone Code = 5002 // client should disconnect from scheduler - Code_SchedPeerNotFound Code = 5004 // peer not found in scheduler - Code_SchedPeerPieceResultReportFail Code = 5005 // report piece - Code_SchedTaskStatusError Code = 5006 // task status is fail - // cdnsystem response error 6000-6999 - Code_CDNTaskRegistryFail Code = 6001 - Code_CDNTaskNotFound Code = 6404 - // manager response error 7000-7999 - Code_InvalidResourceType Code = 7001 -) - -// Enum value maps for Code. 
-var ( - Code_name = map[int32]string{ - 0: "X_UNSPECIFIED", - 200: "Success", - 500: "ServerUnavailable", - 1000: "ResourceLacked", - 1001: "BackToSourceAborted", - 1400: "BadRequest", - 1404: "PeerTaskNotFound", - 1500: "UnknownError", - 1504: "RequestTimeOut", - 4000: "ClientError", - 4001: "ClientPieceRequestFail", - 4002: "ClientScheduleTimeout", - 4003: "ClientContextCanceled", - 4004: "ClientWaitPieceReady", - 4005: "ClientPieceDownloadFail", - 4006: "ClientRequestLimitFail", - 4007: "ClientConnectionError", - 4008: "ClientBackSourceError", - 4404: "ClientPieceNotFound", - 5000: "SchedError", - 5001: "SchedNeedBackSource", - 5002: "SchedPeerGone", - 5004: "SchedPeerNotFound", - 5005: "SchedPeerPieceResultReportFail", - 5006: "SchedTaskStatusError", - 6001: "CDNTaskRegistryFail", - 6404: "CDNTaskNotFound", - 7001: "InvalidResourceType", - } - Code_value = map[string]int32{ - "X_UNSPECIFIED": 0, - "Success": 200, - "ServerUnavailable": 500, - "ResourceLacked": 1000, - "BackToSourceAborted": 1001, - "BadRequest": 1400, - "PeerTaskNotFound": 1404, - "UnknownError": 1500, - "RequestTimeOut": 1504, - "ClientError": 4000, - "ClientPieceRequestFail": 4001, - "ClientScheduleTimeout": 4002, - "ClientContextCanceled": 4003, - "ClientWaitPieceReady": 4004, - "ClientPieceDownloadFail": 4005, - "ClientRequestLimitFail": 4006, - "ClientConnectionError": 4007, - "ClientBackSourceError": 4008, - "ClientPieceNotFound": 4404, - "SchedError": 5000, - "SchedNeedBackSource": 5001, - "SchedPeerGone": 5002, - "SchedPeerNotFound": 5004, - "SchedPeerPieceResultReportFail": 5005, - "SchedTaskStatusError": 5006, - "CDNTaskRegistryFail": 6001, - "CDNTaskNotFound": 6404, - "InvalidResourceType": 7001, - } -) - -func (x Code) Enum() *Code { - p := new(Code) - *p = x - return p -} - -func (x Code) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Code) Descriptor() protoreflect.EnumDescriptor { - return 
file_pkg_rpc_base_base_proto_enumTypes[0].Descriptor() -} - -func (Code) Type() protoreflect.EnumType { - return &file_pkg_rpc_base_base_proto_enumTypes[0] -} - -func (x Code) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Code.Descriptor instead. -func (Code) EnumDescriptor() ([]byte, []int) { - return file_pkg_rpc_base_base_proto_rawDescGZIP(), []int{0} -} - -type PieceStyle int32 - -const ( - PieceStyle_PLAIN PieceStyle = 0 -) - -// Enum value maps for PieceStyle. -var ( - PieceStyle_name = map[int32]string{ - 0: "PLAIN", - } - PieceStyle_value = map[string]int32{ - "PLAIN": 0, - } -) - -func (x PieceStyle) Enum() *PieceStyle { - p := new(PieceStyle) - *p = x - return p -} - -func (x PieceStyle) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (PieceStyle) Descriptor() protoreflect.EnumDescriptor { - return file_pkg_rpc_base_base_proto_enumTypes[1].Descriptor() -} - -func (PieceStyle) Type() protoreflect.EnumType { - return &file_pkg_rpc_base_base_proto_enumTypes[1] -} - -func (x PieceStyle) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use PieceStyle.Descriptor instead. -func (PieceStyle) EnumDescriptor() ([]byte, []int) { - return file_pkg_rpc_base_base_proto_rawDescGZIP(), []int{1} -} - -type SizeScope int32 - -const ( - // size > one piece size - SizeScope_NORMAL SizeScope = 0 - // 128 byte < size <= one piece size and be plain type - SizeScope_SMALL SizeScope = 1 - // size <= 128 byte and be plain type - SizeScope_TINY SizeScope = 2 -) - -// Enum value maps for SizeScope. 
-var ( - SizeScope_name = map[int32]string{ - 0: "NORMAL", - 1: "SMALL", - 2: "TINY", - } - SizeScope_value = map[string]int32{ - "NORMAL": 0, - "SMALL": 1, - "TINY": 2, - } -) - -func (x SizeScope) Enum() *SizeScope { - p := new(SizeScope) - *p = x - return p -} - -func (x SizeScope) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (SizeScope) Descriptor() protoreflect.EnumDescriptor { - return file_pkg_rpc_base_base_proto_enumTypes[2].Descriptor() -} - -func (SizeScope) Type() protoreflect.EnumType { - return &file_pkg_rpc_base_base_proto_enumTypes[2] -} - -func (x SizeScope) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use SizeScope.Descriptor instead. -func (SizeScope) EnumDescriptor() ([]byte, []int) { - return file_pkg_rpc_base_base_proto_rawDescGZIP(), []int{2} -} - -// Pattern represents pattern of task. -type Pattern int32 - -const ( - // Default pattern, scheduler will use all p2p node - // include dfdaemon and seed peers. - Pattern_P2P Pattern = 0 - // Seed peer pattern, scheduler will use only seed peers. - Pattern_SEED_PEER Pattern = 1 - // Source pattern, scheduler will say back source - // when there is no available peer in p2p. - Pattern_SOURCE Pattern = 2 -) - -// Enum value maps for Pattern. 
-var ( - Pattern_name = map[int32]string{ - 0: "P2P", - 1: "SEED_PEER", - 2: "SOURCE", - } - Pattern_value = map[string]int32{ - "P2P": 0, - "SEED_PEER": 1, - "SOURCE": 2, - } -) - -func (x Pattern) Enum() *Pattern { - p := new(Pattern) - *p = x - return p -} - -func (x Pattern) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Pattern) Descriptor() protoreflect.EnumDescriptor { - return file_pkg_rpc_base_base_proto_enumTypes[3].Descriptor() -} - -func (Pattern) Type() protoreflect.EnumType { - return &file_pkg_rpc_base_base_proto_enumTypes[3] -} - -func (x Pattern) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Pattern.Descriptor instead. -func (Pattern) EnumDescriptor() ([]byte, []int) { - return file_pkg_rpc_base_base_proto_rawDescGZIP(), []int{3} -} - -// TaskType represents type of task. -type TaskType int32 - -const ( - // Normal is normal type of task, - // normal task is a normal p2p task. - TaskType_Normal TaskType = 0 - // DfCache is dfcache type of task, - // dfcache task is a cache task, and the task url is fake url. - // It can only be used for caching and cannot be downloaded back to source. - TaskType_DfCache TaskType = 1 - // DfStore is dfstore type of task, - // dfstore task is a persistent task in backend. - TaskType_DfStore TaskType = 2 -) - -// Enum value maps for TaskType. 
-var ( - TaskType_name = map[int32]string{ - 0: "Normal", - 1: "DfCache", - 2: "DfStore", - } - TaskType_value = map[string]int32{ - "Normal": 0, - "DfCache": 1, - "DfStore": 2, - } -) - -func (x TaskType) Enum() *TaskType { - p := new(TaskType) - *p = x - return p -} - -func (x TaskType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (TaskType) Descriptor() protoreflect.EnumDescriptor { - return file_pkg_rpc_base_base_proto_enumTypes[4].Descriptor() -} - -func (TaskType) Type() protoreflect.EnumType { - return &file_pkg_rpc_base_base_proto_enumTypes[4] -} - -func (x TaskType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use TaskType.Descriptor instead. -func (TaskType) EnumDescriptor() ([]byte, []int) { - return file_pkg_rpc_base_base_proto_rawDescGZIP(), []int{4} -} - -type GrpcDfError struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Code Code `protobuf:"varint,1,opt,name=code,proto3,enum=base.Code" json:"code,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` -} - -func (x *GrpcDfError) Reset() { - *x = GrpcDfError{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_base_base_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcDfError) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcDfError) ProtoMessage() {} - -func (x *GrpcDfError) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_base_base_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcDfError.ProtoReflect.Descriptor instead. 
-func (*GrpcDfError) Descriptor() ([]byte, []int) { - return file_pkg_rpc_base_base_proto_rawDescGZIP(), []int{0} -} - -func (x *GrpcDfError) GetCode() Code { - if x != nil { - return x.Code - } - return Code_X_UNSPECIFIED -} - -func (x *GrpcDfError) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -// UrlMeta describes url meta info. -type UrlMeta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // digest checks integrity of url content, for example md5:xxx or sha256:yyy - Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` - // url tag identifies different task for same url, conflict with digest - Tag string `protobuf:"bytes,2,opt,name=tag,proto3" json:"tag,omitempty"` - // content range for url - Range string `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"` - // filter url used to generate task id - Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"` - // other url header infos - Header map[string]string `protobuf:"bytes,5,rep,name=header,proto3" json:"header,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *UrlMeta) Reset() { - *x = UrlMeta{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_base_base_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UrlMeta) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UrlMeta) ProtoMessage() {} - -func (x *UrlMeta) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_base_base_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UrlMeta.ProtoReflect.Descriptor instead. 
-func (*UrlMeta) Descriptor() ([]byte, []int) { - return file_pkg_rpc_base_base_proto_rawDescGZIP(), []int{1} -} - -func (x *UrlMeta) GetDigest() string { - if x != nil { - return x.Digest - } - return "" -} - -func (x *UrlMeta) GetTag() string { - if x != nil { - return x.Tag - } - return "" -} - -func (x *UrlMeta) GetRange() string { - if x != nil { - return x.Range - } - return "" -} - -func (x *UrlMeta) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *UrlMeta) GetHeader() map[string]string { - if x != nil { - return x.Header - } - return nil -} - -type HostLoad struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // cpu usage - CpuRatio float32 `protobuf:"fixed32,1,opt,name=cpu_ratio,json=cpuRatio,proto3" json:"cpu_ratio,omitempty"` - // memory usage - MemRatio float32 `protobuf:"fixed32,2,opt,name=mem_ratio,json=memRatio,proto3" json:"mem_ratio,omitempty"` - // disk space usage - DiskRatio float32 `protobuf:"fixed32,3,opt,name=disk_ratio,json=diskRatio,proto3" json:"disk_ratio,omitempty"` -} - -func (x *HostLoad) Reset() { - *x = HostLoad{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_base_base_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HostLoad) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HostLoad) ProtoMessage() {} - -func (x *HostLoad) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_base_base_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HostLoad.ProtoReflect.Descriptor instead. 
-func (*HostLoad) Descriptor() ([]byte, []int) { - return file_pkg_rpc_base_base_proto_rawDescGZIP(), []int{2} -} - -func (x *HostLoad) GetCpuRatio() float32 { - if x != nil { - return x.CpuRatio - } - return 0 -} - -func (x *HostLoad) GetMemRatio() float32 { - if x != nil { - return x.MemRatio - } - return 0 -} - -func (x *HostLoad) GetDiskRatio() float32 { - if x != nil { - return x.DiskRatio - } - return 0 -} - -type PieceTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - SrcPid string `protobuf:"bytes,2,opt,name=src_pid,json=srcPid,proto3" json:"src_pid,omitempty"` - DstPid string `protobuf:"bytes,3,opt,name=dst_pid,json=dstPid,proto3" json:"dst_pid,omitempty"` - // piece number - StartNum uint32 `protobuf:"varint,4,opt,name=start_num,json=startNum,proto3" json:"start_num,omitempty"` - // expected piece count, limit = 0 represent request pieces as many shards as possible - Limit uint32 `protobuf:"varint,5,opt,name=limit,proto3" json:"limit,omitempty"` -} - -func (x *PieceTaskRequest) Reset() { - *x = PieceTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_base_base_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PieceTaskRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PieceTaskRequest) ProtoMessage() {} - -func (x *PieceTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_base_base_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PieceTaskRequest.ProtoReflect.Descriptor instead. 
-func (*PieceTaskRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_base_base_proto_rawDescGZIP(), []int{3} -} - -func (x *PieceTaskRequest) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *PieceTaskRequest) GetSrcPid() string { - if x != nil { - return x.SrcPid - } - return "" -} - -func (x *PieceTaskRequest) GetDstPid() string { - if x != nil { - return x.DstPid - } - return "" -} - -func (x *PieceTaskRequest) GetStartNum() uint32 { - if x != nil { - return x.StartNum - } - return 0 -} - -func (x *PieceTaskRequest) GetLimit() uint32 { - if x != nil { - return x.Limit - } - return 0 -} - -type PieceInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // piece_num < 0 represent start report piece flag - PieceNum int32 `protobuf:"varint,1,opt,name=piece_num,json=pieceNum,proto3" json:"piece_num,omitempty"` - RangeStart uint64 `protobuf:"varint,2,opt,name=range_start,json=rangeStart,proto3" json:"range_start,omitempty"` - RangeSize uint32 `protobuf:"varint,3,opt,name=range_size,json=rangeSize,proto3" json:"range_size,omitempty"` - PieceMd5 string `protobuf:"bytes,4,opt,name=piece_md5,json=pieceMd5,proto3" json:"piece_md5,omitempty"` - PieceOffset uint64 `protobuf:"varint,5,opt,name=piece_offset,json=pieceOffset,proto3" json:"piece_offset,omitempty"` - PieceStyle PieceStyle `protobuf:"varint,6,opt,name=piece_style,json=pieceStyle,proto3,enum=base.PieceStyle" json:"piece_style,omitempty"` - // total time(millisecond) consumed - DownloadCost uint64 `protobuf:"varint,7,opt,name=download_cost,json=downloadCost,proto3" json:"download_cost,omitempty"` -} - -func (x *PieceInfo) Reset() { - *x = PieceInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_base_base_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PieceInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - 
-func (*PieceInfo) ProtoMessage() {} - -func (x *PieceInfo) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_base_base_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PieceInfo.ProtoReflect.Descriptor instead. -func (*PieceInfo) Descriptor() ([]byte, []int) { - return file_pkg_rpc_base_base_proto_rawDescGZIP(), []int{4} -} - -func (x *PieceInfo) GetPieceNum() int32 { - if x != nil { - return x.PieceNum - } - return 0 -} - -func (x *PieceInfo) GetRangeStart() uint64 { - if x != nil { - return x.RangeStart - } - return 0 -} - -func (x *PieceInfo) GetRangeSize() uint32 { - if x != nil { - return x.RangeSize - } - return 0 -} - -func (x *PieceInfo) GetPieceMd5() string { - if x != nil { - return x.PieceMd5 - } - return "" -} - -func (x *PieceInfo) GetPieceOffset() uint64 { - if x != nil { - return x.PieceOffset - } - return 0 -} - -func (x *PieceInfo) GetPieceStyle() PieceStyle { - if x != nil { - return x.PieceStyle - } - return PieceStyle_PLAIN -} - -func (x *PieceInfo) GetDownloadCost() uint64 { - if x != nil { - return x.DownloadCost - } - return 0 -} - -type ExtendAttribute struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // task response header, eg: HTTP Response Header - Header map[string]string `protobuf:"bytes,1,rep,name=header,proto3" json:"header,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // task response code, eg: HTTP Status Code - StatusCode int32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` - // task response status, eg: HTTP Status - Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` -} - -func (x *ExtendAttribute) Reset() { - *x = 
ExtendAttribute{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_base_base_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtendAttribute) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtendAttribute) ProtoMessage() {} - -func (x *ExtendAttribute) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_base_base_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtendAttribute.ProtoReflect.Descriptor instead. -func (*ExtendAttribute) Descriptor() ([]byte, []int) { - return file_pkg_rpc_base_base_proto_rawDescGZIP(), []int{5} -} - -func (x *ExtendAttribute) GetHeader() map[string]string { - if x != nil { - return x.Header - } - return nil -} - -func (x *ExtendAttribute) GetStatusCode() int32 { - if x != nil { - return x.StatusCode - } - return 0 -} - -func (x *ExtendAttribute) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -type PiecePacket struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TaskId string `protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - DstPid string `protobuf:"bytes,3,opt,name=dst_pid,json=dstPid,proto3" json:"dst_pid,omitempty"` - // ip:port - DstAddr string `protobuf:"bytes,4,opt,name=dst_addr,json=dstAddr,proto3" json:"dst_addr,omitempty"` - PieceInfos []*PieceInfo `protobuf:"bytes,5,rep,name=piece_infos,json=pieceInfos,proto3" json:"piece_infos,omitempty"` - // total piece count for url, total_piece represent total piece is unknown - TotalPiece int32 `protobuf:"varint,6,opt,name=total_piece,json=totalPiece,proto3" json:"total_piece,omitempty"` - // content_length < 0 represent content length is unknown - 
ContentLength int64 `protobuf:"varint,7,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"` - // sha256 code of all piece md5 - PieceMd5Sign string `protobuf:"bytes,8,opt,name=piece_md5_sign,json=pieceMd5Sign,proto3" json:"piece_md5_sign,omitempty"` - // task extend attribute - ExtendAttribute *ExtendAttribute `protobuf:"bytes,9,opt,name=extend_attribute,json=extendAttribute,proto3" json:"extend_attribute,omitempty"` -} - -func (x *PiecePacket) Reset() { - *x = PiecePacket{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_base_base_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PiecePacket) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PiecePacket) ProtoMessage() {} - -func (x *PiecePacket) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_base_base_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PiecePacket.ProtoReflect.Descriptor instead. 
-func (*PiecePacket) Descriptor() ([]byte, []int) { - return file_pkg_rpc_base_base_proto_rawDescGZIP(), []int{6} -} - -func (x *PiecePacket) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *PiecePacket) GetDstPid() string { - if x != nil { - return x.DstPid - } - return "" -} - -func (x *PiecePacket) GetDstAddr() string { - if x != nil { - return x.DstAddr - } - return "" -} - -func (x *PiecePacket) GetPieceInfos() []*PieceInfo { - if x != nil { - return x.PieceInfos - } - return nil -} - -func (x *PiecePacket) GetTotalPiece() int32 { - if x != nil { - return x.TotalPiece - } - return 0 -} - -func (x *PiecePacket) GetContentLength() int64 { - if x != nil { - return x.ContentLength - } - return 0 -} - -func (x *PiecePacket) GetPieceMd5Sign() string { - if x != nil { - return x.PieceMd5Sign - } - return "" -} - -func (x *PiecePacket) GetExtendAttribute() *ExtendAttribute { - if x != nil { - return x.ExtendAttribute - } - return nil -} - -var File_pkg_rpc_base_base_proto protoreflect.FileDescriptor - -var file_pkg_rpc_base_base_proto_rawDesc = []byte{ - 0x0a, 0x17, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x62, - 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x62, 0x61, 0x73, 0x65, 0x1a, - 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x47, 0x0a, 0x0b, 0x47, 0x72, 0x70, 0x63, - 0x44, 0x66, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x1e, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x64, - 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x22, 0x93, 0x02, 0x0a, 0x07, 0x55, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x3f, 0x0a, - 0x06, 0x64, 
0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xfa, - 0x42, 0x24, 0x72, 0x22, 0x32, 0x1d, 0x5e, 0x28, 0x6d, 0x64, 0x35, 0x29, 0x7c, 0x28, 0x73, 0x68, - 0x61, 0x32, 0x35, 0x36, 0x29, 0x3a, 0x5b, 0x41, 0x2d, 0x46, 0x61, 0x2d, 0x66, 0x30, 0x2d, 0x39, - 0x5d, 0x2b, 0x24, 0xd0, 0x01, 0x01, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x10, - 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x61, 0x67, - 0x12, 0x2f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x19, 0xfa, 0x42, 0x16, 0x72, 0x14, 0x32, 0x0f, 0x5e, 0x5b, 0x30, 0x2d, 0x39, 0x5d, 0x2b, 0x2d, - 0x5b, 0x30, 0x2d, 0x39, 0x5d, 0x2a, 0x24, 0xd0, 0x01, 0x01, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x06, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x62, 0x61, 0x73, 0x65, - 0x2e, 0x55, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0b, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x08, 0x48, 0x6f, 0x73, 0x74, - 0x4c, 0x6f, 0x61, 0x64, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x70, 0x75, 0x5f, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x0a, 0x0a, 0x1d, 0x00, - 0x00, 0x80, 0x3f, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x52, 0x08, 0x63, 0x70, 0x75, 0x52, 0x61, 0x74, - 0x69, 0x6f, 0x12, 0x2c, 0x0a, 0x09, 0x6d, 0x65, 
0x6d, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x02, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x0a, 0x0a, 0x1d, 0x00, 0x00, 0x80, - 0x3f, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x6d, 0x52, 0x61, 0x74, 0x69, 0x6f, - 0x12, 0x2e, 0x0a, 0x0a, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x02, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x0a, 0x0a, 0x1d, 0x00, 0x00, 0x80, 0x3f, - 0x2d, 0x00, 0x00, 0x00, 0x00, 0x52, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x52, 0x61, 0x74, 0x69, 0x6f, - 0x22, 0xbd, 0x01, 0x0a, 0x10, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, - 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x73, 0x72, 0x63, 0x5f, 0x70, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, - 0x01, 0x52, 0x06, 0x73, 0x72, 0x63, 0x50, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x64, 0x73, 0x74, - 0x5f, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x10, 0x01, 0x52, 0x06, 0x64, 0x73, 0x74, 0x50, 0x69, 0x64, 0x12, 0x24, 0x0a, 0x09, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x00, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4e, 0x75, - 0x6d, 0x12, 0x1d, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, - 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x00, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x22, 0xe1, 0x02, 0x0a, 0x09, 0x50, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1b, - 0x0a, 0x09, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x08, 0x70, 0x69, 0x65, 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x12, 0x28, 0x0a, 
0x0b, 0x72, - 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, - 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, 0x28, 0x00, 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x26, 0x0a, 0x0a, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, - 0x28, 0x00, 0x52, 0x09, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x58, 0x0a, - 0x09, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x6d, 0x64, 0x35, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x3b, 0xfa, 0x42, 0x38, 0x72, 0x36, 0x32, 0x31, 0x28, 0x5b, 0x61, 0x2d, 0x66, 0x5c, 0x64, - 0x5d, 0x7b, 0x33, 0x32, 0x7d, 0x7c, 0x5b, 0x41, 0x2d, 0x46, 0x5c, 0x64, 0x5d, 0x7b, 0x33, 0x32, - 0x7d, 0x7c, 0x5b, 0x61, 0x2d, 0x66, 0x5c, 0x64, 0x5d, 0x7b, 0x31, 0x36, 0x7d, 0x7c, 0x5b, 0x41, - 0x2d, 0x46, 0x5c, 0x64, 0x5d, 0x7b, 0x31, 0x36, 0x7d, 0x29, 0xd0, 0x01, 0x01, 0x52, 0x08, 0x70, - 0x69, 0x65, 0x63, 0x65, 0x4d, 0x64, 0x35, 0x12, 0x2a, 0x0a, 0x0c, 0x70, 0x69, 0x65, 0x63, 0x65, - 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x32, 0x02, 0x28, 0x00, 0x52, 0x0b, 0x70, 0x69, 0x65, 0x63, 0x65, 0x4f, 0x66, 0x66, - 0x73, 0x65, 0x74, 0x12, 0x31, 0x0a, 0x0b, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x79, - 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, - 0x50, 0x69, 0x65, 0x63, 0x65, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x52, 0x0a, 0x70, 0x69, 0x65, 0x63, - 0x65, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x12, 0x2c, 0x0a, 0x0d, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x32, 0x02, 0x28, 0x00, 0x52, 0x0c, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, - 0x43, 0x6f, 0x73, 0x74, 0x22, 0xc0, 0x01, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x41, - 0x74, 0x74, 0x72, 0x69, 
0x62, 0x75, 0x74, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x2e, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, - 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x43, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0x39, 0x0a, 0x0b, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7, 0x02, 0x0a, 0x0b, 0x50, 0x69, 0x65, 0x63, - 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, - 0x01, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x64, 0x73, 0x74, - 0x5f, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x10, 0x01, 0x52, 0x06, 0x64, 0x73, 0x74, 0x50, 0x69, 0x64, 0x12, 0x22, 0x0a, 0x08, 0x64, - 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x07, 0x64, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x12, - 0x30, 0x0a, 0x0b, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x62, 0x61, 0x73, 
0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, - 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x70, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, - 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x69, 0x65, 0x63, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x69, 0x65, - 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x69, 0x65, - 0x63, 0x65, 0x5f, 0x6d, 0x64, 0x35, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x70, 0x69, 0x65, 0x63, 0x65, 0x4d, 0x64, 0x35, 0x53, 0x69, 0x67, 0x6e, 0x12, - 0x40, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x61, 0x73, 0x65, - 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x2a, 0xae, 0x05, 0x0a, 0x04, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x58, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, - 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x10, 0xc8, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x53, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, - 0x10, 0xf4, 0x03, 0x12, 0x13, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, - 0x61, 0x63, 0x6b, 0x65, 0x64, 0x10, 0xe8, 0x07, 0x12, 0x18, 0x0a, 0x13, 0x42, 0x61, 0x63, 0x6b, - 0x54, 0x6f, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x10, - 0xe9, 0x07, 0x12, 0x0f, 0x0a, 0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 
0x10, 0xf8, 0x0a, 0x12, 0x15, 0x0a, 0x10, 0x50, 0x65, 0x65, 0x72, 0x54, 0x61, 0x73, 0x6b, 0x4e, - 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x10, 0xfc, 0x0a, 0x12, 0x11, 0x0a, 0x0c, 0x55, 0x6e, - 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0xdc, 0x0b, 0x12, 0x13, 0x0a, - 0x0e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x4f, 0x75, 0x74, 0x10, - 0xe0, 0x0b, 0x12, 0x10, 0x0a, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x10, 0xa0, 0x1f, 0x12, 0x1b, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x69, - 0x65, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x10, 0xa1, - 0x1f, 0x12, 0x1a, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, - 0x75, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x10, 0xa2, 0x1f, 0x12, 0x1a, 0x0a, - 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x43, 0x61, - 0x6e, 0x63, 0x65, 0x6c, 0x65, 0x64, 0x10, 0xa3, 0x1f, 0x12, 0x19, 0x0a, 0x14, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x57, 0x61, 0x69, 0x74, 0x50, 0x69, 0x65, 0x63, 0x65, 0x52, 0x65, 0x61, 0x64, - 0x79, 0x10, 0xa4, 0x1f, 0x12, 0x1c, 0x0a, 0x17, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x69, - 0x65, 0x63, 0x65, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x10, - 0xa5, 0x1f, 0x12, 0x1b, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x10, 0xa6, 0x1f, 0x12, - 0x1a, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0xa7, 0x1f, 0x12, 0x1a, 0x0a, 0x15, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x10, 0xa8, 0x1f, 0x12, 0x18, 0x0a, 0x13, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x50, 0x69, 0x65, 0x63, 0x65, 
0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x10, 0xb4, - 0x22, 0x12, 0x0f, 0x0a, 0x0a, 0x53, 0x63, 0x68, 0x65, 0x64, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, - 0x88, 0x27, 0x12, 0x18, 0x0a, 0x13, 0x53, 0x63, 0x68, 0x65, 0x64, 0x4e, 0x65, 0x65, 0x64, 0x42, - 0x61, 0x63, 0x6b, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x10, 0x89, 0x27, 0x12, 0x12, 0x0a, 0x0d, - 0x53, 0x63, 0x68, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x47, 0x6f, 0x6e, 0x65, 0x10, 0x8a, 0x27, - 0x12, 0x16, 0x0a, 0x11, 0x53, 0x63, 0x68, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x6f, 0x74, - 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x10, 0x8c, 0x27, 0x12, 0x23, 0x0a, 0x1e, 0x53, 0x63, 0x68, 0x65, - 0x64, 0x50, 0x65, 0x65, 0x72, 0x50, 0x69, 0x65, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x10, 0x8d, 0x27, 0x12, 0x19, 0x0a, - 0x14, 0x53, 0x63, 0x68, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0x8e, 0x27, 0x12, 0x18, 0x0a, 0x13, 0x43, 0x44, 0x4e, 0x54, - 0x61, 0x73, 0x6b, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x10, - 0xf1, 0x2e, 0x12, 0x14, 0x0a, 0x0f, 0x43, 0x44, 0x4e, 0x54, 0x61, 0x73, 0x6b, 0x4e, 0x6f, 0x74, - 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x10, 0x84, 0x32, 0x12, 0x18, 0x0a, 0x13, 0x49, 0x6e, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x10, - 0xd9, 0x36, 0x2a, 0x17, 0x0a, 0x0a, 0x50, 0x69, 0x65, 0x63, 0x65, 0x53, 0x74, 0x79, 0x6c, 0x65, - 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x2a, 0x2c, 0x0a, 0x09, 0x53, - 0x69, 0x7a, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, - 0x41, 0x4c, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x4d, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x12, - 0x08, 0x0a, 0x04, 0x54, 0x49, 0x4e, 0x59, 0x10, 0x02, 0x2a, 0x2d, 0x0a, 0x07, 0x50, 0x61, 0x74, - 0x74, 0x65, 0x72, 0x6e, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x32, 0x50, 0x10, 
0x00, 0x12, 0x0d, 0x0a, - 0x09, 0x53, 0x45, 0x45, 0x44, 0x5f, 0x50, 0x45, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, - 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x2a, 0x30, 0x0a, 0x08, 0x54, 0x61, 0x73, 0x6b, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x10, 0x00, - 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x66, 0x43, 0x61, 0x63, 0x68, 0x65, 0x10, 0x01, 0x12, 0x0b, 0x0a, - 0x07, 0x44, 0x66, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x10, 0x02, 0x42, 0x22, 0x5a, 0x20, 0x64, 0x37, - 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x64, 0x72, 0x61, 0x67, 0x6f, 0x6e, 0x66, 0x6c, 0x79, 0x2f, 0x76, - 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_pkg_rpc_base_base_proto_rawDescOnce sync.Once - file_pkg_rpc_base_base_proto_rawDescData = file_pkg_rpc_base_base_proto_rawDesc -) - -func file_pkg_rpc_base_base_proto_rawDescGZIP() []byte { - file_pkg_rpc_base_base_proto_rawDescOnce.Do(func() { - file_pkg_rpc_base_base_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_rpc_base_base_proto_rawDescData) - }) - return file_pkg_rpc_base_base_proto_rawDescData -} - -var file_pkg_rpc_base_base_proto_enumTypes = make([]protoimpl.EnumInfo, 5) -var file_pkg_rpc_base_base_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_pkg_rpc_base_base_proto_goTypes = []interface{}{ - (Code)(0), // 0: base.Code - (PieceStyle)(0), // 1: base.PieceStyle - (SizeScope)(0), // 2: base.SizeScope - (Pattern)(0), // 3: base.Pattern - (TaskType)(0), // 4: base.TaskType - (*GrpcDfError)(nil), // 5: base.GrpcDfError - (*UrlMeta)(nil), // 6: base.UrlMeta - (*HostLoad)(nil), // 7: base.HostLoad - (*PieceTaskRequest)(nil), // 8: base.PieceTaskRequest - (*PieceInfo)(nil), // 9: base.PieceInfo - (*ExtendAttribute)(nil), // 10: base.ExtendAttribute - (*PiecePacket)(nil), // 11: base.PiecePacket - nil, // 12: base.UrlMeta.HeaderEntry - nil, // 13: base.ExtendAttribute.HeaderEntry -} -var 
file_pkg_rpc_base_base_proto_depIdxs = []int32{ - 0, // 0: base.GrpcDfError.code:type_name -> base.Code - 12, // 1: base.UrlMeta.header:type_name -> base.UrlMeta.HeaderEntry - 1, // 2: base.PieceInfo.piece_style:type_name -> base.PieceStyle - 13, // 3: base.ExtendAttribute.header:type_name -> base.ExtendAttribute.HeaderEntry - 9, // 4: base.PiecePacket.piece_infos:type_name -> base.PieceInfo - 10, // 5: base.PiecePacket.extend_attribute:type_name -> base.ExtendAttribute - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name -} - -func init() { file_pkg_rpc_base_base_proto_init() } -func file_pkg_rpc_base_base_proto_init() { - if File_pkg_rpc_base_base_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_pkg_rpc_base_base_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcDfError); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_base_base_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UrlMeta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_base_base_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HostLoad); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_base_base_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PieceTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_pkg_rpc_base_base_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PieceInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_base_base_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtendAttribute); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_base_base_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PiecePacket); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pkg_rpc_base_base_proto_rawDesc, - NumEnums: 5, - NumMessages: 9, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_pkg_rpc_base_base_proto_goTypes, - DependencyIndexes: file_pkg_rpc_base_base_proto_depIdxs, - EnumInfos: file_pkg_rpc_base_base_proto_enumTypes, - MessageInfos: file_pkg_rpc_base_base_proto_msgTypes, - }.Build() - File_pkg_rpc_base_base_proto = out.File - file_pkg_rpc_base_base_proto_rawDesc = nil - file_pkg_rpc_base_base_proto_goTypes = nil - file_pkg_rpc_base_base_proto_depIdxs = nil -} diff --git a/pkg/rpc/base/base.pb.validate.go b/pkg/rpc/base/base.pb.validate.go deleted file mode 100644 index 87f6dfebe..000000000 --- a/pkg/rpc/base/base.pb.validate.go +++ /dev/null @@ -1,681 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. 
-// source: pkg/rpc/base/base.proto - -package base - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} -) - -// Validate checks the field values on GrpcDfError with the rules defined in -// the proto definition for this message. If any rules are violated, an error -// is returned. -func (m *GrpcDfError) Validate() error { - if m == nil { - return nil - } - - // no validation rules for Code - - // no validation rules for Message - - return nil -} - -// GrpcDfErrorValidationError is the validation error returned by -// GrpcDfError.Validate if the designated constraints aren't met. -type GrpcDfErrorValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e GrpcDfErrorValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e GrpcDfErrorValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e GrpcDfErrorValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e GrpcDfErrorValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e GrpcDfErrorValidationError) ErrorName() string { return "GrpcDfErrorValidationError" } - -// Error satisfies the builtin error interface -func (e GrpcDfErrorValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGrpcDfError.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GrpcDfErrorValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GrpcDfErrorValidationError{} - -// Validate checks the field values on UrlMeta with the rules defined in the -// proto definition for this message. If any rules are violated, an error is returned. -func (m *UrlMeta) Validate() error { - if m == nil { - return nil - } - - if m.GetDigest() != "" { - - if !_UrlMeta_Digest_Pattern.MatchString(m.GetDigest()) { - return UrlMetaValidationError{ - field: "Digest", - reason: "value does not match regex pattern \"^(md5)|(sha256):[A-Fa-f0-9]+$\"", - } - } - - } - - // no validation rules for Tag - - if m.GetRange() != "" { - - if !_UrlMeta_Range_Pattern.MatchString(m.GetRange()) { - return UrlMetaValidationError{ - field: "Range", - reason: "value does not match regex pattern \"^[0-9]+-[0-9]*$\"", - } - } - - } - - // no validation rules for Filter - - // no validation rules for Header - - return nil -} - -// UrlMetaValidationError is the validation error returned by UrlMeta.Validate -// if the designated constraints aren't met. -type UrlMetaValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e UrlMetaValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e UrlMetaValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. 
-func (e UrlMetaValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e UrlMetaValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e UrlMetaValidationError) ErrorName() string { return "UrlMetaValidationError" } - -// Error satisfies the builtin error interface -func (e UrlMetaValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sUrlMeta.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = UrlMetaValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = UrlMetaValidationError{} - -var _UrlMeta_Digest_Pattern = regexp.MustCompile("^(md5)|(sha256):[A-Fa-f0-9]+$") - -var _UrlMeta_Range_Pattern = regexp.MustCompile("^[0-9]+-[0-9]*$") - -// Validate checks the field values on HostLoad with the rules defined in the -// proto definition for this message. If any rules are violated, an error is returned. -func (m *HostLoad) Validate() error { - if m == nil { - return nil - } - - if val := m.GetCpuRatio(); val < 0 || val > 1 { - return HostLoadValidationError{ - field: "CpuRatio", - reason: "value must be inside range [0, 1]", - } - } - - if val := m.GetMemRatio(); val < 0 || val > 1 { - return HostLoadValidationError{ - field: "MemRatio", - reason: "value must be inside range [0, 1]", - } - } - - if val := m.GetDiskRatio(); val < 0 || val > 1 { - return HostLoadValidationError{ - field: "DiskRatio", - reason: "value must be inside range [0, 1]", - } - } - - return nil -} - -// HostLoadValidationError is the validation error returned by -// HostLoad.Validate if the designated constraints aren't met. -type HostLoadValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. 
-func (e HostLoadValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e HostLoadValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e HostLoadValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e HostLoadValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e HostLoadValidationError) ErrorName() string { return "HostLoadValidationError" } - -// Error satisfies the builtin error interface -func (e HostLoadValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sHostLoad.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = HostLoadValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = HostLoadValidationError{} - -// Validate checks the field values on PieceTaskRequest with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. 
-func (m *PieceTaskRequest) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetTaskId()) < 1 { - return PieceTaskRequestValidationError{ - field: "TaskId", - reason: "value length must be at least 1 runes", - } - } - - if utf8.RuneCountInString(m.GetSrcPid()) < 1 { - return PieceTaskRequestValidationError{ - field: "SrcPid", - reason: "value length must be at least 1 runes", - } - } - - if utf8.RuneCountInString(m.GetDstPid()) < 1 { - return PieceTaskRequestValidationError{ - field: "DstPid", - reason: "value length must be at least 1 runes", - } - } - - if m.GetStartNum() < 0 { - return PieceTaskRequestValidationError{ - field: "StartNum", - reason: "value must be greater than or equal to 0", - } - } - - if m.GetLimit() < 0 { - return PieceTaskRequestValidationError{ - field: "Limit", - reason: "value must be greater than or equal to 0", - } - } - - return nil -} - -// PieceTaskRequestValidationError is the validation error returned by -// PieceTaskRequest.Validate if the designated constraints aren't met. -type PieceTaskRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e PieceTaskRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e PieceTaskRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e PieceTaskRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e PieceTaskRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e PieceTaskRequestValidationError) ErrorName() string { return "PieceTaskRequestValidationError" } - -// Error satisfies the builtin error interface -func (e PieceTaskRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sPieceTaskRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = PieceTaskRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = PieceTaskRequestValidationError{} - -// Validate checks the field values on PieceInfo with the rules defined in the -// proto definition for this message. If any rules are violated, an error is returned. -func (m *PieceInfo) Validate() error { - if m == nil { - return nil - } - - // no validation rules for PieceNum - - if m.GetRangeStart() < 0 { - return PieceInfoValidationError{ - field: "RangeStart", - reason: "value must be greater than or equal to 0", - } - } - - if m.GetRangeSize() < 0 { - return PieceInfoValidationError{ - field: "RangeSize", - reason: "value must be greater than or equal to 0", - } - } - - if m.GetPieceMd5() != "" { - - if !_PieceInfo_PieceMd5_Pattern.MatchString(m.GetPieceMd5()) { - return PieceInfoValidationError{ - field: "PieceMd5", - reason: "value does not match regex pattern \"([a-f\\\\d]{32}|[A-F\\\\d]{32}|[a-f\\\\d]{16}|[A-F\\\\d]{16})\"", - } - } - - } - - if m.GetPieceOffset() < 0 { - return PieceInfoValidationError{ - field: "PieceOffset", - reason: "value must be greater than or equal to 0", - } - } - - // no validation rules for PieceStyle - - if m.GetDownloadCost() < 0 { - return PieceInfoValidationError{ - field: "DownloadCost", - reason: "value must be greater than or equal to 0", - } - } - - return nil -} - -// PieceInfoValidationError is the validation error returned by -// PieceInfo.Validate if the designated 
constraints aren't met. -type PieceInfoValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e PieceInfoValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e PieceInfoValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e PieceInfoValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e PieceInfoValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e PieceInfoValidationError) ErrorName() string { return "PieceInfoValidationError" } - -// Error satisfies the builtin error interface -func (e PieceInfoValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sPieceInfo.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = PieceInfoValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = PieceInfoValidationError{} - -var _PieceInfo_PieceMd5_Pattern = regexp.MustCompile("([a-f\\d]{32}|[A-F\\d]{32}|[a-f\\d]{16}|[A-F\\d]{16})") - -// Validate checks the field values on ExtendAttribute with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. -func (m *ExtendAttribute) Validate() error { - if m == nil { - return nil - } - - // no validation rules for Header - - // no validation rules for StatusCode - - // no validation rules for Status - - return nil -} - -// ExtendAttributeValidationError is the validation error returned by -// ExtendAttribute.Validate if the designated constraints aren't met. 
-type ExtendAttributeValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ExtendAttributeValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ExtendAttributeValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ExtendAttributeValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ExtendAttributeValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e ExtendAttributeValidationError) ErrorName() string { return "ExtendAttributeValidationError" } - -// Error satisfies the builtin error interface -func (e ExtendAttributeValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sExtendAttribute.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ExtendAttributeValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ExtendAttributeValidationError{} - -// Validate checks the field values on PiecePacket with the rules defined in -// the proto definition for this message. If any rules are violated, an error -// is returned. 
-func (m *PiecePacket) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetTaskId()) < 1 { - return PiecePacketValidationError{ - field: "TaskId", - reason: "value length must be at least 1 runes", - } - } - - if utf8.RuneCountInString(m.GetDstPid()) < 1 { - return PiecePacketValidationError{ - field: "DstPid", - reason: "value length must be at least 1 runes", - } - } - - if utf8.RuneCountInString(m.GetDstAddr()) < 1 { - return PiecePacketValidationError{ - field: "DstAddr", - reason: "value length must be at least 1 runes", - } - } - - for idx, item := range m.GetPieceInfos() { - _, _ = idx, item - - if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return PiecePacketValidationError{ - field: fmt.Sprintf("PieceInfos[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - // no validation rules for TotalPiece - - // no validation rules for ContentLength - - // no validation rules for PieceMd5Sign - - if v, ok := interface{}(m.GetExtendAttribute()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return PiecePacketValidationError{ - field: "ExtendAttribute", - reason: "embedded message failed validation", - cause: err, - } - } - } - - return nil -} - -// PiecePacketValidationError is the validation error returned by -// PiecePacket.Validate if the designated constraints aren't met. -type PiecePacketValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e PiecePacketValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e PiecePacketValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e PiecePacketValidationError) Cause() error { return e.cause } - -// Key function returns key value. 
-func (e PiecePacketValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e PiecePacketValidationError) ErrorName() string { return "PiecePacketValidationError" } - -// Error satisfies the builtin error interface -func (e PiecePacketValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sPiecePacket.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = PiecePacketValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = PiecePacketValidationError{} diff --git a/pkg/rpc/base/base.proto b/pkg/rpc/base/base.proto deleted file mode 100644 index e0f39f8ff..000000000 --- a/pkg/rpc/base/base.proto +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -syntax = "proto3"; - -package base; - -import "validate/validate.proto"; - -option go_package = "d7y.io/dragonfly/v2/pkg/rpc/base"; - -enum Code{ - X_UNSPECIFIED = 0; - // success code 200-299 - Success = 200; - // framework can not find server node - ServerUnavailable = 500; - - // common response error 1000-1999 - // client can be migrated to another scheduler/CDN - ResourceLacked = 1000; - BackToSourceAborted = 1001; - BadRequest = 1400; - PeerTaskNotFound = 1404; - UnknownError = 1500; - RequestTimeOut = 1504; - - // client response error 4000-4999 - ClientError = 4000; - ClientPieceRequestFail = 4001; // get piece task from other peer error - ClientScheduleTimeout = 4002; // wait scheduler response timeout - ClientContextCanceled = 4003; - ClientWaitPieceReady = 4004; // when target peer downloads from source slowly, should wait - ClientPieceDownloadFail = 4005; - ClientRequestLimitFail = 4006; - ClientConnectionError = 4007; - ClientBackSourceError = 4008; - ClientPieceNotFound = 4404; - - // scheduler response error 5000-5999 - SchedError = 5000; - SchedNeedBackSource = 5001; // client should try to download from source - SchedPeerGone = 5002; // client should disconnect from scheduler - SchedPeerNotFound = 5004; // peer not found in scheduler - SchedPeerPieceResultReportFail = 5005; // report piece - SchedTaskStatusError = 5006; // task status is fail - - // cdnsystem response error 6000-6999 - CDNTaskRegistryFail = 6001; - CDNTaskNotFound = 6404; - - // manager response error 7000-7999 - InvalidResourceType = 7001; -} - -enum PieceStyle{ - PLAIN = 0; -} - -enum SizeScope{ - // size > one piece size - NORMAL = 0; - // 128 byte < size <= one piece size and be plain type - SMALL = 1; - // size <= 128 byte and be plain type - TINY = 2; -} - -// Pattern represents pattern of task. -enum Pattern{ - // Default pattern, scheduler will use all p2p node - // include dfdaemon and seed peers. 
- P2P = 0; - - // Seed peer pattern, scheduler will use only seed peers. - SEED_PEER = 1; - - // Source pattern, scheduler will say back source - // when there is no available peer in p2p. - SOURCE = 2; -} - -// TaskType represents type of task. -enum TaskType{ - // Normal is normal type of task, - // normal task is a normal p2p task. - Normal = 0; - - // DfCache is dfcache type of task, - // dfcache task is a cache task, and the task url is fake url. - // It can only be used for caching and cannot be downloaded back to source. - DfCache = 1; - - // DfStore is dfstore type of task, - // dfstore task is a persistent task in backend. - DfStore = 2; -} - -message GrpcDfError { - Code code = 1; - string message = 2; -} - -// UrlMeta describes url meta info. -message UrlMeta{ - // digest checks integrity of url content, for example md5:xxx or sha256:yyy - string digest = 1 [(validate.rules).string = {pattern: "^(md5)|(sha256):[A-Fa-f0-9]+$", ignore_empty:true}]; - // url tag identifies different task for same url, conflict with digest - string tag = 2; - // content range for url - string range = 3 [(validate.rules).string = {pattern: "^[0-9]+-[0-9]*$", ignore_empty:true}]; - // filter url used to generate task id - string filter = 4; - // other url header infos - map header = 5; -} - -message HostLoad{ - // cpu usage - float cpu_ratio = 1 [(validate.rules).float = {gte: 0, lte: 1}]; - // memory usage - float mem_ratio = 2 [(validate.rules).float = {gte: 0, lte: 1}]; - // disk space usage - float disk_ratio = 3 [(validate.rules).float = {gte: 0, lte: 1}]; -} - -message PieceTaskRequest{ - string task_id = 1 [(validate.rules).string.min_len = 1]; - string src_pid = 2 [(validate.rules).string.min_len = 1]; - string dst_pid = 3 [(validate.rules).string.min_len = 1]; - // piece number - uint32 start_num = 4 [(validate.rules).uint32.gte = 0]; - // expected piece count, limit = 0 represent request pieces as many shards as possible - uint32 limit = 5 
[(validate.rules).uint32.gte = 0]; -} - -message PieceInfo{ - // piece_num < 0 represent start report piece flag - int32 piece_num = 1; - uint64 range_start = 2 [(validate.rules).uint64.gte = 0]; - uint32 range_size = 3 [(validate.rules).uint32.gte = 0]; - string piece_md5 = 4 [(validate.rules).string = {pattern:"([a-f\\d]{32}|[A-F\\d]{32}|[a-f\\d]{16}|[A-F\\d]{16})", ignore_empty:true}]; - uint64 piece_offset = 5 [(validate.rules).uint64.gte = 0]; - base.PieceStyle piece_style = 6; - // total time(millisecond) consumed - uint64 download_cost = 7 [(validate.rules).uint64.gte = 0]; -} - -message ExtendAttribute{ - // task response header, eg: HTTP Response Header - map header = 1; - // task response code, eg: HTTP Status Code - int32 status_code = 2; - // task response status, eg: HTTP Status - string status = 3; -} - -message PiecePacket{ - string task_id = 2 [(validate.rules).string.min_len = 1]; - string dst_pid = 3 [(validate.rules).string.min_len = 1]; - // ip:port - string dst_addr = 4 [(validate.rules).string.min_len = 1]; - repeated PieceInfo piece_infos = 5; - // total piece count for url, total_piece represent total piece is unknown - int32 total_piece = 6; - // content_length < 0 represent content length is unknown - int64 content_length = 7; - // sha256 code of all piece md5 - string piece_md5_sign = 8; - // task extend attribute - ExtendAttribute extend_attribute = 9; -} diff --git a/pkg/rpc/base/mocks/base_mock.go b/pkg/rpc/base/mocks/base_mock.go deleted file mode 100644 index dc3272a3e..000000000 --- a/pkg/rpc/base/mocks/base_mock.go +++ /dev/null @@ -1,5 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: base/base.pb.go - -// Package mocks is a generated GoMock package. 
-package mocks diff --git a/pkg/rpc/cdnsystem/cdnsystem.pb.go b/pkg/rpc/cdnsystem/cdnsystem.pb.go deleted file mode 100644 index eea9b9236..000000000 --- a/pkg/rpc/cdnsystem/cdnsystem.pb.go +++ /dev/null @@ -1,591 +0,0 @@ -// -// Copyright 2020 The Dragonfly Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: pkg/rpc/cdnsystem/cdnsystem.proto - -package cdnsystem - -import ( - context "context" - base "d7y.io/dragonfly/v2/pkg/rpc/base" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type SeedRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - UrlMeta *base.UrlMeta `protobuf:"bytes,3,opt,name=url_meta,json=urlMeta,proto3" json:"url_meta,omitempty"` -} - -func (x *SeedRequest) Reset() { - *x = SeedRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SeedRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SeedRequest) ProtoMessage() {} - -func (x *SeedRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SeedRequest.ProtoReflect.Descriptor instead. 
-func (*SeedRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescGZIP(), []int{0} -} - -func (x *SeedRequest) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *SeedRequest) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *SeedRequest) GetUrlMeta() *base.UrlMeta { - if x != nil { - return x.UrlMeta - } - return nil -} - -// keep piece meta and data separately -// check piece md5, md5s sign and total content length -type PieceSeed struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // peer id for cdn node, need suffix with _CDN - PeerId string `protobuf:"bytes,2,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` - // cdn host id - HostId string `protobuf:"bytes,3,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"` - PieceInfo *base.PieceInfo `protobuf:"bytes,4,opt,name=piece_info,json=pieceInfo,proto3" json:"piece_info,omitempty"` - // whether or not all seeds are downloaded - Done bool `protobuf:"varint,5,opt,name=done,proto3" json:"done,omitempty"` - // content total length for the url, content_length < 0 represent content length is unknown - ContentLength int64 `protobuf:"varint,6,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"` - // total piece count, -1 represents task is downloading or failed - TotalPieceCount int32 `protobuf:"varint,7,opt,name=total_piece_count,json=totalPieceCount,proto3" json:"total_piece_count,omitempty"` - // begin time for the piece downloading - BeginTime uint64 `protobuf:"varint,8,opt,name=begin_time,json=beginTime,proto3" json:"begin_time,omitempty"` - // end time for the piece downloading - EndTime uint64 `protobuf:"varint,9,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - // task extend attribute - ExtendAttribute *base.ExtendAttribute 
`protobuf:"bytes,10,opt,name=extend_attribute,json=extendAttribute,proto3" json:"extend_attribute,omitempty"` -} - -func (x *PieceSeed) Reset() { - *x = PieceSeed{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PieceSeed) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PieceSeed) ProtoMessage() {} - -func (x *PieceSeed) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PieceSeed.ProtoReflect.Descriptor instead. -func (*PieceSeed) Descriptor() ([]byte, []int) { - return file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescGZIP(), []int{1} -} - -func (x *PieceSeed) GetPeerId() string { - if x != nil { - return x.PeerId - } - return "" -} - -func (x *PieceSeed) GetHostId() string { - if x != nil { - return x.HostId - } - return "" -} - -func (x *PieceSeed) GetPieceInfo() *base.PieceInfo { - if x != nil { - return x.PieceInfo - } - return nil -} - -func (x *PieceSeed) GetDone() bool { - if x != nil { - return x.Done - } - return false -} - -func (x *PieceSeed) GetContentLength() int64 { - if x != nil { - return x.ContentLength - } - return 0 -} - -func (x *PieceSeed) GetTotalPieceCount() int32 { - if x != nil { - return x.TotalPieceCount - } - return 0 -} - -func (x *PieceSeed) GetBeginTime() uint64 { - if x != nil { - return x.BeginTime - } - return 0 -} - -func (x *PieceSeed) GetEndTime() uint64 { - if x != nil { - return x.EndTime - } - return 0 -} - -func (x *PieceSeed) GetExtendAttribute() *base.ExtendAttribute { - if x != nil { - return x.ExtendAttribute - } - return nil -} - -var File_pkg_rpc_cdnsystem_cdnsystem_proto 
protoreflect.FileDescriptor - -var file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDesc = []byte{ - 0x0a, 0x21, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, - 0x74, 0x65, 0x6d, 0x2f, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x1a, 0x17, - 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x62, 0x61, 0x73, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0x75, 0x0a, 0x0b, 0x53, 0x65, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, - 0x64, 0x12, 0x1a, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x72, 0x03, 0x88, 0x01, 0x01, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x28, 0x0a, - 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0d, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x55, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x07, - 0x75, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x22, 0xe2, 0x02, 0x0a, 0x09, 0x50, 0x69, 0x65, 0x63, - 0x65, 0x53, 0x65, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, - 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, - 0x01, 0x52, 0x06, 0x68, 0x6f, 0x73, 0x74, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x0a, 0x70, 0x69, 0x65, - 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 
0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, - 0x70, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x25, 0x0a, - 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x69, - 0x65, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x69, 0x65, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x19, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x10, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x32, 0xc4, 0x01, 0x0a, - 0x06, 0x53, 0x65, 0x65, 0x64, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0b, 0x4f, 0x62, 0x74, 0x61, 0x69, - 0x6e, 0x53, 0x65, 0x65, 0x64, 0x73, 0x12, 0x16, 0x2e, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, - 0x65, 0x6d, 0x2e, 0x53, 0x65, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, - 0x2e, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x50, 0x69, 
0x65, 0x63, 0x65, - 0x53, 0x65, 0x65, 0x64, 0x30, 0x01, 0x12, 0x3a, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x69, 0x65, - 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, - 0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x11, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x3f, 0x0a, 0x0e, 0x53, 0x79, 0x6e, 0x63, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54, - 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, - 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x62, - 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x28, - 0x01, 0x30, 0x01, 0x42, 0x27, 0x5a, 0x25, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x64, 0x72, - 0x61, 0x67, 0x6f, 0x6e, 0x66, 0x6c, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x72, - 0x70, 0x63, 0x2f, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescOnce sync.Once - file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescData = file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDesc -) - -func file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescGZIP() []byte { - file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescOnce.Do(func() { - file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescData) - }) - return file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescData -} - -var file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_pkg_rpc_cdnsystem_cdnsystem_proto_goTypes = []interface{}{ - (*SeedRequest)(nil), // 0: cdnsystem.SeedRequest - (*PieceSeed)(nil), // 1: cdnsystem.PieceSeed - (*base.UrlMeta)(nil), // 2: base.UrlMeta - (*base.PieceInfo)(nil), // 3: base.PieceInfo - 
(*base.ExtendAttribute)(nil), // 4: base.ExtendAttribute - (*base.PieceTaskRequest)(nil), // 5: base.PieceTaskRequest - (*base.PiecePacket)(nil), // 6: base.PiecePacket -} -var file_pkg_rpc_cdnsystem_cdnsystem_proto_depIdxs = []int32{ - 2, // 0: cdnsystem.SeedRequest.url_meta:type_name -> base.UrlMeta - 3, // 1: cdnsystem.PieceSeed.piece_info:type_name -> base.PieceInfo - 4, // 2: cdnsystem.PieceSeed.extend_attribute:type_name -> base.ExtendAttribute - 0, // 3: cdnsystem.Seeder.ObtainSeeds:input_type -> cdnsystem.SeedRequest - 5, // 4: cdnsystem.Seeder.GetPieceTasks:input_type -> base.PieceTaskRequest - 5, // 5: cdnsystem.Seeder.SyncPieceTasks:input_type -> base.PieceTaskRequest - 1, // 6: cdnsystem.Seeder.ObtainSeeds:output_type -> cdnsystem.PieceSeed - 6, // 7: cdnsystem.Seeder.GetPieceTasks:output_type -> base.PiecePacket - 6, // 8: cdnsystem.Seeder.SyncPieceTasks:output_type -> base.PiecePacket - 6, // [6:9] is the sub-list for method output_type - 3, // [3:6] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_pkg_rpc_cdnsystem_cdnsystem_proto_init() } -func file_pkg_rpc_cdnsystem_cdnsystem_proto_init() { - if File_pkg_rpc_cdnsystem_cdnsystem_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SeedRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PieceSeed); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: 
protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_pkg_rpc_cdnsystem_cdnsystem_proto_goTypes, - DependencyIndexes: file_pkg_rpc_cdnsystem_cdnsystem_proto_depIdxs, - MessageInfos: file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes, - }.Build() - File_pkg_rpc_cdnsystem_cdnsystem_proto = out.File - file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDesc = nil - file_pkg_rpc_cdnsystem_cdnsystem_proto_goTypes = nil - file_pkg_rpc_cdnsystem_cdnsystem_proto_depIdxs = nil -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// SeederClient is the client API for Seeder service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type SeederClient interface { - // Generate seeds and return to scheduler - ObtainSeeds(ctx context.Context, in *SeedRequest, opts ...grpc.CallOption) (Seeder_ObtainSeedsClient, error) - // Get piece tasks from cdn - GetPieceTasks(ctx context.Context, in *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) - // Sync piece tasks with other peers - SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (Seeder_SyncPieceTasksClient, error) -} - -type seederClient struct { - cc grpc.ClientConnInterface -} - -func NewSeederClient(cc grpc.ClientConnInterface) SeederClient { - return &seederClient{cc} -} - -func (c *seederClient) ObtainSeeds(ctx context.Context, in *SeedRequest, opts ...grpc.CallOption) (Seeder_ObtainSeedsClient, error) { - stream, err := c.cc.NewStream(ctx, &_Seeder_serviceDesc.Streams[0], "/cdnsystem.Seeder/ObtainSeeds", opts...) - if err != nil { - return nil, err - } - x := &seederObtainSeedsClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Seeder_ObtainSeedsClient interface { - Recv() (*PieceSeed, error) - grpc.ClientStream -} - -type seederObtainSeedsClient struct { - grpc.ClientStream -} - -func (x *seederObtainSeedsClient) Recv() (*PieceSeed, error) { - m := new(PieceSeed) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *seederClient) GetPieceTasks(ctx context.Context, in *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { - out := new(base.PiecePacket) - err := c.cc.Invoke(ctx, "/cdnsystem.Seeder/GetPieceTasks", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *seederClient) SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (Seeder_SyncPieceTasksClient, error) { - stream, err := c.cc.NewStream(ctx, &_Seeder_serviceDesc.Streams[1], "/cdnsystem.Seeder/SyncPieceTasks", opts...) - if err != nil { - return nil, err - } - x := &seederSyncPieceTasksClient{stream} - return x, nil -} - -type Seeder_SyncPieceTasksClient interface { - Send(*base.PieceTaskRequest) error - Recv() (*base.PiecePacket, error) - grpc.ClientStream -} - -type seederSyncPieceTasksClient struct { - grpc.ClientStream -} - -func (x *seederSyncPieceTasksClient) Send(m *base.PieceTaskRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *seederSyncPieceTasksClient) Recv() (*base.PiecePacket, error) { - m := new(base.PiecePacket) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// SeederServer is the server API for Seeder service. -type SeederServer interface { - // Generate seeds and return to scheduler - ObtainSeeds(*SeedRequest, Seeder_ObtainSeedsServer) error - // Get piece tasks from cdn - GetPieceTasks(context.Context, *base.PieceTaskRequest) (*base.PiecePacket, error) - // Sync piece tasks with other peers - SyncPieceTasks(Seeder_SyncPieceTasksServer) error -} - -// UnimplementedSeederServer can be embedded to have forward compatible implementations. 
-type UnimplementedSeederServer struct { -} - -func (*UnimplementedSeederServer) ObtainSeeds(*SeedRequest, Seeder_ObtainSeedsServer) error { - return status.Errorf(codes.Unimplemented, "method ObtainSeeds not implemented") -} -func (*UnimplementedSeederServer) GetPieceTasks(context.Context, *base.PieceTaskRequest) (*base.PiecePacket, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPieceTasks not implemented") -} -func (*UnimplementedSeederServer) SyncPieceTasks(Seeder_SyncPieceTasksServer) error { - return status.Errorf(codes.Unimplemented, "method SyncPieceTasks not implemented") -} - -func RegisterSeederServer(s *grpc.Server, srv SeederServer) { - s.RegisterService(&_Seeder_serviceDesc, srv) -} - -func _Seeder_ObtainSeeds_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SeedRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(SeederServer).ObtainSeeds(m, &seederObtainSeedsServer{stream}) -} - -type Seeder_ObtainSeedsServer interface { - Send(*PieceSeed) error - grpc.ServerStream -} - -type seederObtainSeedsServer struct { - grpc.ServerStream -} - -func (x *seederObtainSeedsServer) Send(m *PieceSeed) error { - return x.ServerStream.SendMsg(m) -} - -func _Seeder_GetPieceTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(base.PieceTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SeederServer).GetPieceTasks(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cdnsystem.Seeder/GetPieceTasks", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SeederServer).GetPieceTasks(ctx, req.(*base.PieceTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Seeder_SyncPieceTasks_Handler(srv interface{}, stream grpc.ServerStream) error { - return 
srv.(SeederServer).SyncPieceTasks(&seederSyncPieceTasksServer{stream}) -} - -type Seeder_SyncPieceTasksServer interface { - Send(*base.PiecePacket) error - Recv() (*base.PieceTaskRequest, error) - grpc.ServerStream -} - -type seederSyncPieceTasksServer struct { - grpc.ServerStream -} - -func (x *seederSyncPieceTasksServer) Send(m *base.PiecePacket) error { - return x.ServerStream.SendMsg(m) -} - -func (x *seederSyncPieceTasksServer) Recv() (*base.PieceTaskRequest, error) { - m := new(base.PieceTaskRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Seeder_serviceDesc = grpc.ServiceDesc{ - ServiceName: "cdnsystem.Seeder", - HandlerType: (*SeederServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetPieceTasks", - Handler: _Seeder_GetPieceTasks_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ObtainSeeds", - Handler: _Seeder_ObtainSeeds_Handler, - ServerStreams: true, - }, - { - StreamName: "SyncPieceTasks", - Handler: _Seeder_SyncPieceTasks_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "pkg/rpc/cdnsystem/cdnsystem.proto", -} diff --git a/pkg/rpc/cdnsystem/cdnsystem.pb.validate.go b/pkg/rpc/cdnsystem/cdnsystem.pb.validate.go deleted file mode 100644 index 9c2da34ed..000000000 --- a/pkg/rpc/cdnsystem/cdnsystem.pb.validate.go +++ /dev/null @@ -1,237 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. 
-// source: pkg/rpc/cdnsystem/cdnsystem.proto - -package cdnsystem - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} -) - -// Validate checks the field values on SeedRequest with the rules defined in -// the proto definition for this message. If any rules are violated, an error -// is returned. -func (m *SeedRequest) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetTaskId()) < 1 { - return SeedRequestValidationError{ - field: "TaskId", - reason: "value length must be at least 1 runes", - } - } - - if uri, err := url.Parse(m.GetUrl()); err != nil { - return SeedRequestValidationError{ - field: "Url", - reason: "value must be a valid URI", - cause: err, - } - } else if !uri.IsAbs() { - return SeedRequestValidationError{ - field: "Url", - reason: "value must be absolute", - } - } - - if v, ok := interface{}(m.GetUrlMeta()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SeedRequestValidationError{ - field: "UrlMeta", - reason: "embedded message failed validation", - cause: err, - } - } - } - - return nil -} - -// SeedRequestValidationError is the validation error returned by -// SeedRequest.Validate if the designated constraints aren't met. -type SeedRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e SeedRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. 
-func (e SeedRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SeedRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SeedRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e SeedRequestValidationError) ErrorName() string { return "SeedRequestValidationError" } - -// Error satisfies the builtin error interface -func (e SeedRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSeedRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SeedRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SeedRequestValidationError{} - -// Validate checks the field values on PieceSeed with the rules defined in the -// proto definition for this message. If any rules are violated, an error is returned. 
-func (m *PieceSeed) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetPeerId()) < 1 { - return PieceSeedValidationError{ - field: "PeerId", - reason: "value length must be at least 1 runes", - } - } - - if utf8.RuneCountInString(m.GetHostId()) < 1 { - return PieceSeedValidationError{ - field: "HostId", - reason: "value length must be at least 1 runes", - } - } - - if v, ok := interface{}(m.GetPieceInfo()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return PieceSeedValidationError{ - field: "PieceInfo", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for Done - - // no validation rules for ContentLength - - // no validation rules for TotalPieceCount - - // no validation rules for BeginTime - - // no validation rules for EndTime - - if v, ok := interface{}(m.GetExtendAttribute()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return PieceSeedValidationError{ - field: "ExtendAttribute", - reason: "embedded message failed validation", - cause: err, - } - } - } - - return nil -} - -// PieceSeedValidationError is the validation error returned by -// PieceSeed.Validate if the designated constraints aren't met. -type PieceSeedValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e PieceSeedValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e PieceSeedValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e PieceSeedValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e PieceSeedValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e PieceSeedValidationError) ErrorName() string { return "PieceSeedValidationError" } - -// Error satisfies the builtin error interface -func (e PieceSeedValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sPieceSeed.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = PieceSeedValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = PieceSeedValidationError{} diff --git a/pkg/rpc/cdnsystem/cdnsystem.proto b/pkg/rpc/cdnsystem/cdnsystem.proto deleted file mode 100644 index 54be1f8ef..000000000 --- a/pkg/rpc/cdnsystem/cdnsystem.proto +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -syntax = "proto3"; - -package cdnsystem; - -import "pkg/rpc/base/base.proto"; -import "validate/validate.proto"; - -option go_package = "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"; - -message SeedRequest{ - string task_id = 1 [(validate.rules).string.min_len = 1]; - string url = 2 [(validate.rules).string.uri = true]; - base.UrlMeta url_meta = 3; -} - -// keep piece meta and data separately -// check piece md5, md5s sign and total content length -message PieceSeed{ - // peer id for cdn node, need suffix with _CDN - string peer_id = 2 [(validate.rules).string.min_len = 1]; - // cdn host id - string host_id = 3 [(validate.rules).string.min_len = 1]; - base.PieceInfo piece_info = 4; - - // whether or not all seeds are downloaded - bool done = 5; - // content total length for the url, content_length < 0 represent content length is unknown - int64 content_length = 6; - // total piece count, -1 represents task is downloading or failed - int32 total_piece_count = 7; - // begin time for the piece downloading - uint64 begin_time = 8; - // end time for the piece downloading - uint64 end_time = 9; - // task extend attribute - base.ExtendAttribute extend_attribute = 10; -} - -// CDN System RPC Service -service Seeder{ - // Generate seeds and return to scheduler - rpc ObtainSeeds(SeedRequest)returns(stream PieceSeed); - // Get piece tasks from cdn - rpc GetPieceTasks(base.PieceTaskRequest)returns(base.PiecePacket); - // Sync piece tasks with other peers - rpc SyncPieceTasks(stream base.PieceTaskRequest)returns(stream base.PiecePacket); -} diff --git a/pkg/rpc/cdnsystem/client/client.go b/pkg/rpc/cdnsystem/client/client.go index e063ec245..e47013cfc 100644 --- a/pkg/rpc/cdnsystem/client/client.go +++ b/pkg/rpc/cdnsystem/client/client.go @@ -26,11 +26,12 @@ import ( "google.golang.org/grpc" + cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1" + commonv1 "d7y.io/api/pkg/apis/common/v1" + logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/dfnet" 
"d7y.io/dragonfly/v2/pkg/rpc" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" ) func GetClientByAddr(addrs []dfnet.NetAddr, opts ...grpc.DialOption) CdnClient { @@ -61,13 +62,13 @@ func GetElasticClientByAddrs(addrs []dfnet.NetAddr, opts ...grpc.DialOption) (Cd return elasticCdnClient, nil } -// CdnClient see cdnsystem.CdnClient +// CdnClient see cdnsystemv1.CdnClient type CdnClient interface { - ObtainSeeds(ctx context.Context, sr *cdnsystem.SeedRequest, opts ...grpc.CallOption) (*PieceSeedStream, error) + ObtainSeeds(ctx context.Context, sr *cdnsystemv1.SeedRequest, opts ...grpc.CallOption) (*PieceSeedStream, error) - GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) + GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (*commonv1.PiecePacket, error) - SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (cdnsystem.Seeder_SyncPieceTasksClient, error) + SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (cdnsystemv1.Seeder_SyncPieceTasksClient, error) UpdateState(addrs []dfnet.NetAddr) @@ -80,27 +81,27 @@ type cdnClient struct { var _ CdnClient = (*cdnClient)(nil) -func (cc *cdnClient) getCdnClient(key string, stick bool) (cdnsystem.SeederClient, string, error) { +func (cc *cdnClient) getCdnClient(key string, stick bool) (cdnsystemv1.SeederClient, string, error) { clientConn, err := cc.Connection.GetClientConn(key, stick) if err != nil { return nil, "", fmt.Errorf("get ClientConn for hashKey %s: %w", key, err) } - return cdnsystem.NewSeederClient(clientConn), clientConn.Target(), nil + return cdnsystemv1.NewSeederClient(clientConn), clientConn.Target(), nil } -func (cc *cdnClient) getSeederClientWithTarget(target string) (cdnsystem.SeederClient, error) { +func (cc *cdnClient) 
getSeederClientWithTarget(target string) (cdnsystemv1.SeederClient, error) { conn, err := cc.Connection.GetClientConnByTarget(target) if err != nil { return nil, err } - return cdnsystem.NewSeederClient(conn), nil + return cdnsystemv1.NewSeederClient(conn), nil } -func (cc *cdnClient) ObtainSeeds(ctx context.Context, sr *cdnsystem.SeedRequest, opts ...grpc.CallOption) (*PieceSeedStream, error) { +func (cc *cdnClient) ObtainSeeds(ctx context.Context, sr *cdnsystemv1.SeedRequest, opts ...grpc.CallOption) (*PieceSeedStream, error) { return newPieceSeedStream(ctx, cc, sr.TaskId, sr, opts) } -func (cc *cdnClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { +func (cc *cdnClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (*commonv1.PiecePacket, error) { client, err := cc.getSeederClientWithTarget(addr.GetEndpoint()) if err != nil { return nil, err @@ -108,7 +109,7 @@ func (cc *cdnClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req return client.GetPieceTasks(ctx, req, opts...) 
} -func (cc *cdnClient) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *base.PieceTaskRequest, opts ...grpc.CallOption) (cdnsystem.Seeder_SyncPieceTasksClient, error) { +func (cc *cdnClient) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (cdnsystemv1.Seeder_SyncPieceTasksClient, error) { client, err := cc.getSeederClientWithTarget(addr.GetEndpoint()) if err != nil { return nil, err diff --git a/pkg/rpc/cdnsystem/client/mocks/client_mock.go b/pkg/rpc/cdnsystem/client/mocks/client_mock.go index 3b921d32a..dfd307b2d 100644 --- a/pkg/rpc/cdnsystem/client/mocks/client_mock.go +++ b/pkg/rpc/cdnsystem/client/mocks/client_mock.go @@ -8,9 +8,9 @@ import ( context "context" reflect "reflect" + v1 "d7y.io/api/pkg/apis/cdnsystem/v1" + v10 "d7y.io/api/pkg/apis/common/v1" dfnet "d7y.io/dragonfly/v2/pkg/dfnet" - base "d7y.io/dragonfly/v2/pkg/rpc/base" - cdnsystem "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" client "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client" gomock "github.com/golang/mock/gomock" grpc "google.golang.org/grpc" @@ -54,14 +54,14 @@ func (mr *MockCdnClientMockRecorder) Close() *gomock.Call { } // GetPieceTasks mocks base method. -func (m *MockCdnClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { +func (m *MockCdnClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *v10.PieceTaskRequest, opts ...grpc.CallOption) (*v10.PiecePacket, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, addr, req} for _, a := range opts { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "GetPieceTasks", varargs...) - ret0, _ := ret[0].(*base.PiecePacket) + ret0, _ := ret[0].(*v10.PiecePacket) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -74,7 +74,7 @@ func (mr *MockCdnClientMockRecorder) GetPieceTasks(ctx, addr, req interface{}, o } // ObtainSeeds mocks base method. 
-func (m *MockCdnClient) ObtainSeeds(ctx context.Context, sr *cdnsystem.SeedRequest, opts ...grpc.CallOption) (*client.PieceSeedStream, error) { +func (m *MockCdnClient) ObtainSeeds(ctx context.Context, sr *v1.SeedRequest, opts ...grpc.CallOption) (*client.PieceSeedStream, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, sr} for _, a := range opts { @@ -94,14 +94,14 @@ func (mr *MockCdnClientMockRecorder) ObtainSeeds(ctx, sr interface{}, opts ...in } // SyncPieceTasks mocks base method. -func (m *MockCdnClient) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (cdnsystem.Seeder_SyncPieceTasksClient, error) { +func (m *MockCdnClient) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *v10.PieceTaskRequest, opts ...grpc.CallOption) (v1.Seeder_SyncPieceTasksClient, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, addr, ptr} for _, a := range opts { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "SyncPieceTasks", varargs...) 
- ret0, _ := ret[0].(cdnsystem.Seeder_SyncPieceTasksClient) + ret0, _ := ret[0].(v1.Seeder_SyncPieceTasksClient) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/pkg/rpc/cdnsystem/client/piece_seed_stream.go b/pkg/rpc/cdnsystem/client/piece_seed_stream.go index 0b1ca2eaf..8022bb94a 100644 --- a/pkg/rpc/cdnsystem/client/piece_seed_stream.go +++ b/pkg/rpc/cdnsystem/client/piece_seed_stream.go @@ -25,10 +25,11 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1" + "d7y.io/dragonfly/v2/internal/dferrors" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/rpc" - "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" ) type PieceSeedStream struct { @@ -36,16 +37,16 @@ type PieceSeedStream struct { sc *cdnClient ctx context.Context hashKey string - sr *cdnsystem.SeedRequest + sr *cdnsystemv1.SeedRequest opts []grpc.CallOption // stream for one client - stream cdnsystem.Seeder_ObtainSeedsClient + stream cdnsystemv1.Seeder_ObtainSeedsClient // server list which cannot serve failedServers []string rpc.RetryMeta } -func newPieceSeedStream(ctx context.Context, sc *cdnClient, hashKey string, sr *cdnsystem.SeedRequest, opts []grpc.CallOption) (*PieceSeedStream, error) { +func newPieceSeedStream(ctx context.Context, sc *cdnClient, hashKey string, sr *cdnsystemv1.SeedRequest, opts []grpc.CallOption) (*PieceSeedStream, error) { pss := &PieceSeedStream{ sc: sc, ctx: ctx, @@ -68,7 +69,7 @@ func newPieceSeedStream(ctx context.Context, sc *cdnClient, hashKey string, sr * func (pss *PieceSeedStream) initStream() error { var target string stream, err := rpc.ExecuteWithRetry(func() (any, error) { - var client cdnsystem.SeederClient + var client cdnsystemv1.SeederClient var err error client, target, err = pss.sc.getCdnClient(pss.hashKey, false) if err != nil { @@ -83,17 +84,17 @@ func (pss *PieceSeedStream) initStream() error { logger.WithTaskID(pss.hashKey).Errorf("initStream: invoke cdn node %s 
ObtainSeeds failed: %v", target, err) return pss.replaceClient(pss.hashKey, err) } - pss.stream = stream.(cdnsystem.Seeder_ObtainSeedsClient) + pss.stream = stream.(cdnsystemv1.Seeder_ObtainSeedsClient) pss.StreamTimes = 1 return nil } -func (pss *PieceSeedStream) Recv() (ps *cdnsystem.PieceSeed, err error) { +func (pss *PieceSeedStream) Recv() (ps *cdnsystemv1.PieceSeed, err error) { pss.sc.UpdateAccessNodeMapByHashKey(pss.hashKey) return pss.stream.Recv() } -func (pss *PieceSeedStream) retryRecv(cause error) (*cdnsystem.PieceSeed, error) { +func (pss *PieceSeedStream) retryRecv(cause error) (*cdnsystemv1.PieceSeed, error) { if status.Code(cause) == codes.DeadlineExceeded || status.Code(cause) == codes.Canceled { return nil, cause } @@ -111,7 +112,7 @@ func (pss *PieceSeedStream) replaceStream(cause error) error { } var target string stream, err := rpc.ExecuteWithRetry(func() (any, error) { - var client cdnsystem.SeederClient + var client cdnsystemv1.SeederClient var err error client, target, err = pss.sc.getCdnClient(pss.hashKey, true) if err != nil { @@ -123,7 +124,7 @@ func (pss *PieceSeedStream) replaceStream(cause error) error { logger.WithTaskID(pss.hashKey).Infof("replaceStream: invoke cdn node %s ObtainSeeds failed: %v", target, err) return pss.replaceStream(cause) } - pss.stream = stream.(cdnsystem.Seeder_ObtainSeedsClient) + pss.stream = stream.(cdnsystemv1.Seeder_ObtainSeedsClient) pss.StreamTimes++ return nil } @@ -137,7 +138,7 @@ func (pss *PieceSeedStream) replaceClient(key string, cause error) error { pss.failedServers = append(pss.failedServers, preNode) var target string stream, err := rpc.ExecuteWithRetry(func() (any, error) { - var client cdnsystem.SeederClient + var client cdnsystemv1.SeederClient var err error client, target, err = pss.sc.getCdnClient(key, true) if err != nil { @@ -149,7 +150,7 @@ func (pss *PieceSeedStream) replaceClient(key string, cause error) error { logger.WithTaskID(pss.hashKey).Infof("replaceClient: invoke cdn node %s 
ObtainSeeds failed: %v", target, err) return pss.replaceClient(key, cause) } - pss.stream = stream.(cdnsystem.Seeder_ObtainSeedsClient) + pss.stream = stream.(cdnsystemv1.Seeder_ObtainSeedsClient) pss.StreamTimes = 1 return nil } diff --git a/pkg/rpc/cdnsystem/mocks/cdnsystem_mock.go b/pkg/rpc/cdnsystem/mocks/cdnsystem_mock.go deleted file mode 100644 index 9b7c8f2d0..000000000 --- a/pkg/rpc/cdnsystem/mocks/cdnsystem_mock.go +++ /dev/null @@ -1,678 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: cdnsystem/cdnsystem.pb.go - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - reflect "reflect" - - base "d7y.io/dragonfly/v2/pkg/rpc/base" - cdnsystem "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" - gomock "github.com/golang/mock/gomock" - grpc "google.golang.org/grpc" - metadata "google.golang.org/grpc/metadata" -) - -// MockSeederClient is a mock of SeederClient interface. -type MockSeederClient struct { - ctrl *gomock.Controller - recorder *MockSeederClientMockRecorder -} - -// MockSeederClientMockRecorder is the mock recorder for MockSeederClient. -type MockSeederClientMockRecorder struct { - mock *MockSeederClient -} - -// NewMockSeederClient creates a new mock instance. -func NewMockSeederClient(ctrl *gomock.Controller) *MockSeederClient { - mock := &MockSeederClient{ctrl: ctrl} - mock.recorder = &MockSeederClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSeederClient) EXPECT() *MockSeederClientMockRecorder { - return m.recorder -} - -// GetPieceTasks mocks base method. -func (m *MockSeederClient) GetPieceTasks(ctx context.Context, in *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetPieceTasks", varargs...) 
- ret0, _ := ret[0].(*base.PiecePacket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPieceTasks indicates an expected call of GetPieceTasks. -func (mr *MockSeederClientMockRecorder) GetPieceTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockSeederClient)(nil).GetPieceTasks), varargs...) -} - -// ObtainSeeds mocks base method. -func (m *MockSeederClient) ObtainSeeds(ctx context.Context, in *cdnsystem.SeedRequest, opts ...grpc.CallOption) (cdnsystem.Seeder_ObtainSeedsClient, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ObtainSeeds", varargs...) - ret0, _ := ret[0].(cdnsystem.Seeder_ObtainSeedsClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ObtainSeeds indicates an expected call of ObtainSeeds. -func (mr *MockSeederClientMockRecorder) ObtainSeeds(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ObtainSeeds", reflect.TypeOf((*MockSeederClient)(nil).ObtainSeeds), varargs...) -} - -// SyncPieceTasks mocks base method. -func (m *MockSeederClient) SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (cdnsystem.Seeder_SyncPieceTasksClient, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "SyncPieceTasks", varargs...) - ret0, _ := ret[0].(cdnsystem.Seeder_SyncPieceTasksClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SyncPieceTasks indicates an expected call of SyncPieceTasks. 
-func (mr *MockSeederClientMockRecorder) SyncPieceTasks(ctx interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPieceTasks", reflect.TypeOf((*MockSeederClient)(nil).SyncPieceTasks), varargs...) -} - -// MockSeeder_ObtainSeedsClient is a mock of Seeder_ObtainSeedsClient interface. -type MockSeeder_ObtainSeedsClient struct { - ctrl *gomock.Controller - recorder *MockSeeder_ObtainSeedsClientMockRecorder -} - -// MockSeeder_ObtainSeedsClientMockRecorder is the mock recorder for MockSeeder_ObtainSeedsClient. -type MockSeeder_ObtainSeedsClientMockRecorder struct { - mock *MockSeeder_ObtainSeedsClient -} - -// NewMockSeeder_ObtainSeedsClient creates a new mock instance. -func NewMockSeeder_ObtainSeedsClient(ctrl *gomock.Controller) *MockSeeder_ObtainSeedsClient { - mock := &MockSeeder_ObtainSeedsClient{ctrl: ctrl} - mock.recorder = &MockSeeder_ObtainSeedsClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSeeder_ObtainSeedsClient) EXPECT() *MockSeeder_ObtainSeedsClientMockRecorder { - return m.recorder -} - -// CloseSend mocks base method. -func (m *MockSeeder_ObtainSeedsClient) CloseSend() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseSend") - ret0, _ := ret[0].(error) - return ret0 -} - -// CloseSend indicates an expected call of CloseSend. -func (mr *MockSeeder_ObtainSeedsClientMockRecorder) CloseSend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).CloseSend)) -} - -// Context mocks base method. -func (m *MockSeeder_ObtainSeedsClient) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context. 
-func (mr *MockSeeder_ObtainSeedsClientMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).Context)) -} - -// Header mocks base method. -func (m *MockSeeder_ObtainSeedsClient) Header() (metadata.MD, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Header") - ret0, _ := ret[0].(metadata.MD) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Header indicates an expected call of Header. -func (mr *MockSeeder_ObtainSeedsClientMockRecorder) Header() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).Header)) -} - -// Recv mocks base method. -func (m *MockSeeder_ObtainSeedsClient) Recv() (*cdnsystem.PieceSeed, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*cdnsystem.PieceSeed) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Recv indicates an expected call of Recv. -func (mr *MockSeeder_ObtainSeedsClientMockRecorder) Recv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).Recv)) -} - -// RecvMsg mocks base method. -func (m_2 *MockSeeder_ObtainSeedsClient) RecvMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "RecvMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg. -func (mr *MockSeeder_ObtainSeedsClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).RecvMsg), m) -} - -// SendMsg mocks base method. 
-func (m_2 *MockSeeder_ObtainSeedsClient) SendMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "SendMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg. -func (mr *MockSeeder_ObtainSeedsClientMockRecorder) SendMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).SendMsg), m) -} - -// Trailer mocks base method. -func (m *MockSeeder_ObtainSeedsClient) Trailer() metadata.MD { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Trailer") - ret0, _ := ret[0].(metadata.MD) - return ret0 -} - -// Trailer indicates an expected call of Trailer. -func (mr *MockSeeder_ObtainSeedsClientMockRecorder) Trailer() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).Trailer)) -} - -// MockSeeder_SyncPieceTasksClient is a mock of Seeder_SyncPieceTasksClient interface. -type MockSeeder_SyncPieceTasksClient struct { - ctrl *gomock.Controller - recorder *MockSeeder_SyncPieceTasksClientMockRecorder -} - -// MockSeeder_SyncPieceTasksClientMockRecorder is the mock recorder for MockSeeder_SyncPieceTasksClient. -type MockSeeder_SyncPieceTasksClientMockRecorder struct { - mock *MockSeeder_SyncPieceTasksClient -} - -// NewMockSeeder_SyncPieceTasksClient creates a new mock instance. -func NewMockSeeder_SyncPieceTasksClient(ctrl *gomock.Controller) *MockSeeder_SyncPieceTasksClient { - mock := &MockSeeder_SyncPieceTasksClient{ctrl: ctrl} - mock.recorder = &MockSeeder_SyncPieceTasksClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSeeder_SyncPieceTasksClient) EXPECT() *MockSeeder_SyncPieceTasksClientMockRecorder { - return m.recorder -} - -// CloseSend mocks base method. 
-func (m *MockSeeder_SyncPieceTasksClient) CloseSend() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseSend") - ret0, _ := ret[0].(error) - return ret0 -} - -// CloseSend indicates an expected call of CloseSend. -func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) CloseSend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).CloseSend)) -} - -// Context mocks base method. -func (m *MockSeeder_SyncPieceTasksClient) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context. -func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).Context)) -} - -// Header mocks base method. -func (m *MockSeeder_SyncPieceTasksClient) Header() (metadata.MD, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Header") - ret0, _ := ret[0].(metadata.MD) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Header indicates an expected call of Header. -func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) Header() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).Header)) -} - -// Recv mocks base method. -func (m *MockSeeder_SyncPieceTasksClient) Recv() (*base.PiecePacket, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*base.PiecePacket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Recv indicates an expected call of Recv. 
-func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) Recv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).Recv)) -} - -// RecvMsg mocks base method. -func (m_2 *MockSeeder_SyncPieceTasksClient) RecvMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "RecvMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg. -func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).RecvMsg), m) -} - -// Send mocks base method. -func (m *MockSeeder_SyncPieceTasksClient) Send(arg0 *base.PieceTaskRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send. -func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) Send(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).Send), arg0) -} - -// SendMsg mocks base method. -func (m_2 *MockSeeder_SyncPieceTasksClient) SendMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "SendMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg. -func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) SendMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).SendMsg), m) -} - -// Trailer mocks base method. 
-func (m *MockSeeder_SyncPieceTasksClient) Trailer() metadata.MD { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Trailer") - ret0, _ := ret[0].(metadata.MD) - return ret0 -} - -// Trailer indicates an expected call of Trailer. -func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) Trailer() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).Trailer)) -} - -// MockSeederServer is a mock of SeederServer interface. -type MockSeederServer struct { - ctrl *gomock.Controller - recorder *MockSeederServerMockRecorder -} - -// MockSeederServerMockRecorder is the mock recorder for MockSeederServer. -type MockSeederServerMockRecorder struct { - mock *MockSeederServer -} - -// NewMockSeederServer creates a new mock instance. -func NewMockSeederServer(ctrl *gomock.Controller) *MockSeederServer { - mock := &MockSeederServer{ctrl: ctrl} - mock.recorder = &MockSeederServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSeederServer) EXPECT() *MockSeederServerMockRecorder { - return m.recorder -} - -// GetPieceTasks mocks base method. -func (m *MockSeederServer) GetPieceTasks(arg0 context.Context, arg1 *base.PieceTaskRequest) (*base.PiecePacket, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPieceTasks", arg0, arg1) - ret0, _ := ret[0].(*base.PiecePacket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPieceTasks indicates an expected call of GetPieceTasks. -func (mr *MockSeederServerMockRecorder) GetPieceTasks(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockSeederServer)(nil).GetPieceTasks), arg0, arg1) -} - -// ObtainSeeds mocks base method. 
-func (m *MockSeederServer) ObtainSeeds(arg0 *cdnsystem.SeedRequest, arg1 cdnsystem.Seeder_ObtainSeedsServer) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ObtainSeeds", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ObtainSeeds indicates an expected call of ObtainSeeds. -func (mr *MockSeederServerMockRecorder) ObtainSeeds(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ObtainSeeds", reflect.TypeOf((*MockSeederServer)(nil).ObtainSeeds), arg0, arg1) -} - -// SyncPieceTasks mocks base method. -func (m *MockSeederServer) SyncPieceTasks(arg0 cdnsystem.Seeder_SyncPieceTasksServer) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SyncPieceTasks", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SyncPieceTasks indicates an expected call of SyncPieceTasks. -func (mr *MockSeederServerMockRecorder) SyncPieceTasks(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPieceTasks", reflect.TypeOf((*MockSeederServer)(nil).SyncPieceTasks), arg0) -} - -// MockSeeder_ObtainSeedsServer is a mock of Seeder_ObtainSeedsServer interface. -type MockSeeder_ObtainSeedsServer struct { - ctrl *gomock.Controller - recorder *MockSeeder_ObtainSeedsServerMockRecorder -} - -// MockSeeder_ObtainSeedsServerMockRecorder is the mock recorder for MockSeeder_ObtainSeedsServer. -type MockSeeder_ObtainSeedsServerMockRecorder struct { - mock *MockSeeder_ObtainSeedsServer -} - -// NewMockSeeder_ObtainSeedsServer creates a new mock instance. -func NewMockSeeder_ObtainSeedsServer(ctrl *gomock.Controller) *MockSeeder_ObtainSeedsServer { - mock := &MockSeeder_ObtainSeedsServer{ctrl: ctrl} - mock.recorder = &MockSeeder_ObtainSeedsServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. 
-func (m *MockSeeder_ObtainSeedsServer) EXPECT() *MockSeeder_ObtainSeedsServerMockRecorder { - return m.recorder -} - -// Context mocks base method. -func (m *MockSeeder_ObtainSeedsServer) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context. -func (mr *MockSeeder_ObtainSeedsServerMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).Context)) -} - -// RecvMsg mocks base method. -func (m_2 *MockSeeder_ObtainSeedsServer) RecvMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "RecvMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg. -func (mr *MockSeeder_ObtainSeedsServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).RecvMsg), m) -} - -// Send mocks base method. -func (m *MockSeeder_ObtainSeedsServer) Send(arg0 *cdnsystem.PieceSeed) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send. -func (mr *MockSeeder_ObtainSeedsServerMockRecorder) Send(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).Send), arg0) -} - -// SendHeader mocks base method. -func (m *MockSeeder_ObtainSeedsServer) SendHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendHeader indicates an expected call of SendHeader. 
-func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SendHeader), arg0) -} - -// SendMsg mocks base method. -func (m_2 *MockSeeder_ObtainSeedsServer) SendMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "SendMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg. -func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SendMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SendMsg), m) -} - -// SetHeader mocks base method. -func (m *MockSeeder_ObtainSeedsServer) SetHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetHeader indicates an expected call of SetHeader. -func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SetHeader), arg0) -} - -// SetTrailer mocks base method. -func (m *MockSeeder_ObtainSeedsServer) SetTrailer(arg0 metadata.MD) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetTrailer", arg0) -} - -// SetTrailer indicates an expected call of SetTrailer. -func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SetTrailer), arg0) -} - -// MockSeeder_SyncPieceTasksServer is a mock of Seeder_SyncPieceTasksServer interface. 
-type MockSeeder_SyncPieceTasksServer struct { - ctrl *gomock.Controller - recorder *MockSeeder_SyncPieceTasksServerMockRecorder -} - -// MockSeeder_SyncPieceTasksServerMockRecorder is the mock recorder for MockSeeder_SyncPieceTasksServer. -type MockSeeder_SyncPieceTasksServerMockRecorder struct { - mock *MockSeeder_SyncPieceTasksServer -} - -// NewMockSeeder_SyncPieceTasksServer creates a new mock instance. -func NewMockSeeder_SyncPieceTasksServer(ctrl *gomock.Controller) *MockSeeder_SyncPieceTasksServer { - mock := &MockSeeder_SyncPieceTasksServer{ctrl: ctrl} - mock.recorder = &MockSeeder_SyncPieceTasksServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSeeder_SyncPieceTasksServer) EXPECT() *MockSeeder_SyncPieceTasksServerMockRecorder { - return m.recorder -} - -// Context mocks base method. -func (m *MockSeeder_SyncPieceTasksServer) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context. -func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).Context)) -} - -// Recv mocks base method. -func (m *MockSeeder_SyncPieceTasksServer) Recv() (*base.PieceTaskRequest, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*base.PieceTaskRequest) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Recv indicates an expected call of Recv. -func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) Recv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).Recv)) -} - -// RecvMsg mocks base method. 
-func (m_2 *MockSeeder_SyncPieceTasksServer) RecvMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "RecvMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg. -func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).RecvMsg), m) -} - -// Send mocks base method. -func (m *MockSeeder_SyncPieceTasksServer) Send(arg0 *base.PiecePacket) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send. -func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) Send(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).Send), arg0) -} - -// SendHeader mocks base method. -func (m *MockSeeder_SyncPieceTasksServer) SendHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendHeader indicates an expected call of SendHeader. -func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).SendHeader), arg0) -} - -// SendMsg mocks base method. -func (m_2 *MockSeeder_SyncPieceTasksServer) SendMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "SendMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg. 
-func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SendMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).SendMsg), m) -} - -// SetHeader mocks base method. -func (m *MockSeeder_SyncPieceTasksServer) SetHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetHeader indicates an expected call of SetHeader. -func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).SetHeader), arg0) -} - -// SetTrailer mocks base method. -func (m *MockSeeder_SyncPieceTasksServer) SetTrailer(arg0 metadata.MD) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetTrailer", arg0) -} - -// SetTrailer indicates an expected call of SetTrailer. 
-func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).SetTrailer), arg0) -} diff --git a/pkg/rpc/client.go b/pkg/rpc/client.go index 823589d63..3d77f58c8 100644 --- a/pkg/rpc/client.go +++ b/pkg/rpc/client.go @@ -30,10 +30,11 @@ import ( "google.golang.org/grpc/status" "k8s.io/apimachinery/pkg/util/sets" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/internal/dferrors" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/dfnet" - "d7y.io/dragonfly/v2/pkg/rpc/base" ) const ( @@ -371,7 +372,7 @@ func (conn *Connection) TryMigrate(key string, cause error, exclusiveNodes []str } // TODO recover findCandidateClientConn error if e, ok := cause.(*dferrors.DfError); ok { - if e.Code != base.Code_ResourceLacked { + if e.Code != commonv1.Code_ResourceLacked { return "", cause } } diff --git a/pkg/rpc/client_util.go b/pkg/rpc/client_util.go index 484f2861b..61b70d199 100644 --- a/pkg/rpc/client_util.go +++ b/pkg/rpc/client_util.go @@ -28,10 +28,11 @@ import ( "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/internal/dferrors" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/math" - "d7y.io/dragonfly/v2/pkg/rpc/base" ) const ( @@ -195,7 +196,7 @@ func convertClientError(err error) error { s := status.Convert(err) for _, d := range s.Details() { switch internal := d.(type) { - case *base.GrpcDfError: + case *commonv1.GrpcDfError: return &dferrors.DfError{ Code: internal.Code, Message: internal.Message, diff --git a/pkg/rpc/base/common/common.go b/pkg/rpc/common/common.go similarity index 74% rename from pkg/rpc/base/common/common.go rename to pkg/rpc/common/common.go index 610d86ad8..f71d2cb0a 100644 --- a/pkg/rpc/base/common/common.go +++ 
b/pkg/rpc/common/common.go @@ -1,5 +1,5 @@ /* - * Copyright 2020 The Dragonfly Authors + * Copyright 2022 The Dragonfly Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,14 +22,14 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "d7y.io/dragonfly/v2/pkg/rpc/base" + commonv1 "d7y.io/api/pkg/apis/common/v1" ) var EndOfPiece = int32(1) << 30 var BeginOfPiece = int32(-1) -func NewGrpcDfError(code base.Code, msg string) *base.GrpcDfError { - return &base.GrpcDfError{ +func NewGrpcDfError(code commonv1.Code, msg string) *commonv1.GrpcDfError { + return &commonv1.GrpcDfError{ Code: code, Message: msg, } @@ -37,7 +37,7 @@ func NewGrpcDfError(code base.Code, msg string) *base.GrpcDfError { // NewResWithCodeAndMsg returns a response ptr with code and msg, // ptr is a expected type ptr. -func NewResWithCodeAndMsg(ptr any, code base.Code, msg string) any { +func NewResWithCodeAndMsg(ptr any, code commonv1.Code, msg string) any { typ := reflect.TypeOf(ptr) v := reflect.New(typ.Elem()) @@ -46,14 +46,14 @@ func NewResWithCodeAndMsg(ptr any, code base.Code, msg string) any { func NewResWithErr(ptr any, err error) any { st := status.Convert(err) - var code base.Code + var code commonv1.Code switch st.Code() { case codes.DeadlineExceeded: - code = base.Code_RequestTimeOut + code = commonv1.Code_RequestTimeOut case codes.OK: - code = base.Code_Success + code = commonv1.Code_Success default: - code = base.Code_UnknownError + code = commonv1.Code_UnknownError } return NewResWithCodeAndMsg(ptr, code, st.Message()) } diff --git a/pkg/rpc/dfdaemon/client/client.go b/pkg/rpc/dfdaemon/client/client.go index 2d77012cd..a0a87baaa 100644 --- a/pkg/rpc/dfdaemon/client/client.go +++ b/pkg/rpc/dfdaemon/client/client.go @@ -29,12 +29,13 @@ import ( "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" + commonv1 "d7y.io/api/pkg/apis/common/v1" + 
dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1" + logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/dfnet" "d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/rpc" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" ) var _ DaemonClient = (*daemonClient)(nil) @@ -76,23 +77,23 @@ func GetElasticClientByAddrs(addrs []dfnet.NetAddr, opts ...grpc.DialOption) (Da return elasticDaemonClient, nil } -// DaemonClient see dfdaemon.DaemonClient +// DaemonClient see dfdaemonv1.DaemonClient type DaemonClient interface { - Download(ctx context.Context, req *dfdaemon.DownRequest, opts ...grpc.CallOption) (*DownResultStream, error) + Download(ctx context.Context, req *dfdaemonv1.DownRequest, opts ...grpc.CallOption) (*DownResultStream, error) - GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) + GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (*commonv1.PiecePacket, error) - SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (dfdaemon.Daemon_SyncPieceTasksClient, error) + SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (dfdaemonv1.Daemon_SyncPieceTasksClient, error) CheckHealth(ctx context.Context, target dfnet.NetAddr, opts ...grpc.CallOption) error - StatTask(ctx context.Context, req *dfdaemon.StatTaskRequest, opts ...grpc.CallOption) error + StatTask(ctx context.Context, req *dfdaemonv1.StatTaskRequest, opts ...grpc.CallOption) error - ImportTask(ctx context.Context, req *dfdaemon.ImportTaskRequest, opts ...grpc.CallOption) error + ImportTask(ctx context.Context, req *dfdaemonv1.ImportTaskRequest, opts ...grpc.CallOption) error - ExportTask(ctx context.Context, req *dfdaemon.ExportTaskRequest, opts ...grpc.CallOption) error + ExportTask(ctx context.Context, req 
*dfdaemonv1.ExportTaskRequest, opts ...grpc.CallOption) error - DeleteTask(ctx context.Context, req *dfdaemon.DeleteTaskRequest, opts ...grpc.CallOption) error + DeleteTask(ctx context.Context, req *dfdaemonv1.DeleteTaskRequest, opts ...grpc.CallOption) error Close() error } @@ -101,30 +102,30 @@ type daemonClient struct { *rpc.Connection } -func (dc *daemonClient) getDaemonClient(key string, stick bool) (dfdaemon.DaemonClient, string, error) { +func (dc *daemonClient) getDaemonClient(key string, stick bool) (dfdaemonv1.DaemonClient, string, error) { clientConn, err := dc.Connection.GetClientConn(key, stick) if err != nil { return nil, "", err } - return dfdaemon.NewDaemonClient(clientConn), clientConn.Target(), nil + return dfdaemonv1.NewDaemonClient(clientConn), clientConn.Target(), nil } -func (dc *daemonClient) getDaemonClientWithTarget(target string) (dfdaemon.DaemonClient, error) { +func (dc *daemonClient) getDaemonClientWithTarget(target string) (dfdaemonv1.DaemonClient, error) { conn, err := dc.Connection.GetClientConnByTarget(target) if err != nil { return nil, err } - return dfdaemon.NewDaemonClient(conn), nil + return dfdaemonv1.NewDaemonClient(conn), nil } -func (dc *daemonClient) Download(ctx context.Context, req *dfdaemon.DownRequest, opts ...grpc.CallOption) (*DownResultStream, error) { +func (dc *daemonClient) Download(ctx context.Context, req *dfdaemonv1.DownRequest, opts ...grpc.CallOption) (*DownResultStream, error) { req.Uuid = uuid.New().String() // generate taskID taskID := idgen.TaskID(req.Url, req.UrlMeta) return newDownResultStream(ctx, dc, taskID, req, opts) } -func (dc *daemonClient) GetPieceTasks(ctx context.Context, target dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, +func (dc *daemonClient) GetPieceTasks(ctx context.Context, target dfnet.NetAddr, ptr *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (*commonv1.PiecePacket, error) { client, err := 
dc.getDaemonClientWithTarget(target.GetEndpoint()) if err != nil { @@ -133,7 +134,7 @@ func (dc *daemonClient) GetPieceTasks(ctx context.Context, target dfnet.NetAddr, return client.GetPieceTasks(ctx, ptr, opts...) } -func (dc *daemonClient) SyncPieceTasks(ctx context.Context, target dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (dfdaemon.Daemon_SyncPieceTasksClient, error) { +func (dc *daemonClient) SyncPieceTasks(ctx context.Context, target dfnet.NetAddr, ptr *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (dfdaemonv1.Daemon_SyncPieceTasksClient, error) { client, err := dc.getDaemonClientWithTarget(target.GetEndpoint()) if err != nil { return nil, err @@ -162,7 +163,7 @@ func (dc *daemonClient) CheckHealth(ctx context.Context, target dfnet.NetAddr, o return } -func (dc *daemonClient) StatTask(ctx context.Context, req *dfdaemon.StatTaskRequest, opts ...grpc.CallOption) error { +func (dc *daemonClient) StatTask(ctx context.Context, req *dfdaemonv1.StatTaskRequest, opts ...grpc.CallOption) error { // StatTask is a latency sensitive operation, so we don't retry & wait for daemon to start, // we assume daemon is already running. 
taskID := idgen.TaskID(req.Url, req.UrlMeta) @@ -175,7 +176,7 @@ func (dc *daemonClient) StatTask(ctx context.Context, req *dfdaemon.StatTaskRequ return err } -func (dc *daemonClient) ImportTask(ctx context.Context, req *dfdaemon.ImportTaskRequest, opts ...grpc.CallOption) error { +func (dc *daemonClient) ImportTask(ctx context.Context, req *dfdaemonv1.ImportTaskRequest, opts ...grpc.CallOption) error { taskID := idgen.TaskID(req.Url, req.UrlMeta) client, _, err := dc.getDaemonClient(taskID, false) if err != nil { @@ -185,7 +186,7 @@ func (dc *daemonClient) ImportTask(ctx context.Context, req *dfdaemon.ImportTask return err } -func (dc *daemonClient) ExportTask(ctx context.Context, req *dfdaemon.ExportTaskRequest, opts ...grpc.CallOption) error { +func (dc *daemonClient) ExportTask(ctx context.Context, req *dfdaemonv1.ExportTaskRequest, opts ...grpc.CallOption) error { taskID := idgen.TaskID(req.Url, req.UrlMeta) client, _, err := dc.getDaemonClient(taskID, false) if err != nil { @@ -195,7 +196,7 @@ func (dc *daemonClient) ExportTask(ctx context.Context, req *dfdaemon.ExportTask return err } -func (dc *daemonClient) DeleteTask(ctx context.Context, req *dfdaemon.DeleteTaskRequest, opts ...grpc.CallOption) error { +func (dc *daemonClient) DeleteTask(ctx context.Context, req *dfdaemonv1.DeleteTaskRequest, opts ...grpc.CallOption) error { taskID := idgen.TaskID(req.Url, req.UrlMeta) client, _, err := dc.getDaemonClient(taskID, false) if err != nil { diff --git a/pkg/rpc/dfdaemon/client/down_result_stream.go b/pkg/rpc/dfdaemon/client/down_result_stream.go index aaa23409d..ea1f35277 100644 --- a/pkg/rpc/dfdaemon/client/down_result_stream.go +++ b/pkg/rpc/dfdaemon/client/down_result_stream.go @@ -25,25 +25,26 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1" + "d7y.io/dragonfly/v2/internal/dferrors" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/rpc" - 
"d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" ) type DownResultStream struct { dc *daemonClient ctx context.Context hashKey string - req *dfdaemon.DownRequest + req *dfdaemonv1.DownRequest opts []grpc.CallOption // stream for one client - stream dfdaemon.Daemon_DownloadClient + stream dfdaemonv1.Daemon_DownloadClient failedServers []string rpc.RetryMeta } -func newDownResultStream(ctx context.Context, dc *daemonClient, hashKey string, req *dfdaemon.DownRequest, opts []grpc.CallOption) (*DownResultStream, error) { +func newDownResultStream(ctx context.Context, dc *daemonClient, hashKey string, req *dfdaemonv1.DownRequest, opts []grpc.CallOption) (*DownResultStream, error) { drs := &DownResultStream{ dc: dc, ctx: ctx, @@ -67,7 +68,7 @@ func newDownResultStream(ctx context.Context, dc *daemonClient, hashKey string, func (drs *DownResultStream) initStream() error { var target string stream, err := rpc.ExecuteWithRetry(func() (any, error) { - var client dfdaemon.DaemonClient + var client dfdaemonv1.DaemonClient var err error client, target, err = drs.dc.getDaemonClient(drs.hashKey, false) if err != nil { @@ -82,12 +83,12 @@ func (drs *DownResultStream) initStream() error { logger.WithTaskID(drs.hashKey).Infof("initStream: invoke daemon node %s Download failed: %v", target, err) return drs.replaceClient(err) } - drs.stream = stream.(dfdaemon.Daemon_DownloadClient) + drs.stream = stream.(dfdaemonv1.Daemon_DownloadClient) drs.StreamTimes = 1 return nil } -func (drs *DownResultStream) Recv() (dr *dfdaemon.DownResult, err error) { +func (drs *DownResultStream) Recv() (dr *dfdaemonv1.DownResult, err error) { defer func() { if dr != nil { if dr.TaskId != drs.hashKey { @@ -101,7 +102,7 @@ func (drs *DownResultStream) Recv() (dr *dfdaemon.DownResult, err error) { return drs.stream.Recv() } -func (drs *DownResultStream) retryRecv(cause error) (*dfdaemon.DownResult, error) { +func (drs *DownResultStream) retryRecv(cause error) (*dfdaemonv1.DownResult, error) { if status.Code(cause) == 
codes.DeadlineExceeded || status.Code(cause) == codes.Canceled { return nil, cause } @@ -120,7 +121,7 @@ func (drs *DownResultStream) replaceStream(cause error) error { } var target string stream, err := rpc.ExecuteWithRetry(func() (any, error) { - var client dfdaemon.DaemonClient + var client dfdaemonv1.DaemonClient var err error client, target, err = drs.dc.getDaemonClient(drs.hashKey, true) if err != nil { @@ -132,7 +133,7 @@ func (drs *DownResultStream) replaceStream(cause error) error { logger.WithTaskID(drs.hashKey).Infof("replaceStream: invoke daemon node %s Download failed: %v", target, err) return drs.replaceClient(cause) } - drs.stream = stream.(dfdaemon.Daemon_DownloadClient) + drs.stream = stream.(dfdaemonv1.Daemon_DownloadClient) drs.StreamTimes++ return nil } @@ -147,7 +148,7 @@ func (drs *DownResultStream) replaceClient(cause error) error { var target string stream, err := rpc.ExecuteWithRetry(func() (any, error) { - var client dfdaemon.DaemonClient + var client dfdaemonv1.DaemonClient var err error client, target, err = drs.dc.getDaemonClient(drs.hashKey, true) if err != nil { @@ -159,7 +160,7 @@ func (drs *DownResultStream) replaceClient(cause error) error { logger.WithTaskID(drs.hashKey).Infof("replaceClient: invoke daemon node %s Download failed: %v", target, err) return drs.replaceClient(cause) } - drs.stream = stream.(dfdaemon.Daemon_DownloadClient) + drs.stream = stream.(dfdaemonv1.Daemon_DownloadClient) drs.StreamTimes = 1 return nil } diff --git a/pkg/rpc/dfdaemon/client/mocks/client_mock.go b/pkg/rpc/dfdaemon/client/mocks/client_mock.go index 01cb67d22..6640a0d66 100644 --- a/pkg/rpc/dfdaemon/client/mocks/client_mock.go +++ b/pkg/rpc/dfdaemon/client/mocks/client_mock.go @@ -8,9 +8,9 @@ import ( context "context" reflect "reflect" + v1 "d7y.io/api/pkg/apis/common/v1" + v10 "d7y.io/api/pkg/apis/dfdaemon/v1" dfnet "d7y.io/dragonfly/v2/pkg/dfnet" - base "d7y.io/dragonfly/v2/pkg/rpc/base" - dfdaemon "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" client 
"d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client" gomock "github.com/golang/mock/gomock" grpc "google.golang.org/grpc" @@ -73,7 +73,7 @@ func (mr *MockDaemonClientMockRecorder) Close() *gomock.Call { } // DeleteTask mocks base method. -func (m *MockDaemonClient) DeleteTask(ctx context.Context, req *dfdaemon.DeleteTaskRequest, opts ...grpc.CallOption) error { +func (m *MockDaemonClient) DeleteTask(ctx context.Context, req *v10.DeleteTaskRequest, opts ...grpc.CallOption) error { m.ctrl.T.Helper() varargs := []interface{}{ctx, req} for _, a := range opts { @@ -92,7 +92,7 @@ func (mr *MockDaemonClientMockRecorder) DeleteTask(ctx, req interface{}, opts .. } // Download mocks base method. -func (m *MockDaemonClient) Download(ctx context.Context, req *dfdaemon.DownRequest, opts ...grpc.CallOption) (*client.DownResultStream, error) { +func (m *MockDaemonClient) Download(ctx context.Context, req *v10.DownRequest, opts ...grpc.CallOption) (*client.DownResultStream, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, req} for _, a := range opts { @@ -112,7 +112,7 @@ func (mr *MockDaemonClientMockRecorder) Download(ctx, req interface{}, opts ...i } // ExportTask mocks base method. -func (m *MockDaemonClient) ExportTask(ctx context.Context, req *dfdaemon.ExportTaskRequest, opts ...grpc.CallOption) error { +func (m *MockDaemonClient) ExportTask(ctx context.Context, req *v10.ExportTaskRequest, opts ...grpc.CallOption) error { m.ctrl.T.Helper() varargs := []interface{}{ctx, req} for _, a := range opts { @@ -131,14 +131,14 @@ func (mr *MockDaemonClientMockRecorder) ExportTask(ctx, req interface{}, opts .. } // GetPieceTasks mocks base method. 
-func (m *MockDaemonClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { +func (m *MockDaemonClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *v1.PieceTaskRequest, opts ...grpc.CallOption) (*v1.PiecePacket, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, addr, ptr} for _, a := range opts { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "GetPieceTasks", varargs...) - ret0, _ := ret[0].(*base.PiecePacket) + ret0, _ := ret[0].(*v1.PiecePacket) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -151,7 +151,7 @@ func (mr *MockDaemonClientMockRecorder) GetPieceTasks(ctx, addr, ptr interface{} } // ImportTask mocks base method. -func (m *MockDaemonClient) ImportTask(ctx context.Context, req *dfdaemon.ImportTaskRequest, opts ...grpc.CallOption) error { +func (m *MockDaemonClient) ImportTask(ctx context.Context, req *v10.ImportTaskRequest, opts ...grpc.CallOption) error { m.ctrl.T.Helper() varargs := []interface{}{ctx, req} for _, a := range opts { @@ -170,7 +170,7 @@ func (mr *MockDaemonClientMockRecorder) ImportTask(ctx, req interface{}, opts .. } // StatTask mocks base method. -func (m *MockDaemonClient) StatTask(ctx context.Context, req *dfdaemon.StatTaskRequest, opts ...grpc.CallOption) error { +func (m *MockDaemonClient) StatTask(ctx context.Context, req *v10.StatTaskRequest, opts ...grpc.CallOption) error { m.ctrl.T.Helper() varargs := []interface{}{ctx, req} for _, a := range opts { @@ -189,14 +189,14 @@ func (mr *MockDaemonClientMockRecorder) StatTask(ctx, req interface{}, opts ...i } // SyncPieceTasks mocks base method. 
-func (m *MockDaemonClient) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (dfdaemon.Daemon_SyncPieceTasksClient, error) { +func (m *MockDaemonClient) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *v1.PieceTaskRequest, opts ...grpc.CallOption) (v10.Daemon_SyncPieceTasksClient, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, addr, ptr} for _, a := range opts { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "SyncPieceTasks", varargs...) - ret0, _ := ret[0].(dfdaemon.Daemon_SyncPieceTasksClient) + ret0, _ := ret[0].(v10.Daemon_SyncPieceTasksClient) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/pkg/rpc/dfdaemon/client/peer.go b/pkg/rpc/dfdaemon/client/peer.go index ea4bfaa0d..1ef0c58fe 100644 --- a/pkg/rpc/dfdaemon/client/peer.go +++ b/pkg/rpc/dfdaemon/client/peer.go @@ -22,16 +22,17 @@ import ( "google.golang.org/grpc" + commonv1 "d7y.io/api/pkg/apis/common/v1" + dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/pkg/dfnet" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" ) func GetPieceTasks(ctx context.Context, - dstPeer *scheduler.PeerPacket_DestPeer, - ptr *base.PieceTaskRequest, - opts ...grpc.CallOption) (*base.PiecePacket, error) { + dstPeer *schedulerv1.PeerPacket_DestPeer, + ptr *commonv1.PieceTaskRequest, + opts ...grpc.CallOption) (*commonv1.PiecePacket, error) { netAddr := dfnet.NetAddr{ Type: dfnet.TCP, Addr: fmt.Sprintf("%s:%d", dstPeer.Ip, dstPeer.RpcPort), @@ -46,9 +47,9 @@ func GetPieceTasks(ctx context.Context, } func SyncPieceTasks(ctx context.Context, - destPeer *scheduler.PeerPacket_DestPeer, - ptr *base.PieceTaskRequest, - opts ...grpc.CallOption) (dfdaemon.Daemon_SyncPieceTasksClient, error) { + destPeer *schedulerv1.PeerPacket_DestPeer, + ptr *commonv1.PieceTaskRequest, + opts ...grpc.CallOption) 
(dfdaemonv1.Daemon_SyncPieceTasksClient, error) { netAddr := dfnet.NetAddr{ Type: dfnet.TCP, Addr: fmt.Sprintf("%s:%d", destPeer.Ip, destPeer.RpcPort), diff --git a/pkg/rpc/dfdaemon/dfdaemon.pb.go b/pkg/rpc/dfdaemon/dfdaemon.pb.go deleted file mode 100644 index dd522a131..000000000 --- a/pkg/rpc/dfdaemon/dfdaemon.pb.go +++ /dev/null @@ -1,1275 +0,0 @@ -// -// Copyright 2020 The Dragonfly Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: pkg/rpc/dfdaemon/dfdaemon.proto - -package dfdaemon - -import ( - context "context" - base "d7y.io/dragonfly/v2/pkg/rpc/base" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type DownRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Identify one downloading, the framework will fill it automatically. - Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` - // Download file from the url, not only for http. - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - // Pieces will be written to output path directly, - // at the same time, dfdaemon workspace also makes soft link to the output. - Output string `protobuf:"bytes,3,opt,name=output,proto3" json:"output,omitempty"` - // Timeout duration. - Timeout uint64 `protobuf:"varint,4,opt,name=timeout,proto3" json:"timeout,omitempty"` - // Rate limit in bytes per second. - Limit float64 `protobuf:"fixed64,5,opt,name=limit,proto3" json:"limit,omitempty"` - // Disable back-to-source. - DisableBackSource bool `protobuf:"varint,6,opt,name=disable_back_source,json=disableBackSource,proto3" json:"disable_back_source,omitempty"` - // URL meta info. - UrlMeta *base.UrlMeta `protobuf:"bytes,7,opt,name=url_meta,json=urlMeta,proto3" json:"url_meta,omitempty"` - // Pattern has p2p/seed-peer/source, default is p2p. - Pattern string `protobuf:"bytes,8,opt,name=pattern,proto3" json:"pattern,omitempty"` - // Call system. - Callsystem string `protobuf:"bytes,9,opt,name=callsystem,proto3" json:"callsystem,omitempty"` - // User id. - Uid int64 `protobuf:"varint,10,opt,name=uid,proto3" json:"uid,omitempty"` - // Group id. - Gid int64 `protobuf:"varint,11,opt,name=gid,proto3" json:"gid,omitempty"` - // Keep original offset, used for ranged request, only available for hard link, otherwise will failed. 
- KeepOriginalOffset bool `protobuf:"varint,12,opt,name=keep_original_offset,json=keepOriginalOffset,proto3" json:"keep_original_offset,omitempty"` -} - -func (x *DownRequest) Reset() { - *x = DownRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DownRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DownRequest) ProtoMessage() {} - -func (x *DownRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DownRequest.ProtoReflect.Descriptor instead. -func (*DownRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDescGZIP(), []int{0} -} - -func (x *DownRequest) GetUuid() string { - if x != nil { - return x.Uuid - } - return "" -} - -func (x *DownRequest) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *DownRequest) GetOutput() string { - if x != nil { - return x.Output - } - return "" -} - -func (x *DownRequest) GetTimeout() uint64 { - if x != nil { - return x.Timeout - } - return 0 -} - -func (x *DownRequest) GetLimit() float64 { - if x != nil { - return x.Limit - } - return 0 -} - -func (x *DownRequest) GetDisableBackSource() bool { - if x != nil { - return x.DisableBackSource - } - return false -} - -func (x *DownRequest) GetUrlMeta() *base.UrlMeta { - if x != nil { - return x.UrlMeta - } - return nil -} - -func (x *DownRequest) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *DownRequest) GetCallsystem() string { - if x != nil { - return x.Callsystem - } - return "" -} - -func (x *DownRequest) GetUid() int64 { 
- if x != nil { - return x.Uid - } - return 0 -} - -func (x *DownRequest) GetGid() int64 { - if x != nil { - return x.Gid - } - return 0 -} - -func (x *DownRequest) GetKeepOriginalOffset() bool { - if x != nil { - return x.KeepOriginalOffset - } - return false -} - -type DownResult struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Task id. - TaskId string `protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - // Peer id. - PeerId string `protobuf:"bytes,3,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` - // Task has completed length. - CompletedLength uint64 `protobuf:"varint,4,opt,name=completed_length,json=completedLength,proto3" json:"completed_length,omitempty"` - // Task has been completed. - Done bool `protobuf:"varint,5,opt,name=done,proto3" json:"done,omitempty"` -} - -func (x *DownResult) Reset() { - *x = DownResult{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DownResult) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DownResult) ProtoMessage() {} - -func (x *DownResult) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DownResult.ProtoReflect.Descriptor instead. 
-func (*DownResult) Descriptor() ([]byte, []int) { - return file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDescGZIP(), []int{1} -} - -func (x *DownResult) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *DownResult) GetPeerId() string { - if x != nil { - return x.PeerId - } - return "" -} - -func (x *DownResult) GetCompletedLength() uint64 { - if x != nil { - return x.CompletedLength - } - return 0 -} - -func (x *DownResult) GetDone() bool { - if x != nil { - return x.Done - } - return false -} - -type StatTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Download url. - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - // URL meta info. - UrlMeta *base.UrlMeta `protobuf:"bytes,2,opt,name=url_meta,json=urlMeta,proto3" json:"url_meta,omitempty"` - // Check local cache only. - LocalOnly bool `protobuf:"varint,3,opt,name=local_only,json=localOnly,proto3" json:"local_only,omitempty"` -} - -func (x *StatTaskRequest) Reset() { - *x = StatTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StatTaskRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatTaskRequest) ProtoMessage() {} - -func (x *StatTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatTaskRequest.ProtoReflect.Descriptor instead. 
-func (*StatTaskRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDescGZIP(), []int{2} -} - -func (x *StatTaskRequest) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *StatTaskRequest) GetUrlMeta() *base.UrlMeta { - if x != nil { - return x.UrlMeta - } - return nil -} - -func (x *StatTaskRequest) GetLocalOnly() bool { - if x != nil { - return x.LocalOnly - } - return false -} - -type ImportTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Download url. - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - // URL meta info. - UrlMeta *base.UrlMeta `protobuf:"bytes,2,opt,name=url_meta,json=urlMeta,proto3" json:"url_meta,omitempty"` - // File to be imported. - Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` - // Task type. - Type base.TaskType `protobuf:"varint,4,opt,name=type,proto3,enum=base.TaskType" json:"type,omitempty"` -} - -func (x *ImportTaskRequest) Reset() { - *x = ImportTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ImportTaskRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ImportTaskRequest) ProtoMessage() {} - -func (x *ImportTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ImportTaskRequest.ProtoReflect.Descriptor instead. 
-func (*ImportTaskRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDescGZIP(), []int{3} -} - -func (x *ImportTaskRequest) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *ImportTaskRequest) GetUrlMeta() *base.UrlMeta { - if x != nil { - return x.UrlMeta - } - return nil -} - -func (x *ImportTaskRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *ImportTaskRequest) GetType() base.TaskType { - if x != nil { - return x.Type - } - return base.TaskType(0) -} - -type ExportTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Download url. - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - // Output path of downloaded file. - Output string `protobuf:"bytes,2,opt,name=output,proto3" json:"output,omitempty"` - // Timeout duration. - Timeout uint64 `protobuf:"varint,3,opt,name=timeout,proto3" json:"timeout,omitempty"` - // Rate limit in bytes per second. - Limit float64 `protobuf:"fixed64,4,opt,name=limit,proto3" json:"limit,omitempty"` - // URL meta info. - UrlMeta *base.UrlMeta `protobuf:"bytes,5,opt,name=url_meta,json=urlMeta,proto3" json:"url_meta,omitempty"` - // Call system. - Callsystem string `protobuf:"bytes,6,opt,name=callsystem,proto3" json:"callsystem,omitempty"` - // User id. - Uid int64 `protobuf:"varint,7,opt,name=uid,proto3" json:"uid,omitempty"` - // Group id. - Gid int64 `protobuf:"varint,8,opt,name=gid,proto3" json:"gid,omitempty"` - // Only export from local storage. 
- LocalOnly bool `protobuf:"varint,9,opt,name=local_only,json=localOnly,proto3" json:"local_only,omitempty"` -} - -func (x *ExportTaskRequest) Reset() { - *x = ExportTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExportTaskRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExportTaskRequest) ProtoMessage() {} - -func (x *ExportTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExportTaskRequest.ProtoReflect.Descriptor instead. -func (*ExportTaskRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDescGZIP(), []int{4} -} - -func (x *ExportTaskRequest) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *ExportTaskRequest) GetOutput() string { - if x != nil { - return x.Output - } - return "" -} - -func (x *ExportTaskRequest) GetTimeout() uint64 { - if x != nil { - return x.Timeout - } - return 0 -} - -func (x *ExportTaskRequest) GetLimit() float64 { - if x != nil { - return x.Limit - } - return 0 -} - -func (x *ExportTaskRequest) GetUrlMeta() *base.UrlMeta { - if x != nil { - return x.UrlMeta - } - return nil -} - -func (x *ExportTaskRequest) GetCallsystem() string { - if x != nil { - return x.Callsystem - } - return "" -} - -func (x *ExportTaskRequest) GetUid() int64 { - if x != nil { - return x.Uid - } - return 0 -} - -func (x *ExportTaskRequest) GetGid() int64 { - if x != nil { - return x.Gid - } - return 0 -} - -func (x *ExportTaskRequest) GetLocalOnly() bool { - if x != nil { - return x.LocalOnly - } - return false -} - -type 
DeleteTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Download url. - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - // URL meta info. - UrlMeta *base.UrlMeta `protobuf:"bytes,2,opt,name=url_meta,json=urlMeta,proto3" json:"url_meta,omitempty"` -} - -func (x *DeleteTaskRequest) Reset() { - *x = DeleteTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteTaskRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteTaskRequest) ProtoMessage() {} - -func (x *DeleteTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteTaskRequest.ProtoReflect.Descriptor instead. 
-func (*DeleteTaskRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDescGZIP(), []int{5} -} - -func (x *DeleteTaskRequest) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *DeleteTaskRequest) GetUrlMeta() *base.UrlMeta { - if x != nil { - return x.UrlMeta - } - return nil -} - -var File_pkg_rpc_dfdaemon_dfdaemon_proto protoreflect.FileDescriptor - -var file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x64, 0x66, 0x64, 0x61, 0x65, 0x6d, - 0x6f, 0x6e, 0x2f, 0x64, 0x66, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x08, 0x64, 0x66, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x1a, 0x17, 0x70, 0x6b, 0x67, - 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbd, 0x03, 0x0a, 0x0b, 0x44, - 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x04, 0x75, 0x75, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x72, 0x03, 0xb0, - 0x01, 0x01, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x72, 0x03, 0x88, 0x01, 0x01, 0x52, - 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1f, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x21, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x42, 0x07, 
0xfa, 0x42, 0x04, 0x32, 0x02, 0x28, 0x00, 0x52, - 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x24, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x2e, - 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x64, 0x69, 0x73, - 0x61, 0x62, 0x6c, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, - 0x0a, 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0d, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x55, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x52, - 0x07, 0x75, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, - 0x65, 0x72, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x42, 0x20, 0xfa, 0x42, 0x1d, 0x72, 0x1b, - 0x52, 0x03, 0x70, 0x32, 0x70, 0x52, 0x09, 0x73, 0x65, 0x65, 0x64, 0x2d, 0x70, 0x65, 0x65, 0x72, - 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0xd0, 0x01, 0x01, 0x52, 0x07, 0x70, 0x61, 0x74, - 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x79, 0x73, 0x74, - 0x65, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x79, - 0x73, 0x74, 0x65, 0x6d, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x03, 0x67, 0x69, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x6b, 0x65, 0x65, 0x70, - 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x6b, 0x65, 0x65, 0x70, 0x4f, 0x72, 0x69, 0x67, - 0x69, 0x6e, 0x61, 0x6c, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0x98, 0x01, 0x0a, 
0x0a, 0x44, - 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, - 0x02, 0x10, 0x01, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x70, - 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x32, 0x0a, - 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, - 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, 0x28, 0x00, - 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x6e, 0x67, 0x74, - 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x22, 0x75, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x74, 0x54, 0x61, 0x73, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, - 0x75, 0x72, 0x6c, 0x12, 0x28, 0x0a, 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x55, 0x72, 0x6c, - 0x4d, 0x65, 0x74, 0x61, 0x52, 0x07, 0x75, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1d, 0x0a, - 0x0a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x99, 0x01, 0x0a, - 0x11, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x19, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x28, 0x0a, - 0x08, 0x75, 0x72, 0x6c, 
0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0d, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x55, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x07, - 0x75, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xa5, 0x02, 0x0a, 0x11, 0x45, 0x78, 0x70, - 0x6f, 0x72, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, - 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1f, 0x0a, 0x06, 0x6f, 0x75, 0x74, - 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, - 0x10, 0x01, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x21, 0x0a, 0x07, 0x74, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x32, 0x02, 0x28, 0x00, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x24, 0x0a, - 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, - 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x05, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x12, 0x28, 0x0a, 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x55, 0x72, 0x6c, - 0x4d, 0x65, 0x74, 0x61, 0x52, 0x07, 0x75, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1e, 0x0a, - 0x0a, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x63, 0x61, 0x6c, 0x6c, 0x73, 0x79, 0x73, 
0x74, 0x65, 0x6d, 0x12, 0x10, 0x0a, - 0x03, 0x75, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, - 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x67, 0x69, - 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x6e, 0x6c, 0x79, - 0x22, 0x58, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x75, 0x72, 0x6c, - 0x12, 0x28, 0x0a, 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x55, 0x72, 0x6c, 0x4d, 0x65, 0x74, - 0x61, 0x52, 0x07, 0x75, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x32, 0x87, 0x04, 0x0a, 0x06, 0x44, - 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x08, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, - 0x64, 0x12, 0x15, 0x2e, 0x64, 0x66, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x6f, 0x77, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x64, 0x66, 0x64, 0x61, 0x65, - 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x30, 0x01, - 0x12, 0x3a, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, - 0x73, 0x12, 0x16, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54, 0x61, - 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x62, 0x61, 0x73, 0x65, - 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3d, 0x0a, 0x0b, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 
0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0e, 0x53, - 0x79, 0x6e, 0x63, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, - 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, - 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x28, 0x01, 0x30, 0x01, 0x12, 0x3d, 0x0a, 0x08, - 0x53, 0x74, 0x61, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x19, 0x2e, 0x64, 0x66, 0x64, 0x61, 0x65, - 0x6d, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0a, 0x49, - 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x1b, 0x2e, 0x64, 0x66, 0x64, 0x61, - 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x41, - 0x0a, 0x0a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x1b, 0x2e, 0x64, - 0x66, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x61, - 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x12, 0x41, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, - 0x1b, 0x2e, 0x64, 0x66, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x42, 0x26, 0x5a, 0x24, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x64, - 0x72, 0x61, 0x67, 0x6f, 0x6e, 0x66, 0x6c, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, - 0x72, 0x70, 0x63, 0x2f, 0x64, 0x66, 0x64, 0x61, 0x65, 0x6d, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDescOnce sync.Once - file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDescData = file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDesc -) - -func file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDescGZIP() []byte { - file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDescOnce.Do(func() { - file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDescData) - }) - return file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDescData -} - -var file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_pkg_rpc_dfdaemon_dfdaemon_proto_goTypes = []interface{}{ - (*DownRequest)(nil), // 0: dfdaemon.DownRequest - (*DownResult)(nil), // 1: dfdaemon.DownResult - (*StatTaskRequest)(nil), // 2: dfdaemon.StatTaskRequest - (*ImportTaskRequest)(nil), // 3: dfdaemon.ImportTaskRequest - (*ExportTaskRequest)(nil), // 4: dfdaemon.ExportTaskRequest - (*DeleteTaskRequest)(nil), // 5: dfdaemon.DeleteTaskRequest - (*base.UrlMeta)(nil), // 6: base.UrlMeta - (base.TaskType)(0), // 7: base.TaskType - (*base.PieceTaskRequest)(nil), // 8: base.PieceTaskRequest - (*emptypb.Empty)(nil), // 9: google.protobuf.Empty - (*base.PiecePacket)(nil), // 10: base.PiecePacket -} -var file_pkg_rpc_dfdaemon_dfdaemon_proto_depIdxs = []int32{ - 6, // 0: dfdaemon.DownRequest.url_meta:type_name -> base.UrlMeta - 6, // 1: dfdaemon.StatTaskRequest.url_meta:type_name -> base.UrlMeta - 6, // 2: dfdaemon.ImportTaskRequest.url_meta:type_name -> base.UrlMeta 
- 7, // 3: dfdaemon.ImportTaskRequest.type:type_name -> base.TaskType - 6, // 4: dfdaemon.ExportTaskRequest.url_meta:type_name -> base.UrlMeta - 6, // 5: dfdaemon.DeleteTaskRequest.url_meta:type_name -> base.UrlMeta - 0, // 6: dfdaemon.Daemon.Download:input_type -> dfdaemon.DownRequest - 8, // 7: dfdaemon.Daemon.GetPieceTasks:input_type -> base.PieceTaskRequest - 9, // 8: dfdaemon.Daemon.CheckHealth:input_type -> google.protobuf.Empty - 8, // 9: dfdaemon.Daemon.SyncPieceTasks:input_type -> base.PieceTaskRequest - 2, // 10: dfdaemon.Daemon.StatTask:input_type -> dfdaemon.StatTaskRequest - 3, // 11: dfdaemon.Daemon.ImportTask:input_type -> dfdaemon.ImportTaskRequest - 4, // 12: dfdaemon.Daemon.ExportTask:input_type -> dfdaemon.ExportTaskRequest - 5, // 13: dfdaemon.Daemon.DeleteTask:input_type -> dfdaemon.DeleteTaskRequest - 1, // 14: dfdaemon.Daemon.Download:output_type -> dfdaemon.DownResult - 10, // 15: dfdaemon.Daemon.GetPieceTasks:output_type -> base.PiecePacket - 9, // 16: dfdaemon.Daemon.CheckHealth:output_type -> google.protobuf.Empty - 10, // 17: dfdaemon.Daemon.SyncPieceTasks:output_type -> base.PiecePacket - 9, // 18: dfdaemon.Daemon.StatTask:output_type -> google.protobuf.Empty - 9, // 19: dfdaemon.Daemon.ImportTask:output_type -> google.protobuf.Empty - 9, // 20: dfdaemon.Daemon.ExportTask:output_type -> google.protobuf.Empty - 9, // 21: dfdaemon.Daemon.DeleteTask:output_type -> google.protobuf.Empty - 14, // [14:22] is the sub-list for method output_type - 6, // [6:14] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name -} - -func init() { file_pkg_rpc_dfdaemon_dfdaemon_proto_init() } -func file_pkg_rpc_dfdaemon_dfdaemon_proto_init() { - if File_pkg_rpc_dfdaemon_dfdaemon_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[0].Exporter = func(v interface{}, i 
int) interface{} { - switch v := v.(*DownRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DownResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StatTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExportTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDesc, - NumEnums: 0, - NumMessages: 6, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_pkg_rpc_dfdaemon_dfdaemon_proto_goTypes, - DependencyIndexes: file_pkg_rpc_dfdaemon_dfdaemon_proto_depIdxs, - MessageInfos: file_pkg_rpc_dfdaemon_dfdaemon_proto_msgTypes, - 
}.Build() - File_pkg_rpc_dfdaemon_dfdaemon_proto = out.File - file_pkg_rpc_dfdaemon_dfdaemon_proto_rawDesc = nil - file_pkg_rpc_dfdaemon_dfdaemon_proto_goTypes = nil - file_pkg_rpc_dfdaemon_dfdaemon_proto_depIdxs = nil -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// DaemonClient is the client API for Daemon service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type DaemonClient interface { - // Trigger client to download file - Download(ctx context.Context, in *DownRequest, opts ...grpc.CallOption) (Daemon_DownloadClient, error) - // Get piece tasks from other peers - GetPieceTasks(ctx context.Context, in *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) - // Check daemon health - CheckHealth(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Sync piece tasks with other peers - SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (Daemon_SyncPieceTasksClient, error) - // Check if given task exists in P2P cache system - StatTask(ctx context.Context, in *StatTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Import the given file into P2P cache system - ImportTask(ctx context.Context, in *ImportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Export or download file from P2P cache system - ExportTask(ctx context.Context, in *ExportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Delete file from P2P cache system - DeleteTask(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) -} - -type daemonClient struct { - cc 
grpc.ClientConnInterface -} - -func NewDaemonClient(cc grpc.ClientConnInterface) DaemonClient { - return &daemonClient{cc} -} - -func (c *daemonClient) Download(ctx context.Context, in *DownRequest, opts ...grpc.CallOption) (Daemon_DownloadClient, error) { - stream, err := c.cc.NewStream(ctx, &_Daemon_serviceDesc.Streams[0], "/dfdaemon.Daemon/Download", opts...) - if err != nil { - return nil, err - } - x := &daemonDownloadClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Daemon_DownloadClient interface { - Recv() (*DownResult, error) - grpc.ClientStream -} - -type daemonDownloadClient struct { - grpc.ClientStream -} - -func (x *daemonDownloadClient) Recv() (*DownResult, error) { - m := new(DownResult) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *daemonClient) GetPieceTasks(ctx context.Context, in *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { - out := new(base.PiecePacket) - err := c.cc.Invoke(ctx, "/dfdaemon.Daemon/GetPieceTasks", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *daemonClient) CheckHealth(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/dfdaemon.Daemon/CheckHealth", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *daemonClient) SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (Daemon_SyncPieceTasksClient, error) { - stream, err := c.cc.NewStream(ctx, &_Daemon_serviceDesc.Streams[1], "/dfdaemon.Daemon/SyncPieceTasks", opts...) 
- if err != nil { - return nil, err - } - x := &daemonSyncPieceTasksClient{stream} - return x, nil -} - -type Daemon_SyncPieceTasksClient interface { - Send(*base.PieceTaskRequest) error - Recv() (*base.PiecePacket, error) - grpc.ClientStream -} - -type daemonSyncPieceTasksClient struct { - grpc.ClientStream -} - -func (x *daemonSyncPieceTasksClient) Send(m *base.PieceTaskRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *daemonSyncPieceTasksClient) Recv() (*base.PiecePacket, error) { - m := new(base.PiecePacket) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *daemonClient) StatTask(ctx context.Context, in *StatTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/dfdaemon.Daemon/StatTask", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *daemonClient) ImportTask(ctx context.Context, in *ImportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/dfdaemon.Daemon/ImportTask", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *daemonClient) ExportTask(ctx context.Context, in *ExportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/dfdaemon.Daemon/ExportTask", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *daemonClient) DeleteTask(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/dfdaemon.Daemon/DeleteTask", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// DaemonServer is the server API for Daemon service. 
-type DaemonServer interface { - // Trigger client to download file - Download(*DownRequest, Daemon_DownloadServer) error - // Get piece tasks from other peers - GetPieceTasks(context.Context, *base.PieceTaskRequest) (*base.PiecePacket, error) - // Check daemon health - CheckHealth(context.Context, *emptypb.Empty) (*emptypb.Empty, error) - // Sync piece tasks with other peers - SyncPieceTasks(Daemon_SyncPieceTasksServer) error - // Check if given task exists in P2P cache system - StatTask(context.Context, *StatTaskRequest) (*emptypb.Empty, error) - // Import the given file into P2P cache system - ImportTask(context.Context, *ImportTaskRequest) (*emptypb.Empty, error) - // Export or download file from P2P cache system - ExportTask(context.Context, *ExportTaskRequest) (*emptypb.Empty, error) - // Delete file from P2P cache system - DeleteTask(context.Context, *DeleteTaskRequest) (*emptypb.Empty, error) -} - -// UnimplementedDaemonServer can be embedded to have forward compatible implementations. 
-type UnimplementedDaemonServer struct { -} - -func (*UnimplementedDaemonServer) Download(*DownRequest, Daemon_DownloadServer) error { - return status.Errorf(codes.Unimplemented, "method Download not implemented") -} -func (*UnimplementedDaemonServer) GetPieceTasks(context.Context, *base.PieceTaskRequest) (*base.PiecePacket, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPieceTasks not implemented") -} -func (*UnimplementedDaemonServer) CheckHealth(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method CheckHealth not implemented") -} -func (*UnimplementedDaemonServer) SyncPieceTasks(Daemon_SyncPieceTasksServer) error { - return status.Errorf(codes.Unimplemented, "method SyncPieceTasks not implemented") -} -func (*UnimplementedDaemonServer) StatTask(context.Context, *StatTaskRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method StatTask not implemented") -} -func (*UnimplementedDaemonServer) ImportTask(context.Context, *ImportTaskRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method ImportTask not implemented") -} -func (*UnimplementedDaemonServer) ExportTask(context.Context, *ExportTaskRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method ExportTask not implemented") -} -func (*UnimplementedDaemonServer) DeleteTask(context.Context, *DeleteTaskRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteTask not implemented") -} - -func RegisterDaemonServer(s *grpc.Server, srv DaemonServer) { - s.RegisterService(&_Daemon_serviceDesc, srv) -} - -func _Daemon_Download_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(DownRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(DaemonServer).Download(m, &daemonDownloadServer{stream}) -} - -type Daemon_DownloadServer interface { - 
Send(*DownResult) error - grpc.ServerStream -} - -type daemonDownloadServer struct { - grpc.ServerStream -} - -func (x *daemonDownloadServer) Send(m *DownResult) error { - return x.ServerStream.SendMsg(m) -} - -func _Daemon_GetPieceTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(base.PieceTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DaemonServer).GetPieceTasks(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/dfdaemon.Daemon/GetPieceTasks", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DaemonServer).GetPieceTasks(ctx, req.(*base.PieceTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Daemon_CheckHealth_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DaemonServer).CheckHealth(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/dfdaemon.Daemon/CheckHealth", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DaemonServer).CheckHealth(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Daemon_SyncPieceTasks_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(DaemonServer).SyncPieceTasks(&daemonSyncPieceTasksServer{stream}) -} - -type Daemon_SyncPieceTasksServer interface { - Send(*base.PiecePacket) error - Recv() (*base.PieceTaskRequest, error) - grpc.ServerStream -} - -type daemonSyncPieceTasksServer struct { - grpc.ServerStream -} - -func (x *daemonSyncPieceTasksServer) Send(m *base.PiecePacket) error { - return x.ServerStream.SendMsg(m) -} - -func (x 
*daemonSyncPieceTasksServer) Recv() (*base.PieceTaskRequest, error) { - m := new(base.PieceTaskRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Daemon_StatTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DaemonServer).StatTask(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/dfdaemon.Daemon/StatTask", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DaemonServer).StatTask(ctx, req.(*StatTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Daemon_ImportTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ImportTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DaemonServer).ImportTask(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/dfdaemon.Daemon/ImportTask", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DaemonServer).ImportTask(ctx, req.(*ImportTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Daemon_ExportTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExportTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DaemonServer).ExportTask(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/dfdaemon.Daemon/ExportTask", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DaemonServer).ExportTask(ctx, 
req.(*ExportTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Daemon_DeleteTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DaemonServer).DeleteTask(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/dfdaemon.Daemon/DeleteTask", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DaemonServer).DeleteTask(ctx, req.(*DeleteTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Daemon_serviceDesc = grpc.ServiceDesc{ - ServiceName: "dfdaemon.Daemon", - HandlerType: (*DaemonServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetPieceTasks", - Handler: _Daemon_GetPieceTasks_Handler, - }, - { - MethodName: "CheckHealth", - Handler: _Daemon_CheckHealth_Handler, - }, - { - MethodName: "StatTask", - Handler: _Daemon_StatTask_Handler, - }, - { - MethodName: "ImportTask", - Handler: _Daemon_ImportTask_Handler, - }, - { - MethodName: "ExportTask", - Handler: _Daemon_ExportTask_Handler, - }, - { - MethodName: "DeleteTask", - Handler: _Daemon_DeleteTask_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Download", - Handler: _Daemon_Download_Handler, - ServerStreams: true, - }, - { - StreamName: "SyncPieceTasks", - Handler: _Daemon_SyncPieceTasks_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "pkg/rpc/dfdaemon/dfdaemon.proto", -} diff --git a/pkg/rpc/dfdaemon/dfdaemon.pb.validate.go b/pkg/rpc/dfdaemon/dfdaemon.pb.validate.go deleted file mode 100644 index 70f774abd..000000000 --- a/pkg/rpc/dfdaemon/dfdaemon.pb.validate.go +++ /dev/null @@ -1,654 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. 
-// source: pkg/rpc/dfdaemon/dfdaemon.proto - -package dfdaemon - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" - - base "d7y.io/dragonfly/v2/pkg/rpc/base" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - - _ = base.TaskType(0) -) - -// define the regex for a UUID once up-front -var _dfdaemon_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") - -// Validate checks the field values on DownRequest with the rules defined in -// the proto definition for this message. If any rules are violated, an error -// is returned. -func (m *DownRequest) Validate() error { - if m == nil { - return nil - } - - if err := m._validateUuid(m.GetUuid()); err != nil { - return DownRequestValidationError{ - field: "Uuid", - reason: "value must be a valid UUID", - cause: err, - } - } - - if uri, err := url.Parse(m.GetUrl()); err != nil { - return DownRequestValidationError{ - field: "Url", - reason: "value must be a valid URI", - cause: err, - } - } else if !uri.IsAbs() { - return DownRequestValidationError{ - field: "Url", - reason: "value must be absolute", - } - } - - if utf8.RuneCountInString(m.GetOutput()) < 1 { - return DownRequestValidationError{ - field: "Output", - reason: "value length must be at least 1 runes", - } - } - - if m.GetTimeout() < 0 { - return DownRequestValidationError{ - field: "Timeout", - reason: "value must be greater than or equal to 0", - } - } - - if m.GetLimit() < 0 { - return DownRequestValidationError{ - field: "Limit", - reason: "value must be greater than or equal to 0", - } - } - - // no validation rules for DisableBackSource - - if v, 
ok := interface{}(m.GetUrlMeta()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return DownRequestValidationError{ - field: "UrlMeta", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if m.GetPattern() != "" { - - if _, ok := _DownRequest_Pattern_InLookup[m.GetPattern()]; !ok { - return DownRequestValidationError{ - field: "Pattern", - reason: "value must be in list [p2p seed-peer source]", - } - } - - } - - // no validation rules for Callsystem - - // no validation rules for Uid - - // no validation rules for Gid - - // no validation rules for KeepOriginalOffset - - return nil -} - -func (m *DownRequest) _validateUuid(uuid string) error { - if matched := _dfdaemon_uuidPattern.MatchString(uuid); !matched { - return errors.New("invalid uuid format") - } - - return nil -} - -// DownRequestValidationError is the validation error returned by -// DownRequest.Validate if the designated constraints aren't met. -type DownRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e DownRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e DownRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e DownRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e DownRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e DownRequestValidationError) ErrorName() string { return "DownRequestValidationError" } - -// Error satisfies the builtin error interface -func (e DownRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sDownRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = DownRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = DownRequestValidationError{} - -var _DownRequest_Pattern_InLookup = map[string]struct{}{ - "p2p": {}, - "seed-peer": {}, - "source": {}, -} - -// Validate checks the field values on DownResult with the rules defined in the -// proto definition for this message. If any rules are violated, an error is returned. -func (m *DownResult) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetTaskId()) < 1 { - return DownResultValidationError{ - field: "TaskId", - reason: "value length must be at least 1 runes", - } - } - - if utf8.RuneCountInString(m.GetPeerId()) < 1 { - return DownResultValidationError{ - field: "PeerId", - reason: "value length must be at least 1 runes", - } - } - - if m.GetCompletedLength() < 0 { - return DownResultValidationError{ - field: "CompletedLength", - reason: "value must be greater than or equal to 0", - } - } - - // no validation rules for Done - - return nil -} - -// DownResultValidationError is the validation error returned by -// DownResult.Validate if the designated constraints aren't met. -type DownResultValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e DownResultValidationError) Field() string { return e.field } - -// Reason function returns reason value. 
-func (e DownResultValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e DownResultValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e DownResultValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e DownResultValidationError) ErrorName() string { return "DownResultValidationError" } - -// Error satisfies the builtin error interface -func (e DownResultValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sDownResult.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = DownResultValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = DownResultValidationError{} - -// Validate checks the field values on StatTaskRequest with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. -func (m *StatTaskRequest) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetUrl()) < 1 { - return StatTaskRequestValidationError{ - field: "Url", - reason: "value length must be at least 1 runes", - } - } - - if v, ok := interface{}(m.GetUrlMeta()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return StatTaskRequestValidationError{ - field: "UrlMeta", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for LocalOnly - - return nil -} - -// StatTaskRequestValidationError is the validation error returned by -// StatTaskRequest.Validate if the designated constraints aren't met. -type StatTaskRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. 
-func (e StatTaskRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e StatTaskRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e StatTaskRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e StatTaskRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e StatTaskRequestValidationError) ErrorName() string { return "StatTaskRequestValidationError" } - -// Error satisfies the builtin error interface -func (e StatTaskRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sStatTaskRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = StatTaskRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = StatTaskRequestValidationError{} - -// Validate checks the field values on ImportTaskRequest with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. 
-func (m *ImportTaskRequest) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetUrl()) < 1 { - return ImportTaskRequestValidationError{ - field: "Url", - reason: "value length must be at least 1 runes", - } - } - - if v, ok := interface{}(m.GetUrlMeta()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ImportTaskRequestValidationError{ - field: "UrlMeta", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if utf8.RuneCountInString(m.GetPath()) < 1 { - return ImportTaskRequestValidationError{ - field: "Path", - reason: "value length must be at least 1 runes", - } - } - - // no validation rules for Type - - return nil -} - -// ImportTaskRequestValidationError is the validation error returned by -// ImportTaskRequest.Validate if the designated constraints aren't met. -type ImportTaskRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ImportTaskRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ImportTaskRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ImportTaskRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ImportTaskRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e ImportTaskRequestValidationError) ErrorName() string { - return "ImportTaskRequestValidationError" -} - -// Error satisfies the builtin error interface -func (e ImportTaskRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sImportTaskRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ImportTaskRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ImportTaskRequestValidationError{} - -// Validate checks the field values on ExportTaskRequest with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. -func (m *ExportTaskRequest) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetUrl()) < 1 { - return ExportTaskRequestValidationError{ - field: "Url", - reason: "value length must be at least 1 runes", - } - } - - if utf8.RuneCountInString(m.GetOutput()) < 1 { - return ExportTaskRequestValidationError{ - field: "Output", - reason: "value length must be at least 1 runes", - } - } - - if m.GetTimeout() < 0 { - return ExportTaskRequestValidationError{ - field: "Timeout", - reason: "value must be greater than or equal to 0", - } - } - - if m.GetLimit() < 0 { - return ExportTaskRequestValidationError{ - field: "Limit", - reason: "value must be greater than or equal to 0", - } - } - - if v, ok := interface{}(m.GetUrlMeta()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ExportTaskRequestValidationError{ - field: "UrlMeta", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for Callsystem - - // no validation rules for Uid - - // no validation rules for Gid - - // no validation rules for LocalOnly - - return nil -} 
- -// ExportTaskRequestValidationError is the validation error returned by -// ExportTaskRequest.Validate if the designated constraints aren't met. -type ExportTaskRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ExportTaskRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ExportTaskRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ExportTaskRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ExportTaskRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e ExportTaskRequestValidationError) ErrorName() string { - return "ExportTaskRequestValidationError" -} - -// Error satisfies the builtin error interface -func (e ExportTaskRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sExportTaskRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ExportTaskRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ExportTaskRequestValidationError{} - -// Validate checks the field values on DeleteTaskRequest with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. 
-func (m *DeleteTaskRequest) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetUrl()) < 1 { - return DeleteTaskRequestValidationError{ - field: "Url", - reason: "value length must be at least 1 runes", - } - } - - if v, ok := interface{}(m.GetUrlMeta()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return DeleteTaskRequestValidationError{ - field: "UrlMeta", - reason: "embedded message failed validation", - cause: err, - } - } - } - - return nil -} - -// DeleteTaskRequestValidationError is the validation error returned by -// DeleteTaskRequest.Validate if the designated constraints aren't met. -type DeleteTaskRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e DeleteTaskRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e DeleteTaskRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e DeleteTaskRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e DeleteTaskRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e DeleteTaskRequestValidationError) ErrorName() string { - return "DeleteTaskRequestValidationError" -} - -// Error satisfies the builtin error interface -func (e DeleteTaskRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sDeleteTaskRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = DeleteTaskRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = DeleteTaskRequestValidationError{} diff --git a/pkg/rpc/dfdaemon/dfdaemon.proto b/pkg/rpc/dfdaemon/dfdaemon.proto deleted file mode 100644 index 7da07103c..000000000 --- a/pkg/rpc/dfdaemon/dfdaemon.proto +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -syntax = "proto3"; - -package dfdaemon; - -import "pkg/rpc/base/base.proto"; -import "google/protobuf/empty.proto"; -import "validate/validate.proto"; - -option go_package = "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"; - -message DownRequest{ - // Identify one downloading, the framework will fill it automatically. - string uuid = 1 [(validate.rules).string.uuid = true]; - // Download file from the url, not only for http. 
- string url = 2 [(validate.rules).string.uri = true]; - // Pieces will be written to output path directly, - // at the same time, dfdaemon workspace also makes soft link to the output. - string output = 3 [(validate.rules).string.min_len = 1]; - // Timeout duration. - uint64 timeout = 4 [(validate.rules).uint64.gte = 0]; - // Rate limit in bytes per second. - double limit = 5 [(validate.rules).double.gte = 0]; - // Disable back-to-source. - bool disable_back_source = 6; - // URL meta info. - base.UrlMeta url_meta = 7; - // Pattern has p2p/seed-peer/source, default is p2p. - string pattern = 8 [(validate.rules).string = {in:["p2p", "seed-peer", "source"], ignore_empty:true}]; - // Call system. - string callsystem = 9; - // User id. - int64 uid = 10; - // Group id. - int64 gid = 11; - // Keep original offset, used for ranged request, only available for hard link, otherwise will failed. - bool keep_original_offset = 12; -} - -message DownResult{ - // Task id. - string task_id = 2 [(validate.rules).string.min_len = 1]; - // Peer id. - string peer_id = 3 [(validate.rules).string.min_len = 1]; - // Task has completed length. - uint64 completed_length = 4 [(validate.rules).uint64.gte = 0]; - // Task has been completed. - bool done = 5; -} - -message StatTaskRequest{ - // Download url. - string url = 1 [(validate.rules).string.min_len = 1]; - // URL meta info. - base.UrlMeta url_meta = 2; - // Check local cache only. - bool local_only = 3; -} - -message ImportTaskRequest{ - // Download url. - string url = 1 [(validate.rules).string.min_len = 1]; - // URL meta info. - base.UrlMeta url_meta = 2; - // File to be imported. - string path = 3 [(validate.rules).string.min_len = 1]; - // Task type. - base.TaskType type = 4; -} - -message ExportTaskRequest{ - // Download url. - string url = 1 [(validate.rules).string.min_len = 1]; - // Output path of downloaded file. - string output = 2 [(validate.rules).string.min_len = 1]; - // Timeout duration. 
- uint64 timeout = 3 [(validate.rules).uint64.gte = 0]; - // Rate limit in bytes per second. - double limit = 4 [(validate.rules).double.gte = 0]; - // URL meta info. - base.UrlMeta url_meta = 5; - // Call system. - string callsystem = 6; - // User id. - int64 uid = 7; - // Group id. - int64 gid = 8; - // Only export from local storage. - bool local_only = 9; -} - -message DeleteTaskRequest{ - // Download url. - string url = 1 [(validate.rules).string.min_len = 1]; - // URL meta info. - base.UrlMeta url_meta = 2; -} - -// Daemon Client RPC Service -service Daemon{ - // Trigger client to download file - rpc Download(DownRequest) returns(stream DownResult); - // Get piece tasks from other peers - rpc GetPieceTasks(base.PieceTaskRequest)returns(base.PiecePacket); - // Check daemon health - rpc CheckHealth(google.protobuf.Empty)returns(google.protobuf.Empty); - // Sync piece tasks with other peers - rpc SyncPieceTasks(stream base.PieceTaskRequest)returns(stream base.PiecePacket); - // Check if given task exists in P2P cache system - rpc StatTask(StatTaskRequest) returns(google.protobuf.Empty); - // Import the given file into P2P cache system - rpc ImportTask(ImportTaskRequest) returns(google.protobuf.Empty); - // Export or download file from P2P cache system - rpc ExportTask(ExportTaskRequest) returns(google.protobuf.Empty); - // Delete file from P2P cache system - rpc DeleteTask(DeleteTaskRequest) returns(google.protobuf.Empty); -} diff --git a/pkg/rpc/dfdaemon/mocks/dfdaemon_mock.go b/pkg/rpc/dfdaemon/mocks/dfdaemon_mock.go deleted file mode 100644 index 322ff28d3..000000000 --- a/pkg/rpc/dfdaemon/mocks/dfdaemon_mock.go +++ /dev/null @@ -1,854 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: dfdaemon/dfdaemon.pb.go - -// Package mocks is a generated GoMock package. 
-package mocks - -import ( - context "context" - reflect "reflect" - - base "d7y.io/dragonfly/v2/pkg/rpc/base" - dfdaemon "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" - gomock "github.com/golang/mock/gomock" - grpc "google.golang.org/grpc" - metadata "google.golang.org/grpc/metadata" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// MockDaemonClient is a mock of DaemonClient interface. -type MockDaemonClient struct { - ctrl *gomock.Controller - recorder *MockDaemonClientMockRecorder -} - -// MockDaemonClientMockRecorder is the mock recorder for MockDaemonClient. -type MockDaemonClientMockRecorder struct { - mock *MockDaemonClient -} - -// NewMockDaemonClient creates a new mock instance. -func NewMockDaemonClient(ctrl *gomock.Controller) *MockDaemonClient { - mock := &MockDaemonClient{ctrl: ctrl} - mock.recorder = &MockDaemonClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDaemonClient) EXPECT() *MockDaemonClientMockRecorder { - return m.recorder -} - -// CheckHealth mocks base method. -func (m *MockDaemonClient) CheckHealth(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "CheckHealth", varargs...) - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CheckHealth indicates an expected call of CheckHealth. -func (mr *MockDaemonClientMockRecorder) CheckHealth(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckHealth", reflect.TypeOf((*MockDaemonClient)(nil).CheckHealth), varargs...) -} - -// DeleteTask mocks base method. 
-func (m *MockDaemonClient) DeleteTask(ctx context.Context, in *dfdaemon.DeleteTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteTask", varargs...) - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteTask indicates an expected call of DeleteTask. -func (mr *MockDaemonClientMockRecorder) DeleteTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTask", reflect.TypeOf((*MockDaemonClient)(nil).DeleteTask), varargs...) -} - -// Download mocks base method. -func (m *MockDaemonClient) Download(ctx context.Context, in *dfdaemon.DownRequest, opts ...grpc.CallOption) (dfdaemon.Daemon_DownloadClient, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Download", varargs...) - ret0, _ := ret[0].(dfdaemon.Daemon_DownloadClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Download indicates an expected call of Download. -func (mr *MockDaemonClientMockRecorder) Download(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockDaemonClient)(nil).Download), varargs...) -} - -// ExportTask mocks base method. -func (m *MockDaemonClient) ExportTask(ctx context.Context, in *dfdaemon.ExportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ExportTask", varargs...) 
- ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExportTask indicates an expected call of ExportTask. -func (mr *MockDaemonClientMockRecorder) ExportTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportTask", reflect.TypeOf((*MockDaemonClient)(nil).ExportTask), varargs...) -} - -// GetPieceTasks mocks base method. -func (m *MockDaemonClient) GetPieceTasks(ctx context.Context, in *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetPieceTasks", varargs...) - ret0, _ := ret[0].(*base.PiecePacket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPieceTasks indicates an expected call of GetPieceTasks. -func (mr *MockDaemonClientMockRecorder) GetPieceTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockDaemonClient)(nil).GetPieceTasks), varargs...) -} - -// ImportTask mocks base method. -func (m *MockDaemonClient) ImportTask(ctx context.Context, in *dfdaemon.ImportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ImportTask", varargs...) - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ImportTask indicates an expected call of ImportTask. -func (mr *MockDaemonClientMockRecorder) ImportTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportTask", reflect.TypeOf((*MockDaemonClient)(nil).ImportTask), varargs...) -} - -// StatTask mocks base method. -func (m *MockDaemonClient) StatTask(ctx context.Context, in *dfdaemon.StatTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "StatTask", varargs...) - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// StatTask indicates an expected call of StatTask. -func (mr *MockDaemonClientMockRecorder) StatTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatTask", reflect.TypeOf((*MockDaemonClient)(nil).StatTask), varargs...) -} - -// SyncPieceTasks mocks base method. -func (m *MockDaemonClient) SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (dfdaemon.Daemon_SyncPieceTasksClient, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "SyncPieceTasks", varargs...) - ret0, _ := ret[0].(dfdaemon.Daemon_SyncPieceTasksClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SyncPieceTasks indicates an expected call of SyncPieceTasks. -func (mr *MockDaemonClientMockRecorder) SyncPieceTasks(ctx interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPieceTasks", reflect.TypeOf((*MockDaemonClient)(nil).SyncPieceTasks), varargs...) -} - -// MockDaemon_DownloadClient is a mock of Daemon_DownloadClient interface. 
-type MockDaemon_DownloadClient struct { - ctrl *gomock.Controller - recorder *MockDaemon_DownloadClientMockRecorder -} - -// MockDaemon_DownloadClientMockRecorder is the mock recorder for MockDaemon_DownloadClient. -type MockDaemon_DownloadClientMockRecorder struct { - mock *MockDaemon_DownloadClient -} - -// NewMockDaemon_DownloadClient creates a new mock instance. -func NewMockDaemon_DownloadClient(ctrl *gomock.Controller) *MockDaemon_DownloadClient { - mock := &MockDaemon_DownloadClient{ctrl: ctrl} - mock.recorder = &MockDaemon_DownloadClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDaemon_DownloadClient) EXPECT() *MockDaemon_DownloadClientMockRecorder { - return m.recorder -} - -// CloseSend mocks base method. -func (m *MockDaemon_DownloadClient) CloseSend() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseSend") - ret0, _ := ret[0].(error) - return ret0 -} - -// CloseSend indicates an expected call of CloseSend. -func (mr *MockDaemon_DownloadClientMockRecorder) CloseSend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).CloseSend)) -} - -// Context mocks base method. -func (m *MockDaemon_DownloadClient) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context. -func (mr *MockDaemon_DownloadClientMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).Context)) -} - -// Header mocks base method. 
-func (m *MockDaemon_DownloadClient) Header() (metadata.MD, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Header") - ret0, _ := ret[0].(metadata.MD) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Header indicates an expected call of Header. -func (mr *MockDaemon_DownloadClientMockRecorder) Header() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).Header)) -} - -// Recv mocks base method. -func (m *MockDaemon_DownloadClient) Recv() (*dfdaemon.DownResult, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*dfdaemon.DownResult) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Recv indicates an expected call of Recv. -func (mr *MockDaemon_DownloadClientMockRecorder) Recv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).Recv)) -} - -// RecvMsg mocks base method. -func (m_2 *MockDaemon_DownloadClient) RecvMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "RecvMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg. -func (mr *MockDaemon_DownloadClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).RecvMsg), m) -} - -// SendMsg mocks base method. -func (m_2 *MockDaemon_DownloadClient) SendMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "SendMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg. 
-func (mr *MockDaemon_DownloadClientMockRecorder) SendMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).SendMsg), m) -} - -// Trailer mocks base method. -func (m *MockDaemon_DownloadClient) Trailer() metadata.MD { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Trailer") - ret0, _ := ret[0].(metadata.MD) - return ret0 -} - -// Trailer indicates an expected call of Trailer. -func (mr *MockDaemon_DownloadClientMockRecorder) Trailer() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).Trailer)) -} - -// MockDaemon_SyncPieceTasksClient is a mock of Daemon_SyncPieceTasksClient interface. -type MockDaemon_SyncPieceTasksClient struct { - ctrl *gomock.Controller - recorder *MockDaemon_SyncPieceTasksClientMockRecorder -} - -// MockDaemon_SyncPieceTasksClientMockRecorder is the mock recorder for MockDaemon_SyncPieceTasksClient. -type MockDaemon_SyncPieceTasksClientMockRecorder struct { - mock *MockDaemon_SyncPieceTasksClient -} - -// NewMockDaemon_SyncPieceTasksClient creates a new mock instance. -func NewMockDaemon_SyncPieceTasksClient(ctrl *gomock.Controller) *MockDaemon_SyncPieceTasksClient { - mock := &MockDaemon_SyncPieceTasksClient{ctrl: ctrl} - mock.recorder = &MockDaemon_SyncPieceTasksClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDaemon_SyncPieceTasksClient) EXPECT() *MockDaemon_SyncPieceTasksClientMockRecorder { - return m.recorder -} - -// CloseSend mocks base method. -func (m *MockDaemon_SyncPieceTasksClient) CloseSend() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseSend") - ret0, _ := ret[0].(error) - return ret0 -} - -// CloseSend indicates an expected call of CloseSend. 
-func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) CloseSend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).CloseSend)) -} - -// Context mocks base method. -func (m *MockDaemon_SyncPieceTasksClient) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context. -func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).Context)) -} - -// Header mocks base method. -func (m *MockDaemon_SyncPieceTasksClient) Header() (metadata.MD, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Header") - ret0, _ := ret[0].(metadata.MD) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Header indicates an expected call of Header. -func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) Header() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).Header)) -} - -// Recv mocks base method. -func (m *MockDaemon_SyncPieceTasksClient) Recv() (*base.PiecePacket, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*base.PiecePacket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Recv indicates an expected call of Recv. -func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) Recv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).Recv)) -} - -// RecvMsg mocks base method. 
-func (m_2 *MockDaemon_SyncPieceTasksClient) RecvMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "RecvMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg. -func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).RecvMsg), m) -} - -// Send mocks base method. -func (m *MockDaemon_SyncPieceTasksClient) Send(arg0 *base.PieceTaskRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send. -func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) Send(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).Send), arg0) -} - -// SendMsg mocks base method. -func (m_2 *MockDaemon_SyncPieceTasksClient) SendMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "SendMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg. -func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) SendMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).SendMsg), m) -} - -// Trailer mocks base method. -func (m *MockDaemon_SyncPieceTasksClient) Trailer() metadata.MD { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Trailer") - ret0, _ := ret[0].(metadata.MD) - return ret0 -} - -// Trailer indicates an expected call of Trailer. 
-func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) Trailer() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).Trailer)) -} - -// MockDaemonServer is a mock of DaemonServer interface. -type MockDaemonServer struct { - ctrl *gomock.Controller - recorder *MockDaemonServerMockRecorder -} - -// MockDaemonServerMockRecorder is the mock recorder for MockDaemonServer. -type MockDaemonServerMockRecorder struct { - mock *MockDaemonServer -} - -// NewMockDaemonServer creates a new mock instance. -func NewMockDaemonServer(ctrl *gomock.Controller) *MockDaemonServer { - mock := &MockDaemonServer{ctrl: ctrl} - mock.recorder = &MockDaemonServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDaemonServer) EXPECT() *MockDaemonServerMockRecorder { - return m.recorder -} - -// CheckHealth mocks base method. -func (m *MockDaemonServer) CheckHealth(arg0 context.Context, arg1 *emptypb.Empty) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CheckHealth", arg0, arg1) - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CheckHealth indicates an expected call of CheckHealth. -func (mr *MockDaemonServerMockRecorder) CheckHealth(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckHealth", reflect.TypeOf((*MockDaemonServer)(nil).CheckHealth), arg0, arg1) -} - -// DeleteTask mocks base method. -func (m *MockDaemonServer) DeleteTask(arg0 context.Context, arg1 *dfdaemon.DeleteTaskRequest) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteTask", arg0, arg1) - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteTask indicates an expected call of DeleteTask. 
-func (mr *MockDaemonServerMockRecorder) DeleteTask(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTask", reflect.TypeOf((*MockDaemonServer)(nil).DeleteTask), arg0, arg1) -} - -// Download mocks base method. -func (m *MockDaemonServer) Download(arg0 *dfdaemon.DownRequest, arg1 dfdaemon.Daemon_DownloadServer) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Download", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Download indicates an expected call of Download. -func (mr *MockDaemonServerMockRecorder) Download(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockDaemonServer)(nil).Download), arg0, arg1) -} - -// ExportTask mocks base method. -func (m *MockDaemonServer) ExportTask(arg0 context.Context, arg1 *dfdaemon.ExportTaskRequest) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExportTask", arg0, arg1) - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ExportTask indicates an expected call of ExportTask. -func (mr *MockDaemonServerMockRecorder) ExportTask(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportTask", reflect.TypeOf((*MockDaemonServer)(nil).ExportTask), arg0, arg1) -} - -// GetPieceTasks mocks base method. -func (m *MockDaemonServer) GetPieceTasks(arg0 context.Context, arg1 *base.PieceTaskRequest) (*base.PiecePacket, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPieceTasks", arg0, arg1) - ret0, _ := ret[0].(*base.PiecePacket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPieceTasks indicates an expected call of GetPieceTasks. 
-func (mr *MockDaemonServerMockRecorder) GetPieceTasks(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockDaemonServer)(nil).GetPieceTasks), arg0, arg1) -} - -// ImportTask mocks base method. -func (m *MockDaemonServer) ImportTask(arg0 context.Context, arg1 *dfdaemon.ImportTaskRequest) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImportTask", arg0, arg1) - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ImportTask indicates an expected call of ImportTask. -func (mr *MockDaemonServerMockRecorder) ImportTask(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportTask", reflect.TypeOf((*MockDaemonServer)(nil).ImportTask), arg0, arg1) -} - -// StatTask mocks base method. -func (m *MockDaemonServer) StatTask(arg0 context.Context, arg1 *dfdaemon.StatTaskRequest) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StatTask", arg0, arg1) - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// StatTask indicates an expected call of StatTask. -func (mr *MockDaemonServerMockRecorder) StatTask(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatTask", reflect.TypeOf((*MockDaemonServer)(nil).StatTask), arg0, arg1) -} - -// SyncPieceTasks mocks base method. -func (m *MockDaemonServer) SyncPieceTasks(arg0 dfdaemon.Daemon_SyncPieceTasksServer) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SyncPieceTasks", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SyncPieceTasks indicates an expected call of SyncPieceTasks. 
-func (mr *MockDaemonServerMockRecorder) SyncPieceTasks(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPieceTasks", reflect.TypeOf((*MockDaemonServer)(nil).SyncPieceTasks), arg0) -} - -// MockDaemon_DownloadServer is a mock of Daemon_DownloadServer interface. -type MockDaemon_DownloadServer struct { - ctrl *gomock.Controller - recorder *MockDaemon_DownloadServerMockRecorder -} - -// MockDaemon_DownloadServerMockRecorder is the mock recorder for MockDaemon_DownloadServer. -type MockDaemon_DownloadServerMockRecorder struct { - mock *MockDaemon_DownloadServer -} - -// NewMockDaemon_DownloadServer creates a new mock instance. -func NewMockDaemon_DownloadServer(ctrl *gomock.Controller) *MockDaemon_DownloadServer { - mock := &MockDaemon_DownloadServer{ctrl: ctrl} - mock.recorder = &MockDaemon_DownloadServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDaemon_DownloadServer) EXPECT() *MockDaemon_DownloadServerMockRecorder { - return m.recorder -} - -// Context mocks base method. -func (m *MockDaemon_DownloadServer) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context. -func (mr *MockDaemon_DownloadServerMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).Context)) -} - -// RecvMsg mocks base method. -func (m_2 *MockDaemon_DownloadServer) RecvMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "RecvMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg. 
-func (mr *MockDaemon_DownloadServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).RecvMsg), m) -} - -// Send mocks base method. -func (m *MockDaemon_DownloadServer) Send(arg0 *dfdaemon.DownResult) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send. -func (mr *MockDaemon_DownloadServerMockRecorder) Send(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).Send), arg0) -} - -// SendHeader mocks base method. -func (m *MockDaemon_DownloadServer) SendHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendHeader indicates an expected call of SendHeader. -func (mr *MockDaemon_DownloadServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SendHeader), arg0) -} - -// SendMsg mocks base method. -func (m_2 *MockDaemon_DownloadServer) SendMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "SendMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg. -func (mr *MockDaemon_DownloadServerMockRecorder) SendMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SendMsg), m) -} - -// SetHeader mocks base method. 
-func (m *MockDaemon_DownloadServer) SetHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetHeader indicates an expected call of SetHeader. -func (mr *MockDaemon_DownloadServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SetHeader), arg0) -} - -// SetTrailer mocks base method. -func (m *MockDaemon_DownloadServer) SetTrailer(arg0 metadata.MD) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetTrailer", arg0) -} - -// SetTrailer indicates an expected call of SetTrailer. -func (mr *MockDaemon_DownloadServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SetTrailer), arg0) -} - -// MockDaemon_SyncPieceTasksServer is a mock of Daemon_SyncPieceTasksServer interface. -type MockDaemon_SyncPieceTasksServer struct { - ctrl *gomock.Controller - recorder *MockDaemon_SyncPieceTasksServerMockRecorder -} - -// MockDaemon_SyncPieceTasksServerMockRecorder is the mock recorder for MockDaemon_SyncPieceTasksServer. -type MockDaemon_SyncPieceTasksServerMockRecorder struct { - mock *MockDaemon_SyncPieceTasksServer -} - -// NewMockDaemon_SyncPieceTasksServer creates a new mock instance. -func NewMockDaemon_SyncPieceTasksServer(ctrl *gomock.Controller) *MockDaemon_SyncPieceTasksServer { - mock := &MockDaemon_SyncPieceTasksServer{ctrl: ctrl} - mock.recorder = &MockDaemon_SyncPieceTasksServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDaemon_SyncPieceTasksServer) EXPECT() *MockDaemon_SyncPieceTasksServerMockRecorder { - return m.recorder -} - -// Context mocks base method. 
-func (m *MockDaemon_SyncPieceTasksServer) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context. -func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).Context)) -} - -// Recv mocks base method. -func (m *MockDaemon_SyncPieceTasksServer) Recv() (*base.PieceTaskRequest, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*base.PieceTaskRequest) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Recv indicates an expected call of Recv. -func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) Recv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).Recv)) -} - -// RecvMsg mocks base method. -func (m_2 *MockDaemon_SyncPieceTasksServer) RecvMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "RecvMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg. -func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).RecvMsg), m) -} - -// Send mocks base method. -func (m *MockDaemon_SyncPieceTasksServer) Send(arg0 *base.PiecePacket) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send. 
-func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) Send(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).Send), arg0) -} - -// SendHeader mocks base method. -func (m *MockDaemon_SyncPieceTasksServer) SendHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendHeader indicates an expected call of SendHeader. -func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).SendHeader), arg0) -} - -// SendMsg mocks base method. -func (m_2 *MockDaemon_SyncPieceTasksServer) SendMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "SendMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg. -func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SendMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).SendMsg), m) -} - -// SetHeader mocks base method. -func (m *MockDaemon_SyncPieceTasksServer) SetHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetHeader indicates an expected call of SetHeader. -func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).SetHeader), arg0) -} - -// SetTrailer mocks base method. 
-func (m *MockDaemon_SyncPieceTasksServer) SetTrailer(arg0 metadata.MD) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetTrailer", arg0) -} - -// SetTrailer indicates an expected call of SetTrailer. -func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).SetTrailer), arg0) -} diff --git a/pkg/rpc/dfdaemon/server/mocks/server_mock.go b/pkg/rpc/dfdaemon/server/mocks/server_mock.go index e8f8a23cd..df9363d02 100644 --- a/pkg/rpc/dfdaemon/server/mocks/server_mock.go +++ b/pkg/rpc/dfdaemon/server/mocks/server_mock.go @@ -8,8 +8,8 @@ import ( context "context" reflect "reflect" - base "d7y.io/dragonfly/v2/pkg/rpc/base" - dfdaemon "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" + v1 "d7y.io/api/pkg/apis/common/v1" + v10 "d7y.io/api/pkg/apis/dfdaemon/v1" gomock "github.com/golang/mock/gomock" ) @@ -51,7 +51,7 @@ func (mr *MockDaemonServerMockRecorder) CheckHealth(arg0 interface{}) *gomock.Ca } // DeleteTask mocks base method. -func (m *MockDaemonServer) DeleteTask(arg0 context.Context, arg1 *dfdaemon.DeleteTaskRequest) error { +func (m *MockDaemonServer) DeleteTask(arg0 context.Context, arg1 *v10.DeleteTaskRequest) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteTask", arg0, arg1) ret0, _ := ret[0].(error) @@ -65,7 +65,7 @@ func (mr *MockDaemonServerMockRecorder) DeleteTask(arg0, arg1 interface{}) *gomo } // Download mocks base method. 
-func (m *MockDaemonServer) Download(arg0 context.Context, arg1 *dfdaemon.DownRequest, arg2 chan<- *dfdaemon.DownResult) error { +func (m *MockDaemonServer) Download(arg0 context.Context, arg1 *v10.DownRequest, arg2 chan<- *v10.DownResult) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Download", arg0, arg1, arg2) ret0, _ := ret[0].(error) @@ -79,7 +79,7 @@ func (mr *MockDaemonServerMockRecorder) Download(arg0, arg1, arg2 interface{}) * } // ExportTask mocks base method. -func (m *MockDaemonServer) ExportTask(arg0 context.Context, arg1 *dfdaemon.ExportTaskRequest) error { +func (m *MockDaemonServer) ExportTask(arg0 context.Context, arg1 *v10.ExportTaskRequest) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ExportTask", arg0, arg1) ret0, _ := ret[0].(error) @@ -93,10 +93,10 @@ func (mr *MockDaemonServerMockRecorder) ExportTask(arg0, arg1 interface{}) *gomo } // GetPieceTasks mocks base method. -func (m *MockDaemonServer) GetPieceTasks(arg0 context.Context, arg1 *base.PieceTaskRequest) (*base.PiecePacket, error) { +func (m *MockDaemonServer) GetPieceTasks(arg0 context.Context, arg1 *v1.PieceTaskRequest) (*v1.PiecePacket, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetPieceTasks", arg0, arg1) - ret0, _ := ret[0].(*base.PiecePacket) + ret0, _ := ret[0].(*v1.PiecePacket) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -108,7 +108,7 @@ func (mr *MockDaemonServerMockRecorder) GetPieceTasks(arg0, arg1 interface{}) *g } // ImportTask mocks base method. -func (m *MockDaemonServer) ImportTask(arg0 context.Context, arg1 *dfdaemon.ImportTaskRequest) error { +func (m *MockDaemonServer) ImportTask(arg0 context.Context, arg1 *v10.ImportTaskRequest) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ImportTask", arg0, arg1) ret0, _ := ret[0].(error) @@ -122,7 +122,7 @@ func (mr *MockDaemonServerMockRecorder) ImportTask(arg0, arg1 interface{}) *gomo } // StatTask mocks base method. 
-func (m *MockDaemonServer) StatTask(arg0 context.Context, arg1 *dfdaemon.StatTaskRequest) error { +func (m *MockDaemonServer) StatTask(arg0 context.Context, arg1 *v10.StatTaskRequest) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StatTask", arg0, arg1) ret0, _ := ret[0].(error) @@ -136,7 +136,7 @@ func (mr *MockDaemonServerMockRecorder) StatTask(arg0, arg1 interface{}) *gomock } // SyncPieceTasks mocks base method. -func (m *MockDaemonServer) SyncPieceTasks(arg0 dfdaemon.Daemon_SyncPieceTasksServer) error { +func (m *MockDaemonServer) SyncPieceTasks(arg0 v10.Daemon_SyncPieceTasksServer) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyncPieceTasks", arg0) ret0, _ := ret[0].(error) diff --git a/pkg/rpc/dfdaemon/server/server.go b/pkg/rpc/dfdaemon/server/server.go index 5aa1146c8..cb0d98c02 100644 --- a/pkg/rpc/dfdaemon/server/server.go +++ b/pkg/rpc/dfdaemon/server/server.go @@ -26,46 +26,47 @@ import ( "google.golang.org/grpc/peer" "google.golang.org/protobuf/types/known/emptypb" + commonv1 "d7y.io/api/pkg/apis/common/v1" + dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1" + "d7y.io/dragonfly/v2/internal/dferrors" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/rpc" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" "d7y.io/dragonfly/v2/pkg/safe" ) -// DaemonServer refer to dfdaemon.DaemonServer +// DaemonServer refer to dfdaemonv1.DaemonServer type DaemonServer interface { // Download triggers client to download file - Download(context.Context, *dfdaemon.DownRequest, chan<- *dfdaemon.DownResult) error + Download(context.Context, *dfdaemonv1.DownRequest, chan<- *dfdaemonv1.DownResult) error // GetPieceTasks get piece tasks from other peers - GetPieceTasks(context.Context, *base.PieceTaskRequest) (*base.PiecePacket, error) + GetPieceTasks(context.Context, *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) // SyncPieceTasks sync piece tasks info with other peers - 
SyncPieceTasks(dfdaemon.Daemon_SyncPieceTasksServer) error + SyncPieceTasks(dfdaemonv1.Daemon_SyncPieceTasksServer) error // CheckHealth check daemon health CheckHealth(context.Context) error // Check if the given task exists in P2P cache system - StatTask(context.Context, *dfdaemon.StatTaskRequest) error + StatTask(context.Context, *dfdaemonv1.StatTaskRequest) error // Import the given file into P2P cache system - ImportTask(context.Context, *dfdaemon.ImportTaskRequest) error + ImportTask(context.Context, *dfdaemonv1.ImportTaskRequest) error // Export or download file from P2P cache system - ExportTask(context.Context, *dfdaemon.ExportTaskRequest) error + ExportTask(context.Context, *dfdaemonv1.ExportTaskRequest) error // Delete file from P2P cache system - DeleteTask(context.Context, *dfdaemon.DeleteTaskRequest) error + DeleteTask(context.Context, *dfdaemonv1.DeleteTaskRequest) error } type proxy struct { server DaemonServer - dfdaemon.UnimplementedDaemonServer + dfdaemonv1.UnimplementedDaemonServer } func New(daemonServer DaemonServer, opts ...grpc.ServerOption) *grpc.Server { grpcServer := grpc.NewServer(append(rpc.DefaultServerOptions(), opts...)...) 
- dfdaemon.RegisterDaemonServer(grpcServer, &proxy{server: daemonServer}) + dfdaemonv1.RegisterDaemonServer(grpcServer, &proxy{server: daemonServer}) return grpcServer } -func (p *proxy) Download(req *dfdaemon.DownRequest, stream dfdaemon.Daemon_DownloadServer) (err error) { +func (p *proxy) Download(req *dfdaemonv1.DownRequest, stream dfdaemonv1.Daemon_DownloadServer) (err error) { ctx, cancel := context.WithCancel(stream.Context()) defer cancel() @@ -76,7 +77,7 @@ func (p *proxy) Download(req *dfdaemon.DownRequest, stream dfdaemon.Daemon_Downl logger.Infof("trigger download for url: %s, from: %s, uuid: %s", req.Url, peerAddr, req.Uuid) errChan := make(chan error, 10) - drc := make(chan *dfdaemon.DownResult, 4) + drc := make(chan *dfdaemonv1.DownResult, 4) once := new(sync.Once) closeDrc := func() { @@ -97,11 +98,11 @@ func (p *proxy) Download(req *dfdaemon.DownRequest, stream dfdaemon.Daemon_Downl return } -func (p *proxy) GetPieceTasks(ctx context.Context, ptr *base.PieceTaskRequest) (*base.PiecePacket, error) { +func (p *proxy) GetPieceTasks(ctx context.Context, ptr *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) { return p.server.GetPieceTasks(ctx, ptr) } -func (p *proxy) SyncPieceTasks(sync dfdaemon.Daemon_SyncPieceTasksServer) error { +func (p *proxy) SyncPieceTasks(sync dfdaemonv1.Daemon_SyncPieceTasksServer) error { return p.server.SyncPieceTasks(sync) } @@ -109,23 +110,23 @@ func (p *proxy) CheckHealth(ctx context.Context, req *emptypb.Empty) (*emptypb.E return new(emptypb.Empty), p.server.CheckHealth(ctx) } -func (p *proxy) StatTask(ctx context.Context, req *dfdaemon.StatTaskRequest) (*emptypb.Empty, error) { +func (p *proxy) StatTask(ctx context.Context, req *dfdaemonv1.StatTaskRequest) (*emptypb.Empty, error) { return new(emptypb.Empty), p.server.StatTask(ctx, req) } -func (p *proxy) ImportTask(ctx context.Context, req *dfdaemon.ImportTaskRequest) (*emptypb.Empty, error) { +func (p *proxy) ImportTask(ctx context.Context, req 
*dfdaemonv1.ImportTaskRequest) (*emptypb.Empty, error) { return new(emptypb.Empty), p.server.ImportTask(ctx, req) } -func (p *proxy) ExportTask(ctx context.Context, req *dfdaemon.ExportTaskRequest) (*emptypb.Empty, error) { +func (p *proxy) ExportTask(ctx context.Context, req *dfdaemonv1.ExportTaskRequest) (*emptypb.Empty, error) { return new(emptypb.Empty), p.server.ExportTask(ctx, req) } -func (p *proxy) DeleteTask(ctx context.Context, req *dfdaemon.DeleteTaskRequest) (*emptypb.Empty, error) { +func (p *proxy) DeleteTask(ctx context.Context, req *dfdaemonv1.DeleteTaskRequest) (*emptypb.Empty, error) { return new(emptypb.Empty), p.server.DeleteTask(ctx, req) } -func send(drc chan *dfdaemon.DownResult, closeDrc func(), stream dfdaemon.Daemon_DownloadServer, errChan chan error) { +func send(drc chan *dfdaemonv1.DownResult, closeDrc func(), stream dfdaemonv1.Daemon_DownloadServer, errChan chan error) { err := safe.Call(func() { defer closeDrc() @@ -148,7 +149,7 @@ func send(drc chan *dfdaemon.DownResult, closeDrc func(), stream dfdaemon.Daemon } } -func call(ctx context.Context, drc chan *dfdaemon.DownResult, p *proxy, req *dfdaemon.DownRequest, errChan chan error) { +func call(ctx context.Context, drc chan *dfdaemonv1.DownResult, p *proxy, req *dfdaemonv1.DownRequest, errChan chan error) { err := safe.Call(func() { if err := p.server.Download(ctx, req, drc); err != nil { errChan <- err diff --git a/pkg/rpc/errordetails/error_details.pb.go b/pkg/rpc/errordetails/error_details.pb.go deleted file mode 100644 index 050b3b115..000000000 --- a/pkg/rpc/errordetails/error_details.pb.go +++ /dev/null @@ -1,178 +0,0 @@ -// -// Copyright 2022 The Dragonfly Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: pkg/rpc/errordetails/error_details.proto - -package errordetails - -import ( - base "d7y.io/dragonfly/v2/pkg/rpc/base" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type SourceError struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Temporary bool `protobuf:"varint,1,opt,name=temporary,proto3" json:"temporary,omitempty"` - // source response metadata, eg: HTTP Status Code, HTTP Status, HTTP Header - Metadata *base.ExtendAttribute `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` -} - -func (x *SourceError) Reset() { - *x = SourceError{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_errordetails_error_details_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SourceError) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SourceError) ProtoMessage() {} - -func (x *SourceError) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_errordetails_error_details_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SourceError.ProtoReflect.Descriptor instead. 
-func (*SourceError) Descriptor() ([]byte, []int) { - return file_pkg_rpc_errordetails_error_details_proto_rawDescGZIP(), []int{0} -} - -func (x *SourceError) GetTemporary() bool { - if x != nil { - return x.Temporary - } - return false -} - -func (x *SourceError) GetMetadata() *base.ExtendAttribute { - if x != nil { - return x.Metadata - } - return nil -} - -var File_pkg_rpc_errordetails_error_details_proto protoreflect.FileDescriptor - -var file_pkg_rpc_errordetails_error_details_proto_rawDesc = []byte{ - 0x0a, 0x28, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x64, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x17, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, - 0x63, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x5e, 0x0a, 0x0b, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, - 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x12, 0x31, - 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x42, 0x2a, 0x5a, 0x28, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x64, 0x72, 0x61, 0x67, - 0x6f, 0x6e, 0x66, 0x6c, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, - 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_pkg_rpc_errordetails_error_details_proto_rawDescOnce sync.Once - 
file_pkg_rpc_errordetails_error_details_proto_rawDescData = file_pkg_rpc_errordetails_error_details_proto_rawDesc -) - -func file_pkg_rpc_errordetails_error_details_proto_rawDescGZIP() []byte { - file_pkg_rpc_errordetails_error_details_proto_rawDescOnce.Do(func() { - file_pkg_rpc_errordetails_error_details_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_rpc_errordetails_error_details_proto_rawDescData) - }) - return file_pkg_rpc_errordetails_error_details_proto_rawDescData -} - -var file_pkg_rpc_errordetails_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_pkg_rpc_errordetails_error_details_proto_goTypes = []interface{}{ - (*SourceError)(nil), // 0: errordetails.SourceError - (*base.ExtendAttribute)(nil), // 1: base.ExtendAttribute -} -var file_pkg_rpc_errordetails_error_details_proto_depIdxs = []int32{ - 1, // 0: errordetails.SourceError.metadata:type_name -> base.ExtendAttribute - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_pkg_rpc_errordetails_error_details_proto_init() } -func file_pkg_rpc_errordetails_error_details_proto_init() { - if File_pkg_rpc_errordetails_error_details_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_pkg_rpc_errordetails_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceError); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pkg_rpc_errordetails_error_details_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: 
file_pkg_rpc_errordetails_error_details_proto_goTypes, - DependencyIndexes: file_pkg_rpc_errordetails_error_details_proto_depIdxs, - MessageInfos: file_pkg_rpc_errordetails_error_details_proto_msgTypes, - }.Build() - File_pkg_rpc_errordetails_error_details_proto = out.File - file_pkg_rpc_errordetails_error_details_proto_rawDesc = nil - file_pkg_rpc_errordetails_error_details_proto_goTypes = nil - file_pkg_rpc_errordetails_error_details_proto_depIdxs = nil -} diff --git a/pkg/rpc/errordetails/error_details.pb.validate.go b/pkg/rpc/errordetails/error_details.pb.validate.go deleted file mode 100644 index 9577ae9ef..000000000 --- a/pkg/rpc/errordetails/error_details.pb.validate.go +++ /dev/null @@ -1,111 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: pkg/rpc/errordetails/error_details.proto - -package errordetails - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} -) - -// Validate checks the field values on SourceError with the rules defined in -// the proto definition for this message. If any rules are violated, an error -// is returned. 
-func (m *SourceError) Validate() error { - if m == nil { - return nil - } - - // no validation rules for Temporary - - if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SourceErrorValidationError{ - field: "Metadata", - reason: "embedded message failed validation", - cause: err, - } - } - } - - return nil -} - -// SourceErrorValidationError is the validation error returned by -// SourceError.Validate if the designated constraints aren't met. -type SourceErrorValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e SourceErrorValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e SourceErrorValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SourceErrorValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SourceErrorValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e SourceErrorValidationError) ErrorName() string { return "SourceErrorValidationError" } - -// Error satisfies the builtin error interface -func (e SourceErrorValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSourceError.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SourceErrorValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SourceErrorValidationError{} diff --git a/pkg/rpc/errordetails/error_details.proto b/pkg/rpc/errordetails/error_details.proto deleted file mode 100644 index a44d10370..000000000 --- a/pkg/rpc/errordetails/error_details.proto +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2022 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -syntax = "proto3"; - -package errordetails; - -import "pkg/rpc/base/base.proto"; - -option go_package = "d7y.io/dragonfly/v2/pkg/rpc/errordetails"; - -message SourceError { - bool temporary = 1; - // source response metadata, eg: HTTP Status Code, HTTP Status, HTTP Header - base.ExtendAttribute metadata = 2; -} - diff --git a/pkg/rpc/manager/client/client.go b/pkg/rpc/manager/client/client.go index 6c27f4f4f..fab6499fc 100644 --- a/pkg/rpc/manager/client/client.go +++ b/pkg/rpc/manager/client/client.go @@ -32,10 +32,11 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" + managerv1 "d7y.io/api/pkg/apis/manager/v1" + logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/dfnet" "d7y.io/dragonfly/v2/pkg/reachable" - "d7y.io/dragonfly/v2/pkg/rpc/manager" ) const ( @@ -49,25 +50,25 @@ const ( // Client is the interface for grpc client. type Client interface { // Update Seed peer configuration. - UpdateSeedPeer(*manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error) + UpdateSeedPeer(*managerv1.UpdateSeedPeerRequest) (*managerv1.SeedPeer, error) // Get Scheduler and Scheduler cluster configuration. - GetScheduler(*manager.GetSchedulerRequest) (*manager.Scheduler, error) + GetScheduler(*managerv1.GetSchedulerRequest) (*managerv1.Scheduler, error) // Update scheduler configuration. - UpdateScheduler(*manager.UpdateSchedulerRequest) (*manager.Scheduler, error) + UpdateScheduler(*managerv1.UpdateSchedulerRequest) (*managerv1.Scheduler, error) // List acitve schedulers configuration. - ListSchedulers(*manager.ListSchedulersRequest) (*manager.ListSchedulersResponse, error) + ListSchedulers(*managerv1.ListSchedulersRequest) (*managerv1.ListSchedulersResponse, error) // Get object storage configuration. - GetObjectStorage(*manager.GetObjectStorageRequest) (*manager.ObjectStorage, error) + GetObjectStorage(*managerv1.GetObjectStorageRequest) (*managerv1.ObjectStorage, error) // List buckets configuration. 
- ListBuckets(*manager.ListBucketsRequest) (*manager.ListBucketsResponse, error) + ListBuckets(*managerv1.ListBucketsRequest) (*managerv1.ListBucketsResponse, error) // KeepAlive with manager. - KeepAlive(time.Duration, *manager.KeepAliveRequest) + KeepAlive(time.Duration, *managerv1.KeepAliveRequest) // Close client connect. Close() error @@ -75,7 +76,7 @@ type Client interface { // client provides manager grpc function. type client struct { - manager.ManagerClient + managerv1.ManagerClient conn *grpc.ClientConn } @@ -103,7 +104,7 @@ func New(target string) (Client, error) { } return &client{ - ManagerClient: manager.NewManagerClient(conn), + ManagerClient: managerv1.NewManagerClient(conn), conn: conn, }, nil } @@ -123,7 +124,7 @@ func NewWithAddrs(netAddrs []dfnet.NetAddr) (Client, error) { } // Update SeedPeer configuration. -func (c *client) UpdateSeedPeer(req *manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error) { +func (c *client) UpdateSeedPeer(req *managerv1.UpdateSeedPeerRequest) (*managerv1.SeedPeer, error) { ctx, cancel := context.WithTimeout(context.Background(), contextTimeout) defer cancel() @@ -131,7 +132,7 @@ func (c *client) UpdateSeedPeer(req *manager.UpdateSeedPeerRequest) (*manager.Se } // Get Scheduler and Scheduler cluster configuration. -func (c *client) GetScheduler(req *manager.GetSchedulerRequest) (*manager.Scheduler, error) { +func (c *client) GetScheduler(req *managerv1.GetSchedulerRequest) (*managerv1.Scheduler, error) { ctx, cancel := context.WithTimeout(context.Background(), contextTimeout) defer cancel() @@ -139,7 +140,7 @@ func (c *client) GetScheduler(req *manager.GetSchedulerRequest) (*manager.Schedu } // Update scheduler configuration. 
-func (c *client) UpdateScheduler(req *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) { +func (c *client) UpdateScheduler(req *managerv1.UpdateSchedulerRequest) (*managerv1.Scheduler, error) { ctx, cancel := context.WithTimeout(context.Background(), contextTimeout) defer cancel() @@ -147,7 +148,7 @@ func (c *client) UpdateScheduler(req *manager.UpdateSchedulerRequest) (*manager. } // List acitve schedulers configuration. -func (c *client) ListSchedulers(req *manager.ListSchedulersRequest) (*manager.ListSchedulersResponse, error) { +func (c *client) ListSchedulers(req *managerv1.ListSchedulersRequest) (*managerv1.ListSchedulersResponse, error) { ctx, cancel := context.WithTimeout(context.Background(), contextTimeout) defer cancel() @@ -155,7 +156,7 @@ func (c *client) ListSchedulers(req *manager.ListSchedulersRequest) (*manager.Li } // Get object storage configuration. -func (c *client) GetObjectStorage(req *manager.GetObjectStorageRequest) (*manager.ObjectStorage, error) { +func (c *client) GetObjectStorage(req *managerv1.GetObjectStorageRequest) (*managerv1.ObjectStorage, error) { ctx, cancel := context.WithTimeout(context.Background(), contextTimeout) defer cancel() @@ -163,7 +164,7 @@ func (c *client) GetObjectStorage(req *manager.GetObjectStorageRequest) (*manage } // List buckets configuration. -func (c *client) ListBuckets(req *manager.ListBucketsRequest) (*manager.ListBucketsResponse, error) { +func (c *client) ListBuckets(req *managerv1.ListBucketsRequest) (*managerv1.ListBucketsResponse, error) { ctx, cancel := context.WithTimeout(context.Background(), contextTimeout) defer cancel() @@ -171,7 +172,7 @@ func (c *client) ListBuckets(req *manager.ListBucketsRequest) (*manager.ListBuck } // List acitve schedulers configuration. 
-func (c *client) KeepAlive(interval time.Duration, keepalive *manager.KeepAliveRequest) { +func (c *client) KeepAlive(interval time.Duration, keepalive *managerv1.KeepAliveRequest) { retry: ctx, cancel := context.WithCancel(context.Background()) stream, err := c.ManagerClient.KeepAlive(ctx) @@ -191,7 +192,7 @@ retry: for { select { case <-tick.C: - if err := stream.Send(&manager.KeepAliveRequest{ + if err := stream.Send(&managerv1.KeepAliveRequest{ SourceType: keepalive.SourceType, HostName: keepalive.HostName, Ip: keepalive.Ip, diff --git a/pkg/rpc/manager/client/mocks/client_mock.go b/pkg/rpc/manager/client/mocks/client_mock.go index 8344932eb..689fc63cd 100644 --- a/pkg/rpc/manager/client/mocks/client_mock.go +++ b/pkg/rpc/manager/client/mocks/client_mock.go @@ -8,7 +8,7 @@ import ( reflect "reflect" time "time" - manager "d7y.io/dragonfly/v2/pkg/rpc/manager" + v1 "d7y.io/api/pkg/apis/manager/v1" gomock "github.com/golang/mock/gomock" ) @@ -50,10 +50,10 @@ func (mr *MockClientMockRecorder) Close() *gomock.Call { } // GetObjectStorage mocks base method. -func (m *MockClient) GetObjectStorage(arg0 *manager.GetObjectStorageRequest) (*manager.ObjectStorage, error) { +func (m *MockClient) GetObjectStorage(arg0 *v1.GetObjectStorageRequest) (*v1.ObjectStorage, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetObjectStorage", arg0) - ret0, _ := ret[0].(*manager.ObjectStorage) + ret0, _ := ret[0].(*v1.ObjectStorage) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -65,10 +65,10 @@ func (mr *MockClientMockRecorder) GetObjectStorage(arg0 interface{}) *gomock.Cal } // GetScheduler mocks base method. 
-func (m *MockClient) GetScheduler(arg0 *manager.GetSchedulerRequest) (*manager.Scheduler, error) { +func (m *MockClient) GetScheduler(arg0 *v1.GetSchedulerRequest) (*v1.Scheduler, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetScheduler", arg0) - ret0, _ := ret[0].(*manager.Scheduler) + ret0, _ := ret[0].(*v1.Scheduler) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -80,7 +80,7 @@ func (mr *MockClientMockRecorder) GetScheduler(arg0 interface{}) *gomock.Call { } // KeepAlive mocks base method. -func (m *MockClient) KeepAlive(arg0 time.Duration, arg1 *manager.KeepAliveRequest) { +func (m *MockClient) KeepAlive(arg0 time.Duration, arg1 *v1.KeepAliveRequest) { m.ctrl.T.Helper() m.ctrl.Call(m, "KeepAlive", arg0, arg1) } @@ -92,10 +92,10 @@ func (mr *MockClientMockRecorder) KeepAlive(arg0, arg1 interface{}) *gomock.Call } // ListBuckets mocks base method. -func (m *MockClient) ListBuckets(arg0 *manager.ListBucketsRequest) (*manager.ListBucketsResponse, error) { +func (m *MockClient) ListBuckets(arg0 *v1.ListBucketsRequest) (*v1.ListBucketsResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListBuckets", arg0) - ret0, _ := ret[0].(*manager.ListBucketsResponse) + ret0, _ := ret[0].(*v1.ListBucketsResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -107,10 +107,10 @@ func (mr *MockClientMockRecorder) ListBuckets(arg0 interface{}) *gomock.Call { } // ListSchedulers mocks base method. -func (m *MockClient) ListSchedulers(arg0 *manager.ListSchedulersRequest) (*manager.ListSchedulersResponse, error) { +func (m *MockClient) ListSchedulers(arg0 *v1.ListSchedulersRequest) (*v1.ListSchedulersResponse, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListSchedulers", arg0) - ret0, _ := ret[0].(*manager.ListSchedulersResponse) + ret0, _ := ret[0].(*v1.ListSchedulersResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -122,10 +122,10 @@ func (mr *MockClientMockRecorder) ListSchedulers(arg0 interface{}) *gomock.Call } // UpdateScheduler mocks base method. 
-func (m *MockClient) UpdateScheduler(arg0 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) { +func (m *MockClient) UpdateScheduler(arg0 *v1.UpdateSchedulerRequest) (*v1.Scheduler, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UpdateScheduler", arg0) - ret0, _ := ret[0].(*manager.Scheduler) + ret0, _ := ret[0].(*v1.Scheduler) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -137,10 +137,10 @@ func (mr *MockClientMockRecorder) UpdateScheduler(arg0 interface{}) *gomock.Call } // UpdateSeedPeer mocks base method. -func (m *MockClient) UpdateSeedPeer(arg0 *manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error) { +func (m *MockClient) UpdateSeedPeer(arg0 *v1.UpdateSeedPeerRequest) (*v1.SeedPeer, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UpdateSeedPeer", arg0) - ret0, _ := ret[0].(*manager.SeedPeer) + ret0, _ := ret[0].(*v1.SeedPeer) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/pkg/rpc/manager/manager.pb.go b/pkg/rpc/manager/manager.pb.go deleted file mode 100644 index b638be7a0..000000000 --- a/pkg/rpc/manager/manager.pb.go +++ /dev/null @@ -1,2639 +0,0 @@ -// -// Copyright 2020 The Dragonfly Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: pkg/rpc/manager/manager.proto - -package manager - -import ( - context "context" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// Request source type. -type SourceType int32 - -const ( - // Scheduler service. - SourceType_SCHEDULER_SOURCE SourceType = 0 - // Peer service. - SourceType_PEER_SOURCE SourceType = 1 - // SeedPeer service. - SourceType_SEED_PEER_SOURCE SourceType = 2 -) - -// Enum value maps for SourceType. -var ( - SourceType_name = map[int32]string{ - 0: "SCHEDULER_SOURCE", - 1: "PEER_SOURCE", - 2: "SEED_PEER_SOURCE", - } - SourceType_value = map[string]int32{ - "SCHEDULER_SOURCE": 0, - "PEER_SOURCE": 1, - "SEED_PEER_SOURCE": 2, - } -) - -func (x SourceType) Enum() *SourceType { - p := new(SourceType) - *p = x - return p -} - -func (x SourceType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (SourceType) Descriptor() protoreflect.EnumDescriptor { - return file_pkg_rpc_manager_manager_proto_enumTypes[0].Descriptor() -} - -func (SourceType) Type() protoreflect.EnumType { - return &file_pkg_rpc_manager_manager_proto_enumTypes[0] -} - -func (x SourceType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use SourceType.Descriptor instead. 
-func (SourceType) EnumDescriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{0} -} - -// SecurityGroup represents security group of cluster. -type SecurityGroup struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Group id. - Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - // Group name. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // Group biography. - Bio string `protobuf:"bytes,3,opt,name=bio,proto3" json:"bio,omitempty"` - // Group domain. - Domain string `protobuf:"bytes,4,opt,name=domain,proto3" json:"domain,omitempty"` - // Group proxy domain. - ProxyDomain string `protobuf:"bytes,5,opt,name=proxy_domain,json=proxyDomain,proto3" json:"proxy_domain,omitempty"` -} - -func (x *SecurityGroup) Reset() { - *x = SecurityGroup{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SecurityGroup) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SecurityGroup) ProtoMessage() {} - -func (x *SecurityGroup) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SecurityGroup.ProtoReflect.Descriptor instead. 
-func (*SecurityGroup) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{0} -} - -func (x *SecurityGroup) GetId() uint64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *SecurityGroup) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *SecurityGroup) GetBio() string { - if x != nil { - return x.Bio - } - return "" -} - -func (x *SecurityGroup) GetDomain() string { - if x != nil { - return x.Domain - } - return "" -} - -func (x *SecurityGroup) GetProxyDomain() string { - if x != nil { - return x.ProxyDomain - } - return "" -} - -// SeedPeerCluster represents cluster of seed peer. -type SeedPeerCluster struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Cluster id. - Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - // Cluster name. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // Cluster biography. - Bio string `protobuf:"bytes,3,opt,name=bio,proto3" json:"bio,omitempty"` - // Cluster configuration. - Config []byte `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` - // Cluster scopes. - Scopes []byte `protobuf:"bytes,5,opt,name=scopes,proto3" json:"scopes,omitempty"` - // Security group to which the seed peer cluster belongs. 
- SecurityGroup *SecurityGroup `protobuf:"bytes,6,opt,name=security_group,json=securityGroup,proto3" json:"security_group,omitempty"` -} - -func (x *SeedPeerCluster) Reset() { - *x = SeedPeerCluster{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SeedPeerCluster) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SeedPeerCluster) ProtoMessage() {} - -func (x *SeedPeerCluster) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SeedPeerCluster.ProtoReflect.Descriptor instead. -func (*SeedPeerCluster) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{1} -} - -func (x *SeedPeerCluster) GetId() uint64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *SeedPeerCluster) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *SeedPeerCluster) GetBio() string { - if x != nil { - return x.Bio - } - return "" -} - -func (x *SeedPeerCluster) GetConfig() []byte { - if x != nil { - return x.Config - } - return nil -} - -func (x *SeedPeerCluster) GetScopes() []byte { - if x != nil { - return x.Scopes - } - return nil -} - -func (x *SeedPeerCluster) GetSecurityGroup() *SecurityGroup { - if x != nil { - return x.SecurityGroup - } - return nil -} - -// SeedPeer represents seed peer for network. -type SeedPeer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Seed peer id. - Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - // Seed peer hostname. 
- HostName string `protobuf:"bytes,2,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` - // Seed peer type. - Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` - // Seed peer idc. - Idc string `protobuf:"bytes,5,opt,name=idc,proto3" json:"idc,omitempty"` - // Seed peer network topology. - NetTopology string `protobuf:"bytes,6,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"` - // Seed peer location. - Location string `protobuf:"bytes,7,opt,name=location,proto3" json:"location,omitempty"` - // Seed peer ip. - Ip string `protobuf:"bytes,8,opt,name=ip,proto3" json:"ip,omitempty"` - // Seed peer grpc port. - Port int32 `protobuf:"varint,9,opt,name=port,proto3" json:"port,omitempty"` - // Seed peer download port. - DownloadPort int32 `protobuf:"varint,10,opt,name=download_port,json=downloadPort,proto3" json:"download_port,omitempty"` - // Seed peer state. - State string `protobuf:"bytes,11,opt,name=state,proto3" json:"state,omitempty"` - // ID of the cluster to which the seed peer belongs. - SeedPeerClusterId uint64 `protobuf:"varint,12,opt,name=seed_peer_cluster_id,json=seedPeerClusterId,proto3" json:"seed_peer_cluster_id,omitempty"` - // Cluster to which the seed peer belongs. - SeedPeerCluster *SeedPeerCluster `protobuf:"bytes,13,opt,name=seed_peer_cluster,json=seedPeerCluster,proto3" json:"seed_peer_cluster,omitempty"` - // Schedulers included in seed peer. - Schedulers []*Scheduler `protobuf:"bytes,14,rep,name=schedulers,proto3" json:"schedulers,omitempty"` - // Seed peer object storage port. 
- ObjectStoragePort int32 `protobuf:"varint,15,opt,name=object_storage_port,json=objectStoragePort,proto3" json:"object_storage_port,omitempty"` -} - -func (x *SeedPeer) Reset() { - *x = SeedPeer{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SeedPeer) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SeedPeer) ProtoMessage() {} - -func (x *SeedPeer) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SeedPeer.ProtoReflect.Descriptor instead. -func (*SeedPeer) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{2} -} - -func (x *SeedPeer) GetId() uint64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *SeedPeer) GetHostName() string { - if x != nil { - return x.HostName - } - return "" -} - -func (x *SeedPeer) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *SeedPeer) GetIdc() string { - if x != nil { - return x.Idc - } - return "" -} - -func (x *SeedPeer) GetNetTopology() string { - if x != nil { - return x.NetTopology - } - return "" -} - -func (x *SeedPeer) GetLocation() string { - if x != nil { - return x.Location - } - return "" -} - -func (x *SeedPeer) GetIp() string { - if x != nil { - return x.Ip - } - return "" -} - -func (x *SeedPeer) GetPort() int32 { - if x != nil { - return x.Port - } - return 0 -} - -func (x *SeedPeer) GetDownloadPort() int32 { - if x != nil { - return x.DownloadPort - } - return 0 -} - -func (x *SeedPeer) GetState() string { - if x != nil { - return x.State - } - return "" -} - -func (x *SeedPeer) 
GetSeedPeerClusterId() uint64 { - if x != nil { - return x.SeedPeerClusterId - } - return 0 -} - -func (x *SeedPeer) GetSeedPeerCluster() *SeedPeerCluster { - if x != nil { - return x.SeedPeerCluster - } - return nil -} - -func (x *SeedPeer) GetSchedulers() []*Scheduler { - if x != nil { - return x.Schedulers - } - return nil -} - -func (x *SeedPeer) GetObjectStoragePort() int32 { - if x != nil { - return x.ObjectStoragePort - } - return 0 -} - -// GetSeedPeerRequest represents request of GetSeedPeer. -type GetSeedPeerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request source type. - SourceType SourceType `protobuf:"varint,1,opt,name=source_type,json=sourceType,proto3,enum=manager.SourceType" json:"source_type,omitempty"` - // Seed peer hostname. - HostName string `protobuf:"bytes,2,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` - // ID of the cluster to which the seed peer belongs. - SeedPeerClusterId uint64 `protobuf:"varint,3,opt,name=seed_peer_cluster_id,json=seedPeerClusterId,proto3" json:"seed_peer_cluster_id,omitempty"` - // Seed peer ip. 
- Ip string `protobuf:"bytes,4,opt,name=ip,proto3" json:"ip,omitempty"` -} - -func (x *GetSeedPeerRequest) Reset() { - *x = GetSeedPeerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetSeedPeerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetSeedPeerRequest) ProtoMessage() {} - -func (x *GetSeedPeerRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetSeedPeerRequest.ProtoReflect.Descriptor instead. -func (*GetSeedPeerRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{3} -} - -func (x *GetSeedPeerRequest) GetSourceType() SourceType { - if x != nil { - return x.SourceType - } - return SourceType_SCHEDULER_SOURCE -} - -func (x *GetSeedPeerRequest) GetHostName() string { - if x != nil { - return x.HostName - } - return "" -} - -func (x *GetSeedPeerRequest) GetSeedPeerClusterId() uint64 { - if x != nil { - return x.SeedPeerClusterId - } - return 0 -} - -func (x *GetSeedPeerRequest) GetIp() string { - if x != nil { - return x.Ip - } - return "" -} - -// UpdateSeedPeerRequest represents request of UpdateSeedPeer. -type UpdateSeedPeerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request source type. - SourceType SourceType `protobuf:"varint,1,opt,name=source_type,json=sourceType,proto3,enum=manager.SourceType" json:"source_type,omitempty"` - // Seed peer hostname. 
- HostName string `protobuf:"bytes,2,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` - // Seed peer type. - Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` - // Seed peer idc. - Idc string `protobuf:"bytes,5,opt,name=idc,proto3" json:"idc,omitempty"` - // Seed peer network topology. - NetTopology string `protobuf:"bytes,6,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"` - // Seed peer location. - Location string `protobuf:"bytes,7,opt,name=location,proto3" json:"location,omitempty"` - // Seed peer ip. - Ip string `protobuf:"bytes,8,opt,name=ip,proto3" json:"ip,omitempty"` - // Seed peer port. - Port int32 `protobuf:"varint,9,opt,name=port,proto3" json:"port,omitempty"` - // Seed peer download port. - DownloadPort int32 `protobuf:"varint,10,opt,name=download_port,json=downloadPort,proto3" json:"download_port,omitempty"` - // ID of the cluster to which the seed peer belongs. - SeedPeerClusterId uint64 `protobuf:"varint,11,opt,name=seed_peer_cluster_id,json=seedPeerClusterId,proto3" json:"seed_peer_cluster_id,omitempty"` - // Seed peer object storage port. 
- ObjectStoragePort int32 `protobuf:"varint,12,opt,name=object_storage_port,json=objectStoragePort,proto3" json:"object_storage_port,omitempty"` -} - -func (x *UpdateSeedPeerRequest) Reset() { - *x = UpdateSeedPeerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateSeedPeerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateSeedPeerRequest) ProtoMessage() {} - -func (x *UpdateSeedPeerRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateSeedPeerRequest.ProtoReflect.Descriptor instead. -func (*UpdateSeedPeerRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{4} -} - -func (x *UpdateSeedPeerRequest) GetSourceType() SourceType { - if x != nil { - return x.SourceType - } - return SourceType_SCHEDULER_SOURCE -} - -func (x *UpdateSeedPeerRequest) GetHostName() string { - if x != nil { - return x.HostName - } - return "" -} - -func (x *UpdateSeedPeerRequest) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *UpdateSeedPeerRequest) GetIdc() string { - if x != nil { - return x.Idc - } - return "" -} - -func (x *UpdateSeedPeerRequest) GetNetTopology() string { - if x != nil { - return x.NetTopology - } - return "" -} - -func (x *UpdateSeedPeerRequest) GetLocation() string { - if x != nil { - return x.Location - } - return "" -} - -func (x *UpdateSeedPeerRequest) GetIp() string { - if x != nil { - return x.Ip - } - return "" -} - -func (x *UpdateSeedPeerRequest) GetPort() int32 { - if x != nil { - return x.Port - } - return 0 -} 
- -func (x *UpdateSeedPeerRequest) GetDownloadPort() int32 { - if x != nil { - return x.DownloadPort - } - return 0 -} - -func (x *UpdateSeedPeerRequest) GetSeedPeerClusterId() uint64 { - if x != nil { - return x.SeedPeerClusterId - } - return 0 -} - -func (x *UpdateSeedPeerRequest) GetObjectStoragePort() int32 { - if x != nil { - return x.ObjectStoragePort - } - return 0 -} - -// SeedPeerCluster represents cluster of scheduler. -type SchedulerCluster struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Cluster id. - Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - // Cluster name. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // Cluster biography. - Bio string `protobuf:"bytes,3,opt,name=bio,proto3" json:"bio,omitempty"` - // Cluster config. - Config []byte `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` - // Cluster client config. - ClientConfig []byte `protobuf:"bytes,5,opt,name=client_config,json=clientConfig,proto3" json:"client_config,omitempty"` - // Cluster scopes. - Scopes []byte `protobuf:"bytes,6,opt,name=scopes,proto3" json:"scopes,omitempty"` - // Security group to which the scheduler cluster belongs. 
- SecurityGroup *SecurityGroup `protobuf:"bytes,7,opt,name=security_group,json=securityGroup,proto3" json:"security_group,omitempty"` -} - -func (x *SchedulerCluster) Reset() { - *x = SchedulerCluster{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SchedulerCluster) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SchedulerCluster) ProtoMessage() {} - -func (x *SchedulerCluster) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SchedulerCluster.ProtoReflect.Descriptor instead. -func (*SchedulerCluster) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{5} -} - -func (x *SchedulerCluster) GetId() uint64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *SchedulerCluster) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *SchedulerCluster) GetBio() string { - if x != nil { - return x.Bio - } - return "" -} - -func (x *SchedulerCluster) GetConfig() []byte { - if x != nil { - return x.Config - } - return nil -} - -func (x *SchedulerCluster) GetClientConfig() []byte { - if x != nil { - return x.ClientConfig - } - return nil -} - -func (x *SchedulerCluster) GetScopes() []byte { - if x != nil { - return x.Scopes - } - return nil -} - -func (x *SchedulerCluster) GetSecurityGroup() *SecurityGroup { - if x != nil { - return x.SecurityGroup - } - return nil -} - -// SeedPeerCluster represents scheduler for network. 
-type Scheduler struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Scheduler id. - Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - // Scheduler hostname. - HostName string `protobuf:"bytes,2,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` - // Deprecated: Do not use. - Vips string `protobuf:"bytes,3,opt,name=vips,proto3" json:"vips,omitempty"` - // Scheduler idc. - Idc string `protobuf:"bytes,4,opt,name=idc,proto3" json:"idc,omitempty"` - // Scheduler location. - Location string `protobuf:"bytes,5,opt,name=location,proto3" json:"location,omitempty"` - // Deprecated: Use net_topology instead. - NetConfig []byte `protobuf:"bytes,6,opt,name=net_config,json=netConfig,proto3" json:"net_config,omitempty"` - // Scheduler ip. - Ip string `protobuf:"bytes,7,opt,name=ip,proto3" json:"ip,omitempty"` - // Scheduler grpc port. - Port int32 `protobuf:"varint,8,opt,name=port,proto3" json:"port,omitempty"` - // Scheduler state. - State string `protobuf:"bytes,9,opt,name=state,proto3" json:"state,omitempty"` - // ID of the cluster to which the scheduler belongs. - SchedulerClusterId uint64 `protobuf:"varint,10,opt,name=scheduler_cluster_id,json=schedulerClusterId,proto3" json:"scheduler_cluster_id,omitempty"` - // Cluster to which the scheduler belongs. - SchedulerCluster *SchedulerCluster `protobuf:"bytes,11,opt,name=scheduler_cluster,json=schedulerCluster,proto3" json:"scheduler_cluster,omitempty"` - // Seed peers to which the scheduler belongs. - SeedPeers []*SeedPeer `protobuf:"bytes,13,rep,name=seed_peers,json=seedPeers,proto3" json:"seed_peers,omitempty"` - // Scheduler network topology. 
- NetTopology string `protobuf:"bytes,14,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"` -} - -func (x *Scheduler) Reset() { - *x = Scheduler{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Scheduler) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Scheduler) ProtoMessage() {} - -func (x *Scheduler) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Scheduler.ProtoReflect.Descriptor instead. -func (*Scheduler) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{6} -} - -func (x *Scheduler) GetId() uint64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *Scheduler) GetHostName() string { - if x != nil { - return x.HostName - } - return "" -} - -func (x *Scheduler) GetVips() string { - if x != nil { - return x.Vips - } - return "" -} - -func (x *Scheduler) GetIdc() string { - if x != nil { - return x.Idc - } - return "" -} - -func (x *Scheduler) GetLocation() string { - if x != nil { - return x.Location - } - return "" -} - -func (x *Scheduler) GetNetConfig() []byte { - if x != nil { - return x.NetConfig - } - return nil -} - -func (x *Scheduler) GetIp() string { - if x != nil { - return x.Ip - } - return "" -} - -func (x *Scheduler) GetPort() int32 { - if x != nil { - return x.Port - } - return 0 -} - -func (x *Scheduler) GetState() string { - if x != nil { - return x.State - } - return "" -} - -func (x *Scheduler) GetSchedulerClusterId() uint64 { - if x != nil { - return x.SchedulerClusterId - } - return 0 -} - -func (x *Scheduler) 
GetSchedulerCluster() *SchedulerCluster { - if x != nil { - return x.SchedulerCluster - } - return nil -} - -func (x *Scheduler) GetSeedPeers() []*SeedPeer { - if x != nil { - return x.SeedPeers - } - return nil -} - -func (x *Scheduler) GetNetTopology() string { - if x != nil { - return x.NetTopology - } - return "" -} - -// GetSchedulerRequest represents request of GetScheduler. -type GetSchedulerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request source type. - SourceType SourceType `protobuf:"varint,1,opt,name=source_type,json=sourceType,proto3,enum=manager.SourceType" json:"source_type,omitempty"` - // Scheduler hostname. - HostName string `protobuf:"bytes,2,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` - // ID of the cluster to which the scheduler belongs. - SchedulerClusterId uint64 `protobuf:"varint,3,opt,name=scheduler_cluster_id,json=schedulerClusterId,proto3" json:"scheduler_cluster_id,omitempty"` - // Scheduler ip. - Ip string `protobuf:"bytes,4,opt,name=ip,proto3" json:"ip,omitempty"` -} - -func (x *GetSchedulerRequest) Reset() { - *x = GetSchedulerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetSchedulerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetSchedulerRequest) ProtoMessage() {} - -func (x *GetSchedulerRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetSchedulerRequest.ProtoReflect.Descriptor instead. 
-func (*GetSchedulerRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{7} -} - -func (x *GetSchedulerRequest) GetSourceType() SourceType { - if x != nil { - return x.SourceType - } - return SourceType_SCHEDULER_SOURCE -} - -func (x *GetSchedulerRequest) GetHostName() string { - if x != nil { - return x.HostName - } - return "" -} - -func (x *GetSchedulerRequest) GetSchedulerClusterId() uint64 { - if x != nil { - return x.SchedulerClusterId - } - return 0 -} - -func (x *GetSchedulerRequest) GetIp() string { - if x != nil { - return x.Ip - } - return "" -} - -// UpdateSchedulerRequest represents request of UpdateScheduler. -type UpdateSchedulerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request source type. - SourceType SourceType `protobuf:"varint,1,opt,name=source_type,json=sourceType,proto3,enum=manager.SourceType" json:"source_type,omitempty"` - // Scheduler hostname. - HostName string `protobuf:"bytes,2,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` - // ID of the cluster to which the scheduler belongs. - SchedulerClusterId uint64 `protobuf:"varint,3,opt,name=scheduler_cluster_id,json=schedulerClusterId,proto3" json:"scheduler_cluster_id,omitempty"` - // Deprecated: Do not use. - Vips string `protobuf:"bytes,4,opt,name=vips,proto3" json:"vips,omitempty"` - // Scheduler idc. - Idc string `protobuf:"bytes,5,opt,name=idc,proto3" json:"idc,omitempty"` - // Scheduler location. - Location string `protobuf:"bytes,6,opt,name=location,proto3" json:"location,omitempty"` - // Deprecated: Use net_topology instead. - NetConfig []byte `protobuf:"bytes,7,opt,name=net_config,json=netConfig,proto3" json:"net_config,omitempty"` - // Scheduler ip. - Ip string `protobuf:"bytes,8,opt,name=ip,proto3" json:"ip,omitempty"` - // Scheduler port. 
- Port int32 `protobuf:"varint,9,opt,name=port,proto3" json:"port,omitempty"` - // Scheduler network topology. - NetTopology string `protobuf:"bytes,10,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"` -} - -func (x *UpdateSchedulerRequest) Reset() { - *x = UpdateSchedulerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateSchedulerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateSchedulerRequest) ProtoMessage() {} - -func (x *UpdateSchedulerRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateSchedulerRequest.ProtoReflect.Descriptor instead. 
-func (*UpdateSchedulerRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{8} -} - -func (x *UpdateSchedulerRequest) GetSourceType() SourceType { - if x != nil { - return x.SourceType - } - return SourceType_SCHEDULER_SOURCE -} - -func (x *UpdateSchedulerRequest) GetHostName() string { - if x != nil { - return x.HostName - } - return "" -} - -func (x *UpdateSchedulerRequest) GetSchedulerClusterId() uint64 { - if x != nil { - return x.SchedulerClusterId - } - return 0 -} - -func (x *UpdateSchedulerRequest) GetVips() string { - if x != nil { - return x.Vips - } - return "" -} - -func (x *UpdateSchedulerRequest) GetIdc() string { - if x != nil { - return x.Idc - } - return "" -} - -func (x *UpdateSchedulerRequest) GetLocation() string { - if x != nil { - return x.Location - } - return "" -} - -func (x *UpdateSchedulerRequest) GetNetConfig() []byte { - if x != nil { - return x.NetConfig - } - return nil -} - -func (x *UpdateSchedulerRequest) GetIp() string { - if x != nil { - return x.Ip - } - return "" -} - -func (x *UpdateSchedulerRequest) GetPort() int32 { - if x != nil { - return x.Port - } - return 0 -} - -func (x *UpdateSchedulerRequest) GetNetTopology() string { - if x != nil { - return x.NetTopology - } - return "" -} - -// ListSchedulersRequest represents request of ListSchedulers. -type ListSchedulersRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request source type. - SourceType SourceType `protobuf:"varint,1,opt,name=source_type,json=sourceType,proto3,enum=manager.SourceType" json:"source_type,omitempty"` - // Source service hostname. - HostName string `protobuf:"bytes,2,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` - // Source service ip. - Ip string `protobuf:"bytes,3,opt,name=ip,proto3" json:"ip,omitempty"` - // Source service host information. 
- HostInfo map[string]string `protobuf:"bytes,5,rep,name=host_info,json=hostInfo,proto3" json:"host_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *ListSchedulersRequest) Reset() { - *x = ListSchedulersRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListSchedulersRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListSchedulersRequest) ProtoMessage() {} - -func (x *ListSchedulersRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListSchedulersRequest.ProtoReflect.Descriptor instead. -func (*ListSchedulersRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{9} -} - -func (x *ListSchedulersRequest) GetSourceType() SourceType { - if x != nil { - return x.SourceType - } - return SourceType_SCHEDULER_SOURCE -} - -func (x *ListSchedulersRequest) GetHostName() string { - if x != nil { - return x.HostName - } - return "" -} - -func (x *ListSchedulersRequest) GetIp() string { - if x != nil { - return x.Ip - } - return "" -} - -func (x *ListSchedulersRequest) GetHostInfo() map[string]string { - if x != nil { - return x.HostInfo - } - return nil -} - -// ListSchedulersResponse represents response of ListSchedulers. -type ListSchedulersResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Schedulers to which the source service belongs. 
- Schedulers []*Scheduler `protobuf:"bytes,1,rep,name=schedulers,proto3" json:"schedulers,omitempty"` -} - -func (x *ListSchedulersResponse) Reset() { - *x = ListSchedulersResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListSchedulersResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListSchedulersResponse) ProtoMessage() {} - -func (x *ListSchedulersResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListSchedulersResponse.ProtoReflect.Descriptor instead. -func (*ListSchedulersResponse) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{10} -} - -func (x *ListSchedulersResponse) GetSchedulers() []*Scheduler { - if x != nil { - return x.Schedulers - } - return nil -} - -// ObjectStorage represents config of object storage. -type ObjectStorage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Object storage name of type. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Storage region. - Region string `protobuf:"bytes,2,opt,name=region,proto3" json:"region,omitempty"` - // Datacenter endpoint. - Endpoint string `protobuf:"bytes,3,opt,name=endpoint,proto3" json:"endpoint,omitempty"` - // Access key id. - AccessKey string `protobuf:"bytes,4,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"` - // Access key secret. 
- SecretKey string `protobuf:"bytes,5,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"` -} - -func (x *ObjectStorage) Reset() { - *x = ObjectStorage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ObjectStorage) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ObjectStorage) ProtoMessage() {} - -func (x *ObjectStorage) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ObjectStorage.ProtoReflect.Descriptor instead. -func (*ObjectStorage) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{11} -} - -func (x *ObjectStorage) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ObjectStorage) GetRegion() string { - if x != nil { - return x.Region - } - return "" -} - -func (x *ObjectStorage) GetEndpoint() string { - if x != nil { - return x.Endpoint - } - return "" -} - -func (x *ObjectStorage) GetAccessKey() string { - if x != nil { - return x.AccessKey - } - return "" -} - -func (x *ObjectStorage) GetSecretKey() string { - if x != nil { - return x.SecretKey - } - return "" -} - -// GetObjectStorageRequest represents request of GetObjectStorage. -type GetObjectStorageRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request source type. - SourceType SourceType `protobuf:"varint,1,opt,name=source_type,json=sourceType,proto3,enum=manager.SourceType" json:"source_type,omitempty"` - // Source service hostname. 
- HostName string `protobuf:"bytes,2,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` - // Source service ip. - Ip string `protobuf:"bytes,3,opt,name=ip,proto3" json:"ip,omitempty"` -} - -func (x *GetObjectStorageRequest) Reset() { - *x = GetObjectStorageRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetObjectStorageRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetObjectStorageRequest) ProtoMessage() {} - -func (x *GetObjectStorageRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetObjectStorageRequest.ProtoReflect.Descriptor instead. -func (*GetObjectStorageRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{12} -} - -func (x *GetObjectStorageRequest) GetSourceType() SourceType { - if x != nil { - return x.SourceType - } - return SourceType_SCHEDULER_SOURCE -} - -func (x *GetObjectStorageRequest) GetHostName() string { - if x != nil { - return x.HostName - } - return "" -} - -func (x *GetObjectStorageRequest) GetIp() string { - if x != nil { - return x.Ip - } - return "" -} - -// Bucket represents config of bucket. -type Bucket struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Bucket name. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *Bucket) Reset() { - *x = Bucket{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Bucket) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Bucket) ProtoMessage() {} - -func (x *Bucket) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. -func (*Bucket) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{13} -} - -func (x *Bucket) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// ListSchedulersRequest represents request of ListBuckets. -type ListBucketsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request source type. - SourceType SourceType `protobuf:"varint,1,opt,name=source_type,json=sourceType,proto3,enum=manager.SourceType" json:"source_type,omitempty"` - // Source service hostname. - HostName string `protobuf:"bytes,2,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` - // Source service ip. 
- Ip string `protobuf:"bytes,3,opt,name=ip,proto3" json:"ip,omitempty"` -} - -func (x *ListBucketsRequest) Reset() { - *x = ListBucketsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListBucketsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListBucketsRequest) ProtoMessage() {} - -func (x *ListBucketsRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListBucketsRequest.ProtoReflect.Descriptor instead. -func (*ListBucketsRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{14} -} - -func (x *ListBucketsRequest) GetSourceType() SourceType { - if x != nil { - return x.SourceType - } - return SourceType_SCHEDULER_SOURCE -} - -func (x *ListBucketsRequest) GetHostName() string { - if x != nil { - return x.HostName - } - return "" -} - -func (x *ListBucketsRequest) GetIp() string { - if x != nil { - return x.Ip - } - return "" -} - -// ListBucketsResponse represents response of ListBuckets. -type ListBucketsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Bucket configs. 
- Buckets []*Bucket `protobuf:"bytes,1,rep,name=buckets,proto3" json:"buckets,omitempty"` -} - -func (x *ListBucketsResponse) Reset() { - *x = ListBucketsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListBucketsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListBucketsResponse) ProtoMessage() {} - -func (x *ListBucketsResponse) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListBucketsResponse.ProtoReflect.Descriptor instead. -func (*ListBucketsResponse) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{15} -} - -func (x *ListBucketsResponse) GetBuckets() []*Bucket { - if x != nil { - return x.Buckets - } - return nil -} - -// KeepAliveRequest represents request of KeepAlive. -type KeepAliveRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Request source type. - SourceType SourceType `protobuf:"varint,1,opt,name=source_type,json=sourceType,proto3,enum=manager.SourceType" json:"source_type,omitempty"` - // Source service hostname. - HostName string `protobuf:"bytes,2,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` - // ID of the cluster to which the source service belongs. - ClusterId uint64 `protobuf:"varint,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - // Source service ip. 
- Ip string `protobuf:"bytes,4,opt,name=ip,proto3" json:"ip,omitempty"` -} - -func (x *KeepAliveRequest) Reset() { - *x = KeepAliveRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *KeepAliveRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*KeepAliveRequest) ProtoMessage() {} - -func (x *KeepAliveRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_manager_manager_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use KeepAliveRequest.ProtoReflect.Descriptor instead. -func (*KeepAliveRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_manager_manager_proto_rawDescGZIP(), []int{16} -} - -func (x *KeepAliveRequest) GetSourceType() SourceType { - if x != nil { - return x.SourceType - } - return SourceType_SCHEDULER_SOURCE -} - -func (x *KeepAliveRequest) GetHostName() string { - if x != nil { - return x.HostName - } - return "" -} - -func (x *KeepAliveRequest) GetClusterId() uint64 { - if x != nil { - return x.ClusterId - } - return 0 -} - -func (x *KeepAliveRequest) GetIp() string { - if x != nil { - return x.Ip - } - return "" -} - -var File_pkg_rpc_manager_manager_proto protoreflect.FileDescriptor - -var file_pkg_rpc_manager_manager_proto_rawDesc = []byte{ - 0x0a, 0x1d, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x07, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x80, - 0x01, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x62, 0x69, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x62, 0x69, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x21, - 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x44, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x22, 0xb6, 0x01, 0x0a, 0x0f, 0x53, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x62, 0x69, 0x6f, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x62, 0x69, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x73, - 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, - 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 
0x52, 0x0d, 0x73, 0x65, 0x63, - 0x75, 0x72, 0x69, 0x74, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0xd6, 0x03, 0x0a, 0x08, 0x53, - 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x63, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, - 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x12, 0x1a, 0x0a, - 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, - 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x23, 0x0a, - 0x0d, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, - 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x65, 0x65, 0x64, - 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x44, 0x0a, 0x11, 0x73, 0x65, 0x65, - 0x64, 
0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0d, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, - 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0f, - 0x73, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, - 0x32, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x11, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, - 0x6f, 0x72, 0x74, 0x22, 0xd0, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x65, 0x65, 0x64, 0x50, - 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x09, 0x68, 0x6f, - 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x72, 0x02, 0x68, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x38, 0x0a, 0x14, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x32, 0x02, 0x28, 0x01, 
0x52, 0x11, 0x73, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, - 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x02, 0x69, 0x70, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x70, 0x01, 0xd0, - 0x01, 0x01, 0x52, 0x02, 0x69, 0x70, 0x22, 0x91, 0x04, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x53, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, - 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x24, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x68, 0x01, 0x52, 0x08, 0x68, 0x6f, - 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x1a, 0xfa, 0x42, 0x17, 0x72, 0x15, 0x52, 0x05, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x52, 0x06, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, - 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x03, 0x69, 0x64, 0x63, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, 0x80, 0x08, 0xd0, - 0x01, 0x01, 0x52, 0x03, 0x69, 0x64, 0x63, 0x12, 0x30, 0x0a, 0x0c, 0x6e, 0x65, 0x74, 0x5f, 0x74, - 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, - 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x0b, 0x6e, 0x65, - 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x12, 0x27, 0x0a, 0x08, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 
0xfa, 0x42, 0x08, - 0x72, 0x06, 0x18, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x72, 0x02, 0x70, 0x01, 0x52, 0x02, 0x69, 0x70, 0x12, 0x20, 0x0a, 0x04, 0x70, - 0x6f, 0x72, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x1a, 0x07, - 0x10, 0xff, 0xff, 0x03, 0x28, 0x80, 0x08, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x31, 0x0a, - 0x0d, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x05, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x1a, 0x07, 0x10, 0xff, 0xff, 0x03, 0x28, - 0x80, 0x08, 0x52, 0x0c, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x72, 0x74, - 0x12, 0x38, 0x0a, 0x14, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x32, 0x02, 0x28, 0x01, 0x52, 0x11, 0x73, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, - 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3e, 0x0a, 0x13, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x6f, 0x72, - 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x1a, 0x09, 0x10, 0xff, - 0xff, 0x03, 0x28, 0x80, 0x08, 0x40, 0x01, 0x52, 0x11, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xdc, 0x01, 0x0a, 0x10, 0x53, - 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x62, 0x69, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x62, 
0x69, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, - 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x73, 0x65, - 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x63, - 0x75, 0x72, 0x69, 0x74, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x0d, 0x73, 0x65, 0x63, 0x75, - 0x72, 0x69, 0x74, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0xa2, 0x03, 0x0a, 0x09, 0x53, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x76, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x76, 0x69, 0x70, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x63, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x63, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 
0x70, 0x6f, 0x72, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, - 0x30, 0x0a, 0x14, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x73, - 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x46, 0x0a, 0x11, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x5f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x0a, 0x73, 0x65, 0x65, - 0x64, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, - 0x52, 0x09, 0x73, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6e, - 0x65, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x22, 0xd2, - 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, 0x6f, 0x75, 
0x72, - 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, - 0x68, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x14, - 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, - 0x02, 0x28, 0x01, 0x52, 0x12, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x70, 0x01, 0xd0, 0x01, 0x01, 0x52, - 0x02, 0x69, 0x70, 0x22, 0xbf, 0x03, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, - 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, - 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x68, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x14, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, - 0x72, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x04, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, 0x28, 0x01, 0x52, 0x12, 0x73, 0x63, 0x68, - 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, - 0x21, 0x0a, 0x04, 0x76, 0x69, 
0x70, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, - 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x04, 0x76, 0x69, - 0x70, 0x73, 0x12, 0x1f, 0x0a, 0x03, 0x69, 0x64, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x03, - 0x69, 0x64, 0x63, 0x12, 0x29, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, 0x80, - 0x08, 0xd0, 0x01, 0x01, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, - 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0c, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x7a, 0x04, 0x10, 0x01, 0x70, 0x01, 0x52, 0x09, 0x6e, - 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x70, 0x01, 0x52, 0x02, 0x69, - 0x70, 0x12, 0x20, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x42, - 0x0c, 0xfa, 0x42, 0x09, 0x1a, 0x07, 0x10, 0xff, 0xff, 0x03, 0x28, 0x80, 0x08, 0x52, 0x04, 0x70, - 0x6f, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x0c, 0x6e, 0x65, 0x74, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, - 0x6f, 0x67, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, - 0x10, 0x01, 0x18, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, - 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x22, 0xa8, 0x02, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x3e, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 
0xfa, 0x42, 0x05, 0x82, 0x01, - 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x24, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x68, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, - 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x70, 0x01, 0x52, 0x02, 0x69, 0x70, 0x12, 0x53, - 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, - 0x08, 0xfa, 0x42, 0x05, 0x9a, 0x01, 0x02, 0x30, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x1a, 0x3b, 0x0a, 0x0d, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0x4c, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, - 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x0a, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x72, 0x52, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x73, 0x22, 0xdd, - 0x01, 0x0a, 0x0d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x12, 
0x1e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, - 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x25, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, - 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, - 0x10, 0x01, 0x18, 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, - 0x80, 0x08, 0xd0, 0x01, 0x01, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, - 0x12, 0x2c, 0x0a, 0x0a, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0x18, 0x80, 0x08, - 0xd0, 0x01, 0x01, 0x52, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x22, 0x98, - 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x09, 0x68, 0x6f, - 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x72, 0x02, 0x68, 0x01, 0x52, 
0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x70, 0x01, 0x52, 0x02, 0x69, 0x70, 0x22, 0x28, 0x0a, 0x06, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x1e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, 0x05, 0x10, 0x01, 0x18, 0x80, 0x08, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x22, 0x93, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x09, 0x68, 0x6f, - 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x72, 0x02, 0x68, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x70, 0x01, 0x52, 0x02, 0x69, 0x70, 0x22, 0x40, 0x0a, 0x13, 0x4c, 0x69, 0x73, - 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x29, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x0f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x52, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x22, 0xbc, 0x01, 0x0a, 0x10, - 0x4b, 0x65, 0x65, 0x70, 0x41, 0x6c, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 
0x70, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, - 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x24, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x68, 0x01, 0x52, 0x08, 0x68, 0x6f, - 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, - 0x02, 0x28, 0x01, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, - 0x0a, 0x02, 0x69, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x72, - 0x05, 0x70, 0x01, 0xd0, 0x01, 0x01, 0x52, 0x02, 0x69, 0x70, 0x2a, 0x49, 0x0a, 0x0a, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x43, 0x48, 0x45, - 0x44, 0x55, 0x4c, 0x45, 0x52, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x00, 0x12, 0x0f, - 0x0a, 0x0b, 0x50, 0x45, 0x45, 0x52, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x01, 0x12, - 0x14, 0x0a, 0x10, 0x53, 0x45, 0x45, 0x44, 0x5f, 0x50, 0x45, 0x45, 0x52, 0x5f, 0x53, 0x4f, 0x55, - 0x52, 0x43, 0x45, 0x10, 0x02, 0x32, 0xc4, 0x04, 0x0a, 0x07, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x12, 0x3d, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, - 0x12, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, - 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, - 0x12, 0x43, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x65, 0x64, 0x50, 0x65, - 0x65, 0x72, 0x12, 
0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x53, 0x65, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x65, - 0x64, 0x50, 0x65, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, - 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0f, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, - 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, - 0x51, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, - 0x73, 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 
0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x12, 0x48, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, - 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x09, 0x4b, 0x65, - 0x65, 0x70, 0x41, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x19, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x41, 0x6c, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x28, 0x01, 0x42, 0x25, 0x5a, 0x23, - 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x64, 0x72, 0x61, 0x67, 0x6f, 0x6e, 0x66, 0x6c, 0x79, - 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_pkg_rpc_manager_manager_proto_rawDescOnce sync.Once - file_pkg_rpc_manager_manager_proto_rawDescData = file_pkg_rpc_manager_manager_proto_rawDesc -) - -func file_pkg_rpc_manager_manager_proto_rawDescGZIP() []byte { - file_pkg_rpc_manager_manager_proto_rawDescOnce.Do(func() { - file_pkg_rpc_manager_manager_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_rpc_manager_manager_proto_rawDescData) - }) - return file_pkg_rpc_manager_manager_proto_rawDescData -} - -var file_pkg_rpc_manager_manager_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_pkg_rpc_manager_manager_proto_msgTypes = make([]protoimpl.MessageInfo, 18) -var file_pkg_rpc_manager_manager_proto_goTypes = 
[]interface{}{ - (SourceType)(0), // 0: manager.SourceType - (*SecurityGroup)(nil), // 1: manager.SecurityGroup - (*SeedPeerCluster)(nil), // 2: manager.SeedPeerCluster - (*SeedPeer)(nil), // 3: manager.SeedPeer - (*GetSeedPeerRequest)(nil), // 4: manager.GetSeedPeerRequest - (*UpdateSeedPeerRequest)(nil), // 5: manager.UpdateSeedPeerRequest - (*SchedulerCluster)(nil), // 6: manager.SchedulerCluster - (*Scheduler)(nil), // 7: manager.Scheduler - (*GetSchedulerRequest)(nil), // 8: manager.GetSchedulerRequest - (*UpdateSchedulerRequest)(nil), // 9: manager.UpdateSchedulerRequest - (*ListSchedulersRequest)(nil), // 10: manager.ListSchedulersRequest - (*ListSchedulersResponse)(nil), // 11: manager.ListSchedulersResponse - (*ObjectStorage)(nil), // 12: manager.ObjectStorage - (*GetObjectStorageRequest)(nil), // 13: manager.GetObjectStorageRequest - (*Bucket)(nil), // 14: manager.Bucket - (*ListBucketsRequest)(nil), // 15: manager.ListBucketsRequest - (*ListBucketsResponse)(nil), // 16: manager.ListBucketsResponse - (*KeepAliveRequest)(nil), // 17: manager.KeepAliveRequest - nil, // 18: manager.ListSchedulersRequest.HostInfoEntry - (*emptypb.Empty)(nil), // 19: google.protobuf.Empty -} -var file_pkg_rpc_manager_manager_proto_depIdxs = []int32{ - 1, // 0: manager.SeedPeerCluster.security_group:type_name -> manager.SecurityGroup - 2, // 1: manager.SeedPeer.seed_peer_cluster:type_name -> manager.SeedPeerCluster - 7, // 2: manager.SeedPeer.schedulers:type_name -> manager.Scheduler - 0, // 3: manager.GetSeedPeerRequest.source_type:type_name -> manager.SourceType - 0, // 4: manager.UpdateSeedPeerRequest.source_type:type_name -> manager.SourceType - 1, // 5: manager.SchedulerCluster.security_group:type_name -> manager.SecurityGroup - 6, // 6: manager.Scheduler.scheduler_cluster:type_name -> manager.SchedulerCluster - 3, // 7: manager.Scheduler.seed_peers:type_name -> manager.SeedPeer - 0, // 8: manager.GetSchedulerRequest.source_type:type_name -> manager.SourceType - 0, // 9: 
manager.UpdateSchedulerRequest.source_type:type_name -> manager.SourceType - 0, // 10: manager.ListSchedulersRequest.source_type:type_name -> manager.SourceType - 18, // 11: manager.ListSchedulersRequest.host_info:type_name -> manager.ListSchedulersRequest.HostInfoEntry - 7, // 12: manager.ListSchedulersResponse.schedulers:type_name -> manager.Scheduler - 0, // 13: manager.GetObjectStorageRequest.source_type:type_name -> manager.SourceType - 0, // 14: manager.ListBucketsRequest.source_type:type_name -> manager.SourceType - 14, // 15: manager.ListBucketsResponse.buckets:type_name -> manager.Bucket - 0, // 16: manager.KeepAliveRequest.source_type:type_name -> manager.SourceType - 4, // 17: manager.Manager.GetSeedPeer:input_type -> manager.GetSeedPeerRequest - 5, // 18: manager.Manager.UpdateSeedPeer:input_type -> manager.UpdateSeedPeerRequest - 8, // 19: manager.Manager.GetScheduler:input_type -> manager.GetSchedulerRequest - 9, // 20: manager.Manager.UpdateScheduler:input_type -> manager.UpdateSchedulerRequest - 10, // 21: manager.Manager.ListSchedulers:input_type -> manager.ListSchedulersRequest - 13, // 22: manager.Manager.GetObjectStorage:input_type -> manager.GetObjectStorageRequest - 15, // 23: manager.Manager.ListBuckets:input_type -> manager.ListBucketsRequest - 17, // 24: manager.Manager.KeepAlive:input_type -> manager.KeepAliveRequest - 3, // 25: manager.Manager.GetSeedPeer:output_type -> manager.SeedPeer - 3, // 26: manager.Manager.UpdateSeedPeer:output_type -> manager.SeedPeer - 7, // 27: manager.Manager.GetScheduler:output_type -> manager.Scheduler - 7, // 28: manager.Manager.UpdateScheduler:output_type -> manager.Scheduler - 11, // 29: manager.Manager.ListSchedulers:output_type -> manager.ListSchedulersResponse - 12, // 30: manager.Manager.GetObjectStorage:output_type -> manager.ObjectStorage - 16, // 31: manager.Manager.ListBuckets:output_type -> manager.ListBucketsResponse - 19, // 32: manager.Manager.KeepAlive:output_type -> google.protobuf.Empty - 
25, // [25:33] is the sub-list for method output_type - 17, // [17:25] is the sub-list for method input_type - 17, // [17:17] is the sub-list for extension type_name - 17, // [17:17] is the sub-list for extension extendee - 0, // [0:17] is the sub-list for field type_name -} - -func init() { file_pkg_rpc_manager_manager_proto_init() } -func file_pkg_rpc_manager_manager_proto_init() { - if File_pkg_rpc_manager_manager_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_pkg_rpc_manager_manager_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityGroup); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_manager_manager_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SeedPeerCluster); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_manager_manager_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SeedPeer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_manager_manager_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSeedPeerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_manager_manager_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateSeedPeerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_manager_manager_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SchedulerCluster); i { - case 0: - return &v.state - 
case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_manager_manager_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Scheduler); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_manager_manager_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSchedulerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_manager_manager_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateSchedulerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_manager_manager_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListSchedulersRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_manager_manager_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListSchedulersResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_manager_manager_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ObjectStorage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_manager_manager_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetObjectStorageRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil 
- } - } - file_pkg_rpc_manager_manager_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_manager_manager_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListBucketsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_manager_manager_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListBucketsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_manager_manager_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*KeepAliveRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pkg_rpc_manager_manager_proto_rawDesc, - NumEnums: 1, - NumMessages: 18, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_pkg_rpc_manager_manager_proto_goTypes, - DependencyIndexes: file_pkg_rpc_manager_manager_proto_depIdxs, - EnumInfos: file_pkg_rpc_manager_manager_proto_enumTypes, - MessageInfos: file_pkg_rpc_manager_manager_proto_msgTypes, - }.Build() - File_pkg_rpc_manager_manager_proto = out.File - file_pkg_rpc_manager_manager_proto_rawDesc = nil - file_pkg_rpc_manager_manager_proto_goTypes = nil - file_pkg_rpc_manager_manager_proto_depIdxs = nil -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// ManagerClient is the client API for Manager service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ManagerClient interface { - // Get SeedPeer and SeedPeer cluster configuration. - GetSeedPeer(ctx context.Context, in *GetSeedPeerRequest, opts ...grpc.CallOption) (*SeedPeer, error) - // Update SeedPeer configuration. - UpdateSeedPeer(ctx context.Context, in *UpdateSeedPeerRequest, opts ...grpc.CallOption) (*SeedPeer, error) - // Get Scheduler and Scheduler cluster configuration. - GetScheduler(ctx context.Context, in *GetSchedulerRequest, opts ...grpc.CallOption) (*Scheduler, error) - // Update scheduler configuration. - UpdateScheduler(ctx context.Context, in *UpdateSchedulerRequest, opts ...grpc.CallOption) (*Scheduler, error) - // List acitve schedulers configuration. - ListSchedulers(ctx context.Context, in *ListSchedulersRequest, opts ...grpc.CallOption) (*ListSchedulersResponse, error) - // Get ObjectStorage configuration. - GetObjectStorage(ctx context.Context, in *GetObjectStorageRequest, opts ...grpc.CallOption) (*ObjectStorage, error) - // List buckets configuration. - ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) - // KeepAlive with manager. 
- KeepAlive(ctx context.Context, opts ...grpc.CallOption) (Manager_KeepAliveClient, error) -} - -type managerClient struct { - cc grpc.ClientConnInterface -} - -func NewManagerClient(cc grpc.ClientConnInterface) ManagerClient { - return &managerClient{cc} -} - -func (c *managerClient) GetSeedPeer(ctx context.Context, in *GetSeedPeerRequest, opts ...grpc.CallOption) (*SeedPeer, error) { - out := new(SeedPeer) - err := c.cc.Invoke(ctx, "/manager.Manager/GetSeedPeer", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managerClient) UpdateSeedPeer(ctx context.Context, in *UpdateSeedPeerRequest, opts ...grpc.CallOption) (*SeedPeer, error) { - out := new(SeedPeer) - err := c.cc.Invoke(ctx, "/manager.Manager/UpdateSeedPeer", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managerClient) GetScheduler(ctx context.Context, in *GetSchedulerRequest, opts ...grpc.CallOption) (*Scheduler, error) { - out := new(Scheduler) - err := c.cc.Invoke(ctx, "/manager.Manager/GetScheduler", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managerClient) UpdateScheduler(ctx context.Context, in *UpdateSchedulerRequest, opts ...grpc.CallOption) (*Scheduler, error) { - out := new(Scheduler) - err := c.cc.Invoke(ctx, "/manager.Manager/UpdateScheduler", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managerClient) ListSchedulers(ctx context.Context, in *ListSchedulersRequest, opts ...grpc.CallOption) (*ListSchedulersResponse, error) { - out := new(ListSchedulersResponse) - err := c.cc.Invoke(ctx, "/manager.Manager/ListSchedulers", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *managerClient) GetObjectStorage(ctx context.Context, in *GetObjectStorageRequest, opts ...grpc.CallOption) (*ObjectStorage, error) { - out := new(ObjectStorage) - err := c.cc.Invoke(ctx, "/manager.Manager/GetObjectStorage", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managerClient) ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) { - out := new(ListBucketsResponse) - err := c.cc.Invoke(ctx, "/manager.Manager/ListBuckets", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *managerClient) KeepAlive(ctx context.Context, opts ...grpc.CallOption) (Manager_KeepAliveClient, error) { - stream, err := c.cc.NewStream(ctx, &_Manager_serviceDesc.Streams[0], "/manager.Manager/KeepAlive", opts...) - if err != nil { - return nil, err - } - x := &managerKeepAliveClient{stream} - return x, nil -} - -type Manager_KeepAliveClient interface { - Send(*KeepAliveRequest) error - CloseAndRecv() (*emptypb.Empty, error) - grpc.ClientStream -} - -type managerKeepAliveClient struct { - grpc.ClientStream -} - -func (x *managerKeepAliveClient) Send(m *KeepAliveRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *managerKeepAliveClient) CloseAndRecv() (*emptypb.Empty, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(emptypb.Empty) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// ManagerServer is the server API for Manager service. -type ManagerServer interface { - // Get SeedPeer and SeedPeer cluster configuration. - GetSeedPeer(context.Context, *GetSeedPeerRequest) (*SeedPeer, error) - // Update SeedPeer configuration. - UpdateSeedPeer(context.Context, *UpdateSeedPeerRequest) (*SeedPeer, error) - // Get Scheduler and Scheduler cluster configuration. 
- GetScheduler(context.Context, *GetSchedulerRequest) (*Scheduler, error) - // Update scheduler configuration. - UpdateScheduler(context.Context, *UpdateSchedulerRequest) (*Scheduler, error) - // List acitve schedulers configuration. - ListSchedulers(context.Context, *ListSchedulersRequest) (*ListSchedulersResponse, error) - // Get ObjectStorage configuration. - GetObjectStorage(context.Context, *GetObjectStorageRequest) (*ObjectStorage, error) - // List buckets configuration. - ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) - // KeepAlive with manager. - KeepAlive(Manager_KeepAliveServer) error -} - -// UnimplementedManagerServer can be embedded to have forward compatible implementations. -type UnimplementedManagerServer struct { -} - -func (*UnimplementedManagerServer) GetSeedPeer(context.Context, *GetSeedPeerRequest) (*SeedPeer, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSeedPeer not implemented") -} -func (*UnimplementedManagerServer) UpdateSeedPeer(context.Context, *UpdateSeedPeerRequest) (*SeedPeer, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateSeedPeer not implemented") -} -func (*UnimplementedManagerServer) GetScheduler(context.Context, *GetSchedulerRequest) (*Scheduler, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetScheduler not implemented") -} -func (*UnimplementedManagerServer) UpdateScheduler(context.Context, *UpdateSchedulerRequest) (*Scheduler, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateScheduler not implemented") -} -func (*UnimplementedManagerServer) ListSchedulers(context.Context, *ListSchedulersRequest) (*ListSchedulersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListSchedulers not implemented") -} -func (*UnimplementedManagerServer) GetObjectStorage(context.Context, *GetObjectStorageRequest) (*ObjectStorage, error) { - return nil, status.Errorf(codes.Unimplemented, "method 
GetObjectStorage not implemented") -} -func (*UnimplementedManagerServer) ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListBuckets not implemented") -} -func (*UnimplementedManagerServer) KeepAlive(Manager_KeepAliveServer) error { - return status.Errorf(codes.Unimplemented, "method KeepAlive not implemented") -} - -func RegisterManagerServer(s *grpc.Server, srv ManagerServer) { - s.RegisterService(&_Manager_serviceDesc, srv) -} - -func _Manager_GetSeedPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetSeedPeerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagerServer).GetSeedPeer(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/manager.Manager/GetSeedPeer", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagerServer).GetSeedPeer(ctx, req.(*GetSeedPeerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Manager_UpdateSeedPeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateSeedPeerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagerServer).UpdateSeedPeer(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/manager.Manager/UpdateSeedPeer", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagerServer).UpdateSeedPeer(ctx, req.(*UpdateSeedPeerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Manager_GetScheduler_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(GetSchedulerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagerServer).GetScheduler(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/manager.Manager/GetScheduler", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagerServer).GetScheduler(ctx, req.(*GetSchedulerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Manager_UpdateScheduler_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateSchedulerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagerServer).UpdateScheduler(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/manager.Manager/UpdateScheduler", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagerServer).UpdateScheduler(ctx, req.(*UpdateSchedulerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Manager_ListSchedulers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListSchedulersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagerServer).ListSchedulers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/manager.Manager/ListSchedulers", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagerServer).ListSchedulers(ctx, req.(*ListSchedulersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Manager_GetObjectStorage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetObjectStorageRequest) - if 
err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagerServer).GetObjectStorage(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/manager.Manager/GetObjectStorage", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagerServer).GetObjectStorage(ctx, req.(*GetObjectStorageRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Manager_ListBuckets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListBucketsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ManagerServer).ListBuckets(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/manager.Manager/ListBuckets", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ManagerServer).ListBuckets(ctx, req.(*ListBucketsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Manager_KeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ManagerServer).KeepAlive(&managerKeepAliveServer{stream}) -} - -type Manager_KeepAliveServer interface { - SendAndClose(*emptypb.Empty) error - Recv() (*KeepAliveRequest, error) - grpc.ServerStream -} - -type managerKeepAliveServer struct { - grpc.ServerStream -} - -func (x *managerKeepAliveServer) SendAndClose(m *emptypb.Empty) error { - return x.ServerStream.SendMsg(m) -} - -func (x *managerKeepAliveServer) Recv() (*KeepAliveRequest, error) { - m := new(KeepAliveRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _Manager_serviceDesc = grpc.ServiceDesc{ - ServiceName: "manager.Manager", - HandlerType: (*ManagerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetSeedPeer", - Handler: _Manager_GetSeedPeer_Handler, - }, - { - 
MethodName: "UpdateSeedPeer", - Handler: _Manager_UpdateSeedPeer_Handler, - }, - { - MethodName: "GetScheduler", - Handler: _Manager_GetScheduler_Handler, - }, - { - MethodName: "UpdateScheduler", - Handler: _Manager_UpdateScheduler_Handler, - }, - { - MethodName: "ListSchedulers", - Handler: _Manager_ListSchedulers_Handler, - }, - { - MethodName: "GetObjectStorage", - Handler: _Manager_GetObjectStorage_Handler, - }, - { - MethodName: "ListBuckets", - Handler: _Manager_ListBuckets_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "KeepAlive", - Handler: _Manager_KeepAlive_Handler, - ClientStreams: true, - }, - }, - Metadata: "pkg/rpc/manager/manager.proto", -} diff --git a/pkg/rpc/manager/manager.pb.validate.go b/pkg/rpc/manager/manager.pb.validate.go deleted file mode 100644 index b3aea8b9f..000000000 --- a/pkg/rpc/manager/manager.pb.validate.go +++ /dev/null @@ -1,1990 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: pkg/rpc/manager/manager.proto - -package manager - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} -) - -// Validate checks the field values on SecurityGroup with the rules defined in -// the proto definition for this message. If any rules are violated, an error -// is returned. 
-func (m *SecurityGroup) Validate() error { - if m == nil { - return nil - } - - // no validation rules for Id - - // no validation rules for Name - - // no validation rules for Bio - - // no validation rules for Domain - - // no validation rules for ProxyDomain - - return nil -} - -// SecurityGroupValidationError is the validation error returned by -// SecurityGroup.Validate if the designated constraints aren't met. -type SecurityGroupValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e SecurityGroupValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e SecurityGroupValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SecurityGroupValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SecurityGroupValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e SecurityGroupValidationError) ErrorName() string { return "SecurityGroupValidationError" } - -// Error satisfies the builtin error interface -func (e SecurityGroupValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSecurityGroup.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SecurityGroupValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SecurityGroupValidationError{} - -// Validate checks the field values on SeedPeerCluster with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. 
-func (m *SeedPeerCluster) Validate() error { - if m == nil { - return nil - } - - // no validation rules for Id - - // no validation rules for Name - - // no validation rules for Bio - - // no validation rules for Config - - // no validation rules for Scopes - - if v, ok := interface{}(m.GetSecurityGroup()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SeedPeerClusterValidationError{ - field: "SecurityGroup", - reason: "embedded message failed validation", - cause: err, - } - } - } - - return nil -} - -// SeedPeerClusterValidationError is the validation error returned by -// SeedPeerCluster.Validate if the designated constraints aren't met. -type SeedPeerClusterValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e SeedPeerClusterValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e SeedPeerClusterValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SeedPeerClusterValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SeedPeerClusterValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e SeedPeerClusterValidationError) ErrorName() string { return "SeedPeerClusterValidationError" } - -// Error satisfies the builtin error interface -func (e SeedPeerClusterValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSeedPeerCluster.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SeedPeerClusterValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SeedPeerClusterValidationError{} - -// Validate checks the field values on SeedPeer with the rules defined in the -// proto definition for this message. If any rules are violated, an error is returned. -func (m *SeedPeer) Validate() error { - if m == nil { - return nil - } - - // no validation rules for Id - - // no validation rules for HostName - - // no validation rules for Type - - // no validation rules for Idc - - // no validation rules for NetTopology - - // no validation rules for Location - - // no validation rules for Ip - - // no validation rules for Port - - // no validation rules for DownloadPort - - // no validation rules for State - - // no validation rules for SeedPeerClusterId - - if v, ok := interface{}(m.GetSeedPeerCluster()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SeedPeerValidationError{ - field: "SeedPeerCluster", - reason: "embedded message failed validation", - cause: err, - } - } - } - - for idx, item := range m.GetSchedulers() { - _, _ = idx, item - - if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SeedPeerValidationError{ - field: fmt.Sprintf("Schedulers[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - // no validation rules for ObjectStoragePort - - return nil -} - -// 
SeedPeerValidationError is the validation error returned by -// SeedPeer.Validate if the designated constraints aren't met. -type SeedPeerValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e SeedPeerValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e SeedPeerValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SeedPeerValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SeedPeerValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e SeedPeerValidationError) ErrorName() string { return "SeedPeerValidationError" } - -// Error satisfies the builtin error interface -func (e SeedPeerValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSeedPeer.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SeedPeerValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SeedPeerValidationError{} - -// Validate checks the field values on GetSeedPeerRequest with the rules -// defined in the proto definition for this message. If any rules are -// violated, an error is returned. 
-func (m *GetSeedPeerRequest) Validate() error { - if m == nil { - return nil - } - - if _, ok := SourceType_name[int32(m.GetSourceType())]; !ok { - return GetSeedPeerRequestValidationError{ - field: "SourceType", - reason: "value must be one of the defined enum values", - } - } - - if err := m._validateHostname(m.GetHostName()); err != nil { - return GetSeedPeerRequestValidationError{ - field: "HostName", - reason: "value must be a valid hostname", - cause: err, - } - } - - if m.GetSeedPeerClusterId() < 1 { - return GetSeedPeerRequestValidationError{ - field: "SeedPeerClusterId", - reason: "value must be greater than or equal to 1", - } - } - - if m.GetIp() != "" { - - if ip := net.ParseIP(m.GetIp()); ip == nil { - return GetSeedPeerRequestValidationError{ - field: "Ip", - reason: "value must be a valid IP address", - } - } - - } - - return nil -} - -func (m *GetSeedPeerRequest) _validateHostname(host string) error { - s := strings.ToLower(strings.TrimSuffix(host, ".")) - - if len(host) > 253 { - return errors.New("hostname cannot exceed 253 characters") - } - - for _, part := range strings.Split(s, ".") { - if l := len(part); l == 0 || l > 63 { - return errors.New("hostname part must be non-empty and cannot exceed 63 characters") - } - - if part[0] == '-' { - return errors.New("hostname parts cannot begin with hyphens") - } - - if part[len(part)-1] == '-' { - return errors.New("hostname parts cannot end with hyphens") - } - - for _, r := range part { - if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' { - return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r)) - } - } - } - - return nil -} - -// GetSeedPeerRequestValidationError is the validation error returned by -// GetSeedPeerRequest.Validate if the designated constraints aren't met. -type GetSeedPeerRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. 
-func (e GetSeedPeerRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e GetSeedPeerRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e GetSeedPeerRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e GetSeedPeerRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e GetSeedPeerRequestValidationError) ErrorName() string { - return "GetSeedPeerRequestValidationError" -} - -// Error satisfies the builtin error interface -func (e GetSeedPeerRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGetSeedPeerRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GetSeedPeerRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GetSeedPeerRequestValidationError{} - -// Validate checks the field values on UpdateSeedPeerRequest with the rules -// defined in the proto definition for this message. If any rules are -// violated, an error is returned. 
-func (m *UpdateSeedPeerRequest) Validate() error { - if m == nil { - return nil - } - - if _, ok := SourceType_name[int32(m.GetSourceType())]; !ok { - return UpdateSeedPeerRequestValidationError{ - field: "SourceType", - reason: "value must be one of the defined enum values", - } - } - - if err := m._validateHostname(m.GetHostName()); err != nil { - return UpdateSeedPeerRequestValidationError{ - field: "HostName", - reason: "value must be a valid hostname", - cause: err, - } - } - - if _, ok := _UpdateSeedPeerRequest_Type_InLookup[m.GetType()]; !ok { - return UpdateSeedPeerRequestValidationError{ - field: "Type", - reason: "value must be in list [super strong weak]", - } - } - - if m.GetIdc() != "" { - - if l := utf8.RuneCountInString(m.GetIdc()); l < 1 || l > 1024 { - return UpdateSeedPeerRequestValidationError{ - field: "Idc", - reason: "value length must be between 1 and 1024 runes, inclusive", - } - } - - } - - if m.GetNetTopology() != "" { - - if l := utf8.RuneCountInString(m.GetNetTopology()); l < 1 || l > 1024 { - return UpdateSeedPeerRequestValidationError{ - field: "NetTopology", - reason: "value length must be between 1 and 1024 runes, inclusive", - } - } - - } - - if m.GetLocation() != "" { - - if utf8.RuneCountInString(m.GetLocation()) > 1024 { - return UpdateSeedPeerRequestValidationError{ - field: "Location", - reason: "value length must be at most 1024 runes", - } - } - - } - - if ip := net.ParseIP(m.GetIp()); ip == nil { - return UpdateSeedPeerRequestValidationError{ - field: "Ip", - reason: "value must be a valid IP address", - } - } - - if val := m.GetPort(); val < 1024 || val >= 65535 { - return UpdateSeedPeerRequestValidationError{ - field: "Port", - reason: "value must be inside range [1024, 65535)", - } - } - - if val := m.GetDownloadPort(); val < 1024 || val >= 65535 { - return UpdateSeedPeerRequestValidationError{ - field: "DownloadPort", - reason: "value must be inside range [1024, 65535)", - } - } - - if m.GetSeedPeerClusterId() < 1 { - 
return UpdateSeedPeerRequestValidationError{ - field: "SeedPeerClusterId", - reason: "value must be greater than or equal to 1", - } - } - - if m.GetObjectStoragePort() != 0 { - - if val := m.GetObjectStoragePort(); val < 1024 || val >= 65535 { - return UpdateSeedPeerRequestValidationError{ - field: "ObjectStoragePort", - reason: "value must be inside range [1024, 65535)", - } - } - - } - - return nil -} - -func (m *UpdateSeedPeerRequest) _validateHostname(host string) error { - s := strings.ToLower(strings.TrimSuffix(host, ".")) - - if len(host) > 253 { - return errors.New("hostname cannot exceed 253 characters") - } - - for _, part := range strings.Split(s, ".") { - if l := len(part); l == 0 || l > 63 { - return errors.New("hostname part must be non-empty and cannot exceed 63 characters") - } - - if part[0] == '-' { - return errors.New("hostname parts cannot begin with hyphens") - } - - if part[len(part)-1] == '-' { - return errors.New("hostname parts cannot end with hyphens") - } - - for _, r := range part { - if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' { - return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r)) - } - } - } - - return nil -} - -// UpdateSeedPeerRequestValidationError is the validation error returned by -// UpdateSeedPeerRequest.Validate if the designated constraints aren't met. -type UpdateSeedPeerRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e UpdateSeedPeerRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e UpdateSeedPeerRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e UpdateSeedPeerRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. 
-func (e UpdateSeedPeerRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e UpdateSeedPeerRequestValidationError) ErrorName() string { - return "UpdateSeedPeerRequestValidationError" -} - -// Error satisfies the builtin error interface -func (e UpdateSeedPeerRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sUpdateSeedPeerRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = UpdateSeedPeerRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = UpdateSeedPeerRequestValidationError{} - -var _UpdateSeedPeerRequest_Type_InLookup = map[string]struct{}{ - "super": {}, - "strong": {}, - "weak": {}, -} - -// Validate checks the field values on SchedulerCluster with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. -func (m *SchedulerCluster) Validate() error { - if m == nil { - return nil - } - - // no validation rules for Id - - // no validation rules for Name - - // no validation rules for Bio - - // no validation rules for Config - - // no validation rules for ClientConfig - - // no validation rules for Scopes - - if v, ok := interface{}(m.GetSecurityGroup()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SchedulerClusterValidationError{ - field: "SecurityGroup", - reason: "embedded message failed validation", - cause: err, - } - } - } - - return nil -} - -// SchedulerClusterValidationError is the validation error returned by -// SchedulerCluster.Validate if the designated constraints aren't met. -type SchedulerClusterValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. 
-func (e SchedulerClusterValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e SchedulerClusterValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SchedulerClusterValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SchedulerClusterValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e SchedulerClusterValidationError) ErrorName() string { return "SchedulerClusterValidationError" } - -// Error satisfies the builtin error interface -func (e SchedulerClusterValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSchedulerCluster.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SchedulerClusterValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SchedulerClusterValidationError{} - -// Validate checks the field values on Scheduler with the rules defined in the -// proto definition for this message. If any rules are violated, an error is returned. 
-func (m *Scheduler) Validate() error { - if m == nil { - return nil - } - - // no validation rules for Id - - // no validation rules for HostName - - // no validation rules for Vips - - // no validation rules for Idc - - // no validation rules for Location - - // no validation rules for NetConfig - - // no validation rules for Ip - - // no validation rules for Port - - // no validation rules for State - - // no validation rules for SchedulerClusterId - - if v, ok := interface{}(m.GetSchedulerCluster()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SchedulerValidationError{ - field: "SchedulerCluster", - reason: "embedded message failed validation", - cause: err, - } - } - } - - for idx, item := range m.GetSeedPeers() { - _, _ = idx, item - - if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SchedulerValidationError{ - field: fmt.Sprintf("SeedPeers[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - // no validation rules for NetTopology - - return nil -} - -// SchedulerValidationError is the validation error returned by -// Scheduler.Validate if the designated constraints aren't met. -type SchedulerValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e SchedulerValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e SchedulerValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SchedulerValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SchedulerValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e SchedulerValidationError) ErrorName() string { return "SchedulerValidationError" } - -// Error satisfies the builtin error interface -func (e SchedulerValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sScheduler.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SchedulerValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SchedulerValidationError{} - -// Validate checks the field values on GetSchedulerRequest with the rules -// defined in the proto definition for this message. If any rules are -// violated, an error is returned. -func (m *GetSchedulerRequest) Validate() error { - if m == nil { - return nil - } - - if _, ok := SourceType_name[int32(m.GetSourceType())]; !ok { - return GetSchedulerRequestValidationError{ - field: "SourceType", - reason: "value must be one of the defined enum values", - } - } - - if err := m._validateHostname(m.GetHostName()); err != nil { - return GetSchedulerRequestValidationError{ - field: "HostName", - reason: "value must be a valid hostname", - cause: err, - } - } - - if m.GetSchedulerClusterId() < 1 { - return GetSchedulerRequestValidationError{ - field: "SchedulerClusterId", - reason: "value must be greater than or equal to 1", - } - } - - if m.GetIp() != "" { - - if ip := net.ParseIP(m.GetIp()); ip == nil { - return GetSchedulerRequestValidationError{ - field: "Ip", - reason: "value must be a valid IP address", - } - } - - } - - return nil -} - -func (m *GetSchedulerRequest) _validateHostname(host string) error { - s := strings.ToLower(strings.TrimSuffix(host, ".")) - - if len(host) > 253 { - return errors.New("hostname cannot exceed 253 characters") - } - - for _, part := range strings.Split(s, ".") { - if l := len(part); l == 0 || l > 63 { - return 
errors.New("hostname part must be non-empty and cannot exceed 63 characters") - } - - if part[0] == '-' { - return errors.New("hostname parts cannot begin with hyphens") - } - - if part[len(part)-1] == '-' { - return errors.New("hostname parts cannot end with hyphens") - } - - for _, r := range part { - if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' { - return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r)) - } - } - } - - return nil -} - -// GetSchedulerRequestValidationError is the validation error returned by -// GetSchedulerRequest.Validate if the designated constraints aren't met. -type GetSchedulerRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e GetSchedulerRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e GetSchedulerRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e GetSchedulerRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e GetSchedulerRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e GetSchedulerRequestValidationError) ErrorName() string { - return "GetSchedulerRequestValidationError" -} - -// Error satisfies the builtin error interface -func (e GetSchedulerRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGetSchedulerRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GetSchedulerRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GetSchedulerRequestValidationError{} - -// Validate checks the field values on UpdateSchedulerRequest with the rules -// defined in the proto definition for this message. If any rules are -// violated, an error is returned. -func (m *UpdateSchedulerRequest) Validate() error { - if m == nil { - return nil - } - - if _, ok := SourceType_name[int32(m.GetSourceType())]; !ok { - return UpdateSchedulerRequestValidationError{ - field: "SourceType", - reason: "value must be one of the defined enum values", - } - } - - if err := m._validateHostname(m.GetHostName()); err != nil { - return UpdateSchedulerRequestValidationError{ - field: "HostName", - reason: "value must be a valid hostname", - cause: err, - } - } - - if m.GetSchedulerClusterId() < 1 { - return UpdateSchedulerRequestValidationError{ - field: "SchedulerClusterId", - reason: "value must be greater than or equal to 1", - } - } - - if m.GetVips() != "" { - - if l := utf8.RuneCountInString(m.GetVips()); l < 1 || l > 1024 { - return UpdateSchedulerRequestValidationError{ - field: "Vips", - reason: "value length must be between 1 and 1024 runes, inclusive", - } - } - - } - - if m.GetIdc() != "" { - - if l := utf8.RuneCountInString(m.GetIdc()); l < 1 || l > 1024 { - return UpdateSchedulerRequestValidationError{ - field: "Idc", - reason: "value length must be between 1 and 1024 runes, 
inclusive", - } - } - - } - - if m.GetLocation() != "" { - - if l := utf8.RuneCountInString(m.GetLocation()); l < 1 || l > 1024 { - return UpdateSchedulerRequestValidationError{ - field: "Location", - reason: "value length must be between 1 and 1024 runes, inclusive", - } - } - - } - - if len(m.GetNetConfig()) > 0 { - - if len(m.GetNetConfig()) < 1 { - return UpdateSchedulerRequestValidationError{ - field: "NetConfig", - reason: "value length must be at least 1 bytes", - } - } - - } - - if ip := net.ParseIP(m.GetIp()); ip == nil { - return UpdateSchedulerRequestValidationError{ - field: "Ip", - reason: "value must be a valid IP address", - } - } - - if val := m.GetPort(); val < 1024 || val >= 65535 { - return UpdateSchedulerRequestValidationError{ - field: "Port", - reason: "value must be inside range [1024, 65535)", - } - } - - if m.GetNetTopology() != "" { - - if l := utf8.RuneCountInString(m.GetNetTopology()); l < 1 || l > 1024 { - return UpdateSchedulerRequestValidationError{ - field: "NetTopology", - reason: "value length must be between 1 and 1024 runes, inclusive", - } - } - - } - - return nil -} - -func (m *UpdateSchedulerRequest) _validateHostname(host string) error { - s := strings.ToLower(strings.TrimSuffix(host, ".")) - - if len(host) > 253 { - return errors.New("hostname cannot exceed 253 characters") - } - - for _, part := range strings.Split(s, ".") { - if l := len(part); l == 0 || l > 63 { - return errors.New("hostname part must be non-empty and cannot exceed 63 characters") - } - - if part[0] == '-' { - return errors.New("hostname parts cannot begin with hyphens") - } - - if part[len(part)-1] == '-' { - return errors.New("hostname parts cannot end with hyphens") - } - - for _, r := range part { - if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' { - return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r)) - } - } - } - - return nil -} - -// UpdateSchedulerRequestValidationError is the 
validation error returned by -// UpdateSchedulerRequest.Validate if the designated constraints aren't met. -type UpdateSchedulerRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e UpdateSchedulerRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e UpdateSchedulerRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e UpdateSchedulerRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e UpdateSchedulerRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e UpdateSchedulerRequestValidationError) ErrorName() string { - return "UpdateSchedulerRequestValidationError" -} - -// Error satisfies the builtin error interface -func (e UpdateSchedulerRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sUpdateSchedulerRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = UpdateSchedulerRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = UpdateSchedulerRequestValidationError{} - -// Validate checks the field values on ListSchedulersRequest with the rules -// defined in the proto definition for this message. If any rules are -// violated, an error is returned. 
-func (m *ListSchedulersRequest) Validate() error { - if m == nil { - return nil - } - - if _, ok := SourceType_name[int32(m.GetSourceType())]; !ok { - return ListSchedulersRequestValidationError{ - field: "SourceType", - reason: "value must be one of the defined enum values", - } - } - - if err := m._validateHostname(m.GetHostName()); err != nil { - return ListSchedulersRequestValidationError{ - field: "HostName", - reason: "value must be a valid hostname", - cause: err, - } - } - - if ip := net.ParseIP(m.GetIp()); ip == nil { - return ListSchedulersRequestValidationError{ - field: "Ip", - reason: "value must be a valid IP address", - } - } - - if len(m.GetHostInfo()) > 0 { - - } - - return nil -} - -func (m *ListSchedulersRequest) _validateHostname(host string) error { - s := strings.ToLower(strings.TrimSuffix(host, ".")) - - if len(host) > 253 { - return errors.New("hostname cannot exceed 253 characters") - } - - for _, part := range strings.Split(s, ".") { - if l := len(part); l == 0 || l > 63 { - return errors.New("hostname part must be non-empty and cannot exceed 63 characters") - } - - if part[0] == '-' { - return errors.New("hostname parts cannot begin with hyphens") - } - - if part[len(part)-1] == '-' { - return errors.New("hostname parts cannot end with hyphens") - } - - for _, r := range part { - if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' { - return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r)) - } - } - } - - return nil -} - -// ListSchedulersRequestValidationError is the validation error returned by -// ListSchedulersRequest.Validate if the designated constraints aren't met. -type ListSchedulersRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ListSchedulersRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. 
-func (e ListSchedulersRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ListSchedulersRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ListSchedulersRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e ListSchedulersRequestValidationError) ErrorName() string { - return "ListSchedulersRequestValidationError" -} - -// Error satisfies the builtin error interface -func (e ListSchedulersRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sListSchedulersRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ListSchedulersRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ListSchedulersRequestValidationError{} - -// Validate checks the field values on ListSchedulersResponse with the rules -// defined in the proto definition for this message. If any rules are -// violated, an error is returned. -func (m *ListSchedulersResponse) Validate() error { - if m == nil { - return nil - } - - for idx, item := range m.GetSchedulers() { - _, _ = idx, item - - if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ListSchedulersResponseValidationError{ - field: fmt.Sprintf("Schedulers[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - return nil -} - -// ListSchedulersResponseValidationError is the validation error returned by -// ListSchedulersResponse.Validate if the designated constraints aren't met. -type ListSchedulersResponseValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. 
-func (e ListSchedulersResponseValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ListSchedulersResponseValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ListSchedulersResponseValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ListSchedulersResponseValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e ListSchedulersResponseValidationError) ErrorName() string { - return "ListSchedulersResponseValidationError" -} - -// Error satisfies the builtin error interface -func (e ListSchedulersResponseValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sListSchedulersResponse.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ListSchedulersResponseValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ListSchedulersResponseValidationError{} - -// Validate checks the field values on ObjectStorage with the rules defined in -// the proto definition for this message. If any rules are violated, an error -// is returned. 
-func (m *ObjectStorage) Validate() error { - if m == nil { - return nil - } - - if l := utf8.RuneCountInString(m.GetName()); l < 1 || l > 1024 { - return ObjectStorageValidationError{ - field: "Name", - reason: "value length must be between 1 and 1024 runes, inclusive", - } - } - - if m.GetRegion() != "" { - - if l := utf8.RuneCountInString(m.GetRegion()); l < 1 || l > 1024 { - return ObjectStorageValidationError{ - field: "Region", - reason: "value length must be between 1 and 1024 runes, inclusive", - } - } - - } - - if m.GetEndpoint() != "" { - - if l := utf8.RuneCountInString(m.GetEndpoint()); l < 1 || l > 1024 { - return ObjectStorageValidationError{ - field: "Endpoint", - reason: "value length must be between 1 and 1024 runes, inclusive", - } - } - - } - - if m.GetAccessKey() != "" { - - if l := utf8.RuneCountInString(m.GetAccessKey()); l < 1 || l > 1024 { - return ObjectStorageValidationError{ - field: "AccessKey", - reason: "value length must be between 1 and 1024 runes, inclusive", - } - } - - } - - if m.GetSecretKey() != "" { - - if l := utf8.RuneCountInString(m.GetSecretKey()); l < 1 || l > 1024 { - return ObjectStorageValidationError{ - field: "SecretKey", - reason: "value length must be between 1 and 1024 runes, inclusive", - } - } - - } - - return nil -} - -// ObjectStorageValidationError is the validation error returned by -// ObjectStorage.Validate if the designated constraints aren't met. -type ObjectStorageValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ObjectStorageValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ObjectStorageValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ObjectStorageValidationError) Cause() error { return e.cause } - -// Key function returns key value. 
-func (e ObjectStorageValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e ObjectStorageValidationError) ErrorName() string { return "ObjectStorageValidationError" } - -// Error satisfies the builtin error interface -func (e ObjectStorageValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sObjectStorage.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ObjectStorageValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ObjectStorageValidationError{} - -// Validate checks the field values on GetObjectStorageRequest with the rules -// defined in the proto definition for this message. If any rules are -// violated, an error is returned. -func (m *GetObjectStorageRequest) Validate() error { - if m == nil { - return nil - } - - if _, ok := SourceType_name[int32(m.GetSourceType())]; !ok { - return GetObjectStorageRequestValidationError{ - field: "SourceType", - reason: "value must be one of the defined enum values", - } - } - - if err := m._validateHostname(m.GetHostName()); err != nil { - return GetObjectStorageRequestValidationError{ - field: "HostName", - reason: "value must be a valid hostname", - cause: err, - } - } - - if ip := net.ParseIP(m.GetIp()); ip == nil { - return GetObjectStorageRequestValidationError{ - field: "Ip", - reason: "value must be a valid IP address", - } - } - - return nil -} - -func (m *GetObjectStorageRequest) _validateHostname(host string) error { - s := strings.ToLower(strings.TrimSuffix(host, ".")) - - if len(host) > 253 { - return errors.New("hostname cannot exceed 253 characters") - } - - for _, part := range strings.Split(s, ".") { - if l := len(part); l == 0 || l > 63 { - return errors.New("hostname part must be non-empty and cannot exceed 63 
characters") - } - - if part[0] == '-' { - return errors.New("hostname parts cannot begin with hyphens") - } - - if part[len(part)-1] == '-' { - return errors.New("hostname parts cannot end with hyphens") - } - - for _, r := range part { - if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' { - return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r)) - } - } - } - - return nil -} - -// GetObjectStorageRequestValidationError is the validation error returned by -// GetObjectStorageRequest.Validate if the designated constraints aren't met. -type GetObjectStorageRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e GetObjectStorageRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e GetObjectStorageRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e GetObjectStorageRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e GetObjectStorageRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e GetObjectStorageRequestValidationError) ErrorName() string { - return "GetObjectStorageRequestValidationError" -} - -// Error satisfies the builtin error interface -func (e GetObjectStorageRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sGetObjectStorageRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = GetObjectStorageRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = GetObjectStorageRequestValidationError{} - -// Validate checks the field values on Bucket with the rules defined in the -// proto definition for this message. If any rules are violated, an error is returned. -func (m *Bucket) Validate() error { - if m == nil { - return nil - } - - if l := utf8.RuneCountInString(m.GetName()); l < 1 || l > 1024 { - return BucketValidationError{ - field: "Name", - reason: "value length must be between 1 and 1024 runes, inclusive", - } - } - - return nil -} - -// BucketValidationError is the validation error returned by Bucket.Validate if -// the designated constraints aren't met. -type BucketValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e BucketValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e BucketValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e BucketValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e BucketValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e BucketValidationError) ErrorName() string { return "BucketValidationError" } - -// Error satisfies the builtin error interface -func (e BucketValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sBucket.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = BucketValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = BucketValidationError{} - -// Validate checks the field values on ListBucketsRequest with the rules -// defined in the proto definition for this message. If any rules are -// violated, an error is returned. -func (m *ListBucketsRequest) Validate() error { - if m == nil { - return nil - } - - if _, ok := SourceType_name[int32(m.GetSourceType())]; !ok { - return ListBucketsRequestValidationError{ - field: "SourceType", - reason: "value must be one of the defined enum values", - } - } - - if err := m._validateHostname(m.GetHostName()); err != nil { - return ListBucketsRequestValidationError{ - field: "HostName", - reason: "value must be a valid hostname", - cause: err, - } - } - - if ip := net.ParseIP(m.GetIp()); ip == nil { - return ListBucketsRequestValidationError{ - field: "Ip", - reason: "value must be a valid IP address", - } - } - - return nil -} - -func (m *ListBucketsRequest) _validateHostname(host string) error { - s := strings.ToLower(strings.TrimSuffix(host, ".")) - - if len(host) > 253 { - return errors.New("hostname cannot exceed 253 characters") - } - - for _, part := range strings.Split(s, ".") { - if l := len(part); l == 0 || l > 63 { - return errors.New("hostname part must be non-empty and cannot exceed 63 characters") - } - - if part[0] == '-' { - return errors.New("hostname parts cannot begin with hyphens") - } - - if part[len(part)-1] == '-' { - return errors.New("hostname parts 
cannot end with hyphens") - } - - for _, r := range part { - if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' { - return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r)) - } - } - } - - return nil -} - -// ListBucketsRequestValidationError is the validation error returned by -// ListBucketsRequest.Validate if the designated constraints aren't met. -type ListBucketsRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ListBucketsRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ListBucketsRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ListBucketsRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ListBucketsRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e ListBucketsRequestValidationError) ErrorName() string { - return "ListBucketsRequestValidationError" -} - -// Error satisfies the builtin error interface -func (e ListBucketsRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sListBucketsRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ListBucketsRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ListBucketsRequestValidationError{} - -// Validate checks the field values on ListBucketsResponse with the rules -// defined in the proto definition for this message. If any rules are -// violated, an error is returned. 
-func (m *ListBucketsResponse) Validate() error { - if m == nil { - return nil - } - - for idx, item := range m.GetBuckets() { - _, _ = idx, item - - if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return ListBucketsResponseValidationError{ - field: fmt.Sprintf("Buckets[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - return nil -} - -// ListBucketsResponseValidationError is the validation error returned by -// ListBucketsResponse.Validate if the designated constraints aren't met. -type ListBucketsResponseValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ListBucketsResponseValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ListBucketsResponseValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ListBucketsResponseValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ListBucketsResponseValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e ListBucketsResponseValidationError) ErrorName() string { - return "ListBucketsResponseValidationError" -} - -// Error satisfies the builtin error interface -func (e ListBucketsResponseValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sListBucketsResponse.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ListBucketsResponseValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ListBucketsResponseValidationError{} - -// Validate checks the field values on KeepAliveRequest with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. -func (m *KeepAliveRequest) Validate() error { - if m == nil { - return nil - } - - if _, ok := SourceType_name[int32(m.GetSourceType())]; !ok { - return KeepAliveRequestValidationError{ - field: "SourceType", - reason: "value must be one of the defined enum values", - } - } - - if err := m._validateHostname(m.GetHostName()); err != nil { - return KeepAliveRequestValidationError{ - field: "HostName", - reason: "value must be a valid hostname", - cause: err, - } - } - - if m.GetClusterId() < 1 { - return KeepAliveRequestValidationError{ - field: "ClusterId", - reason: "value must be greater than or equal to 1", - } - } - - if m.GetIp() != "" { - - if ip := net.ParseIP(m.GetIp()); ip == nil { - return KeepAliveRequestValidationError{ - field: "Ip", - reason: "value must be a valid IP address", - } - } - - } - - return nil -} - -func (m *KeepAliveRequest) _validateHostname(host string) error { - s := strings.ToLower(strings.TrimSuffix(host, ".")) - - if len(host) > 253 { - return errors.New("hostname cannot exceed 253 characters") - } - - for _, part := range strings.Split(s, ".") { - if l := len(part); l == 0 || l > 63 { - 
return errors.New("hostname part must be non-empty and cannot exceed 63 characters") - } - - if part[0] == '-' { - return errors.New("hostname parts cannot begin with hyphens") - } - - if part[len(part)-1] == '-' { - return errors.New("hostname parts cannot end with hyphens") - } - - for _, r := range part { - if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' { - return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r)) - } - } - } - - return nil -} - -// KeepAliveRequestValidationError is the validation error returned by -// KeepAliveRequest.Validate if the designated constraints aren't met. -type KeepAliveRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e KeepAliveRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e KeepAliveRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e KeepAliveRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e KeepAliveRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e KeepAliveRequestValidationError) ErrorName() string { return "KeepAliveRequestValidationError" } - -// Error satisfies the builtin error interface -func (e KeepAliveRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sKeepAliveRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = KeepAliveRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = KeepAliveRequestValidationError{} diff --git a/pkg/rpc/manager/manager.proto b/pkg/rpc/manager/manager.proto deleted file mode 100644 index 17dee6aaa..000000000 --- a/pkg/rpc/manager/manager.proto +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -syntax = "proto3"; - -package manager; - -import "google/protobuf/empty.proto"; -import "validate/validate.proto"; - -option go_package = "d7y.io/dragonfly/v2/pkg/rpc/manager"; - -// Request source type. -enum SourceType { - // Scheduler service. - SCHEDULER_SOURCE = 0; - // Peer service. - PEER_SOURCE = 1; - // SeedPeer service. - SEED_PEER_SOURCE = 2; -} - -// SecurityGroup represents security group of cluster. -message SecurityGroup { - // Group id. - uint64 id = 1; - // Group name. 
- string name = 2; - // Group biography. - string bio = 3; - // Group domain. - string domain = 4; - // Group proxy domain. - string proxy_domain = 5; -} - -// SeedPeerCluster represents cluster of seed peer. -message SeedPeerCluster { - // Cluster id. - uint64 id = 1; - // Cluster name. - string name = 2; - // Cluster biography. - string bio = 3; - // Cluster configuration. - bytes config = 4; - // Cluster scopes. - bytes scopes = 5; - // Security group to which the seed peer cluster belongs. - SecurityGroup security_group = 6; -} - -// SeedPeer represents seed peer for network. -message SeedPeer { - // Seed peer id. - uint64 id = 1; - // Seed peer hostname. - string host_name = 2; - // Seed peer type. - string type = 3; - // Seed peer idc. - string idc = 5; - // Seed peer network topology. - string net_topology = 6; - // Seed peer location. - string location = 7; - // Seed peer ip. - string ip = 8; - // Seed peer grpc port. - int32 port = 9; - // Seed peer download port. - int32 download_port = 10; - // Seed peer state. - string state = 11; - // ID of the cluster to which the seed peer belongs. - uint64 seed_peer_cluster_id = 12; - // Cluster to which the seed peer belongs. - SeedPeerCluster seed_peer_cluster = 13; - // Schedulers included in seed peer. - repeated Scheduler schedulers = 14; - // Seed peer object storage port. - int32 object_storage_port = 15; -} - -// GetSeedPeerRequest represents request of GetSeedPeer. -message GetSeedPeerRequest { - // Request source type. - SourceType source_type = 1 [(validate.rules).enum.defined_only = true]; - // Seed peer hostname. - string host_name = 2 [(validate.rules).string.hostname = true]; - // ID of the cluster to which the seed peer belongs. - uint64 seed_peer_cluster_id = 3 [(validate.rules).uint64 = {gte: 1}]; - // Seed peer ip. - string ip = 4 [(validate.rules).string = {ip: true, ignore_empty: true}]; -} - -// UpdateSeedPeerRequest represents request of UpdateSeedPeer. 
-message UpdateSeedPeerRequest { - // Request source type. - SourceType source_type = 1 [(validate.rules).enum.defined_only = true]; - // Seed peer hostname. - string host_name = 2 [(validate.rules).string.hostname = true]; - // Seed peer type. - string type = 3 [(validate.rules).string = {in: ["super", "strong", "weak"]}]; - // Seed peer idc. - string idc = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; - // Seed peer network topology. - string net_topology = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; - // Seed peer location. - string location = 7 [(validate.rules).string = {max_len: 1024, ignore_empty: true}]; - // Seed peer ip. - string ip = 8 [(validate.rules).string = {ip: true}]; - // Seed peer port. - int32 port = 9 [(validate.rules).int32 = {gte: 1024, lt: 65535}]; - // Seed peer download port. - int32 download_port = 10 [(validate.rules).int32 = {gte: 1024, lt: 65535}]; - // ID of the cluster to which the seed peer belongs. - uint64 seed_peer_cluster_id = 11 [(validate.rules).uint64 = {gte: 1}]; - // Seed peer object storage port. - int32 object_storage_port = 12 [(validate.rules).int32 = {gte: 1024, lt: 65535, ignore_empty: true}]; -} - -// SeedPeerCluster represents cluster of scheduler. -message SchedulerCluster { - // Cluster id. - uint64 id = 1; - // Cluster name. - string name = 2; - // Cluster biography. - string bio = 3; - // Cluster config. - bytes config = 4; - // Cluster client config. - bytes client_config = 5; - // Cluster scopes. - bytes scopes = 6; - // Security group to which the scheduler cluster belongs. - SecurityGroup security_group = 7; -} - -// SeedPeerCluster represents scheduler for network. -message Scheduler { - // Scheduler id. - uint64 id = 1; - // Scheduler hostname. - string host_name = 2; - // Deprecated: Do not use. - string vips = 3; - // Scheduler idc. - string idc = 4; - // Scheduler location. 
- string location = 5; - // Deprecated: Use net_topology instead. - bytes net_config = 6; - // Scheduler ip. - string ip = 7; - // Scheduler grpc port. - int32 port = 8; - // Scheduler state. - string state = 9; - // ID of the cluster to which the scheduler belongs. - uint64 scheduler_cluster_id = 10; - // Cluster to which the scheduler belongs. - SchedulerCluster scheduler_cluster = 11; - // Seed peers to which the scheduler belongs. - repeated SeedPeer seed_peers = 13; - // Scheduler network topology. - string net_topology = 14; -} - -// GetSchedulerRequest represents request of GetScheduler. -message GetSchedulerRequest { - // Request source type. - SourceType source_type = 1 [(validate.rules).enum.defined_only = true]; - // Scheduler hostname. - string host_name = 2 [(validate.rules).string.hostname = true]; - // ID of the cluster to which the scheduler belongs. - uint64 scheduler_cluster_id = 3 [(validate.rules).uint64 = {gte: 1}]; - // Scheduler ip. - string ip = 4 [(validate.rules).string = {ip: true, ignore_empty: true}]; -} - -// UpdateSchedulerRequest represents request of UpdateScheduler. -message UpdateSchedulerRequest { - // Request source type. - SourceType source_type = 1 [(validate.rules).enum.defined_only = true]; - // Scheduler hostname. - string host_name = 2 [(validate.rules).string.hostname = true]; - // ID of the cluster to which the scheduler belongs. - uint64 scheduler_cluster_id = 3 [(validate.rules).uint64 = {gte: 1}]; - // Deprecated: Do not use. - string vips = 4 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; - // Scheduler idc. - string idc = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; - // Scheduler location. - string location = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; - // Deprecated: Use net_topology instead. - bytes net_config = 7 [(validate.rules).bytes = {min_len: 1, ignore_empty: true}]; - // Scheduler ip. 
- string ip = 8 [(validate.rules).string = {ip: true}]; - // Scheduler port. - int32 port = 9 [(validate.rules).int32 = {gte: 1024, lt: 65535}]; - // Scheduler network topology. - string net_topology = 10 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; -} - -// ListSchedulersRequest represents request of ListSchedulers. -message ListSchedulersRequest { - // Request source type. - SourceType source_type = 1 [(validate.rules).enum.defined_only = true]; - // Source service hostname. - string host_name = 2 [(validate.rules).string.hostname = true]; - // Source service ip. - string ip = 3 [(validate.rules).string.ip = true]; - // Source service host information. - map host_info = 5 [(validate.rules).map.ignore_empty = true]; -} - -// ListSchedulersResponse represents response of ListSchedulers. -message ListSchedulersResponse { - // Schedulers to which the source service belongs. - repeated Scheduler schedulers = 1; -} - -// ObjectStorage represents config of object storage. -message ObjectStorage { - // Object storage name of type. - string name = 1 [(validate.rules).string = {min_len: 1, max_len: 1024}]; - // Storage region. - string region = 2 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; - // Datacenter endpoint. - string endpoint = 3 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; - // Access key id. - string access_key = 4 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; - // Access key secret. - string secret_key = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}]; -} - -// GetObjectStorageRequest represents request of GetObjectStorage. -message GetObjectStorageRequest { - // Request source type. - SourceType source_type = 1 [(validate.rules).enum.defined_only = true]; - // Source service hostname. - string host_name = 2 [(validate.rules).string.hostname = true]; - // Source service ip. 
- string ip = 3 [(validate.rules).string.ip = true]; -} - -// Bucket represents config of bucket. -message Bucket { - // Bucket name. - string name = 1 [(validate.rules).string = {min_len: 1, max_len: 1024}]; -} - -// ListSchedulersRequest represents request of ListBuckets. -message ListBucketsRequest { - // Request source type. - SourceType source_type = 1 [(validate.rules).enum.defined_only = true]; - // Source service hostname. - string host_name = 2 [(validate.rules).string.hostname = true]; - // Source service ip. - string ip = 3 [(validate.rules).string.ip = true]; -} - -// ListBucketsResponse represents response of ListBuckets. -message ListBucketsResponse { - // Bucket configs. - repeated Bucket buckets = 1; -} - -// KeepAliveRequest represents request of KeepAlive. -message KeepAliveRequest { - // Request source type. - SourceType source_type = 1 [(validate.rules).enum.defined_only = true]; - // Source service hostname. - string host_name = 2 [(validate.rules).string.hostname = true]; - // ID of the cluster to which the source service belongs. - uint64 cluster_id = 3 [(validate.rules).uint64 = {gte: 1}]; - // Source service ip. - string ip = 4 [(validate.rules).string = {ip: true, ignore_empty: true}]; -} - -// Manager RPC Service. -service Manager { - // Get SeedPeer and SeedPeer cluster configuration. - rpc GetSeedPeer(GetSeedPeerRequest) returns(SeedPeer); - - // Update SeedPeer configuration. - rpc UpdateSeedPeer(UpdateSeedPeerRequest) returns(SeedPeer); - - // Get Scheduler and Scheduler cluster configuration. - rpc GetScheduler(GetSchedulerRequest)returns(Scheduler); - - // Update scheduler configuration. - rpc UpdateScheduler(UpdateSchedulerRequest) returns(Scheduler); - - // List acitve schedulers configuration. - rpc ListSchedulers(ListSchedulersRequest)returns(ListSchedulersResponse); - - // Get ObjectStorage configuration. - rpc GetObjectStorage(GetObjectStorageRequest) returns(ObjectStorage); - - // List buckets configuration. 
- rpc ListBuckets(ListBucketsRequest)returns(ListBucketsResponse); - - // KeepAlive with manager. - rpc KeepAlive(stream KeepAliveRequest)returns(google.protobuf.Empty); -} diff --git a/pkg/rpc/manager/mocks/manager_mock.go b/pkg/rpc/manager/mocks/manager_mock.go deleted file mode 100644 index adfb1345d..000000000 --- a/pkg/rpc/manager/mocks/manager_mock.go +++ /dev/null @@ -1,612 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: manager/manager.pb.go - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - reflect "reflect" - - manager "d7y.io/dragonfly/v2/pkg/rpc/manager" - gomock "github.com/golang/mock/gomock" - grpc "google.golang.org/grpc" - metadata "google.golang.org/grpc/metadata" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// MockManagerClient is a mock of ManagerClient interface. -type MockManagerClient struct { - ctrl *gomock.Controller - recorder *MockManagerClientMockRecorder -} - -// MockManagerClientMockRecorder is the mock recorder for MockManagerClient. -type MockManagerClientMockRecorder struct { - mock *MockManagerClient -} - -// NewMockManagerClient creates a new mock instance. -func NewMockManagerClient(ctrl *gomock.Controller) *MockManagerClient { - mock := &MockManagerClient{ctrl: ctrl} - mock.recorder = &MockManagerClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockManagerClient) EXPECT() *MockManagerClientMockRecorder { - return m.recorder -} - -// GetObjectStorage mocks base method. -func (m *MockManagerClient) GetObjectStorage(ctx context.Context, in *manager.GetObjectStorageRequest, opts ...grpc.CallOption) (*manager.ObjectStorage, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectStorage", varargs...) 
- ret0, _ := ret[0].(*manager.ObjectStorage) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectStorage indicates an expected call of GetObjectStorage. -func (mr *MockManagerClientMockRecorder) GetObjectStorage(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectStorage", reflect.TypeOf((*MockManagerClient)(nil).GetObjectStorage), varargs...) -} - -// GetScheduler mocks base method. -func (m *MockManagerClient) GetScheduler(ctx context.Context, in *manager.GetSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetScheduler", varargs...) - ret0, _ := ret[0].(*manager.Scheduler) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetScheduler indicates an expected call of GetScheduler. -func (mr *MockManagerClientMockRecorder) GetScheduler(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScheduler", reflect.TypeOf((*MockManagerClient)(nil).GetScheduler), varargs...) -} - -// GetSeedPeer mocks base method. -func (m *MockManagerClient) GetSeedPeer(ctx context.Context, in *manager.GetSeedPeerRequest, opts ...grpc.CallOption) (*manager.SeedPeer, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetSeedPeer", varargs...) - ret0, _ := ret[0].(*manager.SeedPeer) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSeedPeer indicates an expected call of GetSeedPeer. 
-func (mr *MockManagerClientMockRecorder) GetSeedPeer(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSeedPeer", reflect.TypeOf((*MockManagerClient)(nil).GetSeedPeer), varargs...) -} - -// KeepAlive mocks base method. -func (m *MockManagerClient) KeepAlive(ctx context.Context, opts ...grpc.CallOption) (manager.Manager_KeepAliveClient, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "KeepAlive", varargs...) - ret0, _ := ret[0].(manager.Manager_KeepAliveClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// KeepAlive indicates an expected call of KeepAlive. -func (mr *MockManagerClientMockRecorder) KeepAlive(ctx interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockManagerClient)(nil).KeepAlive), varargs...) -} - -// ListBuckets mocks base method. -func (m *MockManagerClient) ListBuckets(ctx context.Context, in *manager.ListBucketsRequest, opts ...grpc.CallOption) (*manager.ListBucketsResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListBuckets", varargs...) - ret0, _ := ret[0].(*manager.ListBucketsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBuckets indicates an expected call of ListBuckets. -func (mr *MockManagerClientMockRecorder) ListBuckets(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBuckets", reflect.TypeOf((*MockManagerClient)(nil).ListBuckets), varargs...) 
-} - -// ListSchedulers mocks base method. -func (m *MockManagerClient) ListSchedulers(ctx context.Context, in *manager.ListSchedulersRequest, opts ...grpc.CallOption) (*manager.ListSchedulersResponse, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListSchedulers", varargs...) - ret0, _ := ret[0].(*manager.ListSchedulersResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListSchedulers indicates an expected call of ListSchedulers. -func (mr *MockManagerClientMockRecorder) ListSchedulers(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerClient)(nil).ListSchedulers), varargs...) -} - -// UpdateScheduler mocks base method. -func (m *MockManagerClient) UpdateScheduler(ctx context.Context, in *manager.UpdateSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "UpdateScheduler", varargs...) - ret0, _ := ret[0].(*manager.Scheduler) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateScheduler indicates an expected call of UpdateScheduler. -func (mr *MockManagerClientMockRecorder) UpdateScheduler(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateScheduler", reflect.TypeOf((*MockManagerClient)(nil).UpdateScheduler), varargs...) -} - -// UpdateSeedPeer mocks base method. 
-func (m *MockManagerClient) UpdateSeedPeer(ctx context.Context, in *manager.UpdateSeedPeerRequest, opts ...grpc.CallOption) (*manager.SeedPeer, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "UpdateSeedPeer", varargs...) - ret0, _ := ret[0].(*manager.SeedPeer) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateSeedPeer indicates an expected call of UpdateSeedPeer. -func (mr *MockManagerClientMockRecorder) UpdateSeedPeer(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSeedPeer", reflect.TypeOf((*MockManagerClient)(nil).UpdateSeedPeer), varargs...) -} - -// MockManager_KeepAliveClient is a mock of Manager_KeepAliveClient interface. -type MockManager_KeepAliveClient struct { - ctrl *gomock.Controller - recorder *MockManager_KeepAliveClientMockRecorder -} - -// MockManager_KeepAliveClientMockRecorder is the mock recorder for MockManager_KeepAliveClient. -type MockManager_KeepAliveClientMockRecorder struct { - mock *MockManager_KeepAliveClient -} - -// NewMockManager_KeepAliveClient creates a new mock instance. -func NewMockManager_KeepAliveClient(ctrl *gomock.Controller) *MockManager_KeepAliveClient { - mock := &MockManager_KeepAliveClient{ctrl: ctrl} - mock.recorder = &MockManager_KeepAliveClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockManager_KeepAliveClient) EXPECT() *MockManager_KeepAliveClientMockRecorder { - return m.recorder -} - -// CloseAndRecv mocks base method. 
-func (m *MockManager_KeepAliveClient) CloseAndRecv() (*emptypb.Empty, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseAndRecv") - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CloseAndRecv indicates an expected call of CloseAndRecv. -func (mr *MockManager_KeepAliveClientMockRecorder) CloseAndRecv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseAndRecv", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).CloseAndRecv)) -} - -// CloseSend mocks base method. -func (m *MockManager_KeepAliveClient) CloseSend() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseSend") - ret0, _ := ret[0].(error) - return ret0 -} - -// CloseSend indicates an expected call of CloseSend. -func (mr *MockManager_KeepAliveClientMockRecorder) CloseSend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).CloseSend)) -} - -// Context mocks base method. -func (m *MockManager_KeepAliveClient) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context. -func (mr *MockManager_KeepAliveClientMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Context)) -} - -// Header mocks base method. -func (m *MockManager_KeepAliveClient) Header() (metadata.MD, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Header") - ret0, _ := ret[0].(metadata.MD) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Header indicates an expected call of Header. 
-func (mr *MockManager_KeepAliveClientMockRecorder) Header() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Header)) -} - -// RecvMsg mocks base method. -func (m_2 *MockManager_KeepAliveClient) RecvMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "RecvMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg. -func (mr *MockManager_KeepAliveClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).RecvMsg), m) -} - -// Send mocks base method. -func (m *MockManager_KeepAliveClient) Send(arg0 *manager.KeepAliveRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send. -func (mr *MockManager_KeepAliveClientMockRecorder) Send(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Send), arg0) -} - -// SendMsg mocks base method. -func (m_2 *MockManager_KeepAliveClient) SendMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "SendMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg. -func (mr *MockManager_KeepAliveClientMockRecorder) SendMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).SendMsg), m) -} - -// Trailer mocks base method. 
-func (m *MockManager_KeepAliveClient) Trailer() metadata.MD { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Trailer") - ret0, _ := ret[0].(metadata.MD) - return ret0 -} - -// Trailer indicates an expected call of Trailer. -func (mr *MockManager_KeepAliveClientMockRecorder) Trailer() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Trailer)) -} - -// MockManagerServer is a mock of ManagerServer interface. -type MockManagerServer struct { - ctrl *gomock.Controller - recorder *MockManagerServerMockRecorder -} - -// MockManagerServerMockRecorder is the mock recorder for MockManagerServer. -type MockManagerServerMockRecorder struct { - mock *MockManagerServer -} - -// NewMockManagerServer creates a new mock instance. -func NewMockManagerServer(ctrl *gomock.Controller) *MockManagerServer { - mock := &MockManagerServer{ctrl: ctrl} - mock.recorder = &MockManagerServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockManagerServer) EXPECT() *MockManagerServerMockRecorder { - return m.recorder -} - -// GetObjectStorage mocks base method. -func (m *MockManagerServer) GetObjectStorage(arg0 context.Context, arg1 *manager.GetObjectStorageRequest) (*manager.ObjectStorage, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectStorage", arg0, arg1) - ret0, _ := ret[0].(*manager.ObjectStorage) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectStorage indicates an expected call of GetObjectStorage. -func (mr *MockManagerServerMockRecorder) GetObjectStorage(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectStorage", reflect.TypeOf((*MockManagerServer)(nil).GetObjectStorage), arg0, arg1) -} - -// GetScheduler mocks base method. 
-func (m *MockManagerServer) GetScheduler(arg0 context.Context, arg1 *manager.GetSchedulerRequest) (*manager.Scheduler, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetScheduler", arg0, arg1) - ret0, _ := ret[0].(*manager.Scheduler) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetScheduler indicates an expected call of GetScheduler. -func (mr *MockManagerServerMockRecorder) GetScheduler(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScheduler", reflect.TypeOf((*MockManagerServer)(nil).GetScheduler), arg0, arg1) -} - -// GetSeedPeer mocks base method. -func (m *MockManagerServer) GetSeedPeer(arg0 context.Context, arg1 *manager.GetSeedPeerRequest) (*manager.SeedPeer, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSeedPeer", arg0, arg1) - ret0, _ := ret[0].(*manager.SeedPeer) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSeedPeer indicates an expected call of GetSeedPeer. -func (mr *MockManagerServerMockRecorder) GetSeedPeer(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSeedPeer", reflect.TypeOf((*MockManagerServer)(nil).GetSeedPeer), arg0, arg1) -} - -// KeepAlive mocks base method. -func (m *MockManagerServer) KeepAlive(arg0 manager.Manager_KeepAliveServer) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "KeepAlive", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// KeepAlive indicates an expected call of KeepAlive. -func (mr *MockManagerServerMockRecorder) KeepAlive(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockManagerServer)(nil).KeepAlive), arg0) -} - -// ListBuckets mocks base method. 
-func (m *MockManagerServer) ListBuckets(arg0 context.Context, arg1 *manager.ListBucketsRequest) (*manager.ListBucketsResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBuckets", arg0, arg1) - ret0, _ := ret[0].(*manager.ListBucketsResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBuckets indicates an expected call of ListBuckets. -func (mr *MockManagerServerMockRecorder) ListBuckets(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBuckets", reflect.TypeOf((*MockManagerServer)(nil).ListBuckets), arg0, arg1) -} - -// ListSchedulers mocks base method. -func (m *MockManagerServer) ListSchedulers(arg0 context.Context, arg1 *manager.ListSchedulersRequest) (*manager.ListSchedulersResponse, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListSchedulers", arg0, arg1) - ret0, _ := ret[0].(*manager.ListSchedulersResponse) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListSchedulers indicates an expected call of ListSchedulers. -func (mr *MockManagerServerMockRecorder) ListSchedulers(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerServer)(nil).ListSchedulers), arg0, arg1) -} - -// UpdateScheduler mocks base method. -func (m *MockManagerServer) UpdateScheduler(arg0 context.Context, arg1 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateScheduler", arg0, arg1) - ret0, _ := ret[0].(*manager.Scheduler) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateScheduler indicates an expected call of UpdateScheduler. 
-func (mr *MockManagerServerMockRecorder) UpdateScheduler(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateScheduler", reflect.TypeOf((*MockManagerServer)(nil).UpdateScheduler), arg0, arg1) -} - -// UpdateSeedPeer mocks base method. -func (m *MockManagerServer) UpdateSeedPeer(arg0 context.Context, arg1 *manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateSeedPeer", arg0, arg1) - ret0, _ := ret[0].(*manager.SeedPeer) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateSeedPeer indicates an expected call of UpdateSeedPeer. -func (mr *MockManagerServerMockRecorder) UpdateSeedPeer(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSeedPeer", reflect.TypeOf((*MockManagerServer)(nil).UpdateSeedPeer), arg0, arg1) -} - -// MockManager_KeepAliveServer is a mock of Manager_KeepAliveServer interface. -type MockManager_KeepAliveServer struct { - ctrl *gomock.Controller - recorder *MockManager_KeepAliveServerMockRecorder -} - -// MockManager_KeepAliveServerMockRecorder is the mock recorder for MockManager_KeepAliveServer. -type MockManager_KeepAliveServerMockRecorder struct { - mock *MockManager_KeepAliveServer -} - -// NewMockManager_KeepAliveServer creates a new mock instance. -func NewMockManager_KeepAliveServer(ctrl *gomock.Controller) *MockManager_KeepAliveServer { - mock := &MockManager_KeepAliveServer{ctrl: ctrl} - mock.recorder = &MockManager_KeepAliveServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockManager_KeepAliveServer) EXPECT() *MockManager_KeepAliveServerMockRecorder { - return m.recorder -} - -// Context mocks base method. 
-func (m *MockManager_KeepAliveServer) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context. -func (mr *MockManager_KeepAliveServerMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).Context)) -} - -// Recv mocks base method. -func (m *MockManager_KeepAliveServer) Recv() (*manager.KeepAliveRequest, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*manager.KeepAliveRequest) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Recv indicates an expected call of Recv. -func (mr *MockManager_KeepAliveServerMockRecorder) Recv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).Recv)) -} - -// RecvMsg mocks base method. -func (m_2 *MockManager_KeepAliveServer) RecvMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "RecvMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg. -func (mr *MockManager_KeepAliveServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).RecvMsg), m) -} - -// SendAndClose mocks base method. -func (m *MockManager_KeepAliveServer) SendAndClose(arg0 *emptypb.Empty) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAndClose", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendAndClose indicates an expected call of SendAndClose. 
-func (mr *MockManager_KeepAliveServerMockRecorder) SendAndClose(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAndClose", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SendAndClose), arg0) -} - -// SendHeader mocks base method. -func (m *MockManager_KeepAliveServer) SendHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendHeader indicates an expected call of SendHeader. -func (mr *MockManager_KeepAliveServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SendHeader), arg0) -} - -// SendMsg mocks base method. -func (m_2 *MockManager_KeepAliveServer) SendMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "SendMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg. -func (mr *MockManager_KeepAliveServerMockRecorder) SendMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SendMsg), m) -} - -// SetHeader mocks base method. -func (m *MockManager_KeepAliveServer) SetHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetHeader indicates an expected call of SetHeader. -func (mr *MockManager_KeepAliveServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SetHeader), arg0) -} - -// SetTrailer mocks base method. 
-func (m *MockManager_KeepAliveServer) SetTrailer(arg0 metadata.MD) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetTrailer", arg0) -} - -// SetTrailer indicates an expected call of SetTrailer. -func (mr *MockManager_KeepAliveServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SetTrailer), arg0) -} diff --git a/pkg/rpc/scheduler/client/client.go b/pkg/rpc/scheduler/client/client.go index 897228625..9f9436056 100644 --- a/pkg/rpc/scheduler/client/client.go +++ b/pkg/rpc/scheduler/client/client.go @@ -25,32 +25,33 @@ import ( "google.golang.org/grpc" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/dfnet" "d7y.io/dragonfly/v2/pkg/rpc" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/base/common" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + "d7y.io/dragonfly/v2/pkg/rpc/common" ) // NewBeginOfPiece creates begin of piece. -func NewBeginOfPiece(taskID, peerID string) *scheduler.PieceResult { - return &scheduler.PieceResult{ +func NewBeginOfPiece(taskID, peerID string) *schedulerv1.PieceResult { + return &schedulerv1.PieceResult{ TaskId: taskID, SrcPid: peerID, - PieceInfo: &base.PieceInfo{ + PieceInfo: &commonv1.PieceInfo{ PieceNum: common.BeginOfPiece, }, } } // NewBeginOfPiece creates end of piece. 
-func NewEndOfPiece(taskID, peerID string, finishedCount int32) *scheduler.PieceResult { - return &scheduler.PieceResult{ +func NewEndOfPiece(taskID, peerID string, finishedCount int32) *schedulerv1.PieceResult { + return &schedulerv1.PieceResult{ TaskId: taskID, SrcPid: peerID, FinishedCount: finishedCount, - PieceInfo: &base.PieceInfo{ + PieceInfo: &commonv1.PieceInfo{ PieceNum: common.EndOfPiece, }, } @@ -74,22 +75,22 @@ func GetClientByAddr(addrs []dfnet.NetAddr, opts ...grpc.DialOption) (Client, er // Client is the interface for grpc client. type Client interface { // RegisterPeerTask registers a peer into task. - RegisterPeerTask(context.Context, *scheduler.PeerTaskRequest, ...grpc.CallOption) (*scheduler.RegisterResult, error) + RegisterPeerTask(context.Context, *schedulerv1.PeerTaskRequest, ...grpc.CallOption) (*schedulerv1.RegisterResult, error) // ReportPieceResult reports piece results and receives peer packets. - ReportPieceResult(context.Context, *scheduler.PeerTaskRequest, ...grpc.CallOption) (scheduler.Scheduler_ReportPieceResultClient, error) + ReportPieceResult(context.Context, *schedulerv1.PeerTaskRequest, ...grpc.CallOption) (schedulerv1.Scheduler_ReportPieceResultClient, error) // ReportPeerResult reports downloading result for the peer. - ReportPeerResult(context.Context, *scheduler.PeerResult, ...grpc.CallOption) error + ReportPeerResult(context.Context, *schedulerv1.PeerResult, ...grpc.CallOption) error // LeaveTask makes the peer leaving from task. - LeaveTask(context.Context, *scheduler.PeerTarget, ...grpc.CallOption) error + LeaveTask(context.Context, *schedulerv1.PeerTarget, ...grpc.CallOption) error // Checks if any peer has the given task. - StatTask(context.Context, *scheduler.StatTaskRequest, ...grpc.CallOption) (*scheduler.Task, error) + StatTask(context.Context, *schedulerv1.StatTaskRequest, ...grpc.CallOption) (*schedulerv1.Task, error) // A peer announces that it has the announced task to other peers. 
- AnnounceTask(context.Context, *scheduler.AnnounceTaskRequest, ...grpc.CallOption) error + AnnounceTask(context.Context, *schedulerv1.AnnounceTaskRequest, ...grpc.CallOption) error // Update grpc addresses. UpdateState([]dfnet.NetAddr) @@ -107,17 +108,17 @@ type client struct { } // getClient gets scheduler client with hashkey. -func (sc *client) getClient(key string, stick bool) (scheduler.SchedulerClient, string, error) { +func (sc *client) getClient(key string, stick bool) (schedulerv1.SchedulerClient, string, error) { clientConn, err := sc.Connection.GetClientConn(key, stick) if err != nil { return nil, "", err } - return scheduler.NewSchedulerClient(clientConn), clientConn.Target(), nil + return schedulerv1.NewSchedulerClient(clientConn), clientConn.Target(), nil } // RegisterPeerTask registers a peer into task. -func (sc *client) RegisterPeerTask(ctx context.Context, req *scheduler.PeerTaskRequest, opts ...grpc.CallOption) (*scheduler.RegisterResult, error) { +func (sc *client) RegisterPeerTask(ctx context.Context, req *schedulerv1.PeerTaskRequest, opts ...grpc.CallOption) (*schedulerv1.RegisterResult, error) { // Generate task id. client, target, err := sc.getClient(req.TaskId, false) if err != nil { @@ -134,7 +135,7 @@ func (sc *client) RegisterPeerTask(ctx context.Context, req *scheduler.PeerTaskR } // ReportPieceResult reports piece results and receives peer packets. -func (sc *client) ReportPieceResult(ctx context.Context, req *scheduler.PeerTaskRequest, opts ...grpc.CallOption) (scheduler.Scheduler_ReportPieceResultClient, error) { +func (sc *client) ReportPieceResult(ctx context.Context, req *schedulerv1.PeerTaskRequest, opts ...grpc.CallOption) (schedulerv1.Scheduler_ReportPieceResultClient, error) { client, target, err := sc.getClient(req.TaskId, false) if err != nil { return nil, err @@ -150,7 +151,7 @@ func (sc *client) ReportPieceResult(ctx context.Context, req *scheduler.PeerTask } // ReportPeerResult reports downloading result for the peer. 
-func (sc *client) ReportPeerResult(ctx context.Context, req *scheduler.PeerResult, opts ...grpc.CallOption) error { +func (sc *client) ReportPeerResult(ctx context.Context, req *schedulerv1.PeerResult, opts ...grpc.CallOption) error { client, target, err := sc.getClient(req.TaskId, false) if err != nil { return err @@ -165,7 +166,7 @@ func (sc *client) ReportPeerResult(ctx context.Context, req *scheduler.PeerResul } // LeaveTask makes the peer leaving from task. -func (sc *client) LeaveTask(ctx context.Context, req *scheduler.PeerTarget, opts ...grpc.CallOption) error { +func (sc *client) LeaveTask(ctx context.Context, req *schedulerv1.PeerTarget, opts ...grpc.CallOption) error { client, target, err := sc.getClient(req.TaskId, false) if err != nil { return err @@ -180,7 +181,7 @@ func (sc *client) LeaveTask(ctx context.Context, req *scheduler.PeerTarget, opts } // Checks if any peer has the given task. -func (sc *client) StatTask(ctx context.Context, req *scheduler.StatTaskRequest, opts ...grpc.CallOption) (*scheduler.Task, error) { +func (sc *client) StatTask(ctx context.Context, req *schedulerv1.StatTaskRequest, opts ...grpc.CallOption) (*schedulerv1.Task, error) { client, target, err := sc.getClient(req.TaskId, false) if err != nil { return nil, err @@ -196,7 +197,7 @@ func (sc *client) StatTask(ctx context.Context, req *scheduler.StatTaskRequest, } // A peer announces that it has the announced task to other peers. 
-func (sc *client) AnnounceTask(ctx context.Context, req *scheduler.AnnounceTaskRequest, opts ...grpc.CallOption) error { +func (sc *client) AnnounceTask(ctx context.Context, req *schedulerv1.AnnounceTaskRequest, opts ...grpc.CallOption) error { client, target, err := sc.getClient(req.TaskId, false) if err != nil { return err diff --git a/pkg/rpc/scheduler/client/mocks/client_mock.go b/pkg/rpc/scheduler/client/mocks/client_mock.go index 5307f6690..541eb6b15 100644 --- a/pkg/rpc/scheduler/client/mocks/client_mock.go +++ b/pkg/rpc/scheduler/client/mocks/client_mock.go @@ -8,8 +8,8 @@ import ( context "context" reflect "reflect" + v1 "d7y.io/api/pkg/apis/scheduler/v1" dfnet "d7y.io/dragonfly/v2/pkg/dfnet" - scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" gomock "github.com/golang/mock/gomock" grpc "google.golang.org/grpc" ) @@ -38,7 +38,7 @@ func (m *MockClient) EXPECT() *MockClientMockRecorder { } // AnnounceTask mocks base method. -func (m *MockClient) AnnounceTask(arg0 context.Context, arg1 *scheduler.AnnounceTaskRequest, arg2 ...grpc.CallOption) error { +func (m *MockClient) AnnounceTask(arg0 context.Context, arg1 *v1.AnnounceTaskRequest, arg2 ...grpc.CallOption) error { m.ctrl.T.Helper() varargs := []interface{}{arg0, arg1} for _, a := range arg2 { @@ -85,7 +85,7 @@ func (mr *MockClientMockRecorder) GetState() *gomock.Call { } // LeaveTask mocks base method. -func (m *MockClient) LeaveTask(arg0 context.Context, arg1 *scheduler.PeerTarget, arg2 ...grpc.CallOption) error { +func (m *MockClient) LeaveTask(arg0 context.Context, arg1 *v1.PeerTarget, arg2 ...grpc.CallOption) error { m.ctrl.T.Helper() varargs := []interface{}{arg0, arg1} for _, a := range arg2 { @@ -104,14 +104,14 @@ func (mr *MockClientMockRecorder) LeaveTask(arg0, arg1 interface{}, arg2 ...inte } // RegisterPeerTask mocks base method. 
-func (m *MockClient) RegisterPeerTask(arg0 context.Context, arg1 *scheduler.PeerTaskRequest, arg2 ...grpc.CallOption) (*scheduler.RegisterResult, error) { +func (m *MockClient) RegisterPeerTask(arg0 context.Context, arg1 *v1.PeerTaskRequest, arg2 ...grpc.CallOption) (*v1.RegisterResult, error) { m.ctrl.T.Helper() varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "RegisterPeerTask", varargs...) - ret0, _ := ret[0].(*scheduler.RegisterResult) + ret0, _ := ret[0].(*v1.RegisterResult) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -124,7 +124,7 @@ func (mr *MockClientMockRecorder) RegisterPeerTask(arg0, arg1 interface{}, arg2 } // ReportPeerResult mocks base method. -func (m *MockClient) ReportPeerResult(arg0 context.Context, arg1 *scheduler.PeerResult, arg2 ...grpc.CallOption) error { +func (m *MockClient) ReportPeerResult(arg0 context.Context, arg1 *v1.PeerResult, arg2 ...grpc.CallOption) error { m.ctrl.T.Helper() varargs := []interface{}{arg0, arg1} for _, a := range arg2 { @@ -143,14 +143,14 @@ func (mr *MockClientMockRecorder) ReportPeerResult(arg0, arg1 interface{}, arg2 } // ReportPieceResult mocks base method. -func (m *MockClient) ReportPieceResult(arg0 context.Context, arg1 *scheduler.PeerTaskRequest, arg2 ...grpc.CallOption) (scheduler.Scheduler_ReportPieceResultClient, error) { +func (m *MockClient) ReportPieceResult(arg0 context.Context, arg1 *v1.PeerTaskRequest, arg2 ...grpc.CallOption) (v1.Scheduler_ReportPieceResultClient, error) { m.ctrl.T.Helper() varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "ReportPieceResult", varargs...) 
- ret0, _ := ret[0].(scheduler.Scheduler_ReportPieceResultClient) + ret0, _ := ret[0].(v1.Scheduler_ReportPieceResultClient) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -163,14 +163,14 @@ func (mr *MockClientMockRecorder) ReportPieceResult(arg0, arg1 interface{}, arg2 } // StatTask mocks base method. -func (m *MockClient) StatTask(arg0 context.Context, arg1 *scheduler.StatTaskRequest, arg2 ...grpc.CallOption) (*scheduler.Task, error) { +func (m *MockClient) StatTask(arg0 context.Context, arg1 *v1.StatTaskRequest, arg2 ...grpc.CallOption) (*v1.Task, error) { m.ctrl.T.Helper() varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "StatTask", varargs...) - ret0, _ := ret[0].(*scheduler.Task) + ret0, _ := ret[0].(*v1.Task) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/pkg/rpc/scheduler/mocks/scheduler_mock.go b/pkg/rpc/scheduler/mocks/scheduler_mock.go deleted file mode 100644 index 1b89941d8..000000000 --- a/pkg/rpc/scheduler/mocks/scheduler_mock.go +++ /dev/null @@ -1,647 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: scheduler/scheduler.pb.go - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - reflect "reflect" - - scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" - gomock "github.com/golang/mock/gomock" - grpc "google.golang.org/grpc" - metadata "google.golang.org/grpc/metadata" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// MockisRegisterResult_DirectPiece is a mock of isRegisterResult_DirectPiece interface. -type MockisRegisterResult_DirectPiece struct { - ctrl *gomock.Controller - recorder *MockisRegisterResult_DirectPieceMockRecorder -} - -// MockisRegisterResult_DirectPieceMockRecorder is the mock recorder for MockisRegisterResult_DirectPiece. 
-type MockisRegisterResult_DirectPieceMockRecorder struct { - mock *MockisRegisterResult_DirectPiece -} - -// NewMockisRegisterResult_DirectPiece creates a new mock instance. -func NewMockisRegisterResult_DirectPiece(ctrl *gomock.Controller) *MockisRegisterResult_DirectPiece { - mock := &MockisRegisterResult_DirectPiece{ctrl: ctrl} - mock.recorder = &MockisRegisterResult_DirectPieceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockisRegisterResult_DirectPiece) EXPECT() *MockisRegisterResult_DirectPieceMockRecorder { - return m.recorder -} - -// isRegisterResult_DirectPiece mocks base method. -func (m *MockisRegisterResult_DirectPiece) isRegisterResult_DirectPiece() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "isRegisterResult_DirectPiece") -} - -// isRegisterResult_DirectPiece indicates an expected call of isRegisterResult_DirectPiece. -func (mr *MockisRegisterResult_DirectPieceMockRecorder) isRegisterResult_DirectPiece() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isRegisterResult_DirectPiece", reflect.TypeOf((*MockisRegisterResult_DirectPiece)(nil).isRegisterResult_DirectPiece)) -} - -// MockisPeerPacket_ErrorDetail is a mock of isPeerPacket_ErrorDetail interface. -type MockisPeerPacket_ErrorDetail struct { - ctrl *gomock.Controller - recorder *MockisPeerPacket_ErrorDetailMockRecorder -} - -// MockisPeerPacket_ErrorDetailMockRecorder is the mock recorder for MockisPeerPacket_ErrorDetail. -type MockisPeerPacket_ErrorDetailMockRecorder struct { - mock *MockisPeerPacket_ErrorDetail -} - -// NewMockisPeerPacket_ErrorDetail creates a new mock instance. 
-func NewMockisPeerPacket_ErrorDetail(ctrl *gomock.Controller) *MockisPeerPacket_ErrorDetail { - mock := &MockisPeerPacket_ErrorDetail{ctrl: ctrl} - mock.recorder = &MockisPeerPacket_ErrorDetailMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockisPeerPacket_ErrorDetail) EXPECT() *MockisPeerPacket_ErrorDetailMockRecorder { - return m.recorder -} - -// isPeerPacket_ErrorDetail mocks base method. -func (m *MockisPeerPacket_ErrorDetail) isPeerPacket_ErrorDetail() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "isPeerPacket_ErrorDetail") -} - -// isPeerPacket_ErrorDetail indicates an expected call of isPeerPacket_ErrorDetail. -func (mr *MockisPeerPacket_ErrorDetailMockRecorder) isPeerPacket_ErrorDetail() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isPeerPacket_ErrorDetail", reflect.TypeOf((*MockisPeerPacket_ErrorDetail)(nil).isPeerPacket_ErrorDetail)) -} - -// MockisPeerResult_ErrorDetail is a mock of isPeerResult_ErrorDetail interface. -type MockisPeerResult_ErrorDetail struct { - ctrl *gomock.Controller - recorder *MockisPeerResult_ErrorDetailMockRecorder -} - -// MockisPeerResult_ErrorDetailMockRecorder is the mock recorder for MockisPeerResult_ErrorDetail. -type MockisPeerResult_ErrorDetailMockRecorder struct { - mock *MockisPeerResult_ErrorDetail -} - -// NewMockisPeerResult_ErrorDetail creates a new mock instance. -func NewMockisPeerResult_ErrorDetail(ctrl *gomock.Controller) *MockisPeerResult_ErrorDetail { - mock := &MockisPeerResult_ErrorDetail{ctrl: ctrl} - mock.recorder = &MockisPeerResult_ErrorDetailMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockisPeerResult_ErrorDetail) EXPECT() *MockisPeerResult_ErrorDetailMockRecorder { - return m.recorder -} - -// isPeerResult_ErrorDetail mocks base method. 
-func (m *MockisPeerResult_ErrorDetail) isPeerResult_ErrorDetail() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "isPeerResult_ErrorDetail") -} - -// isPeerResult_ErrorDetail indicates an expected call of isPeerResult_ErrorDetail. -func (mr *MockisPeerResult_ErrorDetailMockRecorder) isPeerResult_ErrorDetail() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isPeerResult_ErrorDetail", reflect.TypeOf((*MockisPeerResult_ErrorDetail)(nil).isPeerResult_ErrorDetail)) -} - -// MockSchedulerClient is a mock of SchedulerClient interface. -type MockSchedulerClient struct { - ctrl *gomock.Controller - recorder *MockSchedulerClientMockRecorder -} - -// MockSchedulerClientMockRecorder is the mock recorder for MockSchedulerClient. -type MockSchedulerClientMockRecorder struct { - mock *MockSchedulerClient -} - -// NewMockSchedulerClient creates a new mock instance. -func NewMockSchedulerClient(ctrl *gomock.Controller) *MockSchedulerClient { - mock := &MockSchedulerClient{ctrl: ctrl} - mock.recorder = &MockSchedulerClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSchedulerClient) EXPECT() *MockSchedulerClientMockRecorder { - return m.recorder -} - -// AnnounceTask mocks base method. -func (m *MockSchedulerClient) AnnounceTask(ctx context.Context, in *scheduler.AnnounceTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "AnnounceTask", varargs...) - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// AnnounceTask indicates an expected call of AnnounceTask. -func (mr *MockSchedulerClientMockRecorder) AnnounceTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AnnounceTask", reflect.TypeOf((*MockSchedulerClient)(nil).AnnounceTask), varargs...) -} - -// LeaveTask mocks base method. -func (m *MockSchedulerClient) LeaveTask(ctx context.Context, in *scheduler.PeerTarget, opts ...grpc.CallOption) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "LeaveTask", varargs...) - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// LeaveTask indicates an expected call of LeaveTask. -func (mr *MockSchedulerClientMockRecorder) LeaveTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveTask", reflect.TypeOf((*MockSchedulerClient)(nil).LeaveTask), varargs...) -} - -// RegisterPeerTask mocks base method. -func (m *MockSchedulerClient) RegisterPeerTask(ctx context.Context, in *scheduler.PeerTaskRequest, opts ...grpc.CallOption) (*scheduler.RegisterResult, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "RegisterPeerTask", varargs...) - ret0, _ := ret[0].(*scheduler.RegisterResult) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RegisterPeerTask indicates an expected call of RegisterPeerTask. -func (mr *MockSchedulerClientMockRecorder) RegisterPeerTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterPeerTask", reflect.TypeOf((*MockSchedulerClient)(nil).RegisterPeerTask), varargs...) -} - -// ReportPeerResult mocks base method. 
-func (m *MockSchedulerClient) ReportPeerResult(ctx context.Context, in *scheduler.PeerResult, opts ...grpc.CallOption) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ReportPeerResult", varargs...) - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReportPeerResult indicates an expected call of ReportPeerResult. -func (mr *MockSchedulerClientMockRecorder) ReportPeerResult(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeerResult", reflect.TypeOf((*MockSchedulerClient)(nil).ReportPeerResult), varargs...) -} - -// ReportPieceResult mocks base method. -func (m *MockSchedulerClient) ReportPieceResult(ctx context.Context, opts ...grpc.CallOption) (scheduler.Scheduler_ReportPieceResultClient, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ReportPieceResult", varargs...) - ret0, _ := ret[0].(scheduler.Scheduler_ReportPieceResultClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReportPieceResult indicates an expected call of ReportPieceResult. -func (mr *MockSchedulerClientMockRecorder) ReportPieceResult(ctx interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPieceResult", reflect.TypeOf((*MockSchedulerClient)(nil).ReportPieceResult), varargs...) -} - -// StatTask mocks base method. 
-func (m *MockSchedulerClient) StatTask(ctx context.Context, in *scheduler.StatTaskRequest, opts ...grpc.CallOption) (*scheduler.Task, error) { - m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} - for _, a := range opts { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "StatTask", varargs...) - ret0, _ := ret[0].(*scheduler.Task) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// StatTask indicates an expected call of StatTask. -func (mr *MockSchedulerClientMockRecorder) StatTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatTask", reflect.TypeOf((*MockSchedulerClient)(nil).StatTask), varargs...) -} - -// MockScheduler_ReportPieceResultClient is a mock of Scheduler_ReportPieceResultClient interface. -type MockScheduler_ReportPieceResultClient struct { - ctrl *gomock.Controller - recorder *MockScheduler_ReportPieceResultClientMockRecorder -} - -// MockScheduler_ReportPieceResultClientMockRecorder is the mock recorder for MockScheduler_ReportPieceResultClient. -type MockScheduler_ReportPieceResultClientMockRecorder struct { - mock *MockScheduler_ReportPieceResultClient -} - -// NewMockScheduler_ReportPieceResultClient creates a new mock instance. -func NewMockScheduler_ReportPieceResultClient(ctrl *gomock.Controller) *MockScheduler_ReportPieceResultClient { - mock := &MockScheduler_ReportPieceResultClient{ctrl: ctrl} - mock.recorder = &MockScheduler_ReportPieceResultClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockScheduler_ReportPieceResultClient) EXPECT() *MockScheduler_ReportPieceResultClientMockRecorder { - return m.recorder -} - -// CloseSend mocks base method. 
-func (m *MockScheduler_ReportPieceResultClient) CloseSend() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseSend") - ret0, _ := ret[0].(error) - return ret0 -} - -// CloseSend indicates an expected call of CloseSend. -func (mr *MockScheduler_ReportPieceResultClientMockRecorder) CloseSend() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).CloseSend)) -} - -// Context mocks base method. -func (m *MockScheduler_ReportPieceResultClient) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context. -func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Context)) -} - -// Header mocks base method. -func (m *MockScheduler_ReportPieceResultClient) Header() (metadata.MD, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Header") - ret0, _ := ret[0].(metadata.MD) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Header indicates an expected call of Header. -func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Header() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Header)) -} - -// Recv mocks base method. -func (m *MockScheduler_ReportPieceResultClient) Recv() (*scheduler.PeerPacket, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*scheduler.PeerPacket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Recv indicates an expected call of Recv. 
-func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Recv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Recv)) -} - -// RecvMsg mocks base method. -func (m_2 *MockScheduler_ReportPieceResultClient) RecvMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "RecvMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg. -func (mr *MockScheduler_ReportPieceResultClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).RecvMsg), m) -} - -// Send mocks base method. -func (m *MockScheduler_ReportPieceResultClient) Send(arg0 *scheduler.PieceResult) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send. -func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Send(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Send), arg0) -} - -// SendMsg mocks base method. -func (m_2 *MockScheduler_ReportPieceResultClient) SendMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "SendMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg. -func (mr *MockScheduler_ReportPieceResultClientMockRecorder) SendMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).SendMsg), m) -} - -// Trailer mocks base method. 
-func (m *MockScheduler_ReportPieceResultClient) Trailer() metadata.MD { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Trailer") - ret0, _ := ret[0].(metadata.MD) - return ret0 -} - -// Trailer indicates an expected call of Trailer. -func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Trailer() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Trailer)) -} - -// MockSchedulerServer is a mock of SchedulerServer interface. -type MockSchedulerServer struct { - ctrl *gomock.Controller - recorder *MockSchedulerServerMockRecorder -} - -// MockSchedulerServerMockRecorder is the mock recorder for MockSchedulerServer. -type MockSchedulerServerMockRecorder struct { - mock *MockSchedulerServer -} - -// NewMockSchedulerServer creates a new mock instance. -func NewMockSchedulerServer(ctrl *gomock.Controller) *MockSchedulerServer { - mock := &MockSchedulerServer{ctrl: ctrl} - mock.recorder = &MockSchedulerServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSchedulerServer) EXPECT() *MockSchedulerServerMockRecorder { - return m.recorder -} - -// AnnounceTask mocks base method. -func (m *MockSchedulerServer) AnnounceTask(arg0 context.Context, arg1 *scheduler.AnnounceTaskRequest) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AnnounceTask", arg0, arg1) - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// AnnounceTask indicates an expected call of AnnounceTask. -func (mr *MockSchedulerServerMockRecorder) AnnounceTask(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AnnounceTask", reflect.TypeOf((*MockSchedulerServer)(nil).AnnounceTask), arg0, arg1) -} - -// LeaveTask mocks base method. 
-func (m *MockSchedulerServer) LeaveTask(arg0 context.Context, arg1 *scheduler.PeerTarget) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LeaveTask", arg0, arg1) - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// LeaveTask indicates an expected call of LeaveTask. -func (mr *MockSchedulerServerMockRecorder) LeaveTask(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveTask", reflect.TypeOf((*MockSchedulerServer)(nil).LeaveTask), arg0, arg1) -} - -// RegisterPeerTask mocks base method. -func (m *MockSchedulerServer) RegisterPeerTask(arg0 context.Context, arg1 *scheduler.PeerTaskRequest) (*scheduler.RegisterResult, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterPeerTask", arg0, arg1) - ret0, _ := ret[0].(*scheduler.RegisterResult) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RegisterPeerTask indicates an expected call of RegisterPeerTask. -func (mr *MockSchedulerServerMockRecorder) RegisterPeerTask(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterPeerTask", reflect.TypeOf((*MockSchedulerServer)(nil).RegisterPeerTask), arg0, arg1) -} - -// ReportPeerResult mocks base method. -func (m *MockSchedulerServer) ReportPeerResult(arg0 context.Context, arg1 *scheduler.PeerResult) (*emptypb.Empty, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReportPeerResult", arg0, arg1) - ret0, _ := ret[0].(*emptypb.Empty) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReportPeerResult indicates an expected call of ReportPeerResult. 
-func (mr *MockSchedulerServerMockRecorder) ReportPeerResult(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeerResult", reflect.TypeOf((*MockSchedulerServer)(nil).ReportPeerResult), arg0, arg1) -} - -// ReportPieceResult mocks base method. -func (m *MockSchedulerServer) ReportPieceResult(arg0 scheduler.Scheduler_ReportPieceResultServer) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReportPieceResult", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// ReportPieceResult indicates an expected call of ReportPieceResult. -func (mr *MockSchedulerServerMockRecorder) ReportPieceResult(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPieceResult", reflect.TypeOf((*MockSchedulerServer)(nil).ReportPieceResult), arg0) -} - -// StatTask mocks base method. -func (m *MockSchedulerServer) StatTask(arg0 context.Context, arg1 *scheduler.StatTaskRequest) (*scheduler.Task, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StatTask", arg0, arg1) - ret0, _ := ret[0].(*scheduler.Task) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// StatTask indicates an expected call of StatTask. -func (mr *MockSchedulerServerMockRecorder) StatTask(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatTask", reflect.TypeOf((*MockSchedulerServer)(nil).StatTask), arg0, arg1) -} - -// MockScheduler_ReportPieceResultServer is a mock of Scheduler_ReportPieceResultServer interface. -type MockScheduler_ReportPieceResultServer struct { - ctrl *gomock.Controller - recorder *MockScheduler_ReportPieceResultServerMockRecorder -} - -// MockScheduler_ReportPieceResultServerMockRecorder is the mock recorder for MockScheduler_ReportPieceResultServer. 
-type MockScheduler_ReportPieceResultServerMockRecorder struct { - mock *MockScheduler_ReportPieceResultServer -} - -// NewMockScheduler_ReportPieceResultServer creates a new mock instance. -func NewMockScheduler_ReportPieceResultServer(ctrl *gomock.Controller) *MockScheduler_ReportPieceResultServer { - mock := &MockScheduler_ReportPieceResultServer{ctrl: ctrl} - mock.recorder = &MockScheduler_ReportPieceResultServerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockScheduler_ReportPieceResultServer) EXPECT() *MockScheduler_ReportPieceResultServerMockRecorder { - return m.recorder -} - -// Context mocks base method. -func (m *MockScheduler_ReportPieceResultServer) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context. -func (mr *MockScheduler_ReportPieceResultServerMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).Context)) -} - -// Recv mocks base method. -func (m *MockScheduler_ReportPieceResultServer) Recv() (*scheduler.PieceResult, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*scheduler.PieceResult) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Recv indicates an expected call of Recv. -func (mr *MockScheduler_ReportPieceResultServerMockRecorder) Recv() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).Recv)) -} - -// RecvMsg mocks base method. 
-func (m_2 *MockScheduler_ReportPieceResultServer) RecvMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "RecvMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// RecvMsg indicates an expected call of RecvMsg. -func (mr *MockScheduler_ReportPieceResultServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).RecvMsg), m) -} - -// Send mocks base method. -func (m *MockScheduler_ReportPieceResultServer) Send(arg0 *scheduler.PeerPacket) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Send", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Send indicates an expected call of Send. -func (mr *MockScheduler_ReportPieceResultServerMockRecorder) Send(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).Send), arg0) -} - -// SendHeader mocks base method. -func (m *MockScheduler_ReportPieceResultServer) SendHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendHeader indicates an expected call of SendHeader. -func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SendHeader), arg0) -} - -// SendMsg mocks base method. -func (m_2 *MockScheduler_ReportPieceResultServer) SendMsg(m interface{}) error { - m_2.ctrl.T.Helper() - ret := m_2.ctrl.Call(m_2, "SendMsg", m) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendMsg indicates an expected call of SendMsg. 
-func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SendMsg(m interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SendMsg), m) -} - -// SetHeader mocks base method. -func (m *MockScheduler_ReportPieceResultServer) SetHeader(arg0 metadata.MD) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetHeader", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetHeader indicates an expected call of SetHeader. -func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SetHeader), arg0) -} - -// SetTrailer mocks base method. -func (m *MockScheduler_ReportPieceResultServer) SetTrailer(arg0 metadata.MD) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetTrailer", arg0) -} - -// SetTrailer indicates an expected call of SetTrailer. -func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SetTrailer), arg0) -} diff --git a/pkg/rpc/scheduler/scheduler.pb.go b/pkg/rpc/scheduler/scheduler.pb.go deleted file mode 100644 index 4e846cb8f..000000000 --- a/pkg/rpc/scheduler/scheduler.pb.go +++ /dev/null @@ -1,2094 +0,0 @@ -// -// Copyright 2020 The Dragonfly Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 -// source: pkg/rpc/scheduler/scheduler.proto - -package scheduler - -import ( - context "context" - base "d7y.io/dragonfly/v2/pkg/rpc/base" - errordetails "d7y.io/dragonfly/v2/pkg/rpc/errordetails" - _ "github.com/envoyproxy/protoc-gen-validate/validate" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// PeerTaskRequest represents request of RegisterPeerTask. -type PeerTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Download url. - Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` - // URL meta info. - UrlMeta *base.UrlMeta `protobuf:"bytes,2,opt,name=url_meta,json=urlMeta,proto3" json:"url_meta,omitempty"` - // Peer id and it must be global uniqueness. - PeerId string `protobuf:"bytes,3,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` - // Peer host info. 
- PeerHost *PeerHost `protobuf:"bytes,4,opt,name=peer_host,json=peerHost,proto3" json:"peer_host,omitempty"` - // Peer host load. - HostLoad *base.HostLoad `protobuf:"bytes,5,opt,name=host_load,json=hostLoad,proto3" json:"host_load,omitempty"` - // Whether this request is caused by migration. - IsMigrating bool `protobuf:"varint,6,opt,name=is_migrating,json=isMigrating,proto3" json:"is_migrating,omitempty"` - // Pattern includes p2p, seed-peer and source. - Pattern base.Pattern `protobuf:"varint,7,opt,name=pattern,proto3,enum=base.Pattern" json:"pattern,omitempty"` - // Task id. - TaskId string `protobuf:"bytes,8,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` -} - -func (x *PeerTaskRequest) Reset() { - *x = PeerTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PeerTaskRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PeerTaskRequest) ProtoMessage() {} - -func (x *PeerTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PeerTaskRequest.ProtoReflect.Descriptor instead. 
-func (*PeerTaskRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP(), []int{0} -} - -func (x *PeerTaskRequest) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *PeerTaskRequest) GetUrlMeta() *base.UrlMeta { - if x != nil { - return x.UrlMeta - } - return nil -} - -func (x *PeerTaskRequest) GetPeerId() string { - if x != nil { - return x.PeerId - } - return "" -} - -func (x *PeerTaskRequest) GetPeerHost() *PeerHost { - if x != nil { - return x.PeerHost - } - return nil -} - -func (x *PeerTaskRequest) GetHostLoad() *base.HostLoad { - if x != nil { - return x.HostLoad - } - return nil -} - -func (x *PeerTaskRequest) GetIsMigrating() bool { - if x != nil { - return x.IsMigrating - } - return false -} - -func (x *PeerTaskRequest) GetPattern() base.Pattern { - if x != nil { - return x.Pattern - } - return base.Pattern(0) -} - -func (x *PeerTaskRequest) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -// RegisterResult represents response of RegisterPeerTask. -type RegisterResult struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Task type. - TaskType base.TaskType `protobuf:"varint,1,opt,name=task_type,json=taskType,proto3,enum=base.TaskType" json:"task_type,omitempty"` - // Task id - TaskId string `protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - // File size scope. - SizeScope base.SizeScope `protobuf:"varint,3,opt,name=size_scope,json=sizeScope,proto3,enum=base.SizeScope" json:"size_scope,omitempty"` - // Download the only piece directly for small or tiny file. - // - // Types that are assignable to DirectPiece: - // *RegisterResult_SinglePiece - // *RegisterResult_PieceContent - DirectPiece isRegisterResult_DirectPiece `protobuf_oneof:"direct_piece"` - // Task extend attribute, - // only direct_piece will carry extend attribute. 
- ExtendAttribute *base.ExtendAttribute `protobuf:"bytes,6,opt,name=extend_attribute,json=extendAttribute,proto3" json:"extend_attribute,omitempty"` -} - -func (x *RegisterResult) Reset() { - *x = RegisterResult{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RegisterResult) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RegisterResult) ProtoMessage() {} - -func (x *RegisterResult) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RegisterResult.ProtoReflect.Descriptor instead. -func (*RegisterResult) Descriptor() ([]byte, []int) { - return file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP(), []int{1} -} - -func (x *RegisterResult) GetTaskType() base.TaskType { - if x != nil { - return x.TaskType - } - return base.TaskType(0) -} - -func (x *RegisterResult) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *RegisterResult) GetSizeScope() base.SizeScope { - if x != nil { - return x.SizeScope - } - return base.SizeScope(0) -} - -func (m *RegisterResult) GetDirectPiece() isRegisterResult_DirectPiece { - if m != nil { - return m.DirectPiece - } - return nil -} - -func (x *RegisterResult) GetSinglePiece() *SinglePiece { - if x, ok := x.GetDirectPiece().(*RegisterResult_SinglePiece); ok { - return x.SinglePiece - } - return nil -} - -func (x *RegisterResult) GetPieceContent() []byte { - if x, ok := x.GetDirectPiece().(*RegisterResult_PieceContent); ok { - return x.PieceContent - } - return nil -} - -func (x *RegisterResult) GetExtendAttribute() *base.ExtendAttribute { - if x != nil { - return 
x.ExtendAttribute - } - return nil -} - -type isRegisterResult_DirectPiece interface { - isRegisterResult_DirectPiece() -} - -type RegisterResult_SinglePiece struct { - // Return single piece info when size scope is small. - SinglePiece *SinglePiece `protobuf:"bytes,4,opt,name=single_piece,json=singlePiece,proto3,oneof"` -} - -type RegisterResult_PieceContent struct { - // Return task content when size scope is tiny. - PieceContent []byte `protobuf:"bytes,5,opt,name=piece_content,json=pieceContent,proto3,oneof"` -} - -func (*RegisterResult_SinglePiece) isRegisterResult_DirectPiece() {} - -func (*RegisterResult_PieceContent) isRegisterResult_DirectPiece() {} - -// SinglePiece represents information of single piece. -type SinglePiece struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Destination peer id. - DstPid string `protobuf:"bytes,1,opt,name=dst_pid,json=dstPid,proto3" json:"dst_pid,omitempty"` - // Destination download address. - DstAddr string `protobuf:"bytes,2,opt,name=dst_addr,json=dstAddr,proto3" json:"dst_addr,omitempty"` - // Piece info. 
- PieceInfo *base.PieceInfo `protobuf:"bytes,3,opt,name=piece_info,json=pieceInfo,proto3" json:"piece_info,omitempty"` -} - -func (x *SinglePiece) Reset() { - *x = SinglePiece{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SinglePiece) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SinglePiece) ProtoMessage() {} - -func (x *SinglePiece) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SinglePiece.ProtoReflect.Descriptor instead. -func (*SinglePiece) Descriptor() ([]byte, []int) { - return file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP(), []int{2} -} - -func (x *SinglePiece) GetDstPid() string { - if x != nil { - return x.DstPid - } - return "" -} - -func (x *SinglePiece) GetDstAddr() string { - if x != nil { - return x.DstAddr - } - return "" -} - -func (x *SinglePiece) GetPieceInfo() *base.PieceInfo { - if x != nil { - return x.PieceInfo - } - return nil -} - -// PeerHost represents information of peer host. -type PeerHost struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Peer host id. - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // peer host ip - Ip string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` - // Port of grpc service. - RpcPort int32 `protobuf:"varint,3,opt,name=rpc_port,json=rpcPort,proto3" json:"rpc_port,omitempty"` - // Port of download server. - DownPort int32 `protobuf:"varint,4,opt,name=down_port,json=downPort,proto3" json:"down_port,omitempty"` - // Peer hostname. 
- HostName string `protobuf:"bytes,5,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` - // Security domain for network. - SecurityDomain string `protobuf:"bytes,6,opt,name=security_domain,json=securityDomain,proto3" json:"security_domain,omitempty"` - // Location path(area|country|province|city|...). - Location string `protobuf:"bytes,7,opt,name=location,proto3" json:"location,omitempty"` - // IDC where the peer host is located - Idc string `protobuf:"bytes,8,opt,name=idc,proto3" json:"idc,omitempty"` - // Network topology(switch|router|...). - NetTopology string `protobuf:"bytes,9,opt,name=net_topology,json=netTopology,proto3" json:"net_topology,omitempty"` -} - -func (x *PeerHost) Reset() { - *x = PeerHost{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PeerHost) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PeerHost) ProtoMessage() {} - -func (x *PeerHost) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PeerHost.ProtoReflect.Descriptor instead. 
-func (*PeerHost) Descriptor() ([]byte, []int) { - return file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP(), []int{3} -} - -func (x *PeerHost) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *PeerHost) GetIp() string { - if x != nil { - return x.Ip - } - return "" -} - -func (x *PeerHost) GetRpcPort() int32 { - if x != nil { - return x.RpcPort - } - return 0 -} - -func (x *PeerHost) GetDownPort() int32 { - if x != nil { - return x.DownPort - } - return 0 -} - -func (x *PeerHost) GetHostName() string { - if x != nil { - return x.HostName - } - return "" -} - -func (x *PeerHost) GetSecurityDomain() string { - if x != nil { - return x.SecurityDomain - } - return "" -} - -func (x *PeerHost) GetLocation() string { - if x != nil { - return x.Location - } - return "" -} - -func (x *PeerHost) GetIdc() string { - if x != nil { - return x.Idc - } - return "" -} - -func (x *PeerHost) GetNetTopology() string { - if x != nil { - return x.NetTopology - } - return "" -} - -// PieceResult represents request of ReportPieceResult. -type PieceResult struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Task id. - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - // Source peer id. - SrcPid string `protobuf:"bytes,2,opt,name=src_pid,json=srcPid,proto3" json:"src_pid,omitempty"` - // Destination peer id. - DstPid string `protobuf:"bytes,3,opt,name=dst_pid,json=dstPid,proto3" json:"dst_pid,omitempty"` - // Piece info. - PieceInfo *base.PieceInfo `protobuf:"bytes,4,opt,name=piece_info,json=pieceInfo,proto3" json:"piece_info,omitempty"` - // Begin time of the piece downloading. - BeginTime uint64 `protobuf:"varint,5,opt,name=begin_time,json=beginTime,proto3" json:"begin_time,omitempty"` - // End time of the piece downloading. 
- EndTime uint64 `protobuf:"varint,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - // Whether the piece downloading is successfully. - Success bool `protobuf:"varint,7,opt,name=success,proto3" json:"success,omitempty"` - // Result code. - Code base.Code `protobuf:"varint,8,opt,name=code,proto3,enum=base.Code" json:"code,omitempty"` - // Peer host load. - HostLoad *base.HostLoad `protobuf:"bytes,9,opt,name=host_load,json=hostLoad,proto3" json:"host_load,omitempty"` - // Finished count. - FinishedCount int32 `protobuf:"varint,10,opt,name=finished_count,json=finishedCount,proto3" json:"finished_count,omitempty"` - // Task extend attribute, - // only first success back source piece will carry extend attribute. - ExtendAttribute *base.ExtendAttribute `protobuf:"bytes,11,opt,name=extend_attribute,json=extendAttribute,proto3" json:"extend_attribute,omitempty"` -} - -func (x *PieceResult) Reset() { - *x = PieceResult{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PieceResult) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PieceResult) ProtoMessage() {} - -func (x *PieceResult) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PieceResult.ProtoReflect.Descriptor instead. 
-func (*PieceResult) Descriptor() ([]byte, []int) { - return file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP(), []int{4} -} - -func (x *PieceResult) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *PieceResult) GetSrcPid() string { - if x != nil { - return x.SrcPid - } - return "" -} - -func (x *PieceResult) GetDstPid() string { - if x != nil { - return x.DstPid - } - return "" -} - -func (x *PieceResult) GetPieceInfo() *base.PieceInfo { - if x != nil { - return x.PieceInfo - } - return nil -} - -func (x *PieceResult) GetBeginTime() uint64 { - if x != nil { - return x.BeginTime - } - return 0 -} - -func (x *PieceResult) GetEndTime() uint64 { - if x != nil { - return x.EndTime - } - return 0 -} - -func (x *PieceResult) GetSuccess() bool { - if x != nil { - return x.Success - } - return false -} - -func (x *PieceResult) GetCode() base.Code { - if x != nil { - return x.Code - } - return base.Code(0) -} - -func (x *PieceResult) GetHostLoad() *base.HostLoad { - if x != nil { - return x.HostLoad - } - return nil -} - -func (x *PieceResult) GetFinishedCount() int32 { - if x != nil { - return x.FinishedCount - } - return 0 -} - -func (x *PieceResult) GetExtendAttribute() *base.ExtendAttribute { - if x != nil { - return x.ExtendAttribute - } - return nil -} - -// PeerPacket represents response of ReportPieceResult. -type PeerPacket struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Task id. - TaskId string `protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - // Source peer id. - SrcPid string `protobuf:"bytes,3,opt,name=src_pid,json=srcPid,proto3" json:"src_pid,omitempty"` - // Concurrent downloading count from main peer. - ParallelCount int32 `protobuf:"varint,4,opt,name=parallel_count,json=parallelCount,proto3" json:"parallel_count,omitempty"` - // Main peer. 
- MainPeer *PeerPacket_DestPeer `protobuf:"bytes,5,opt,name=main_peer,json=mainPeer,proto3" json:"main_peer,omitempty"` - // Candidate peers. - CandidatePeers []*PeerPacket_DestPeer `protobuf:"bytes,6,rep,name=candidate_peers,json=candidatePeers,proto3" json:"candidate_peers,omitempty"` - // Result code. - Code base.Code `protobuf:"varint,7,opt,name=code,proto3,enum=base.Code" json:"code,omitempty"` - // Error detail. - // - // Types that are assignable to ErrorDetail: - // *PeerPacket_SourceError - ErrorDetail isPeerPacket_ErrorDetail `protobuf_oneof:"error_detail"` -} - -func (x *PeerPacket) Reset() { - *x = PeerPacket{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PeerPacket) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PeerPacket) ProtoMessage() {} - -func (x *PeerPacket) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PeerPacket.ProtoReflect.Descriptor instead. 
-func (*PeerPacket) Descriptor() ([]byte, []int) { - return file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP(), []int{5} -} - -func (x *PeerPacket) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *PeerPacket) GetSrcPid() string { - if x != nil { - return x.SrcPid - } - return "" -} - -func (x *PeerPacket) GetParallelCount() int32 { - if x != nil { - return x.ParallelCount - } - return 0 -} - -func (x *PeerPacket) GetMainPeer() *PeerPacket_DestPeer { - if x != nil { - return x.MainPeer - } - return nil -} - -func (x *PeerPacket) GetCandidatePeers() []*PeerPacket_DestPeer { - if x != nil { - return x.CandidatePeers - } - return nil -} - -func (x *PeerPacket) GetCode() base.Code { - if x != nil { - return x.Code - } - return base.Code(0) -} - -func (m *PeerPacket) GetErrorDetail() isPeerPacket_ErrorDetail { - if m != nil { - return m.ErrorDetail - } - return nil -} - -func (x *PeerPacket) GetSourceError() *errordetails.SourceError { - if x, ok := x.GetErrorDetail().(*PeerPacket_SourceError); ok { - return x.SourceError - } - return nil -} - -type isPeerPacket_ErrorDetail interface { - isPeerPacket_ErrorDetail() -} - -type PeerPacket_SourceError struct { - // Source error. - SourceError *errordetails.SourceError `protobuf:"bytes,8,opt,name=source_error,json=sourceError,proto3,oneof"` -} - -func (*PeerPacket_SourceError) isPeerPacket_ErrorDetail() {} - -// PeerResult represents response of ReportPeerResult. -type PeerResult struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Task id. - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - // Peer id. - PeerId string `protobuf:"bytes,2,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` - // Source host ip. - SrcIp string `protobuf:"bytes,3,opt,name=src_ip,json=srcIp,proto3" json:"src_ip,omitempty"` - // Security domain. 
- SecurityDomain string `protobuf:"bytes,4,opt,name=security_domain,json=securityDomain,proto3" json:"security_domain,omitempty"` - // IDC where the peer host is located - Idc string `protobuf:"bytes,5,opt,name=idc,proto3" json:"idc,omitempty"` - // Download url. - Url string `protobuf:"bytes,6,opt,name=url,proto3" json:"url,omitempty"` - // Total content length. - ContentLength int64 `protobuf:"varint,7,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"` - // Total network traffic. - Traffic uint64 `protobuf:"varint,8,opt,name=traffic,proto3" json:"traffic,omitempty"` - // Total cost time. - Cost uint32 `protobuf:"varint,9,opt,name=cost,proto3" json:"cost,omitempty"` - // Whether peer downloading file is successfully. - Success bool `protobuf:"varint,10,opt,name=success,proto3" json:"success,omitempty"` - // Result code. - Code base.Code `protobuf:"varint,11,opt,name=code,proto3,enum=base.Code" json:"code,omitempty"` - // Task total piece count. - TotalPieceCount int32 `protobuf:"varint,12,opt,name=total_piece_count,json=totalPieceCount,proto3" json:"total_piece_count,omitempty"` - // Error detail. 
- // - // Types that are assignable to ErrorDetail: - // *PeerResult_SourceError - ErrorDetail isPeerResult_ErrorDetail `protobuf_oneof:"error_detail"` -} - -func (x *PeerResult) Reset() { - *x = PeerResult{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PeerResult) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PeerResult) ProtoMessage() {} - -func (x *PeerResult) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PeerResult.ProtoReflect.Descriptor instead. -func (*PeerResult) Descriptor() ([]byte, []int) { - return file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP(), []int{6} -} - -func (x *PeerResult) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *PeerResult) GetPeerId() string { - if x != nil { - return x.PeerId - } - return "" -} - -func (x *PeerResult) GetSrcIp() string { - if x != nil { - return x.SrcIp - } - return "" -} - -func (x *PeerResult) GetSecurityDomain() string { - if x != nil { - return x.SecurityDomain - } - return "" -} - -func (x *PeerResult) GetIdc() string { - if x != nil { - return x.Idc - } - return "" -} - -func (x *PeerResult) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *PeerResult) GetContentLength() int64 { - if x != nil { - return x.ContentLength - } - return 0 -} - -func (x *PeerResult) GetTraffic() uint64 { - if x != nil { - return x.Traffic - } - return 0 -} - -func (x *PeerResult) GetCost() uint32 { - if x != nil { - return x.Cost - } - return 0 -} - -func (x *PeerResult) GetSuccess() bool { - if x != nil { - return 
x.Success - } - return false -} - -func (x *PeerResult) GetCode() base.Code { - if x != nil { - return x.Code - } - return base.Code(0) -} - -func (x *PeerResult) GetTotalPieceCount() int32 { - if x != nil { - return x.TotalPieceCount - } - return 0 -} - -func (m *PeerResult) GetErrorDetail() isPeerResult_ErrorDetail { - if m != nil { - return m.ErrorDetail - } - return nil -} - -func (x *PeerResult) GetSourceError() *errordetails.SourceError { - if x, ok := x.GetErrorDetail().(*PeerResult_SourceError); ok { - return x.SourceError - } - return nil -} - -type isPeerResult_ErrorDetail interface { - isPeerResult_ErrorDetail() -} - -type PeerResult_SourceError struct { - // Source error. - SourceError *errordetails.SourceError `protobuf:"bytes,13,opt,name=source_error,json=sourceError,proto3,oneof"` -} - -func (*PeerResult_SourceError) isPeerResult_ErrorDetail() {} - -// PeerTarget represents request of LeaveTask. -type PeerTarget struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Task id. - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - // Peer id. 
- PeerId string `protobuf:"bytes,2,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` -} - -func (x *PeerTarget) Reset() { - *x = PeerTarget{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PeerTarget) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PeerTarget) ProtoMessage() {} - -func (x *PeerTarget) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PeerTarget.ProtoReflect.Descriptor instead. -func (*PeerTarget) Descriptor() ([]byte, []int) { - return file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP(), []int{7} -} - -func (x *PeerTarget) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *PeerTarget) GetPeerId() string { - if x != nil { - return x.PeerId - } - return "" -} - -// StatTaskRequest represents request of StatTask. -type StatTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Task id. 
- TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` -} - -func (x *StatTaskRequest) Reset() { - *x = StatTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StatTaskRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StatTaskRequest) ProtoMessage() {} - -func (x *StatTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StatTaskRequest.ProtoReflect.Descriptor instead. -func (*StatTaskRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP(), []int{8} -} - -func (x *StatTaskRequest) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -// Task represents download task. -type Task struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Task id. - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Task type. - Type base.TaskType `protobuf:"varint,2,opt,name=type,proto3,enum=base.TaskType" json:"type,omitempty"` - // Task content length. - ContentLength int64 `protobuf:"varint,3,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"` - // Task total piece count. - TotalPieceCount int32 `protobuf:"varint,4,opt,name=total_piece_count,json=totalPieceCount,proto3" json:"total_piece_count,omitempty"` - // Task state. - State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` - // Task peer count. 
- PeerCount int32 `protobuf:"varint,6,opt,name=peer_count,json=peerCount,proto3" json:"peer_count,omitempty"` - // Task contains available peer. - HasAvailablePeer bool `protobuf:"varint,7,opt,name=hasAvailablePeer,proto3" json:"hasAvailablePeer,omitempty"` -} - -func (x *Task) Reset() { - *x = Task{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Task) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Task) ProtoMessage() {} - -func (x *Task) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Task.ProtoReflect.Descriptor instead. -func (*Task) Descriptor() ([]byte, []int) { - return file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP(), []int{9} -} - -func (x *Task) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *Task) GetType() base.TaskType { - if x != nil { - return x.Type - } - return base.TaskType(0) -} - -func (x *Task) GetContentLength() int64 { - if x != nil { - return x.ContentLength - } - return 0 -} - -func (x *Task) GetTotalPieceCount() int32 { - if x != nil { - return x.TotalPieceCount - } - return 0 -} - -func (x *Task) GetState() string { - if x != nil { - return x.State - } - return "" -} - -func (x *Task) GetPeerCount() int32 { - if x != nil { - return x.PeerCount - } - return 0 -} - -func (x *Task) GetHasAvailablePeer() bool { - if x != nil { - return x.HasAvailablePeer - } - return false -} - -// AnnounceTaskRequest represents request of AnnounceTask. 
-type AnnounceTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Task id. - TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - // Download url. - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - // URL meta info. - UrlMeta *base.UrlMeta `protobuf:"bytes,3,opt,name=url_meta,json=urlMeta,proto3" json:"url_meta,omitempty"` - // Peer host info. - PeerHost *PeerHost `protobuf:"bytes,4,opt,name=peer_host,json=peerHost,proto3" json:"peer_host,omitempty"` - // Task piece info. - PiecePacket *base.PiecePacket `protobuf:"bytes,5,opt,name=piece_packet,json=piecePacket,proto3" json:"piece_packet,omitempty"` - // Task type. - TaskType base.TaskType `protobuf:"varint,6,opt,name=task_type,json=taskType,proto3,enum=base.TaskType" json:"task_type,omitempty"` -} - -func (x *AnnounceTaskRequest) Reset() { - *x = AnnounceTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AnnounceTaskRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AnnounceTaskRequest) ProtoMessage() {} - -func (x *AnnounceTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AnnounceTaskRequest.ProtoReflect.Descriptor instead. 
-func (*AnnounceTaskRequest) Descriptor() ([]byte, []int) { - return file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP(), []int{10} -} - -func (x *AnnounceTaskRequest) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *AnnounceTaskRequest) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *AnnounceTaskRequest) GetUrlMeta() *base.UrlMeta { - if x != nil { - return x.UrlMeta - } - return nil -} - -func (x *AnnounceTaskRequest) GetPeerHost() *PeerHost { - if x != nil { - return x.PeerHost - } - return nil -} - -func (x *AnnounceTaskRequest) GetPiecePacket() *base.PiecePacket { - if x != nil { - return x.PiecePacket - } - return nil -} - -func (x *AnnounceTaskRequest) GetTaskType() base.TaskType { - if x != nil { - return x.TaskType - } - return base.TaskType(0) -} - -type PeerPacket_DestPeer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Destination ip. - Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` - // Port of grpc service. - RpcPort int32 `protobuf:"varint,2,opt,name=rpc_port,json=rpcPort,proto3" json:"rpc_port,omitempty"` - // Destination peer id. 
- PeerId string `protobuf:"bytes,3,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` -} - -func (x *PeerPacket_DestPeer) Reset() { - *x = PeerPacket_DestPeer{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PeerPacket_DestPeer) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PeerPacket_DestPeer) ProtoMessage() {} - -func (x *PeerPacket_DestPeer) ProtoReflect() protoreflect.Message { - mi := &file_pkg_rpc_scheduler_scheduler_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PeerPacket_DestPeer.ProtoReflect.Descriptor instead. -func (*PeerPacket_DestPeer) Descriptor() ([]byte, []int) { - return file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP(), []int{5, 0} -} - -func (x *PeerPacket_DestPeer) GetIp() string { - if x != nil { - return x.Ip - } - return "" -} - -func (x *PeerPacket_DestPeer) GetRpcPort() int32 { - if x != nil { - return x.RpcPort - } - return 0 -} - -func (x *PeerPacket_DestPeer) GetPeerId() string { - if x != nil { - return x.PeerId - } - return "" -} - -var File_pkg_rpc_scheduler_scheduler_proto protoreflect.FileDescriptor - -var file_pkg_rpc_scheduler_scheduler_proto_rawDesc = []byte{ - 0x0a, 0x21, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x72, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x1a, 0x17, - 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x62, 0x61, 0x73, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, - 
0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc7, 0x02, 0x0a, 0x0f, 0x50, 0x65, 0x65, 0x72, - 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x03, 0x75, - 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x72, 0x03, 0x88, - 0x01, 0x01, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x32, 0x0a, 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x62, 0x61, 0x73, 0x65, - 0x2e, 0x55, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x07, 0x75, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x20, 0x0a, 0x07, 0x70, - 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, 0x0a, - 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, - 0x72, 0x48, 0x6f, 0x73, 0x74, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x12, - 0x2b, 0x0a, 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x4c, 0x6f, - 0x61, 0x64, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x12, 0x21, 0x0a, 0x0c, - 0x69, 0x73, 0x5f, 0x6d, 0x69, 0x67, 
0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x12, - 0x27, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x0d, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x52, - 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, - 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, - 0x64, 0x22, 0xcf, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x54, - 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x74, 0x61, 0x73, - 0x6b, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0a, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x73, 0x63, 0x6f, 0x70, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x53, - 0x69, 0x7a, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x09, 0x73, 0x69, 0x7a, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x3b, 0x0a, - 0x0c, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x69, 0x65, 0x63, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, - 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x69, 0x65, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x73, - 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x69, 0x65, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x70, 0x69, - 0x65, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 
0x05, 0x20, 0x01, 0x28, - 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x69, 0x65, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x12, 0x40, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x61, - 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x69, - 0x65, 0x63, 0x65, 0x22, 0x83, 0x01, 0x0a, 0x0b, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x69, - 0x65, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x07, 0x64, 0x73, 0x74, 0x5f, 0x70, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x64, - 0x73, 0x74, 0x50, 0x69, 0x64, 0x12, 0x22, 0x0a, 0x08, 0x64, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, - 0x52, 0x07, 0x64, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x12, 0x2e, 0x0a, 0x0a, 0x70, 0x69, 0x65, - 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, - 0x70, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xb0, 0x02, 0x0a, 0x08, 0x50, 0x65, - 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x72, 0x02, 0x70, 0x01, 0x52, 0x02, 0x69, 0x70, 0x12, 0x27, 0x0a, 0x08, 0x72, 0x70, 0x63, 0x5f, - 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x1a, - 0x07, 0x10, 
0xff, 0xff, 0x03, 0x28, 0x80, 0x08, 0x52, 0x07, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, - 0x74, 0x12, 0x29, 0x0a, 0x09, 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x05, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x1a, 0x07, 0x10, 0xff, 0xff, 0x03, 0x28, - 0x80, 0x08, 0x52, 0x08, 0x64, 0x6f, 0x77, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x24, 0x0a, 0x09, - 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x68, 0x01, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, - 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x63, - 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x63, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x63, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x74, - 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x6e, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x22, 0xa4, 0x03, 0x0a, - 0x0b, 0x50, 0x69, 0x65, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x20, 0x0a, 0x07, - 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x20, - 0x0a, 0x07, 0x73, 0x72, 0x63, 0x5f, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x72, 0x63, 0x50, 0x69, 0x64, - 0x12, 0x17, 0x0a, 0x07, 0x64, 0x73, 0x74, 0x5f, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x64, 0x73, 0x74, 0x50, 0x69, 
0x64, 0x12, 0x2e, 0x0a, 0x0a, 0x70, 0x69, 0x65, - 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, - 0x70, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x65, 0x67, - 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, - 0x65, 0x67, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, - 0x69, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x1e, 0x0a, - 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x62, 0x61, - 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x2b, 0x0a, - 0x09, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0e, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x4c, 0x6f, 0x61, 0x64, - 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x69, - 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0d, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x40, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x61, - 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x22, 0xe6, 0x03, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x50, 0x61, 
0x63, 0x6b, - 0x65, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x74, 0x61, - 0x73, 0x6b, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x73, 0x72, 0x63, 0x5f, 0x70, 0x69, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, - 0x73, 0x72, 0x63, 0x50, 0x69, 0x64, 0x12, 0x2e, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, - 0x65, 0x6c, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x1a, 0x02, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, - 0x6c, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x09, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, - 0x65, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x63, 0x68, 0x65, - 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, - 0x2e, 0x44, 0x65, 0x73, 0x74, 0x50, 0x65, 0x65, 0x72, 0x52, 0x08, 0x6d, 0x61, 0x69, 0x6e, 0x50, - 0x65, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x0f, 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, - 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x50, 0x61, 0x63, - 0x6b, 0x65, 0x74, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x50, 0x65, 0x65, 0x72, 0x52, 0x0e, 0x63, 0x61, - 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x1e, 0x0a, 0x04, - 0x63, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x62, 0x61, 0x73, - 0x65, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x3e, 0x0a, 0x0c, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, - 0x73, 0x2e, 0x53, 0x6f, 
0x75, 0x72, 0x63, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x00, 0x52, - 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x6e, 0x0a, 0x08, - 0x44, 0x65, 0x73, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x70, 0x01, 0x52, 0x02, 0x69, - 0x70, 0x12, 0x27, 0x0a, 0x08, 0x72, 0x70, 0x63, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x1a, 0x07, 0x10, 0xff, 0xff, 0x03, 0x28, 0x80, - 0x08, 0x52, 0x07, 0x72, 0x70, 0x63, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x70, 0x65, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x42, 0x0e, 0x0a, 0x0c, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0xf6, 0x03, 0x0a, - 0x0a, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x20, 0x0a, - 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, - 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, - 0x1e, 0x0a, 0x06, 0x73, 0x72, 0x63, 0x5f, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x70, 0x01, 0x52, 0x05, 0x73, 0x72, 0x63, 0x49, 0x70, 0x12, - 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x6f, 0x6d, 0x61, - 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x63, 
0x12, 0x1a, 0x0a, 0x03, 0x75, 0x72, - 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x72, 0x03, 0x88, 0x01, - 0x01, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x37, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x42, 0x10, - 0xfa, 0x42, 0x0d, 0x22, 0x0b, 0x28, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, - 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, - 0x18, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x07, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x73, - 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x1e, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x64, - 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x3c, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x5f, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x05, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x1a, 0x0b, 0x28, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0x01, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x69, 0x65, 0x63, 0x65, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x0e, 0x0a, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x64, - 
0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x50, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x54, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x74, - 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, - 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x22, 0x33, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x74, 0x54, - 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x22, 0x9b, 0x02, 0x0a, - 0x04, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, 0x12, 0x22, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x62, - 0x61, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, - 0x02, 0x28, 0x01, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x12, 0x33, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x69, 0x65, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x1a, 0x02, 0x28, 0x01, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x69, 0x65, - 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 
0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, - 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x1a, - 0x02, 0x28, 0x00, 0x52, 0x09, 0x70, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, - 0x0a, 0x10, 0x68, 0x61, 0x73, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x65, - 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x68, 0x61, 0x73, 0x41, 0x76, 0x61, - 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x65, 0x65, 0x72, 0x22, 0xa9, 0x02, 0x0a, 0x13, 0x41, - 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x74, 0x61, - 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0x88, 0x01, 0x01, 0xd0, 0x01, 0x01, 0x52, 0x03, - 0x75, 0x72, 0x6c, 0x12, 0x32, 0x0a, 0x08, 0x75, 0x72, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x55, 0x72, 0x6c, - 0x4d, 0x65, 0x74, 0x61, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, - 0x75, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x30, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, - 0x68, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x63, 0x68, - 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x52, - 0x08, 0x70, 0x65, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0c, 0x70, 0x69, 0x65, - 0x63, 0x65, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x11, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 
0x50, 0x61, 0x63, 0x6b, - 0x65, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x70, 0x69, - 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2b, 0x0a, 0x09, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x62, - 0x61, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x74, 0x61, - 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x32, 0x9e, 0x03, 0x0a, 0x09, 0x53, 0x63, 0x68, 0x65, 0x64, - 0x75, 0x6c, 0x65, 0x72, 0x12, 0x49, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, - 0x50, 0x65, 0x65, 0x72, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x1a, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, - 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, - 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, - 0x46, 0x0a, 0x11, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x69, 0x65, 0x63, 0x65, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x12, 0x16, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, - 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x15, 0x2e, 0x73, - 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x50, 0x61, 0x63, - 0x6b, 0x65, 0x74, 0x28, 0x01, 0x30, 0x01, 0x12, 0x41, 0x0a, 0x10, 0x52, 0x65, 0x70, 0x6f, 0x72, - 0x74, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x15, 0x2e, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x09, 0x4c, 0x65, - 0x61, 0x76, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x15, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 
0x72, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x1a, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x37, 0x0a, 0x08, 0x53, 0x74, 0x61, 0x74, 0x54, 0x61, - 0x73, 0x6b, 0x12, 0x1a, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, - 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x12, - 0x46, 0x0a, 0x0c, 0x41, 0x6e, 0x6e, 0x6f, 0x75, 0x6e, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, - 0x1e, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, - 0x75, 0x6e, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x27, 0x5a, 0x25, 0x64, 0x37, 0x79, 0x2e, 0x69, - 0x6f, 0x2f, 0x64, 0x72, 0x61, 0x67, 0x6f, 0x6e, 0x66, 0x6c, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, - 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_pkg_rpc_scheduler_scheduler_proto_rawDescOnce sync.Once - file_pkg_rpc_scheduler_scheduler_proto_rawDescData = file_pkg_rpc_scheduler_scheduler_proto_rawDesc -) - -func file_pkg_rpc_scheduler_scheduler_proto_rawDescGZIP() []byte { - file_pkg_rpc_scheduler_scheduler_proto_rawDescOnce.Do(func() { - file_pkg_rpc_scheduler_scheduler_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_rpc_scheduler_scheduler_proto_rawDescData) - }) - return file_pkg_rpc_scheduler_scheduler_proto_rawDescData -} - -var file_pkg_rpc_scheduler_scheduler_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var file_pkg_rpc_scheduler_scheduler_proto_goTypes = []interface{}{ - 
(*PeerTaskRequest)(nil), // 0: scheduler.PeerTaskRequest - (*RegisterResult)(nil), // 1: scheduler.RegisterResult - (*SinglePiece)(nil), // 2: scheduler.SinglePiece - (*PeerHost)(nil), // 3: scheduler.PeerHost - (*PieceResult)(nil), // 4: scheduler.PieceResult - (*PeerPacket)(nil), // 5: scheduler.PeerPacket - (*PeerResult)(nil), // 6: scheduler.PeerResult - (*PeerTarget)(nil), // 7: scheduler.PeerTarget - (*StatTaskRequest)(nil), // 8: scheduler.StatTaskRequest - (*Task)(nil), // 9: scheduler.Task - (*AnnounceTaskRequest)(nil), // 10: scheduler.AnnounceTaskRequest - (*PeerPacket_DestPeer)(nil), // 11: scheduler.PeerPacket.DestPeer - (*base.UrlMeta)(nil), // 12: base.UrlMeta - (*base.HostLoad)(nil), // 13: base.HostLoad - (base.Pattern)(0), // 14: base.Pattern - (base.TaskType)(0), // 15: base.TaskType - (base.SizeScope)(0), // 16: base.SizeScope - (*base.ExtendAttribute)(nil), // 17: base.ExtendAttribute - (*base.PieceInfo)(nil), // 18: base.PieceInfo - (base.Code)(0), // 19: base.Code - (*errordetails.SourceError)(nil), // 20: errordetails.SourceError - (*base.PiecePacket)(nil), // 21: base.PiecePacket - (*emptypb.Empty)(nil), // 22: google.protobuf.Empty -} -var file_pkg_rpc_scheduler_scheduler_proto_depIdxs = []int32{ - 12, // 0: scheduler.PeerTaskRequest.url_meta:type_name -> base.UrlMeta - 3, // 1: scheduler.PeerTaskRequest.peer_host:type_name -> scheduler.PeerHost - 13, // 2: scheduler.PeerTaskRequest.host_load:type_name -> base.HostLoad - 14, // 3: scheduler.PeerTaskRequest.pattern:type_name -> base.Pattern - 15, // 4: scheduler.RegisterResult.task_type:type_name -> base.TaskType - 16, // 5: scheduler.RegisterResult.size_scope:type_name -> base.SizeScope - 2, // 6: scheduler.RegisterResult.single_piece:type_name -> scheduler.SinglePiece - 17, // 7: scheduler.RegisterResult.extend_attribute:type_name -> base.ExtendAttribute - 18, // 8: scheduler.SinglePiece.piece_info:type_name -> base.PieceInfo - 18, // 9: scheduler.PieceResult.piece_info:type_name -> 
base.PieceInfo - 19, // 10: scheduler.PieceResult.code:type_name -> base.Code - 13, // 11: scheduler.PieceResult.host_load:type_name -> base.HostLoad - 17, // 12: scheduler.PieceResult.extend_attribute:type_name -> base.ExtendAttribute - 11, // 13: scheduler.PeerPacket.main_peer:type_name -> scheduler.PeerPacket.DestPeer - 11, // 14: scheduler.PeerPacket.candidate_peers:type_name -> scheduler.PeerPacket.DestPeer - 19, // 15: scheduler.PeerPacket.code:type_name -> base.Code - 20, // 16: scheduler.PeerPacket.source_error:type_name -> errordetails.SourceError - 19, // 17: scheduler.PeerResult.code:type_name -> base.Code - 20, // 18: scheduler.PeerResult.source_error:type_name -> errordetails.SourceError - 15, // 19: scheduler.Task.type:type_name -> base.TaskType - 12, // 20: scheduler.AnnounceTaskRequest.url_meta:type_name -> base.UrlMeta - 3, // 21: scheduler.AnnounceTaskRequest.peer_host:type_name -> scheduler.PeerHost - 21, // 22: scheduler.AnnounceTaskRequest.piece_packet:type_name -> base.PiecePacket - 15, // 23: scheduler.AnnounceTaskRequest.task_type:type_name -> base.TaskType - 0, // 24: scheduler.Scheduler.RegisterPeerTask:input_type -> scheduler.PeerTaskRequest - 4, // 25: scheduler.Scheduler.ReportPieceResult:input_type -> scheduler.PieceResult - 6, // 26: scheduler.Scheduler.ReportPeerResult:input_type -> scheduler.PeerResult - 7, // 27: scheduler.Scheduler.LeaveTask:input_type -> scheduler.PeerTarget - 8, // 28: scheduler.Scheduler.StatTask:input_type -> scheduler.StatTaskRequest - 10, // 29: scheduler.Scheduler.AnnounceTask:input_type -> scheduler.AnnounceTaskRequest - 1, // 30: scheduler.Scheduler.RegisterPeerTask:output_type -> scheduler.RegisterResult - 5, // 31: scheduler.Scheduler.ReportPieceResult:output_type -> scheduler.PeerPacket - 22, // 32: scheduler.Scheduler.ReportPeerResult:output_type -> google.protobuf.Empty - 22, // 33: scheduler.Scheduler.LeaveTask:output_type -> google.protobuf.Empty - 9, // 34: scheduler.Scheduler.StatTask:output_type 
-> scheduler.Task - 22, // 35: scheduler.Scheduler.AnnounceTask:output_type -> google.protobuf.Empty - 30, // [30:36] is the sub-list for method output_type - 24, // [24:30] is the sub-list for method input_type - 24, // [24:24] is the sub-list for extension type_name - 24, // [24:24] is the sub-list for extension extendee - 0, // [0:24] is the sub-list for field type_name -} - -func init() { file_pkg_rpc_scheduler_scheduler_proto_init() } -func file_pkg_rpc_scheduler_scheduler_proto_init() { - if File_pkg_rpc_scheduler_scheduler_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_pkg_rpc_scheduler_scheduler_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_scheduler_scheduler_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RegisterResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_scheduler_scheduler_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SinglePiece); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_scheduler_scheduler_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerHost); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_scheduler_scheduler_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PieceResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_pkg_rpc_scheduler_scheduler_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerPacket); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_scheduler_scheduler_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_scheduler_scheduler_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerTarget); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_scheduler_scheduler_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StatTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_scheduler_scheduler_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Task); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_scheduler_scheduler_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AnnounceTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_rpc_scheduler_scheduler_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerPacket_DestPeer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_pkg_rpc_scheduler_scheduler_proto_msgTypes[1].OneofWrappers = []interface{}{ - 
(*RegisterResult_SinglePiece)(nil), - (*RegisterResult_PieceContent)(nil), - } - file_pkg_rpc_scheduler_scheduler_proto_msgTypes[5].OneofWrappers = []interface{}{ - (*PeerPacket_SourceError)(nil), - } - file_pkg_rpc_scheduler_scheduler_proto_msgTypes[6].OneofWrappers = []interface{}{ - (*PeerResult_SourceError)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_pkg_rpc_scheduler_scheduler_proto_rawDesc, - NumEnums: 0, - NumMessages: 12, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_pkg_rpc_scheduler_scheduler_proto_goTypes, - DependencyIndexes: file_pkg_rpc_scheduler_scheduler_proto_depIdxs, - MessageInfos: file_pkg_rpc_scheduler_scheduler_proto_msgTypes, - }.Build() - File_pkg_rpc_scheduler_scheduler_proto = out.File - file_pkg_rpc_scheduler_scheduler_proto_rawDesc = nil - file_pkg_rpc_scheduler_scheduler_proto_goTypes = nil - file_pkg_rpc_scheduler_scheduler_proto_depIdxs = nil -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// SchedulerClient is the client API for Scheduler service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type SchedulerClient interface { - // RegisterPeerTask registers a peer into task. - RegisterPeerTask(ctx context.Context, in *PeerTaskRequest, opts ...grpc.CallOption) (*RegisterResult, error) - // ReportPieceResult reports piece results and receives peer packets. - ReportPieceResult(ctx context.Context, opts ...grpc.CallOption) (Scheduler_ReportPieceResultClient, error) - // ReportPeerResult reports downloading result for the peer. 
- ReportPeerResult(ctx context.Context, in *PeerResult, opts ...grpc.CallOption) (*emptypb.Empty, error) - // LeaveTask makes the peer leaving from task. - LeaveTask(ctx context.Context, in *PeerTarget, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Checks if any peer has the given task. - StatTask(ctx context.Context, in *StatTaskRequest, opts ...grpc.CallOption) (*Task, error) - // A peer announces that it has the announced task to other peers. - AnnounceTask(ctx context.Context, in *AnnounceTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) -} - -type schedulerClient struct { - cc grpc.ClientConnInterface -} - -func NewSchedulerClient(cc grpc.ClientConnInterface) SchedulerClient { - return &schedulerClient{cc} -} - -func (c *schedulerClient) RegisterPeerTask(ctx context.Context, in *PeerTaskRequest, opts ...grpc.CallOption) (*RegisterResult, error) { - out := new(RegisterResult) - err := c.cc.Invoke(ctx, "/scheduler.Scheduler/RegisterPeerTask", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *schedulerClient) ReportPieceResult(ctx context.Context, opts ...grpc.CallOption) (Scheduler_ReportPieceResultClient, error) { - stream, err := c.cc.NewStream(ctx, &_Scheduler_serviceDesc.Streams[0], "/scheduler.Scheduler/ReportPieceResult", opts...) 
- if err != nil { - return nil, err - } - x := &schedulerReportPieceResultClient{stream} - return x, nil -} - -type Scheduler_ReportPieceResultClient interface { - Send(*PieceResult) error - Recv() (*PeerPacket, error) - grpc.ClientStream -} - -type schedulerReportPieceResultClient struct { - grpc.ClientStream -} - -func (x *schedulerReportPieceResultClient) Send(m *PieceResult) error { - return x.ClientStream.SendMsg(m) -} - -func (x *schedulerReportPieceResultClient) Recv() (*PeerPacket, error) { - m := new(PeerPacket) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *schedulerClient) ReportPeerResult(ctx context.Context, in *PeerResult, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/scheduler.Scheduler/ReportPeerResult", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *schedulerClient) LeaveTask(ctx context.Context, in *PeerTarget, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/scheduler.Scheduler/LeaveTask", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *schedulerClient) StatTask(ctx context.Context, in *StatTaskRequest, opts ...grpc.CallOption) (*Task, error) { - out := new(Task) - err := c.cc.Invoke(ctx, "/scheduler.Scheduler/StatTask", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *schedulerClient) AnnounceTask(ctx context.Context, in *AnnounceTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/scheduler.Scheduler/AnnounceTask", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// SchedulerServer is the server API for Scheduler service. -type SchedulerServer interface { - // RegisterPeerTask registers a peer into task. 
- RegisterPeerTask(context.Context, *PeerTaskRequest) (*RegisterResult, error) - // ReportPieceResult reports piece results and receives peer packets. - ReportPieceResult(Scheduler_ReportPieceResultServer) error - // ReportPeerResult reports downloading result for the peer. - ReportPeerResult(context.Context, *PeerResult) (*emptypb.Empty, error) - // LeaveTask makes the peer leaving from task. - LeaveTask(context.Context, *PeerTarget) (*emptypb.Empty, error) - // Checks if any peer has the given task. - StatTask(context.Context, *StatTaskRequest) (*Task, error) - // A peer announces that it has the announced task to other peers. - AnnounceTask(context.Context, *AnnounceTaskRequest) (*emptypb.Empty, error) -} - -// UnimplementedSchedulerServer can be embedded to have forward compatible implementations. -type UnimplementedSchedulerServer struct { -} - -func (*UnimplementedSchedulerServer) RegisterPeerTask(context.Context, *PeerTaskRequest) (*RegisterResult, error) { - return nil, status.Errorf(codes.Unimplemented, "method RegisterPeerTask not implemented") -} -func (*UnimplementedSchedulerServer) ReportPieceResult(Scheduler_ReportPieceResultServer) error { - return status.Errorf(codes.Unimplemented, "method ReportPieceResult not implemented") -} -func (*UnimplementedSchedulerServer) ReportPeerResult(context.Context, *PeerResult) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReportPeerResult not implemented") -} -func (*UnimplementedSchedulerServer) LeaveTask(context.Context, *PeerTarget) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method LeaveTask not implemented") -} -func (*UnimplementedSchedulerServer) StatTask(context.Context, *StatTaskRequest) (*Task, error) { - return nil, status.Errorf(codes.Unimplemented, "method StatTask not implemented") -} -func (*UnimplementedSchedulerServer) AnnounceTask(context.Context, *AnnounceTaskRequest) (*emptypb.Empty, error) { - return nil, 
status.Errorf(codes.Unimplemented, "method AnnounceTask not implemented") -} - -func RegisterSchedulerServer(s *grpc.Server, srv SchedulerServer) { - s.RegisterService(&_Scheduler_serviceDesc, srv) -} - -func _Scheduler_RegisterPeerTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PeerTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SchedulerServer).RegisterPeerTask(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/scheduler.Scheduler/RegisterPeerTask", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SchedulerServer).RegisterPeerTask(ctx, req.(*PeerTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Scheduler_ReportPieceResult_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SchedulerServer).ReportPieceResult(&schedulerReportPieceResultServer{stream}) -} - -type Scheduler_ReportPieceResultServer interface { - Send(*PeerPacket) error - Recv() (*PieceResult, error) - grpc.ServerStream -} - -type schedulerReportPieceResultServer struct { - grpc.ServerStream -} - -func (x *schedulerReportPieceResultServer) Send(m *PeerPacket) error { - return x.ServerStream.SendMsg(m) -} - -func (x *schedulerReportPieceResultServer) Recv() (*PieceResult, error) { - m := new(PieceResult) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Scheduler_ReportPeerResult_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PeerResult) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SchedulerServer).ReportPeerResult(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
"/scheduler.Scheduler/ReportPeerResult", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SchedulerServer).ReportPeerResult(ctx, req.(*PeerResult)) - } - return interceptor(ctx, in, info, handler) -} - -func _Scheduler_LeaveTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PeerTarget) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SchedulerServer).LeaveTask(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/scheduler.Scheduler/LeaveTask", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SchedulerServer).LeaveTask(ctx, req.(*PeerTarget)) - } - return interceptor(ctx, in, info, handler) -} - -func _Scheduler_StatTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SchedulerServer).StatTask(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/scheduler.Scheduler/StatTask", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SchedulerServer).StatTask(ctx, req.(*StatTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Scheduler_AnnounceTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AnnounceTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SchedulerServer).AnnounceTask(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/scheduler.Scheduler/AnnounceTask", - } - handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { - return srv.(SchedulerServer).AnnounceTask(ctx, req.(*AnnounceTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Scheduler_serviceDesc = grpc.ServiceDesc{ - ServiceName: "scheduler.Scheduler", - HandlerType: (*SchedulerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "RegisterPeerTask", - Handler: _Scheduler_RegisterPeerTask_Handler, - }, - { - MethodName: "ReportPeerResult", - Handler: _Scheduler_ReportPeerResult_Handler, - }, - { - MethodName: "LeaveTask", - Handler: _Scheduler_LeaveTask_Handler, - }, - { - MethodName: "StatTask", - Handler: _Scheduler_StatTask_Handler, - }, - { - MethodName: "AnnounceTask", - Handler: _Scheduler_AnnounceTask_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ReportPieceResult", - Handler: _Scheduler_ReportPieceResult_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "pkg/rpc/scheduler/scheduler.proto", -} diff --git a/pkg/rpc/scheduler/scheduler.pb.validate.go b/pkg/rpc/scheduler/scheduler.pb.validate.go deleted file mode 100644 index 6bf8bba99..000000000 --- a/pkg/rpc/scheduler/scheduler.pb.validate.go +++ /dev/null @@ -1,1384 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. 
-// source: pkg/rpc/scheduler/scheduler.proto - -package scheduler - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" - - base "d7y.io/dragonfly/v2/pkg/rpc/base" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - - _ = base.Pattern(0) - - _ = base.TaskType(0) - - _ = base.SizeScope(0) - - _ = base.Code(0) - - _ = base.Code(0) - - _ = base.Code(0) - - _ = base.TaskType(0) - - _ = base.TaskType(0) -) - -// Validate checks the field values on PeerTaskRequest with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. -func (m *PeerTaskRequest) Validate() error { - if m == nil { - return nil - } - - if uri, err := url.Parse(m.GetUrl()); err != nil { - return PeerTaskRequestValidationError{ - field: "Url", - reason: "value must be a valid URI", - cause: err, - } - } else if !uri.IsAbs() { - return PeerTaskRequestValidationError{ - field: "Url", - reason: "value must be absolute", - } - } - - if m.GetUrlMeta() == nil { - return PeerTaskRequestValidationError{ - field: "UrlMeta", - reason: "value is required", - } - } - - if v, ok := interface{}(m.GetUrlMeta()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return PeerTaskRequestValidationError{ - field: "UrlMeta", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if utf8.RuneCountInString(m.GetPeerId()) < 1 { - return PeerTaskRequestValidationError{ - field: "PeerId", - reason: "value length must be at least 1 runes", - } - } - - if v, ok := interface{}(m.GetPeerHost()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - 
return PeerTaskRequestValidationError{ - field: "PeerHost", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if v, ok := interface{}(m.GetHostLoad()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return PeerTaskRequestValidationError{ - field: "HostLoad", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for IsMigrating - - // no validation rules for Pattern - - // no validation rules for TaskId - - return nil -} - -// PeerTaskRequestValidationError is the validation error returned by -// PeerTaskRequest.Validate if the designated constraints aren't met. -type PeerTaskRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e PeerTaskRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e PeerTaskRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e PeerTaskRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e PeerTaskRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e PeerTaskRequestValidationError) ErrorName() string { return "PeerTaskRequestValidationError" } - -// Error satisfies the builtin error interface -func (e PeerTaskRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sPeerTaskRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = PeerTaskRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = PeerTaskRequestValidationError{} - -// Validate checks the field values on RegisterResult with the rules defined in -// the proto definition for this message. If any rules are violated, an error -// is returned. -func (m *RegisterResult) Validate() error { - if m == nil { - return nil - } - - // no validation rules for TaskType - - if utf8.RuneCountInString(m.GetTaskId()) < 1 { - return RegisterResultValidationError{ - field: "TaskId", - reason: "value length must be at least 1 runes", - } - } - - if _, ok := base.SizeScope_name[int32(m.GetSizeScope())]; !ok { - return RegisterResultValidationError{ - field: "SizeScope", - reason: "value must be one of the defined enum values", - } - } - - if v, ok := interface{}(m.GetExtendAttribute()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RegisterResultValidationError{ - field: "ExtendAttribute", - reason: "embedded message failed validation", - cause: err, - } - } - } - - switch m.DirectPiece.(type) { - - case *RegisterResult_SinglePiece: - - if v, ok := interface{}(m.GetSinglePiece()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return RegisterResultValidationError{ - field: "SinglePiece", - reason: "embedded message failed validation", - cause: err, - } - } - } - - case *RegisterResult_PieceContent: - // no validation rules for 
PieceContent - - } - - return nil -} - -// RegisterResultValidationError is the validation error returned by -// RegisterResult.Validate if the designated constraints aren't met. -type RegisterResultValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e RegisterResultValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e RegisterResultValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e RegisterResultValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e RegisterResultValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e RegisterResultValidationError) ErrorName() string { return "RegisterResultValidationError" } - -// Error satisfies the builtin error interface -func (e RegisterResultValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sRegisterResult.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = RegisterResultValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = RegisterResultValidationError{} - -// Validate checks the field values on SinglePiece with the rules defined in -// the proto definition for this message. If any rules are violated, an error -// is returned. 
-func (m *SinglePiece) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetDstPid()) < 1 { - return SinglePieceValidationError{ - field: "DstPid", - reason: "value length must be at least 1 runes", - } - } - - if utf8.RuneCountInString(m.GetDstAddr()) < 1 { - return SinglePieceValidationError{ - field: "DstAddr", - reason: "value length must be at least 1 runes", - } - } - - if v, ok := interface{}(m.GetPieceInfo()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SinglePieceValidationError{ - field: "PieceInfo", - reason: "embedded message failed validation", - cause: err, - } - } - } - - return nil -} - -// SinglePieceValidationError is the validation error returned by -// SinglePiece.Validate if the designated constraints aren't met. -type SinglePieceValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e SinglePieceValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e SinglePieceValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SinglePieceValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SinglePieceValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e SinglePieceValidationError) ErrorName() string { return "SinglePieceValidationError" } - -// Error satisfies the builtin error interface -func (e SinglePieceValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSinglePiece.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SinglePieceValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SinglePieceValidationError{} - -// Validate checks the field values on PeerHost with the rules defined in the -// proto definition for this message. If any rules are violated, an error is returned. -func (m *PeerHost) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetId()) < 1 { - return PeerHostValidationError{ - field: "Id", - reason: "value length must be at least 1 runes", - } - } - - if ip := net.ParseIP(m.GetIp()); ip == nil { - return PeerHostValidationError{ - field: "Ip", - reason: "value must be a valid IP address", - } - } - - if val := m.GetRpcPort(); val < 1024 || val >= 65535 { - return PeerHostValidationError{ - field: "RpcPort", - reason: "value must be inside range [1024, 65535)", - } - } - - if val := m.GetDownPort(); val < 1024 || val >= 65535 { - return PeerHostValidationError{ - field: "DownPort", - reason: "value must be inside range [1024, 65535)", - } - } - - if err := m._validateHostname(m.GetHostName()); err != nil { - return PeerHostValidationError{ - field: "HostName", - reason: "value must be a valid hostname", - cause: err, - } - } - - // no validation rules for SecurityDomain - - // no validation rules for Location - - // no validation rules for Idc - - // no validation rules for NetTopology - - return nil -} - -func (m *PeerHost) _validateHostname(host string) error { - s := 
strings.ToLower(strings.TrimSuffix(host, ".")) - - if len(host) > 253 { - return errors.New("hostname cannot exceed 253 characters") - } - - for _, part := range strings.Split(s, ".") { - if l := len(part); l == 0 || l > 63 { - return errors.New("hostname part must be non-empty and cannot exceed 63 characters") - } - - if part[0] == '-' { - return errors.New("hostname parts cannot begin with hyphens") - } - - if part[len(part)-1] == '-' { - return errors.New("hostname parts cannot end with hyphens") - } - - for _, r := range part { - if (r < 'a' || r > 'z') && (r < '0' || r > '9') && r != '-' { - return fmt.Errorf("hostname parts can only contain alphanumeric characters or hyphens, got %q", string(r)) - } - } - } - - return nil -} - -// PeerHostValidationError is the validation error returned by -// PeerHost.Validate if the designated constraints aren't met. -type PeerHostValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e PeerHostValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e PeerHostValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e PeerHostValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e PeerHostValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e PeerHostValidationError) ErrorName() string { return "PeerHostValidationError" } - -// Error satisfies the builtin error interface -func (e PeerHostValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sPeerHost.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = PeerHostValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = PeerHostValidationError{} - -// Validate checks the field values on PieceResult with the rules defined in -// the proto definition for this message. If any rules are violated, an error -// is returned. -func (m *PieceResult) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetTaskId()) < 1 { - return PieceResultValidationError{ - field: "TaskId", - reason: "value length must be at least 1 runes", - } - } - - if utf8.RuneCountInString(m.GetSrcPid()) < 1 { - return PieceResultValidationError{ - field: "SrcPid", - reason: "value length must be at least 1 runes", - } - } - - // no validation rules for DstPid - - if v, ok := interface{}(m.GetPieceInfo()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return PieceResultValidationError{ - field: "PieceInfo", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for BeginTime - - // no validation rules for EndTime - - // no validation rules for Success - - // no validation rules for Code - - if v, ok := interface{}(m.GetHostLoad()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return PieceResultValidationError{ - field: "HostLoad", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for FinishedCount - - if v, ok := 
interface{}(m.GetExtendAttribute()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return PieceResultValidationError{ - field: "ExtendAttribute", - reason: "embedded message failed validation", - cause: err, - } - } - } - - return nil -} - -// PieceResultValidationError is the validation error returned by -// PieceResult.Validate if the designated constraints aren't met. -type PieceResultValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e PieceResultValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e PieceResultValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e PieceResultValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e PieceResultValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e PieceResultValidationError) ErrorName() string { return "PieceResultValidationError" } - -// Error satisfies the builtin error interface -func (e PieceResultValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sPieceResult.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = PieceResultValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = PieceResultValidationError{} - -// Validate checks the field values on PeerPacket with the rules defined in the -// proto definition for this message. If any rules are violated, an error is returned. 
-func (m *PeerPacket) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetTaskId()) < 1 { - return PeerPacketValidationError{ - field: "TaskId", - reason: "value length must be at least 1 runes", - } - } - - if utf8.RuneCountInString(m.GetSrcPid()) < 1 { - return PeerPacketValidationError{ - field: "SrcPid", - reason: "value length must be at least 1 runes", - } - } - - if m.GetParallelCount() < 1 { - return PeerPacketValidationError{ - field: "ParallelCount", - reason: "value must be greater than or equal to 1", - } - } - - if v, ok := interface{}(m.GetMainPeer()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return PeerPacketValidationError{ - field: "MainPeer", - reason: "embedded message failed validation", - cause: err, - } - } - } - - for idx, item := range m.GetCandidatePeers() { - _, _ = idx, item - - if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return PeerPacketValidationError{ - field: fmt.Sprintf("CandidatePeers[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - // no validation rules for Code - - switch m.ErrorDetail.(type) { - - case *PeerPacket_SourceError: - - if v, ok := interface{}(m.GetSourceError()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return PeerPacketValidationError{ - field: "SourceError", - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - return nil -} - -// PeerPacketValidationError is the validation error returned by -// PeerPacket.Validate if the designated constraints aren't met. -type PeerPacketValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e PeerPacketValidationError) Field() string { return e.field } - -// Reason function returns reason value. 
-func (e PeerPacketValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e PeerPacketValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e PeerPacketValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e PeerPacketValidationError) ErrorName() string { return "PeerPacketValidationError" } - -// Error satisfies the builtin error interface -func (e PeerPacketValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sPeerPacket.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = PeerPacketValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = PeerPacketValidationError{} - -// Validate checks the field values on PeerResult with the rules defined in the -// proto definition for this message. If any rules are violated, an error is returned. 
-func (m *PeerResult) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetTaskId()) < 1 { - return PeerResultValidationError{ - field: "TaskId", - reason: "value length must be at least 1 runes", - } - } - - if utf8.RuneCountInString(m.GetPeerId()) < 1 { - return PeerResultValidationError{ - field: "PeerId", - reason: "value length must be at least 1 runes", - } - } - - if ip := net.ParseIP(m.GetSrcIp()); ip == nil { - return PeerResultValidationError{ - field: "SrcIp", - reason: "value must be a valid IP address", - } - } - - // no validation rules for SecurityDomain - - // no validation rules for Idc - - if uri, err := url.Parse(m.GetUrl()); err != nil { - return PeerResultValidationError{ - field: "Url", - reason: "value must be a valid URI", - cause: err, - } - } else if !uri.IsAbs() { - return PeerResultValidationError{ - field: "Url", - reason: "value must be absolute", - } - } - - if m.GetContentLength() < -1 { - return PeerResultValidationError{ - field: "ContentLength", - reason: "value must be greater than or equal to -1", - } - } - - // no validation rules for Traffic - - // no validation rules for Cost - - // no validation rules for Success - - // no validation rules for Code - - if m.GetTotalPieceCount() < -1 { - return PeerResultValidationError{ - field: "TotalPieceCount", - reason: "value must be greater than or equal to -1", - } - } - - switch m.ErrorDetail.(type) { - - case *PeerResult_SourceError: - - if v, ok := interface{}(m.GetSourceError()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return PeerResultValidationError{ - field: "SourceError", - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - return nil -} - -// PeerResultValidationError is the validation error returned by -// PeerResult.Validate if the designated constraints aren't met. 
-type PeerResultValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e PeerResultValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e PeerResultValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e PeerResultValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e PeerResultValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e PeerResultValidationError) ErrorName() string { return "PeerResultValidationError" } - -// Error satisfies the builtin error interface -func (e PeerResultValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sPeerResult.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = PeerResultValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = PeerResultValidationError{} - -// Validate checks the field values on PeerTarget with the rules defined in the -// proto definition for this message. If any rules are violated, an error is returned. -func (m *PeerTarget) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetTaskId()) < 1 { - return PeerTargetValidationError{ - field: "TaskId", - reason: "value length must be at least 1 runes", - } - } - - if utf8.RuneCountInString(m.GetPeerId()) < 1 { - return PeerTargetValidationError{ - field: "PeerId", - reason: "value length must be at least 1 runes", - } - } - - return nil -} - -// PeerTargetValidationError is the validation error returned by -// PeerTarget.Validate if the designated constraints aren't met. 
-type PeerTargetValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e PeerTargetValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e PeerTargetValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e PeerTargetValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e PeerTargetValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e PeerTargetValidationError) ErrorName() string { return "PeerTargetValidationError" } - -// Error satisfies the builtin error interface -func (e PeerTargetValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sPeerTarget.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = PeerTargetValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = PeerTargetValidationError{} - -// Validate checks the field values on StatTaskRequest with the rules defined -// in the proto definition for this message. If any rules are violated, an -// error is returned. -func (m *StatTaskRequest) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetTaskId()) < 1 { - return StatTaskRequestValidationError{ - field: "TaskId", - reason: "value length must be at least 1 runes", - } - } - - return nil -} - -// StatTaskRequestValidationError is the validation error returned by -// StatTaskRequest.Validate if the designated constraints aren't met. -type StatTaskRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. 
-func (e StatTaskRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e StatTaskRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e StatTaskRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e StatTaskRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e StatTaskRequestValidationError) ErrorName() string { return "StatTaskRequestValidationError" } - -// Error satisfies the builtin error interface -func (e StatTaskRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sStatTaskRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = StatTaskRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = StatTaskRequestValidationError{} - -// Validate checks the field values on Task with the rules defined in the proto -// definition for this message. If any rules are violated, an error is returned. 
-func (m *Task) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetId()) < 1 { - return TaskValidationError{ - field: "Id", - reason: "value length must be at least 1 runes", - } - } - - // no validation rules for Type - - if m.GetContentLength() < 1 { - return TaskValidationError{ - field: "ContentLength", - reason: "value must be greater than or equal to 1", - } - } - - if m.GetTotalPieceCount() < 1 { - return TaskValidationError{ - field: "TotalPieceCount", - reason: "value must be greater than or equal to 1", - } - } - - if utf8.RuneCountInString(m.GetState()) < 1 { - return TaskValidationError{ - field: "State", - reason: "value length must be at least 1 runes", - } - } - - if m.GetPeerCount() < 0 { - return TaskValidationError{ - field: "PeerCount", - reason: "value must be greater than or equal to 0", - } - } - - // no validation rules for HasAvailablePeer - - return nil -} - -// TaskValidationError is the validation error returned by Task.Validate if the -// designated constraints aren't met. -type TaskValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e TaskValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e TaskValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e TaskValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e TaskValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e TaskValidationError) ErrorName() string { return "TaskValidationError" } - -// Error satisfies the builtin error interface -func (e TaskValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sTask.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = TaskValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = TaskValidationError{} - -// Validate checks the field values on AnnounceTaskRequest with the rules -// defined in the proto definition for this message. If any rules are -// violated, an error is returned. -func (m *AnnounceTaskRequest) Validate() error { - if m == nil { - return nil - } - - if utf8.RuneCountInString(m.GetTaskId()) < 1 { - return AnnounceTaskRequestValidationError{ - field: "TaskId", - reason: "value length must be at least 1 runes", - } - } - - if m.GetUrl() != "" { - - if uri, err := url.Parse(m.GetUrl()); err != nil { - return AnnounceTaskRequestValidationError{ - field: "Url", - reason: "value must be a valid URI", - cause: err, - } - } else if !uri.IsAbs() { - return AnnounceTaskRequestValidationError{ - field: "Url", - reason: "value must be absolute", - } - } - - } - - if m.GetUrlMeta() == nil { - return AnnounceTaskRequestValidationError{ - field: "UrlMeta", - reason: "value is required", - } - } - - if v, ok := interface{}(m.GetUrlMeta()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return AnnounceTaskRequestValidationError{ - field: "UrlMeta", - reason: "embedded message failed validation", - cause: err, - } - } - } - - if v, ok := interface{}(m.GetPeerHost()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return AnnounceTaskRequestValidationError{ - field: "PeerHost", - reason: "embedded message failed validation", - 
cause: err, - } - } - } - - if m.GetPiecePacket() == nil { - return AnnounceTaskRequestValidationError{ - field: "PiecePacket", - reason: "value is required", - } - } - - if v, ok := interface{}(m.GetPiecePacket()).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return AnnounceTaskRequestValidationError{ - field: "PiecePacket", - reason: "embedded message failed validation", - cause: err, - } - } - } - - // no validation rules for TaskType - - return nil -} - -// AnnounceTaskRequestValidationError is the validation error returned by -// AnnounceTaskRequest.Validate if the designated constraints aren't met. -type AnnounceTaskRequestValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e AnnounceTaskRequestValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e AnnounceTaskRequestValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e AnnounceTaskRequestValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e AnnounceTaskRequestValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e AnnounceTaskRequestValidationError) ErrorName() string { - return "AnnounceTaskRequestValidationError" -} - -// Error satisfies the builtin error interface -func (e AnnounceTaskRequestValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sAnnounceTaskRequest.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = AnnounceTaskRequestValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = AnnounceTaskRequestValidationError{} - -// Validate checks the field values on PeerPacket_DestPeer with the rules -// defined in the proto definition for this message. If any rules are -// violated, an error is returned. -func (m *PeerPacket_DestPeer) Validate() error { - if m == nil { - return nil - } - - if ip := net.ParseIP(m.GetIp()); ip == nil { - return PeerPacket_DestPeerValidationError{ - field: "Ip", - reason: "value must be a valid IP address", - } - } - - if val := m.GetRpcPort(); val < 1024 || val >= 65535 { - return PeerPacket_DestPeerValidationError{ - field: "RpcPort", - reason: "value must be inside range [1024, 65535)", - } - } - - if utf8.RuneCountInString(m.GetPeerId()) < 1 { - return PeerPacket_DestPeerValidationError{ - field: "PeerId", - reason: "value length must be at least 1 runes", - } - } - - return nil -} - -// PeerPacket_DestPeerValidationError is the validation error returned by -// PeerPacket_DestPeer.Validate if the designated constraints aren't met. -type PeerPacket_DestPeerValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e PeerPacket_DestPeerValidationError) Field() string { return e.field } - -// Reason function returns reason value. 
-func (e PeerPacket_DestPeerValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e PeerPacket_DestPeerValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e PeerPacket_DestPeerValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e PeerPacket_DestPeerValidationError) ErrorName() string { - return "PeerPacket_DestPeerValidationError" -} - -// Error satisfies the builtin error interface -func (e PeerPacket_DestPeerValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sPeerPacket_DestPeer.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = PeerPacket_DestPeerValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = PeerPacket_DestPeerValidationError{} diff --git a/pkg/rpc/scheduler/scheduler.proto b/pkg/rpc/scheduler/scheduler.proto deleted file mode 100644 index 597486774..000000000 --- a/pkg/rpc/scheduler/scheduler.proto +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Copyright 2020 The Dragonfly Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -syntax = "proto3"; - -package scheduler; - -import "pkg/rpc/base/base.proto"; -import "pkg/rpc/errordetails/error_details.proto"; -import "validate/validate.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "d7y.io/dragonfly/v2/pkg/rpc/scheduler"; - -// PeerTaskRequest represents request of RegisterPeerTask. -message PeerTaskRequest{ - // Download url. - string url = 1 [(validate.rules).string.uri = true]; - // URL meta info. - base.UrlMeta url_meta = 2 [(validate.rules).message.required = true]; - // Peer id and it must be global uniqueness. - string peer_id = 3 [(validate.rules).string.min_len = 1]; - // Peer host info. - PeerHost peer_host = 4; - // Peer host load. - base.HostLoad host_load = 5; - // Whether this request is caused by migration. - bool is_migrating = 6; - // Pattern includes p2p, seed-peer and source. - base.Pattern pattern = 7; - // Task id. - string task_id = 8; -} - -// RegisterResult represents response of RegisterPeerTask. -message RegisterResult{ - // Task type. - base.TaskType task_type = 1; - // Task id - string task_id = 2 [(validate.rules).string.min_len = 1]; - // File size scope. - base.SizeScope size_scope = 3 [(validate.rules).enum.defined_only = true]; - // Download the only piece directly for small or tiny file. - oneof direct_piece{ - // Return single piece info when size scope is small. - SinglePiece single_piece = 4; - // Return task content when size scope is tiny. - bytes piece_content = 5; - } - // Task extend attribute, - // only direct_piece will carry extend attribute. - base.ExtendAttribute extend_attribute = 6; -} - -// SinglePiece represents information of single piece. -message SinglePiece{ - // Destination peer id. - string dst_pid = 1 [(validate.rules).string.min_len = 1]; - // Destination download address. - string dst_addr = 2 [(validate.rules).string.min_len = 1]; - // Piece info. - base.PieceInfo piece_info = 3; -} - -// PeerHost represents information of peer host. 
-message PeerHost{ - // Peer host id. - string id = 1 [(validate.rules).string.min_len = 1]; - // peer host ip - string ip = 2 [(validate.rules).string.ip = true]; - // Port of grpc service. - int32 rpc_port = 3 [(validate.rules).int32 = {gte: 1024, lt: 65535}]; - // Port of download server. - int32 down_port = 4 [(validate.rules).int32 = {gte: 1024, lt: 65535}]; - // Peer hostname. - string host_name = 5 [(validate.rules).string.hostname = true]; - // Security domain for network. - string security_domain = 6; - // Location path(area|country|province|city|...). - string location = 7; - // IDC where the peer host is located - string idc = 8; - // Network topology(switch|router|...). - string net_topology = 9; -} - -// PieceResult represents request of ReportPieceResult. -message PieceResult{ - // Task id. - string task_id = 1 [(validate.rules).string.min_len = 1]; - // Source peer id. - string src_pid = 2 [(validate.rules).string.min_len = 1]; - // Destination peer id. - string dst_pid = 3; - // Piece info. - base.PieceInfo piece_info = 4; - // Begin time of the piece downloading. - uint64 begin_time = 5; - // End time of the piece downloading. - uint64 end_time = 6; - // Whether the piece downloading is successfully. - bool success = 7; - // Result code. - base.Code code = 8; - // Peer host load. - base.HostLoad host_load = 9; - // Finished count. - int32 finished_count = 10; - // Task extend attribute, - // only first success back source piece will carry extend attribute. - base.ExtendAttribute extend_attribute = 11; -} - -// PeerPacket represents response of ReportPieceResult. -message PeerPacket{ - message DestPeer{ - // Destination ip. - string ip = 1 [(validate.rules).string.ip = true]; - // Port of grpc service. - int32 rpc_port = 2 [(validate.rules).int32 = {gte: 1024, lt: 65535}]; - // Destination peer id. - string peer_id = 3 [(validate.rules).string.min_len = 1]; - } - - // Task id. 
- string task_id = 2 [(validate.rules).string.min_len = 1]; - // Source peer id. - string src_pid = 3 [(validate.rules).string.min_len = 1]; - // Concurrent downloading count from main peer. - int32 parallel_count = 4 [(validate.rules).int32.gte = 1]; - // Main peer. - DestPeer main_peer = 5; - // Candidate peers. - repeated DestPeer candidate_peers = 6; - // Result code. - base.Code code = 7; - // Error detail. - oneof error_detail{ - // Source error. - errordetails.SourceError source_error = 8; - } -} - -// PeerResult represents response of ReportPeerResult. -message PeerResult{ - // Task id. - string task_id = 1 [(validate.rules).string.min_len = 1]; - // Peer id. - string peer_id = 2 [(validate.rules).string.min_len = 1]; - // Source host ip. - string src_ip = 3 [(validate.rules).string.ip = true]; - // Security domain. - string security_domain = 4; - // IDC where the peer host is located - string idc = 5; - // Download url. - string url = 6 [(validate.rules).string.uri = true]; - // Total content length. - int64 content_length = 7 [(validate.rules).int64.gte = -1]; - // Total network traffic. - uint64 traffic = 8; - // Total cost time. - uint32 cost = 9; - // Whether peer downloading file is successfully. - bool success = 10; - // Result code. - base.Code code = 11; - // Task total piece count. - int32 total_piece_count = 12 [(validate.rules).int32.gte = -1]; - // Error detail. - oneof error_detail{ - // Source error. - errordetails.SourceError source_error = 13; - } -} - -// PeerTarget represents request of LeaveTask. -message PeerTarget{ - // Task id. - string task_id = 1 [(validate.rules).string.min_len = 1]; - // Peer id. - string peer_id = 2 [(validate.rules).string.min_len = 1]; -} - -// StatTaskRequest represents request of StatTask. -message StatTaskRequest{ - // Task id. - string task_id = 1 [(validate.rules).string.min_len = 1]; -} - -// Task represents download task. -message Task{ - // Task id. 
- string id = 1 [(validate.rules).string.min_len = 1]; - // Task type. - base.TaskType type = 2; - // Task content length. - int64 content_length = 3 [(validate.rules).int64.gte = 1]; - // Task total piece count. - int32 total_piece_count = 4 [(validate.rules).int32.gte = 1]; - // Task state. - string state = 5 [(validate.rules).string.min_len = 1]; - // Task peer count. - int32 peer_count = 6 [(validate.rules).int32.gte = 0]; - // Task contains available peer. - bool hasAvailablePeer = 7; -} - -// AnnounceTaskRequest represents request of AnnounceTask. -message AnnounceTaskRequest{ - // Task id. - string task_id = 1 [(validate.rules).string.min_len = 1]; - // Download url. - string url = 2 [(validate.rules).string = {uri: true, ignore_empty: true}]; - // URL meta info. - base.UrlMeta url_meta = 3 [(validate.rules).message.required = true]; - // Peer host info. - PeerHost peer_host = 4; - // Task piece info. - base.PiecePacket piece_packet = 5 [(validate.rules).message.required = true]; - // Task type. - base.TaskType task_type = 6; -} - -// Scheduler RPC Service. -service Scheduler{ - // RegisterPeerTask registers a peer into task. - rpc RegisterPeerTask(PeerTaskRequest)returns(RegisterResult); - - // ReportPieceResult reports piece results and receives peer packets. - rpc ReportPieceResult(stream PieceResult)returns(stream PeerPacket); - - // ReportPeerResult reports downloading result for the peer. - rpc ReportPeerResult(PeerResult)returns(google.protobuf.Empty); - - // LeaveTask makes the peer leaving from task. - rpc LeaveTask(PeerTarget)returns(google.protobuf.Empty); - - // Checks if any peer has the given task. - rpc StatTask(StatTaskRequest)returns(Task); - - // A peer announces that it has the announced task to other peers. 
- rpc AnnounceTask(AnnounceTaskRequest) returns(google.protobuf.Empty); -} diff --git a/pkg/rpc/server.go b/pkg/rpc/server.go index 161eed310..be42e1d0e 100644 --- a/pkg/rpc/server.go +++ b/pkg/rpc/server.go @@ -14,12 +14,6 @@ * limitations under the License. */ -//go:generate mockgen -destination base/mocks/base_mock.go -source base/base.pb.go -package mocks -//go:generate mockgen -destination cdnsystem/mocks/cdnsystem_mock.go -source cdnsystem/cdnsystem.pb.go -package mocks -//go:generate mockgen -destination dfdaemon/mocks/dfdaemon_mock.go -source dfdaemon/dfdaemon.pb.go -package mocks -//go:generate mockgen -destination manager/mocks/manager_mock.go -source manager/manager.pb.go -package mocks -//go:generate mockgen -destination scheduler/mocks/scheduler_mock.go -source scheduler/scheduler.pb.go -package mocks - package rpc import ( @@ -35,10 +29,11 @@ import ( "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/internal/dferrors" logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/base/common" + "d7y.io/dragonfly/v2/pkg/rpc/common" ) func DefaultServerOptions() []grpc.ServerOption { @@ -88,7 +83,7 @@ func unaryServerInterceptor(ctx context.Context, req any, info *grpc.UnaryServer func convertServerError(err error) error { if status.Code(err) == codes.InvalidArgument { - err = dferrors.New(base.Code_BadRequest, err.Error()) + err = dferrors.New(commonv1.Code_BadRequest, err.Error()) } if v, ok := err.(*dferrors.DfError); ok { logger.GrpcLogger.Errorf(v.Message) diff --git a/pkg/source/source_client.go b/pkg/source/source_client.go index 3984235bf..33f730fd6 100644 --- a/pkg/source/source_client.go +++ b/pkg/source/source_client.go @@ -131,7 +131,7 @@ type URLEntry struct { // Name returns the name of the file (or subdirectory) described by the entry. 
// Name will be used in recursive downloading as file name or subdirectory name - // This name is only the final element of the path (the base name), not the entire path. + // This name is only the final element of the path (the base name), not the entire path. // For example, Name would return "hello.go" not "home/gopher/hello.go". Name string // IsDir reports whether the entry describes a directory. diff --git a/scheduler/config/dynconfig.go b/scheduler/config/dynconfig.go index f1ed790a6..999c2cb00 100644 --- a/scheduler/config/dynconfig.go +++ b/scheduler/config/dynconfig.go @@ -24,10 +24,11 @@ import ( "path/filepath" "time" + managerv1 "d7y.io/api/pkg/apis/manager/v1" + logger "d7y.io/dragonfly/v2/internal/dflog" dc "d7y.io/dragonfly/v2/internal/dynconfig" "d7y.io/dragonfly/v2/manager/types" - "d7y.io/dragonfly/v2/pkg/rpc/manager" managerclient "d7y.io/dragonfly/v2/pkg/rpc/manager/client" ) @@ -255,8 +256,8 @@ func newManagerClient(client managerclient.Client, cfg *Config) dc.ManagerClient } func (mc *managerClient) Get() (any, error) { - scheduler, err := mc.GetScheduler(&manager.GetSchedulerRequest{ - SourceType: manager.SourceType_SCHEDULER_SOURCE, + scheduler, err := mc.GetScheduler(&managerv1.GetSchedulerRequest{ + SourceType: managerv1.SourceType_SCHEDULER_SOURCE, HostName: mc.config.Server.Host, Ip: mc.config.Server.IP, SchedulerClusterId: uint64(mc.config.Manager.SchedulerClusterID), diff --git a/scheduler/config/dynconfig_test.go b/scheduler/config/dynconfig_test.go index a516f67dd..4e5c604bf 100644 --- a/scheduler/config/dynconfig_test.go +++ b/scheduler/config/dynconfig_test.go @@ -26,7 +26,8 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" - "d7y.io/dragonfly/v2/pkg/rpc/manager" + managerv1 "d7y.io/api/pkg/apis/manager/v1" + "d7y.io/dragonfly/v2/pkg/rpc/manager/client/mocks" ) @@ -61,8 +62,8 @@ func TestDynconfig_GetManagerSourceType(t *testing.T) { }, sleep: func() {}, mock: func(m
*mocks.MockClientMockRecorder) { - m.GetScheduler(gomock.Any()).Return(&manager.Scheduler{ - SeedPeers: []*manager.SeedPeer{ + m.GetScheduler(gomock.Any()).Return(&managerv1.Scheduler{ + SeedPeers: []*managerv1.SeedPeer{ { HostName: "bar", Ip: "127.0.0.1", @@ -93,8 +94,8 @@ func TestDynconfig_GetManagerSourceType(t *testing.T) { }, mock: func(m *mocks.MockClientMockRecorder) { gomock.InOrder( - m.GetScheduler(gomock.Any()).Return(&manager.Scheduler{ - SeedPeers: []*manager.SeedPeer{ + m.GetScheduler(gomock.Any()).Return(&managerv1.Scheduler{ + SeedPeers: []*managerv1.SeedPeer{ { HostName: "bar", Ip: "127.0.0.1", diff --git a/scheduler/job/job.go b/scheduler/job/job.go index 9ceed7336..7e81c9691 100644 --- a/scheduler/job/job.go +++ b/scheduler/job/job.go @@ -26,11 +26,12 @@ import ( "github.com/go-http-utils/headers" "github.com/go-playground/validator/v10" + cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1" + commonv1 "d7y.io/api/pkg/apis/common/v1" + logger "d7y.io/dragonfly/v2/internal/dflog" internaljob "d7y.io/dragonfly/v2/internal/job" "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" "d7y.io/dragonfly/v2/scheduler/config" "d7y.io/dragonfly/v2/scheduler/resource" ) @@ -149,7 +150,7 @@ func (j *job) preheat(ctx context.Context, req string) error { return err } - urlMeta := &base.UrlMeta{ + urlMeta := &commonv1.UrlMeta{ Header: request.Headers, Tag: request.Tag, Filter: request.Filter, @@ -168,7 +169,7 @@ func (j *job) preheat(ctx context.Context, req string) error { log := logger.WithTaskIDAndURL(taskID, request.URL) log.Infof("preheat %s headers: %#v, tag: %s, range: %s, filter: %s, digest: %s", request.URL, urlMeta.Header, urlMeta.Tag, urlMeta.Range, urlMeta.Filter, urlMeta.Digest) - stream, err := j.resource.SeedPeer().Client().ObtainSeeds(ctx, &cdnsystem.SeedRequest{ + stream, err := j.resource.SeedPeer().Client().ObtainSeeds(ctx, &cdnsystemv1.SeedRequest{ TaskId: taskID, Url: request.URL, UrlMeta: 
urlMeta, diff --git a/scheduler/resource/host.go b/scheduler/resource/host.go index 8475037f3..78ee7c0f9 100644 --- a/scheduler/resource/host.go +++ b/scheduler/resource/host.go @@ -22,8 +22,9 @@ import ( "go.uber.org/atomic" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + logger "d7y.io/dragonfly/v2/internal/dflog" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" "d7y.io/dragonfly/v2/scheduler/config" ) @@ -118,7 +119,7 @@ type Host struct { } // New host instance. -func NewHost(rawHost *scheduler.PeerHost, options ...HostOption) *Host { +func NewHost(rawHost *schedulerv1.PeerHost, options ...HostOption) *Host { h := &Host{ ID: rawHost.Id, Type: HostTypeNormal, diff --git a/scheduler/resource/host_manager_test.go b/scheduler/resource/host_manager_test.go index dfa72a1b5..270f6e998 100644 --- a/scheduler/resource/host_manager_test.go +++ b/scheduler/resource/host_manager_test.go @@ -25,8 +25,9 @@ import ( gomock "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/pkg/gc" - "d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/scheduler/config" ) @@ -383,7 +384,7 @@ func TestHostManager_RunGC(t *testing.T) { tc.mock(gc.EXPECT()) mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := NewPeer(mockPeerID, mockTask, mockHost) hostManager, err := newHostManager(mockHostGCConfig, gc) if err != nil { diff --git a/scheduler/resource/host_test.go b/scheduler/resource/host_test.go index af3752c62..3adbec92c 100644 --- a/scheduler/resource/host_test.go +++ b/scheduler/resource/host_test.go @@ -21,14 +21,15 @@ import ( "github.com/stretchr/testify/assert" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 
"d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" "d7y.io/dragonfly/v2/scheduler/config" ) var ( - mockRawHost = &scheduler.PeerHost{ + mockRawHost = &schedulerv1.PeerHost{ Id: idgen.HostID("hostname", 8003), Ip: "127.0.0.1", RpcPort: 8003, @@ -40,7 +41,7 @@ var ( NetTopology: "net_topology", } - mockRawSeedHost = &scheduler.PeerHost{ + mockRawSeedHost = &schedulerv1.PeerHost{ Id: idgen.HostID("hostname_seed", 8003), Ip: "127.0.0.1", RpcPort: 8003, @@ -56,7 +57,7 @@ var ( func TestHost_NewHost(t *testing.T) { tests := []struct { name string - rawHost *scheduler.PeerHost + rawHost *schedulerv1.PeerHost options []HostOption expect func(t *testing.T, host *Host) }{ @@ -140,7 +141,7 @@ func TestHost_NewHost(t *testing.T) { func TestHost_LoadPeer(t *testing.T) { tests := []struct { name string - rawHost *scheduler.PeerHost + rawHost *schedulerv1.PeerHost peerID string options []HostOption expect func(t *testing.T, peer *Peer, ok bool) @@ -178,7 +179,7 @@ func TestHost_LoadPeer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { host := NewHost(tc.rawHost, tc.options...) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := NewPeer(mockPeerID, mockTask, host) host.StorePeer(mockPeer) @@ -191,7 +192,7 @@ func TestHost_LoadPeer(t *testing.T) { func TestHost_StorePeer(t *testing.T) { tests := []struct { name string - rawHost *scheduler.PeerHost + rawHost *schedulerv1.PeerHost peerID string options []HostOption expect func(t *testing.T, peer *Peer, ok bool) @@ -221,7 +222,7 @@ func TestHost_StorePeer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { host := NewHost(tc.rawHost, tc.options...) 
- mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := NewPeer(tc.peerID, mockTask, host) host.StorePeer(mockPeer) @@ -234,7 +235,7 @@ func TestHost_StorePeer(t *testing.T) { func TestHost_DeletePeer(t *testing.T) { tests := []struct { name string - rawHost *scheduler.PeerHost + rawHost *schedulerv1.PeerHost peerID string options []HostOption expect func(t *testing.T, host *Host) @@ -265,7 +266,7 @@ func TestHost_DeletePeer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { host := NewHost(tc.rawHost, tc.options...) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := NewPeer(mockPeerID, mockTask, host) host.StorePeer(mockPeer) @@ -278,7 +279,7 @@ func TestHost_DeletePeer(t *testing.T) { func TestHost_LeavePeers(t *testing.T) { tests := []struct { name string - rawHost *scheduler.PeerHost + rawHost *schedulerv1.PeerHost options []HostOption expect func(t *testing.T, host *Host, mockPeer *Peer) }{ @@ -315,7 +316,7 @@ func TestHost_LeavePeers(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { host := NewHost(tc.rawHost, tc.options...) 
- mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := NewPeer(mockPeerID, mockTask, host) tc.expect(t, host, mockPeer) @@ -326,7 +327,7 @@ func TestHost_LeavePeers(t *testing.T) { func TestHost_FreeUploadLoad(t *testing.T) { tests := []struct { name string - rawHost *scheduler.PeerHost + rawHost *schedulerv1.PeerHost options []HostOption expect func(t *testing.T, host *Host, mockTask *Task, mockPeer *Peer) }{ @@ -365,7 +366,7 @@ func TestHost_FreeUploadLoad(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { host := NewHost(tc.rawHost, tc.options...) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := NewPeer(mockPeerID, mockTask, host) tc.expect(t, host, mockTask, mockPeer) diff --git a/scheduler/resource/peer.go b/scheduler/resource/peer.go index 4bed49585..078286010 100644 --- a/scheduler/resource/peer.go +++ b/scheduler/resource/peer.go @@ -29,9 +29,10 @@ import ( "github.com/looplab/fsm" "go.uber.org/atomic" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/container/set" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" ) const ( @@ -288,17 +289,17 @@ func (p *Peer) PieceCosts() []int64 { } // LoadStream return grpc stream. 
-func (p *Peer) LoadStream() (scheduler.Scheduler_ReportPieceResultServer, bool) { +func (p *Peer) LoadStream() (schedulerv1.Scheduler_ReportPieceResultServer, bool) { rawStream := p.Stream.Load() if rawStream == nil { return nil, false } - return rawStream.(scheduler.Scheduler_ReportPieceResultServer), true + return rawStream.(schedulerv1.Scheduler_ReportPieceResultServer), true } // StoreStream set grpc stream. -func (p *Peer) StoreStream(stream scheduler.Scheduler_ReportPieceResultServer) { +func (p *Peer) StoreStream(stream schedulerv1.Scheduler_ReportPieceResultServer) { p.Stream.Store(stream) } diff --git a/scheduler/resource/peer_manager_test.go b/scheduler/resource/peer_manager_test.go index c0136453b..49f50e6c5 100644 --- a/scheduler/resource/peer_manager_test.go +++ b/scheduler/resource/peer_manager_test.go @@ -25,9 +25,10 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/pkg/gc" "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/scheduler/config" ) @@ -133,7 +134,7 @@ func TestPeerManager_Load(t *testing.T) { tc.mock(gc.EXPECT()) mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := NewPeer(mockPeerID, mockTask, mockHost) peerManager, err := newPeerManager(mockPeerGCConfig, gc) if err != nil { @@ -188,7 +189,7 @@ func TestPeerManager_Store(t *testing.T) { tc.mock(gc.EXPECT()) mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, 
WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := NewPeer(mockPeerID, mockTask, mockHost) peerManager, err := newPeerManager(mockPeerGCConfig, gc) if err != nil { @@ -241,7 +242,7 @@ func TestPeerManager_LoadOrStore(t *testing.T) { tc.mock(gc.EXPECT()) mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := NewPeer(mockPeerID, mockTask, mockHost) peerManager, err := newPeerManager(mockPeerGCConfig, gc) if err != nil { @@ -296,7 +297,7 @@ func TestPeerManager_Delete(t *testing.T) { tc.mock(gc.EXPECT()) mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := NewPeer(mockPeerID, mockTask, mockHost) peerManager, err := newPeerManager(mockPeerGCConfig, gc) if err != nil { @@ -420,7 +421,7 @@ func TestPeerManager_RunGC(t *testing.T) { tc.mock(gc.EXPECT()) mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := NewPeer(mockPeerID, mockTask, mockHost) peerManager, err := newPeerManager(tc.gcConfig, gc) if err != nil { diff --git a/scheduler/resource/peer_test.go b/scheduler/resource/peer_test.go index 711c8ea35..fd1ae57e9 100644 --- a/scheduler/resource/peer_test.go +++ b/scheduler/resource/peer_test.go @@ -29,11 +29,12 @@ import ( "github.com/golang/mock/gomock" 
"github.com/stretchr/testify/assert" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/api/pkg/apis/scheduler/v1/mocks" + "d7y.io/dragonfly/v2/client/util" "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler/mocks" ) var ( @@ -89,7 +90,7 @@ func TestPeer_NewPeer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) tc.expect(t, NewPeer(tc.id, mockTask, mockHost, tc.options...), mockTask, mockHost) }) } @@ -122,7 +123,7 @@ func TestPeer_AppendPieceCost(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := NewPeer(mockPeerID, mockTask, mockHost) tc.expect(t, peer) @@ -157,7 +158,7 @@ func TestPeer_PieceCosts(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := NewPeer(mockPeerID, mockTask, mockHost) tc.expect(t, peer) @@ -168,11 +169,11 @@ func TestPeer_PieceCosts(t *testing.T) { func TestPeer_LoadStream(t *testing.T) 
{ tests := []struct { name string - expect func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) + expect func(t *testing.T, peer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) }{ { name: "load stream", - expect: func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) { + expect: func(t *testing.T, peer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) { assert := assert.New(t) peer.StoreStream(stream) newStream, ok := peer.LoadStream() @@ -182,7 +183,7 @@ func TestPeer_LoadStream(t *testing.T) { }, { name: "stream does not exist", - expect: func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) { + expect: func(t *testing.T, peer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) { assert := assert.New(t) _, ok := peer.LoadStream() assert.Equal(ok, false) @@ -197,7 +198,7 @@ func TestPeer_LoadStream(t *testing.T) { stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl) mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := NewPeer(mockPeerID, mockTask, mockHost) tc.expect(t, peer, stream) }) @@ -207,11 +208,11 @@ func TestPeer_LoadStream(t *testing.T) { func TestPeer_StoreStream(t *testing.T) { tests := []struct { name string - expect func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) + expect func(t *testing.T, peer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) }{ { name: "store stream", - expect: func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) { + expect: func(t *testing.T, peer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) { assert := assert.New(t) peer.StoreStream(stream) newStream, 
ok := peer.LoadStream() @@ -228,7 +229,7 @@ func TestPeer_StoreStream(t *testing.T) { stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl) mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := NewPeer(mockPeerID, mockTask, mockHost) tc.expect(t, peer, stream) }) @@ -238,11 +239,11 @@ func TestPeer_StoreStream(t *testing.T) { func TestPeer_DeleteStream(t *testing.T) { tests := []struct { name string - expect func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) + expect func(t *testing.T, peer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) }{ { name: "delete stream", - expect: func(t *testing.T, peer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) { + expect: func(t *testing.T, peer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) { assert := assert.New(t) peer.StoreStream(stream) peer.DeleteStream() @@ -259,7 +260,7 @@ func TestPeer_DeleteStream(t *testing.T) { stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl) mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := NewPeer(mockPeerID, mockTask, mockHost) tc.expect(t, peer, stream) }) @@ -269,11 +270,11 @@ func TestPeer_DeleteStream(t *testing.T) { func TestPeer_Parents(t *testing.T) { tests := []struct { name string - expect func(t *testing.T, peer *Peer, seedPeer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) + expect func(t *testing.T, peer *Peer, seedPeer *Peer, stream 
schedulerv1.Scheduler_ReportPieceResultServer) }{ { name: "peer has no parents", - expect: func(t *testing.T, peer *Peer, seedPeer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) { + expect: func(t *testing.T, peer *Peer, seedPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) { assert := assert.New(t) peer.Task.StorePeer(peer) assert.Equal(len(peer.Parents()), 0) @@ -281,7 +282,7 @@ func TestPeer_Parents(t *testing.T) { }, { name: "peer has parents", - expect: func(t *testing.T, peer *Peer, seedPeer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) { + expect: func(t *testing.T, peer *Peer, seedPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) { assert := assert.New(t) peer.Task.StorePeer(peer) peer.Task.StorePeer(seedPeer) @@ -302,7 +303,7 @@ func TestPeer_Parents(t *testing.T) { stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl) mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := NewPeer(mockPeerID, mockTask, mockHost) seedPeer := NewPeer(mockSeedPeerID, mockTask, mockHost) tc.expect(t, peer, seedPeer, stream) @@ -313,11 +314,11 @@ func TestPeer_Parents(t *testing.T) { func TestPeer_Children(t *testing.T) { tests := []struct { name string - expect func(t *testing.T, peer *Peer, seedPeer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) + expect func(t *testing.T, peer *Peer, seedPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) }{ { name: "peer has no children", - expect: func(t *testing.T, peer *Peer, seedPeer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) { + expect: func(t *testing.T, peer *Peer, seedPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) { assert := assert.New(t) 
peer.Task.StorePeer(peer) assert.Equal(len(peer.Children()), 0) @@ -325,7 +326,7 @@ func TestPeer_Children(t *testing.T) { }, { name: "peer has children", - expect: func(t *testing.T, peer *Peer, seedPeer *Peer, stream scheduler.Scheduler_ReportPieceResultServer) { + expect: func(t *testing.T, peer *Peer, seedPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) { assert := assert.New(t) peer.Task.StorePeer(peer) peer.Task.StorePeer(seedPeer) @@ -346,7 +347,7 @@ func TestPeer_Children(t *testing.T) { stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl) mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := NewPeer(mockPeerID, mockTask, mockHost) seedPeer := NewPeer(mockSeedPeerID, mockTask, mockHost) tc.expect(t, peer, seedPeer, stream) @@ -445,7 +446,7 @@ func TestPeer_DownloadTinyFile(t *testing.T) { mockRawHost.Ip = ip mockRawHost.DownPort = int32(port) mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer = NewPeer(mockPeerID, mockTask, mockHost) tc.expect(t, peer) }) diff --git a/scheduler/resource/seed_peer.go b/scheduler/resource/seed_peer.go index 7019fe7d9..1ae9a5edd 100644 --- a/scheduler/resource/seed_peer.go +++ b/scheduler/resource/seed_peer.go @@ -23,9 +23,10 @@ import ( "fmt" "time" - "d7y.io/dragonfly/v2/pkg/rpc/base/common" - "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" - rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" 
+ + "d7y.io/dragonfly/v2/pkg/rpc/common" pkgtime "d7y.io/dragonfly/v2/pkg/time" ) @@ -41,7 +42,7 @@ const ( type SeedPeer interface { // TriggerTask triggers the seed peer to download the task. - TriggerTask(context.Context, *Task) (*Peer, *rpcscheduler.PeerResult, error) + TriggerTask(context.Context, *Task) (*Peer, *schedulerv1.PeerResult, error) // Client returns grpc client of seed peer. Client() SeedPeerClient @@ -66,11 +67,11 @@ func newSeedPeer(client SeedPeerClient, peerManager PeerManager, hostManager Hos } // TriggerTask start to trigger seed peer task. -func (s *seedPeer) TriggerTask(ctx context.Context, task *Task) (*Peer, *rpcscheduler.PeerResult, error) { +func (s *seedPeer) TriggerTask(ctx context.Context, task *Task) (*Peer, *schedulerv1.PeerResult, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() - stream, err := s.client.ObtainSeeds(ctx, &cdnsystem.SeedRequest{ + stream, err := s.client.ObtainSeeds(ctx, &cdnsystemv1.SeedRequest{ TaskId: task.ID, Url: task.URL, UrlMeta: task.URLMeta, @@ -127,7 +128,7 @@ func (s *seedPeer) TriggerTask(ctx context.Context, task *Task) (*Peer, *rpcsche // Handle end of piece. if piece.Done { peer.Log.Infof("receive done piece") - return peer, &rpcscheduler.PeerResult{ + return peer, &schedulerv1.PeerResult{ TotalPieceCount: piece.TotalPieceCount, ContentLength: piece.ContentLength, }, nil @@ -137,7 +138,7 @@ func (s *seedPeer) TriggerTask(ctx context.Context, task *Task) (*Peer, *rpcsche } // Initialize seed peer. -func (s *seedPeer) initSeedPeer(task *Task, ps *cdnsystem.PieceSeed) (*Peer, error) { +func (s *seedPeer) initSeedPeer(task *Task, ps *cdnsystemv1.PieceSeed) (*Peer, error) { // Load peer from manager. 
peer, ok := s.peerManager.Load(ps.PeerId) if ok { diff --git a/scheduler/resource/seed_peer_client.go b/scheduler/resource/seed_peer_client.go index 6e4cca537..d97c94e07 100644 --- a/scheduler/resource/seed_peer_client.go +++ b/scheduler/resource/seed_peer_client.go @@ -24,12 +24,13 @@ import ( "google.golang.org/grpc" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/manager/model" "d7y.io/dragonfly/v2/pkg/dfnet" "d7y.io/dragonfly/v2/pkg/idgen" - client "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client" - rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client" "d7y.io/dragonfly/v2/scheduler/config" ) @@ -119,7 +120,7 @@ func seedPeersToHosts(seedPeers []*config.SeedPeer) map[string]*Host { } id := idgen.HostID(seedPeer.Hostname, seedPeer.Port) - hosts[id] = NewHost(&rpcscheduler.PeerHost{ + hosts[id] = NewHost(&schedulerv1.PeerHost{ Id: id, Ip: seedPeer.IP, RpcPort: seedPeer.Port, diff --git a/scheduler/resource/seed_peer_client_mock.go b/scheduler/resource/seed_peer_client_mock.go index b16973ee4..4238470fe 100644 --- a/scheduler/resource/seed_peer_client_mock.go +++ b/scheduler/resource/seed_peer_client_mock.go @@ -8,9 +8,9 @@ import ( context "context" reflect "reflect" + v1 "d7y.io/api/pkg/apis/cdnsystem/v1" + v10 "d7y.io/api/pkg/apis/common/v1" dfnet "d7y.io/dragonfly/v2/pkg/dfnet" - base "d7y.io/dragonfly/v2/pkg/rpc/base" - cdnsystem "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" client "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client" config "d7y.io/dragonfly/v2/scheduler/config" gomock "github.com/golang/mock/gomock" @@ -55,14 +55,14 @@ func (mr *MockSeedPeerClientMockRecorder) Close() *gomock.Call { } // GetPieceTasks mocks base method. 
-func (m *MockSeedPeerClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { +func (m *MockSeedPeerClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *v10.PieceTaskRequest, opts ...grpc.CallOption) (*v10.PiecePacket, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, addr, req} for _, a := range opts { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "GetPieceTasks", varargs...) - ret0, _ := ret[0].(*base.PiecePacket) + ret0, _ := ret[0].(*v10.PiecePacket) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -75,7 +75,7 @@ func (mr *MockSeedPeerClientMockRecorder) GetPieceTasks(ctx, addr, req interface } // ObtainSeeds mocks base method. -func (m *MockSeedPeerClient) ObtainSeeds(ctx context.Context, sr *cdnsystem.SeedRequest, opts ...grpc.CallOption) (*client.PieceSeedStream, error) { +func (m *MockSeedPeerClient) ObtainSeeds(ctx context.Context, sr *v1.SeedRequest, opts ...grpc.CallOption) (*client.PieceSeedStream, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, sr} for _, a := range opts { @@ -107,14 +107,14 @@ func (mr *MockSeedPeerClientMockRecorder) OnNotify(arg0 interface{}) *gomock.Cal } // SyncPieceTasks mocks base method. -func (m *MockSeedPeerClient) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (cdnsystem.Seeder_SyncPieceTasksClient, error) { +func (m *MockSeedPeerClient) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *v10.PieceTaskRequest, opts ...grpc.CallOption) (v1.Seeder_SyncPieceTasksClient, error) { m.ctrl.T.Helper() varargs := []interface{}{ctx, addr, ptr} for _, a := range opts { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "SyncPieceTasks", varargs...) 
- ret0, _ := ret[0].(cdnsystem.Seeder_SyncPieceTasksClient) + ret0, _ := ret[0].(v1.Seeder_SyncPieceTasksClient) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/scheduler/resource/seed_peer_mock.go b/scheduler/resource/seed_peer_mock.go index d38ffdfb2..707bd908f 100644 --- a/scheduler/resource/seed_peer_mock.go +++ b/scheduler/resource/seed_peer_mock.go @@ -8,7 +8,7 @@ import ( context "context" reflect "reflect" - scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + v1 "d7y.io/api/pkg/apis/scheduler/v1" gomock "github.com/golang/mock/gomock" ) @@ -50,11 +50,11 @@ func (mr *MockSeedPeerMockRecorder) Client() *gomock.Call { } // TriggerTask mocks base method. -func (m *MockSeedPeer) TriggerTask(arg0 context.Context, arg1 *Task) (*Peer, *scheduler.PeerResult, error) { +func (m *MockSeedPeer) TriggerTask(arg0 context.Context, arg1 *Task) (*Peer, *v1.PeerResult, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "TriggerTask", arg0, arg1) ret0, _ := ret[0].(*Peer) - ret1, _ := ret[1].(*scheduler.PeerResult) + ret1, _ := ret[1].(*v1.PeerResult) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } diff --git a/scheduler/resource/seed_peer_test.go b/scheduler/resource/seed_peer_test.go index a08aa0c07..c1a53240e 100644 --- a/scheduler/resource/seed_peer_test.go +++ b/scheduler/resource/seed_peer_test.go @@ -25,8 +25,8 @@ import ( gomock "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" - "d7y.io/dragonfly/v2/pkg/rpc/base" - rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" ) func TestSeedPeer_newSeedPeer(t *testing.T) { @@ -60,14 +60,14 @@ func TestSeedPeer_TriggerTask(t *testing.T) { tests := []struct { name string mock func(mc *MockSeedPeerClientMockRecorder) - expect func(t *testing.T, peer *Peer, result *rpcscheduler.PeerResult, err error) + expect func(t *testing.T, peer *Peer, result *schedulerv1.PeerResult, err error) }{ { name: "start obtain 
seed stream failed", mock: func(mc *MockSeedPeerClientMockRecorder) { mc.ObtainSeeds(gomock.Any(), gomock.Any()).Return(nil, errors.New("foo")).Times(1) }, - expect: func(t *testing.T, peer *Peer, result *rpcscheduler.PeerResult, err error) { + expect: func(t *testing.T, peer *Peer, result *schedulerv1.PeerResult, err error) { assert := assert.New(t) assert.EqualError(err, "foo") }, @@ -84,7 +84,7 @@ func TestSeedPeer_TriggerTask(t *testing.T) { tc.mock(client.EXPECT()) seedPeer := newSeedPeer(client, peerManager, hostManager) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer, result, err := seedPeer.TriggerTask(context.Background(), mockTask) tc.expect(t, peer, result, err) }) diff --git a/scheduler/resource/task.go b/scheduler/resource/task.go index d42ee64c1..633dfd3ba 100644 --- a/scheduler/resource/task.go +++ b/scheduler/resource/task.go @@ -25,11 +25,12 @@ import ( "github.com/looplab/fsm" "go.uber.org/atomic" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/container/set" "d7y.io/dragonfly/v2/pkg/dag" - "d7y.io/dragonfly/v2/pkg/rpc/base" - rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" ) const ( @@ -86,10 +87,10 @@ type Task struct { URL string // Type is task type. - Type base.TaskType + Type commonv1.TaskType // URLMeta is task download url meta. - URLMeta *base.UrlMeta + URLMeta *commonv1.UrlMeta // DirectPiece is tiny piece data. DirectPiece []byte @@ -130,7 +131,7 @@ type Task struct { } // New task instance. 
-func NewTask(id, url string, taskType base.TaskType, meta *base.UrlMeta, options ...Option) *Task { +func NewTask(id, url string, taskType commonv1.TaskType, meta *commonv1.UrlMeta, options ...Option) *Task { t := &Task{ ID: id, URL: url, @@ -357,17 +358,17 @@ func (t *Task) IsSeedPeerFailed() bool { } // LoadPiece return piece for a key. -func (t *Task) LoadPiece(key int32) (*base.PieceInfo, bool) { +func (t *Task) LoadPiece(key int32) (*commonv1.PieceInfo, bool) { rawPiece, ok := t.Pieces.Load(key) if !ok { return nil, false } - return rawPiece.(*base.PieceInfo), ok + return rawPiece.(*commonv1.PieceInfo), ok } // StorePiece set piece. -func (t *Task) StorePiece(piece *base.PieceInfo) { +func (t *Task) StorePiece(piece *commonv1.PieceInfo) { t.Pieces.Store(piece.PieceNum, piece) } @@ -377,7 +378,7 @@ func (t *Task) DeletePiece(key int32) { } // SizeScope return task size scope type. -func (t *Task) SizeScope() (base.SizeScope, error) { +func (t *Task) SizeScope() (commonv1.SizeScope, error) { if t.ContentLength.Load() < 0 { return -1, errors.New("invalid content length") } @@ -387,23 +388,23 @@ func (t *Task) SizeScope() (base.SizeScope, error) { } if t.ContentLength.Load() <= TinyFileSize { - return base.SizeScope_TINY, nil + return commonv1.SizeScope_TINY, nil } if t.TotalPieceCount.Load() == 1 { - return base.SizeScope_SMALL, nil + return commonv1.SizeScope_SMALL, nil } - return base.SizeScope_NORMAL, nil + return commonv1.SizeScope_NORMAL, nil } // CanBackToSource represents whether peer can back-to-source. func (t *Task) CanBackToSource() bool { - return int32(t.BackToSourcePeers.Len()) < t.BackToSourceLimit.Load() && (t.Type == base.TaskType_Normal || t.Type == base.TaskType_DfStore) + return int32(t.BackToSourcePeers.Len()) < t.BackToSourceLimit.Load() && (t.Type == commonv1.TaskType_Normal || t.Type == commonv1.TaskType_DfStore) } // NotifyPeers notify all peers in the task with the state code. 
-func (t *Task) NotifyPeers(peerPacket *rpcscheduler.PeerPacket, event string) { +func (t *Task) NotifyPeers(peerPacket *schedulerv1.PeerPacket, event string) { for _, vertex := range t.DAG.GetVertices() { peer := vertex.Value if peer == nil { diff --git a/scheduler/resource/task_manager_test.go b/scheduler/resource/task_manager_test.go index f115b4c2e..97268b0c7 100644 --- a/scheduler/resource/task_manager_test.go +++ b/scheduler/resource/task_manager_test.go @@ -25,8 +25,9 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + commonv1 "d7y.io/api/pkg/apis/common/v1" + "d7y.io/dragonfly/v2/pkg/gc" - "d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/scheduler/config" ) @@ -131,7 +132,7 @@ func TestTaskManager_Load(t *testing.T) { gc := gc.NewMockGC(ctl) tc.mock(gc.EXPECT()) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) taskManager, err := newTaskManager(mockTaskGCConfig, gc) if err != nil { t.Fatal(err) @@ -184,7 +185,7 @@ func TestTaskManager_Store(t *testing.T) { gc := gc.NewMockGC(ctl) tc.mock(gc.EXPECT()) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) taskManager, err := newTaskManager(mockTaskGCConfig, gc) if err != nil { t.Fatal(err) @@ -235,7 +236,7 @@ func TestTaskManager_LoadOrStore(t *testing.T) { gc := gc.NewMockGC(ctl) tc.mock(gc.EXPECT()) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, 
WithBackToSourceLimit(mockTaskBackToSourceLimit)) taskManager, err := newTaskManager(mockTaskGCConfig, gc) if err != nil { t.Fatal(err) @@ -288,7 +289,7 @@ func TestTaskManager_Delete(t *testing.T) { gc := gc.NewMockGC(ctl) tc.mock(gc.EXPECT()) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) taskManager, err := newTaskManager(mockTaskGCConfig, gc) if err != nil { t.Fatal(err) @@ -364,7 +365,7 @@ func TestTaskManager_RunGC(t *testing.T) { tc.mock(gc.EXPECT()) mockHost := NewHost(mockRawHost) - mockTask := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := NewPeer(mockPeerID, mockTask, mockHost) taskManager, err := newTaskManager(mockTaskGCConfig, gc) if err != nil { diff --git a/scheduler/resource/task_test.go b/scheduler/resource/task_test.go index 921c87ebd..a65b47bb7 100644 --- a/scheduler/resource/task_test.go +++ b/scheduler/resource/task_test.go @@ -24,14 +24,15 @@ import ( gomock "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/api/pkg/apis/scheduler/v1/mocks" + "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/base" - rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" - rpcschedulermocks "d7y.io/dragonfly/v2/pkg/rpc/scheduler/mocks" ) var ( - mockTaskURLMeta = &base.UrlMeta{ + mockTaskURLMeta = &commonv1.UrlMeta{ Digest: "digest", Tag: "tag", Range: "range", @@ -43,7 +44,7 @@ var ( mockTaskBackToSourceLimit int32 = 200 mockTaskURL = "http://example.com/foo" mockTaskID = 
idgen.TaskID(mockTaskURL, mockTaskURLMeta) - mockPieceInfo = &base.PieceInfo{ + mockPieceInfo = &commonv1.PieceInfo{ PieceNum: 1, RangeStart: 0, RangeSize: 100, @@ -56,7 +57,7 @@ func TestTask_NewTask(t *testing.T) { tests := []struct { name string id string - urlMeta *base.UrlMeta + urlMeta *commonv1.UrlMeta url string backToSourceLimit int32 expect func(t *testing.T, task *Task) @@ -89,7 +90,7 @@ func TestTask_NewTask(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - tc.expect(t, NewTask(tc.id, tc.url, base.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit))) + tc.expect(t, NewTask(tc.id, tc.url, commonv1.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit))) }) } } @@ -98,7 +99,7 @@ func TestTask_LoadPeer(t *testing.T) { tests := []struct { name string id string - urlMeta *base.UrlMeta + urlMeta *commonv1.UrlMeta url string backToSourceLimit int32 peerID string @@ -146,7 +147,7 @@ func TestTask_LoadPeer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) - task := NewTask(tc.id, tc.url, base.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) + task := NewTask(tc.id, tc.url, commonv1.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) mockPeer := NewPeer(mockPeerID, task, mockHost) task.StorePeer(mockPeer) @@ -213,7 +214,7 @@ func TestTask_LoadRandomPeers(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { host := NewHost(mockRawHost) - task := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta) + task := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta) tc.expect(t, task, host) }) @@ -224,7 +225,7 @@ func TestTask_StorePeer(t *testing.T) { tests := []struct { name string id string - urlMeta *base.UrlMeta + urlMeta *commonv1.UrlMeta url string backToSourceLimit int32 peerID string @@ -261,7 +262,7 @@ func 
TestTask_StorePeer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) - task := NewTask(tc.id, tc.url, base.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) + task := NewTask(tc.id, tc.url, commonv1.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) mockPeer := NewPeer(tc.peerID, task, mockHost) task.StorePeer(mockPeer) @@ -275,7 +276,7 @@ func TestTask_DeletePeer(t *testing.T) { tests := []struct { name string id string - urlMeta *base.UrlMeta + urlMeta *commonv1.UrlMeta url string backToSourceLimit int32 peerID string @@ -313,7 +314,7 @@ func TestTask_DeletePeer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) - task := NewTask(tc.id, tc.url, base.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) + task := NewTask(tc.id, tc.url, commonv1.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) mockPeer := NewPeer(mockPeerID, task, mockHost) task.StorePeer(mockPeer) @@ -350,7 +351,7 @@ func TestTask_PeerCount(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) - task := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta) + task := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta) mockPeer := NewPeer(mockPeerID, task, mockHost) tc.expect(t, mockPeer, task) @@ -440,7 +441,7 @@ func TestTask_AddPeerEdge(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) - task := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta) + task := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta) tc.expect(t, mockHost, task) }) @@ -536,7 +537,7 @@ func TestTask_DeletePeerInEdges(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := 
NewHost(mockRawHost) - task := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta) + task := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta) tc.expect(t, mockHost, task) }) @@ -630,7 +631,7 @@ func TestTask_DeletePeerOutEdges(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) - task := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta) + task := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta) tc.expect(t, mockHost, task) }) @@ -711,7 +712,7 @@ func TestTask_CanAddPeerEdge(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) - task := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta) + task := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta) tc.expect(t, mockHost, task) }) @@ -770,7 +771,7 @@ func TestTask_PeerDegree(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) - task := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta) + task := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta) tc.expect(t, mockHost, task) }) @@ -829,7 +830,7 @@ func TestTask_PeerInDegree(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) - task := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta) + task := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta) tc.expect(t, mockHost, task) }) @@ -888,7 +889,7 @@ func TestTask_PeerOutDegree(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) - task := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta) + task := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta) 
tc.expect(t, mockHost, task) }) @@ -899,7 +900,7 @@ func TestTask_HasAvailablePeer(t *testing.T) { tests := []struct { name string id string - urlMeta *base.UrlMeta + urlMeta *commonv1.UrlMeta url string backToSourceLimit int32 expect func(t *testing.T, task *Task, mockPeer *Peer) @@ -935,7 +936,7 @@ func TestTask_HasAvailablePeer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) - task := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) + task := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) mockPeer := NewPeer(mockPeerID, task, mockHost) tc.expect(t, task, mockPeer) @@ -998,7 +999,7 @@ func TestTask_LoadSeedPeer(t *testing.T) { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) mockSeedHost := NewHost(mockRawSeedHost, WithHostType(HostTypeSuperSeed)) - task := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + task := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := NewPeer(mockPeerID, task, mockHost) mockSeedPeer := NewPeer(mockSeedPeerID, task, mockSeedHost) @@ -1061,7 +1062,7 @@ func TestTask_IsSeedPeerFailed(t *testing.T) { t.Run(tc.name, func(t *testing.T) { mockHost := NewHost(mockRawHost) mockSeedHost := NewHost(mockRawSeedHost, WithHostType(HostTypeSuperSeed)) - task := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + task := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := NewPeer(mockPeerID, task, mockHost) mockSeedPeer := NewPeer(mockSeedPeerID, task, mockSeedHost) @@ -1074,12 +1075,12 @@ func TestTask_LoadPiece(t 
*testing.T) { tests := []struct { name string id string - urlMeta *base.UrlMeta + urlMeta *commonv1.UrlMeta url string backToSourceLimit int32 - pieceInfo *base.PieceInfo + pieceInfo *commonv1.PieceInfo pieceNum int32 - expect func(t *testing.T, piece *base.PieceInfo, ok bool) + expect func(t *testing.T, piece *commonv1.PieceInfo, ok bool) }{ { name: "load piece", @@ -1089,7 +1090,7 @@ func TestTask_LoadPiece(t *testing.T) { backToSourceLimit: mockTaskBackToSourceLimit, pieceInfo: mockPieceInfo, pieceNum: mockPieceInfo.PieceNum, - expect: func(t *testing.T, piece *base.PieceInfo, ok bool) { + expect: func(t *testing.T, piece *commonv1.PieceInfo, ok bool) { assert := assert.New(t) assert.Equal(ok, true) assert.Equal(piece.PieceNum, mockPieceInfo.PieceNum) @@ -1103,7 +1104,7 @@ func TestTask_LoadPiece(t *testing.T) { backToSourceLimit: mockTaskBackToSourceLimit, pieceInfo: mockPieceInfo, pieceNum: 2, - expect: func(t *testing.T, piece *base.PieceInfo, ok bool) { + expect: func(t *testing.T, piece *commonv1.PieceInfo, ok bool) { assert := assert.New(t) assert.Equal(ok, false) }, @@ -1116,7 +1117,7 @@ func TestTask_LoadPiece(t *testing.T) { backToSourceLimit: mockTaskBackToSourceLimit, pieceInfo: mockPieceInfo, pieceNum: 0, - expect: func(t *testing.T, piece *base.PieceInfo, ok bool) { + expect: func(t *testing.T, piece *commonv1.PieceInfo, ok bool) { assert := assert.New(t) assert.Equal(ok, false) }, @@ -1125,7 +1126,7 @@ func TestTask_LoadPiece(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - task := NewTask(tc.id, tc.url, base.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) + task := NewTask(tc.id, tc.url, commonv1.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) task.StorePiece(tc.pieceInfo) piece, ok := task.LoadPiece(tc.pieceNum) @@ -1138,12 +1139,12 @@ func TestTask_StorePiece(t *testing.T) { tests := []struct { name string id string - urlMeta *base.UrlMeta + urlMeta 
*commonv1.UrlMeta url string backToSourceLimit int32 - pieceInfo *base.PieceInfo + pieceInfo *commonv1.PieceInfo pieceNum int32 - expect func(t *testing.T, piece *base.PieceInfo, ok bool) + expect func(t *testing.T, piece *commonv1.PieceInfo, ok bool) }{ { name: "store piece", @@ -1153,7 +1154,7 @@ func TestTask_StorePiece(t *testing.T) { backToSourceLimit: mockTaskBackToSourceLimit, pieceInfo: mockPieceInfo, pieceNum: mockPieceInfo.PieceNum, - expect: func(t *testing.T, piece *base.PieceInfo, ok bool) { + expect: func(t *testing.T, piece *commonv1.PieceInfo, ok bool) { assert := assert.New(t) assert.Equal(ok, true) assert.Equal(piece.PieceNum, mockPieceInfo.PieceNum) @@ -1163,7 +1164,7 @@ func TestTask_StorePiece(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - task := NewTask(tc.id, tc.url, base.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) + task := NewTask(tc.id, tc.url, commonv1.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) task.StorePiece(tc.pieceInfo) piece, ok := task.LoadPiece(tc.pieceNum) @@ -1176,10 +1177,10 @@ func TestTask_DeletePiece(t *testing.T) { tests := []struct { name string id string - urlMeta *base.UrlMeta + urlMeta *commonv1.UrlMeta url string backToSourceLimit int32 - pieceInfo *base.PieceInfo + pieceInfo *commonv1.PieceInfo pieceNum int32 expect func(t *testing.T, task *Task) }{ @@ -1216,7 +1217,7 @@ func TestTask_DeletePiece(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - task := NewTask(tc.id, tc.url, base.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) + task := NewTask(tc.id, tc.url, commonv1.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) task.StorePiece(tc.pieceInfo) task.DeletePiece(tc.pieceNum) @@ -1229,7 +1230,7 @@ func TestTask_SizeScope(t *testing.T) { tests := []struct { name string id string - urlMeta *base.UrlMeta + urlMeta *commonv1.UrlMeta url string 
backToSourceLimit int32 contentLength int64 @@ -1248,7 +1249,7 @@ func TestTask_SizeScope(t *testing.T) { assert := assert.New(t) sizeScope, err := task.SizeScope() assert.NoError(err) - assert.Equal(sizeScope, base.SizeScope_TINY) + assert.Equal(sizeScope, commonv1.SizeScope_TINY) }, }, { @@ -1263,7 +1264,7 @@ func TestTask_SizeScope(t *testing.T) { assert := assert.New(t) sizeScope, err := task.SizeScope() assert.NoError(err) - assert.Equal(sizeScope, base.SizeScope_SMALL) + assert.Equal(sizeScope, commonv1.SizeScope_SMALL) }, }, { @@ -1278,7 +1279,7 @@ func TestTask_SizeScope(t *testing.T) { assert := assert.New(t) sizeScope, err := task.SizeScope() assert.NoError(err) - assert.Equal(sizeScope, base.SizeScope_NORMAL) + assert.Equal(sizeScope, commonv1.SizeScope_NORMAL) }, }, { @@ -1313,7 +1314,7 @@ func TestTask_SizeScope(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - task := NewTask(tc.id, tc.url, base.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) + task := NewTask(tc.id, tc.url, commonv1.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) task.ContentLength.Store(tc.contentLength) task.TotalPieceCount.Store(tc.totalPieceCount) tc.expect(t, task) @@ -1325,7 +1326,7 @@ func TestTask_CanBackToSource(t *testing.T) { tests := []struct { name string id string - urlMeta *base.UrlMeta + urlMeta *commonv1.UrlMeta url string backToSourceLimit int32 expect func(t *testing.T, task *Task) @@ -1342,7 +1343,7 @@ func TestTask_CanBackToSource(t *testing.T) { }, }, { - name: "task can not base-to-source", + name: "task can not back-to-source", id: mockTaskID, urlMeta: mockTaskURLMeta, url: mockTaskURL, @@ -1360,7 +1361,7 @@ func TestTask_CanBackToSource(t *testing.T) { backToSourceLimit: 1, expect: func(t *testing.T, task *Task) { assert := assert.New(t) - task.Type = base.TaskType_DfStore + task.Type = commonv1.TaskType_DfStore assert.Equal(task.CanBackToSource(), true) }, }, @@ -1372,7
+1373,7 @@ func TestTask_CanBackToSource(t *testing.T) { backToSourceLimit: 1, expect: func(t *testing.T, task *Task) { assert := assert.New(t) - task.Type = base.TaskType_DfCache + task.Type = commonv1.TaskType_DfCache assert.Equal(task.CanBackToSource(), false) }, }, @@ -1380,7 +1381,7 @@ func TestTask_CanBackToSource(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - task := NewTask(tc.id, tc.url, base.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) + task := NewTask(tc.id, tc.url, commonv1.TaskType_Normal, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)) tc.expect(t, task) }) } @@ -1389,13 +1390,13 @@ func TestTask_CanBackToSource(t *testing.T) { func TestTask_NotifyPeers(t *testing.T) { tests := []struct { name string - run func(t *testing.T, task *Task, mockPeer *Peer, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder) + run func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) }{ { name: "peer state is PeerStatePending", - run: func(t *testing.T, task *Task, mockPeer *Peer, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder) { + run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { mockPeer.FSM.SetState(PeerStatePending) - task.NotifyPeers(&rpcscheduler.PeerPacket{Code: base.Code_SchedTaskStatusError}, PeerEventDownloadFailed) + task.NotifyPeers(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}, PeerEventDownloadFailed) assert := assert.New(t) assert.True(mockPeer.FSM.Is(PeerStatePending)) @@ -1403,9 +1404,9 @@ func TestTask_NotifyPeers(t *testing.T) { }, { name: "peer state is PeerStateRunning and stream is 
empty", - run: func(t *testing.T, task *Task, mockPeer *Peer, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder) { + run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { mockPeer.FSM.SetState(PeerStateRunning) - task.NotifyPeers(&rpcscheduler.PeerPacket{Code: base.Code_SchedTaskStatusError}, PeerEventDownloadFailed) + task.NotifyPeers(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}, PeerEventDownloadFailed) assert := assert.New(t) assert.True(mockPeer.FSM.Is(PeerStateRunning)) @@ -1413,12 +1414,12 @@ func TestTask_NotifyPeers(t *testing.T) { }, { name: "peer state is PeerStateRunning and stream sending failed", - run: func(t *testing.T, task *Task, mockPeer *Peer, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder) { + run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { mockPeer.FSM.SetState(PeerStateRunning) mockPeer.StoreStream(stream) - ms.Send(gomock.Eq(&rpcscheduler.PeerPacket{Code: base.Code_SchedTaskStatusError})).Return(errors.New("foo")).Times(1) + ms.Send(gomock.Eq(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError})).Return(errors.New("foo")).Times(1) - task.NotifyPeers(&rpcscheduler.PeerPacket{Code: base.Code_SchedTaskStatusError}, PeerEventDownloadFailed) + task.NotifyPeers(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}, PeerEventDownloadFailed) assert := assert.New(t) assert.True(mockPeer.FSM.Is(PeerStateRunning)) @@ -1426,12 +1427,12 @@ func TestTask_NotifyPeers(t *testing.T) { }, { name: "peer state is PeerStateRunning and state changing failed", - run: func(t *testing.T, task *Task, mockPeer *Peer, stream 
rpcscheduler.Scheduler_ReportPieceResultServer, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder) { + run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { mockPeer.FSM.SetState(PeerStateRunning) mockPeer.StoreStream(stream) - ms.Send(gomock.Eq(&rpcscheduler.PeerPacket{Code: base.Code_SchedTaskStatusError})).Return(errors.New("foo")).Times(1) + ms.Send(gomock.Eq(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError})).Return(errors.New("foo")).Times(1) - task.NotifyPeers(&rpcscheduler.PeerPacket{Code: base.Code_SchedTaskStatusError}, PeerEventDownloadFailed) + task.NotifyPeers(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}, PeerEventDownloadFailed) assert := assert.New(t) assert.True(mockPeer.FSM.Is(PeerStateRunning)) @@ -1439,12 +1440,12 @@ func TestTask_NotifyPeers(t *testing.T) { }, { name: "peer state is PeerStateRunning and notify peer successfully", - run: func(t *testing.T, task *Task, mockPeer *Peer, stream rpcscheduler.Scheduler_ReportPieceResultServer, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder) { + run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) { mockPeer.FSM.SetState(PeerStateRunning) mockPeer.StoreStream(stream) - ms.Send(gomock.Eq(&rpcscheduler.PeerPacket{Code: base.Code_SchedTaskStatusError})).Return(nil).Times(1) + ms.Send(gomock.Eq(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError})).Return(nil).Times(1) - task.NotifyPeers(&rpcscheduler.PeerPacket{Code: base.Code_SchedTaskStatusError}, PeerEventDownloadFailed) + task.NotifyPeers(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}, PeerEventDownloadFailed) assert := assert.New(t) assert.True(mockPeer.FSM.Is(PeerStateFailed)) @@ -1456,10 +1457,10 @@ func 
TestTask_NotifyPeers(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() - stream := rpcschedulermocks.NewMockScheduler_ReportPieceResultServer(ctl) + stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl) mockHost := NewHost(mockRawHost) - task := NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) + task := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := NewPeer(mockPeerID, task, mockHost) task.StorePeer(mockPeer) tc.run(t, task, mockPeer, stream, stream.EXPECT()) diff --git a/scheduler/rpcserver/rpcserver.go b/scheduler/rpcserver/rpcserver.go index 7b756ba71..3e44605b3 100644 --- a/scheduler/rpcserver/rpcserver.go +++ b/scheduler/rpcserver/rpcserver.go @@ -24,9 +24,10 @@ import ( healthpb "google.golang.org/grpc/health/grpc_health_v1" empty "google.golang.org/protobuf/types/known/emptypb" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/rpc" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" "d7y.io/dragonfly/v2/scheduler/metrics" "d7y.io/dragonfly/v2/scheduler/resource" "d7y.io/dragonfly/v2/scheduler/service" @@ -38,7 +39,7 @@ type Server struct { service *service.Service // GRPC UnimplementedSchedulerServer interface. - scheduler.UnimplementedSchedulerServer + schedulerv1.UnimplementedSchedulerServer } // New returns a new transparent scheduler server from the given options. @@ -47,13 +48,13 @@ func New(service *service.Service, opts ...grpc.ServerOption) *grpc.Server { grpcServer := grpc.NewServer(append(rpc.DefaultServerOptions(), opts...)...) // Register servers on grpc server. 
- scheduler.RegisterSchedulerServer(grpcServer, svr) + schedulerv1.RegisterSchedulerServer(grpcServer, svr) healthpb.RegisterHealthServer(grpcServer, health.NewServer()) return grpcServer } // RegisterPeerTask registers peer and triggers seed peer download task. -func (s *Server) RegisterPeerTask(ctx context.Context, req *scheduler.PeerTaskRequest) (*scheduler.RegisterResult, error) { +func (s *Server) RegisterPeerTask(ctx context.Context, req *schedulerv1.PeerTaskRequest) (*schedulerv1.RegisterResult, error) { // FIXME: Scheudler will not generate task id. if req.TaskId == "" { req.TaskId = idgen.TaskID(req.Url, req.UrlMeta) @@ -76,7 +77,7 @@ func (s *Server) RegisterPeerTask(ctx context.Context, req *scheduler.PeerTaskRe } // ReportPieceResult handles the piece information reported by dfdaemon. -func (s *Server) ReportPieceResult(stream scheduler.Scheduler_ReportPieceResultServer) error { +func (s *Server) ReportPieceResult(stream schedulerv1.Scheduler_ReportPieceResultServer) error { metrics.ConcurrentScheduleGauge.Inc() defer metrics.ConcurrentScheduleGauge.Dec() @@ -84,12 +85,12 @@ func (s *Server) ReportPieceResult(stream scheduler.Scheduler_ReportPieceResultS } // ReportPeerResult handles peer result reported by dfdaemon. -func (s *Server) ReportPeerResult(ctx context.Context, req *scheduler.PeerResult) (*empty.Empty, error) { +func (s *Server) ReportPeerResult(ctx context.Context, req *schedulerv1.PeerResult) (*empty.Empty, error) { return new(empty.Empty), s.service.ReportPeerResult(ctx, req) } // StatTask checks if the given task exists. 
-func (s *Server) StatTask(ctx context.Context, req *scheduler.StatTaskRequest) (*scheduler.Task, error) { +func (s *Server) StatTask(ctx context.Context, req *schedulerv1.StatTaskRequest) (*schedulerv1.Task, error) { metrics.StatTaskCount.Inc() task, err := s.service.StatTask(ctx, req) if err != nil { @@ -101,7 +102,7 @@ func (s *Server) StatTask(ctx context.Context, req *scheduler.StatTaskRequest) ( } // AnnounceTask informs scheduler a peer has completed task. -func (s *Server) AnnounceTask(ctx context.Context, req *scheduler.AnnounceTaskRequest) (*empty.Empty, error) { +func (s *Server) AnnounceTask(ctx context.Context, req *schedulerv1.AnnounceTaskRequest) (*empty.Empty, error) { metrics.AnnounceCount.Inc() if err := s.service.AnnounceTask(ctx, req); err != nil { metrics.AnnounceFailureCount.Inc() @@ -112,6 +113,6 @@ func (s *Server) AnnounceTask(ctx context.Context, req *scheduler.AnnounceTaskRe } // LeaveTask makes the peer unschedulable. -func (s *Server) LeaveTask(ctx context.Context, req *scheduler.PeerTarget) (*empty.Empty, error) { +func (s *Server) LeaveTask(ctx context.Context, req *schedulerv1.PeerTarget) (*empty.Empty, error) { return new(empty.Empty), s.service.LeaveTask(ctx, req) } diff --git a/scheduler/scheduler.go b/scheduler/scheduler.go index c5fe4f1fe..a67a3b715 100644 --- a/scheduler/scheduler.go +++ b/scheduler/scheduler.go @@ -26,10 +26,11 @@ import ( "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" + managerv1 "d7y.io/api/pkg/apis/manager/v1" + logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/dfpath" "d7y.io/dragonfly/v2/pkg/gc" - rpcmanager "d7y.io/dragonfly/v2/pkg/rpc/manager" managerclient "d7y.io/dragonfly/v2/pkg/rpc/manager/client" "d7y.io/dragonfly/v2/scheduler/config" "d7y.io/dragonfly/v2/scheduler/job" @@ -81,8 +82,8 @@ func New(ctx context.Context, cfg *config.Config, d dfpath.Dfpath) (*Server, err s.managerClient = managerClient // Register to manager. 
- if _, err := s.managerClient.UpdateScheduler(&rpcmanager.UpdateSchedulerRequest{ - SourceType: rpcmanager.SourceType_SCHEDULER_SOURCE, + if _, err := s.managerClient.UpdateScheduler(&managerv1.UpdateSchedulerRequest{ + SourceType: managerv1.SourceType_SCHEDULER_SOURCE, HostName: s.config.Server.Host, Ip: s.config.Server.IP, Port: int32(s.config.Server.Port), @@ -197,8 +198,8 @@ func (s *Server) Serve() error { // scheduler keepalive with manager. go func() { logger.Info("start keepalive to manager") - s.managerClient.KeepAlive(s.config.Manager.KeepAlive.Interval, &rpcmanager.KeepAliveRequest{ - SourceType: rpcmanager.SourceType_SCHEDULER_SOURCE, + s.managerClient.KeepAlive(s.config.Manager.KeepAlive.Interval, &managerv1.KeepAliveRequest{ + SourceType: managerv1.SourceType_SCHEDULER_SOURCE, HostName: s.config.Server.Host, Ip: s.config.Server.IP, ClusterId: uint64(s.config.Manager.SchedulerClusterID), diff --git a/scheduler/scheduler/evaluator/evaluator_base_test.go b/scheduler/scheduler/evaluator/evaluator_base_test.go index a36e99112..f1a18de26 100644 --- a/scheduler/scheduler/evaluator/evaluator_base_test.go +++ b/scheduler/scheduler/evaluator/evaluator_base_test.go @@ -22,14 +22,15 @@ import ( "github.com/stretchr/testify/assert" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/scheduler" "d7y.io/dragonfly/v2/scheduler/resource" ) var ( - mockRawHost = &scheduler.PeerHost{ + mockRawHost = &schedulerv1.PeerHost{ Id: idgen.HostID("hostname", 8003), Ip: "127.0.0.1", RpcPort: 8003, @@ -40,7 +41,7 @@ var ( Idc: "idc", NetTopology: "net_topology", } - mockTaskURLMeta = &base.UrlMeta{ + mockTaskURLMeta = &commonv1.UrlMeta{ Digest: "digest", Tag: "tag", Range: "range", @@ -61,7 +62,7 @@ func TestEvaluatorBase_NewEvaluatorBase(t *testing.T) { expect func(t *testing.T, e any) }{ { - name: "new evaluator base", + name: "new 
evaluator base", expect: func(t *testing.T, e any) { assert := assert.New(t) assert.Equal(reflect.TypeOf(e).Elem().Name(), "evaluatorBase") @@ -78,9 +79,9 @@ func TestEvaluatorBase_NewEvaluatorBase(t *testing.T) { func TestEvaluatorBase_Evaluate(t *testing.T) { parentMockHost := resource.NewHost(mockRawHost) - parentMockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + parentMockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) childMockHost := resource.NewHost(mockRawHost) - childMockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + childMockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) tests := []struct { name string @@ -162,7 +163,7 @@ func TestEvaluatorBase_Evaluate(t *testing.T) { func TestEvaluatorBase_calculatePieceScore(t *testing.T) { mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) tests := []struct { name string @@ -306,7 +307,7 @@ func TestEvaluatorBase_calculateFreeLoadScore(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { host := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, 
resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := resource.NewPeer(mockPeerID, mockTask, host) tc.mock(host, mockPeer) tc.expect(t, calculateFreeLoadScore(host)) @@ -355,7 +356,7 @@ func TestEvaluatorBase_calculateHostTypeAffinityScore(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := resource.NewPeer(mockPeerID, mockTask, mockHost) tc.mock(peer) tc.expect(t, calculateHostTypeAffinityScore(peer)) @@ -560,7 +561,7 @@ func TestEvaluatorBase_calculateMultiElementAffinityScore(t *testing.T) { func TestEvaluatorBase_IsBadNode(t *testing.T) { mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) tests := []struct { name string diff --git a/scheduler/scheduler/scheduler.go b/scheduler/scheduler/scheduler.go index 7e577dff7..49be0154a 100644 --- a/scheduler/scheduler/scheduler.go +++ b/scheduler/scheduler/scheduler.go @@ -23,9 +23,10 @@ import ( "sort" "time" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/pkg/container/set" - "d7y.io/dragonfly/v2/pkg/rpc/base" - rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" "d7y.io/dragonfly/v2/scheduler/config" "d7y.io/dragonfly/v2/scheduler/resource" "d7y.io/dragonfly/v2/scheduler/scheduler/evaluator" @@ -87,7 +88,7 @@ func (s *scheduler) ScheduleParent(ctx 
context.Context, peer *resource.Peer, blo n, needBackToSource) // Notify peer back-to-source. - if err := stream.Send(&rpcscheduler.PeerPacket{Code: base.Code_SchedNeedBackSource}); err != nil { + if err := stream.Send(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedNeedBackSource}); err != nil { peer.Log.Errorf("send packet failed: %s", err.Error()) return } @@ -118,11 +119,11 @@ func (s *scheduler) ScheduleParent(ctx context.Context, peer *resource.Peer, blo } // Notify peer schedule failed. - if err := stream.Send(&rpcscheduler.PeerPacket{Code: base.Code_SchedTaskStatusError}); err != nil { + if err := stream.Send(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}); err != nil { peer.Log.Errorf("send packet failed: %s", err.Error()) return } - peer.Log.Errorf("peer scheduling exceeds the limit %d times and return code %d", s.config.RetryLimit, base.Code_SchedTaskStatusError) + peer.Log.Errorf("peer scheduling exceeds the limit %d times and return code %d", s.config.RetryLimit, commonv1.Code_SchedTaskStatusError) return } @@ -308,31 +309,31 @@ func (s *scheduler) filterCandidateParents(peer *resource.Peer, blocklist set.Sa } // Construct peer successful packet. 
-func constructSuccessPeerPacket(dynconfig config.DynconfigInterface, peer *resource.Peer, parent *resource.Peer, candidateParents []*resource.Peer) *rpcscheduler.PeerPacket { +func constructSuccessPeerPacket(dynconfig config.DynconfigInterface, peer *resource.Peer, parent *resource.Peer, candidateParents []*resource.Peer) *schedulerv1.PeerPacket { parallelCount := config.DefaultClientParallelCount if config, ok := dynconfig.GetSchedulerClusterClientConfig(); ok && config.ParallelCount > 0 { parallelCount = int(config.ParallelCount) } - var CandidatePeers []*rpcscheduler.PeerPacket_DestPeer + var CandidatePeers []*schedulerv1.PeerPacket_DestPeer for _, candidateParent := range candidateParents { - CandidatePeers = append(CandidatePeers, &rpcscheduler.PeerPacket_DestPeer{ + CandidatePeers = append(CandidatePeers, &schedulerv1.PeerPacket_DestPeer{ Ip: candidateParent.Host.IP, RpcPort: candidateParent.Host.Port, PeerId: candidateParent.ID, }) } - return &rpcscheduler.PeerPacket{ + return &schedulerv1.PeerPacket{ TaskId: peer.Task.ID, SrcPid: peer.ID, ParallelCount: int32(parallelCount), - MainPeer: &rpcscheduler.PeerPacket_DestPeer{ + MainPeer: &schedulerv1.PeerPacket_DestPeer{ Ip: parent.Host.IP, RpcPort: parent.Host.Port, PeerId: parent.ID, }, CandidatePeers: CandidatePeers, - Code: base.Code_Success, + Code: commonv1.Code_Success, } } diff --git a/scheduler/scheduler/scheduler_test.go b/scheduler/scheduler/scheduler_test.go index 5d04e5376..8bbfd0e51 100644 --- a/scheduler/scheduler/scheduler_test.go +++ b/scheduler/scheduler/scheduler_test.go @@ -27,12 +27,13 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + commonv1 "d7y.io/api/pkg/apis/common/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/api/pkg/apis/scheduler/v1/mocks" + "d7y.io/dragonfly/v2/manager/types" "d7y.io/dragonfly/v2/pkg/container/set" "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/base" - rpcscheduler 
"d7y.io/dragonfly/v2/pkg/rpc/scheduler" - rpcschedulermocks "d7y.io/dragonfly/v2/pkg/rpc/scheduler/mocks" "d7y.io/dragonfly/v2/scheduler/config" configmocks "d7y.io/dragonfly/v2/scheduler/config/mocks" "d7y.io/dragonfly/v2/scheduler/resource" @@ -48,7 +49,7 @@ var ( BackSourceCount: int(mockTaskBackToSourceLimit), Algorithm: evaluator.DefaultAlgorithm, } - mockRawHost = &rpcscheduler.PeerHost{ + mockRawHost = &schedulerv1.PeerHost{ Id: idgen.HostID("hostname", 8003), Ip: "127.0.0.1", RpcPort: 8003, @@ -60,7 +61,7 @@ var ( NetTopology: "net_topology", } - mockRawSeedHost = &rpcscheduler.PeerHost{ + mockRawSeedHost = &schedulerv1.PeerHost{ Id: idgen.HostID("hostname_seed", 8003), Ip: "127.0.0.1", RpcPort: 8003, @@ -72,7 +73,7 @@ var ( NetTopology: "net_topology", } - mockTaskURLMeta = &base.UrlMeta{ + mockTaskURLMeta = &commonv1.UrlMeta{ Digest: "digest", Tag: "tag", Range: "range", @@ -127,12 +128,12 @@ func TestScheduler_New(t *testing.T) { func TestScheduler_ScheduleParent(t *testing.T) { tests := []struct { name string - mock func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) + mock func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, mr *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) expect func(t *testing.T, peer *resource.Peer) }{ { name: "context was done", - mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + 
mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, mr *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) cancel() }, @@ -143,7 +144,7 @@ func TestScheduler_ScheduleParent(t *testing.T) { }, { name: "peer needs back-to-source and peer stream load failed", - mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, mr *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { task := peer.Task task.StorePeer(peer) peer.NeedBackToSource.Store(true) @@ -156,14 +157,14 @@ func TestScheduler_ScheduleParent(t *testing.T) { }, { name: "peer needs back-to-source and send Code_SchedNeedBackSource code failed", - mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, mr *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { task := peer.Task task.StorePeer(peer) peer.NeedBackToSource.Store(true) peer.FSM.SetState(resource.PeerStateRunning) 
peer.StoreStream(stream) - mr.Send(gomock.Eq(&rpcscheduler.PeerPacket{Code: base.Code_SchedNeedBackSource})).Return(errors.New("foo")).Times(1) + mr.Send(gomock.Eq(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedNeedBackSource})).Return(errors.New("foo")).Times(1) }, expect: func(t *testing.T, peer *resource.Peer) { assert := assert.New(t) @@ -173,14 +174,14 @@ func TestScheduler_ScheduleParent(t *testing.T) { }, { name: "peer needs back-to-source and send Code_SchedNeedBackSource code success", - mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, mr *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { task := peer.Task task.StorePeer(peer) peer.NeedBackToSource.Store(true) peer.FSM.SetState(resource.PeerStateRunning) peer.StoreStream(stream) - mr.Send(gomock.Eq(&rpcscheduler.PeerPacket{Code: base.Code_SchedNeedBackSource})).Return(nil).Times(1) + mr.Send(gomock.Eq(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedNeedBackSource})).Return(nil).Times(1) }, expect: func(t *testing.T, peer *resource.Peer) { assert := assert.New(t) @@ -191,7 +192,7 @@ func TestScheduler_ScheduleParent(t *testing.T) { }, { name: "peer needs back-to-source and task state is TaskStateFailed", - mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(cancel 
context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, mr *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { task := peer.Task task.StorePeer(peer) peer.NeedBackToSource.Store(true) @@ -199,7 +200,7 @@ func TestScheduler_ScheduleParent(t *testing.T) { task.FSM.SetState(resource.TaskStateFailed) peer.StoreStream(stream) - mr.Send(gomock.Eq(&rpcscheduler.PeerPacket{Code: base.Code_SchedNeedBackSource})).Return(nil).Times(1) + mr.Send(gomock.Eq(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedNeedBackSource})).Return(nil).Times(1) }, expect: func(t *testing.T, peer *resource.Peer) { assert := assert.New(t) @@ -210,7 +211,7 @@ func TestScheduler_ScheduleParent(t *testing.T) { }, { name: "schedule exceeds RetryBackSourceLimit and peer stream load failed", - mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, mr *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { task := peer.Task task.StorePeer(peer) peer.FSM.SetState(resource.PeerStateRunning) @@ -223,7 +224,7 @@ func TestScheduler_ScheduleParent(t *testing.T) { }, { name: "schedule exceeds RetryLimit and peer stream load failed", - mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md 
*configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, mr *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { task := peer.Task task.StorePeer(peer) peer.FSM.SetState(resource.PeerStateRunning) @@ -238,7 +239,7 @@ func TestScheduler_ScheduleParent(t *testing.T) { }, { name: "schedule exceeds RetryLimit and send Code_SchedTaskStatusError code failed", - mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, mr *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { task := peer.Task task.StorePeer(peer) peer.FSM.SetState(resource.PeerStateRunning) @@ -247,7 +248,7 @@ func TestScheduler_ScheduleParent(t *testing.T) { gomock.InOrder( md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, false).Times(2), - mr.Send(gomock.Eq(&rpcscheduler.PeerPacket{Code: base.Code_SchedTaskStatusError})).Return(errors.New("foo")).Times(1), + mr.Send(gomock.Eq(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError})).Return(errors.New("foo")).Times(1), ) }, expect: func(t *testing.T, peer *resource.Peer) { @@ -258,7 +259,7 @@ func TestScheduler_ScheduleParent(t *testing.T) { }, { name: "schedule exceeds RetryLimit and send Code_SchedTaskStatusError code success", - mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], 
stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, mr *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { task := peer.Task task.StorePeer(peer) peer.FSM.SetState(resource.PeerStateRunning) @@ -267,7 +268,7 @@ func TestScheduler_ScheduleParent(t *testing.T) { gomock.InOrder( md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, false).Times(2), - mr.Send(gomock.Eq(&rpcscheduler.PeerPacket{Code: base.Code_SchedTaskStatusError})).Return(nil).Times(1), + mr.Send(gomock.Eq(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError})).Return(nil).Times(1), ) }, expect: func(t *testing.T, peer *resource.Peer) { @@ -278,7 +279,7 @@ func TestScheduler_ScheduleParent(t *testing.T) { }, { name: "schedule succeeded", - mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, mr *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(cancel context.CancelFunc, peer *resource.Peer, seedPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, mr *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { task := peer.Task task.StorePeer(peer) task.StorePeer(seedPeer) @@ -305,11 +306,11 @@ func TestScheduler_ScheduleParent(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() - stream := rpcschedulermocks.NewMockScheduler_ReportPieceResultServer(ctl) + stream := 
mocks.NewMockScheduler_ReportPieceResultServer(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) ctx, cancel := context.WithCancel(context.Background()) mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := resource.NewPeer(mockPeerID, mockTask, mockHost) mockSeedHost := resource.NewHost(mockRawSeedHost, resource.WithHostType(resource.HostTypeSuperSeed)) seedPeer := resource.NewPeer(mockSeedPeerID, mockTask, mockSeedHost) @@ -326,12 +327,12 @@ func TestScheduler_ScheduleParent(t *testing.T) { func TestScheduler_NotifyAndFindParent(t *testing.T) { tests := []struct { name string - mock func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) + mock func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) expect func(t *testing.T, peer *resource.Peer, parents []*resource.Peer, ok bool) }{ { name: "peer state is PeerStatePending", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms 
*rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStatePending) }, expect: func(t *testing.T, peer *resource.Peer, parents []*resource.Peer, ok bool) { @@ -341,7 +342,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "peer state is PeerStateReceivedSmall", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateReceivedSmall) }, expect: func(t *testing.T, peer *resource.Peer, parents []*resource.Peer, ok bool) { @@ -351,7 +352,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "peer state is PeerStateReceivedNormal", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms 
*rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateReceivedNormal) }, expect: func(t *testing.T, peer *resource.Peer, parents []*resource.Peer, ok bool) { @@ -361,7 +362,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "peer state is PeerStateBackToSource", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateBackToSource) }, expect: func(t *testing.T, peer *resource.Peer, parents []*resource.Peer, ok bool) { @@ -371,7 +372,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "peer state is PeerStateSucceeded", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms 
*rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateSucceeded) }, expect: func(t *testing.T, peer *resource.Peer, parents []*resource.Peer, ok bool) { @@ -381,7 +382,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "peer state is PeerStateFailed", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateFailed) }, expect: func(t *testing.T, peer *resource.Peer, parents []*resource.Peer, ok bool) { @@ -391,7 +392,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "peer state is PeerStateLeave", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md 
*configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateLeave) }, expect: func(t *testing.T, peer *resource.Peer, parents []*resource.Peer, ok bool) { @@ -401,7 +402,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "task peers is empty", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) peer.Task.StorePeer(peer) md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, false).Times(1) @@ -413,7 +414,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "task contains only one peer and peer is itself", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + 
mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) peer.Task.StorePeer(peer) @@ -426,7 +427,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "peer is in blocklist", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) peer.Task.StorePeer(peer) peer.Task.StorePeer(mockPeer) @@ -441,7 +442,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "peer is bad node", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig 
config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) peer.FSM.SetState(resource.PeerStateFailed) peer.Task.StorePeer(mockPeer) @@ -453,7 +454,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "parent is peer's descendant", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) mockPeer.FSM.SetState(resource.PeerStateRunning) peer.Task.StorePeer(peer) @@ -471,7 +472,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "parent is peer's ancestor", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md 
*configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) mockPeer.FSM.SetState(resource.PeerStateRunning) peer.Task.StorePeer(peer) @@ -489,7 +490,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "parent free upload load is zero", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) mockPeer.FSM.SetState(resource.PeerStateRunning) peer.Task.StorePeer(peer) @@ -505,7 +506,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "peer stream is empty", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) 
mockPeer.FSM.SetState(resource.PeerStateRunning) peer.Task.StorePeer(peer) @@ -521,7 +522,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "peer stream send failed", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) mockPeer.FSM.SetState(resource.PeerStateRunning) peer.Task.BackToSourcePeers.Add(mockPeer.ID) @@ -545,7 +546,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { }, { name: "schedule parent", - mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream rpcscheduler.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { + mock: func(peer *resource.Peer, mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, blocklist set.SafeSet[string], stream schedulerv1.Scheduler_ReportPieceResultServer, dynconfig config.DynconfigInterface, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) mockPeer.FSM.SetState(resource.PeerStateRunning) candidatePeer := resource.NewPeer(idgen.PeerID("127.0.0.1"), 
mockTask, mockHost) @@ -579,10 +580,10 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ctl := gomock.NewController(t) defer ctl.Finish() - stream := rpcschedulermocks.NewMockScheduler_ReportPieceResultServer(ctl) + stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl) dynconfig := configmocks.NewMockDynconfigInterface(ctl) mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := resource.NewPeer(mockPeerID, mockTask, mockHost) mockPeer := resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost) blocklist := set.NewSafeSet[string]() @@ -830,7 +831,7 @@ func TestScheduler_FindParent(t *testing.T) { defer ctl.Finish() dynconfig := configmocks.NewMockDynconfigInterface(ctl) mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := resource.NewPeer(mockPeerID, mockTask, mockHost) var mockPeers []*resource.Peer @@ -852,7 +853,7 @@ func TestScheduler_constructSuccessPeerPacket(t *testing.T) { tests := []struct { name string mock func(md *configmocks.MockDynconfigInterfaceMockRecorder) - expect func(t *testing.T, packet *rpcscheduler.PeerPacket, parent *resource.Peer, candidateParents []*resource.Peer) + expect func(t *testing.T, packet *schedulerv1.PeerPacket, parent *resource.Peer, candidateParents []*resource.Peer) }{ { name: "get parallelCount from dynconfig", @@ -861,25 +862,25 @@ func 
TestScheduler_constructSuccessPeerPacket(t *testing.T) { ParallelCount: 1, }, true).Times(1) }, - expect: func(t *testing.T, packet *rpcscheduler.PeerPacket, parent *resource.Peer, candidateParents []*resource.Peer) { + expect: func(t *testing.T, packet *schedulerv1.PeerPacket, parent *resource.Peer, candidateParents []*resource.Peer) { assert := assert.New(t) - assert.EqualValues(packet, &rpcscheduler.PeerPacket{ + assert.EqualValues(packet, &schedulerv1.PeerPacket{ TaskId: mockTaskID, SrcPid: mockPeerID, ParallelCount: 1, - MainPeer: &rpcscheduler.PeerPacket_DestPeer{ + MainPeer: &schedulerv1.PeerPacket_DestPeer{ Ip: parent.Host.IP, RpcPort: parent.Host.Port, PeerId: parent.ID, }, - CandidatePeers: []*rpcscheduler.PeerPacket_DestPeer{ + CandidatePeers: []*schedulerv1.PeerPacket_DestPeer{ { Ip: candidateParents[0].Host.IP, RpcPort: candidateParents[0].Host.Port, PeerId: candidateParents[0].ID, }, }, - Code: base.Code_Success, + Code: commonv1.Code_Success, }) }, }, @@ -888,25 +889,25 @@ func TestScheduler_constructSuccessPeerPacket(t *testing.T) { mock: func(md *configmocks.MockDynconfigInterfaceMockRecorder) { md.GetSchedulerClusterClientConfig().Return(types.SchedulerClusterClientConfig{}, false).Times(1) }, - expect: func(t *testing.T, packet *rpcscheduler.PeerPacket, parent *resource.Peer, candidateParents []*resource.Peer) { + expect: func(t *testing.T, packet *schedulerv1.PeerPacket, parent *resource.Peer, candidateParents []*resource.Peer) { assert := assert.New(t) - assert.EqualValues(packet, &rpcscheduler.PeerPacket{ + assert.EqualValues(packet, &schedulerv1.PeerPacket{ TaskId: mockTaskID, SrcPid: mockPeerID, ParallelCount: 4, - MainPeer: &rpcscheduler.PeerPacket_DestPeer{ + MainPeer: &schedulerv1.PeerPacket_DestPeer{ Ip: parent.Host.IP, RpcPort: parent.Host.Port, PeerId: parent.ID, }, - CandidatePeers: []*rpcscheduler.PeerPacket_DestPeer{ + CandidatePeers: []*schedulerv1.PeerPacket_DestPeer{ { Ip: candidateParents[0].Host.IP, RpcPort: 
candidateParents[0].Host.Port, PeerId: candidateParents[0].ID, }, }, - Code: base.Code_Success, + Code: commonv1.Code_Success, }) }, }, @@ -918,7 +919,7 @@ func TestScheduler_constructSuccessPeerPacket(t *testing.T) { defer ctl.Finish() dynconfig := configmocks.NewMockDynconfigInterface(ctl) mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := resource.NewPeer(mockPeerID, mockTask, mockHost) parent := resource.NewPeer(idgen.PeerID("127.0.0.1"), mockTask, mockHost) diff --git a/scheduler/service/service.go b/scheduler/service/service.go index 2a3041f38..b421930e8 100644 --- a/scheduler/service/service.go +++ b/scheduler/service/service.go @@ -25,13 +25,14 @@ import ( "go.opentelemetry.io/otel/trace" "google.golang.org/grpc/status" + commonv1 "d7y.io/api/pkg/apis/common/v1" + errordetailsv1 "d7y.io/api/pkg/apis/errordetails/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + "d7y.io/dragonfly/v2/internal/dferrors" logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/container/set" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/base/common" - "d7y.io/dragonfly/v2/pkg/rpc/errordetails" - rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" + "d7y.io/dragonfly/v2/pkg/rpc/common" pkgtime "d7y.io/dragonfly/v2/pkg/time" "d7y.io/dragonfly/v2/scheduler/config" "d7y.io/dragonfly/v2/scheduler/metrics" @@ -76,13 +77,13 @@ func New( } // RegisterPeerTask registers peer and triggers seed peer download task. 
-func (s *Service) RegisterPeerTask(ctx context.Context, req *rpcscheduler.PeerTaskRequest) (*rpcscheduler.RegisterResult, error) { +func (s *Service) RegisterPeerTask(ctx context.Context, req *schedulerv1.PeerTaskRequest) (*schedulerv1.RegisterResult, error) { // Register task and trigger seed peer download task. task, needBackToSource, err := s.registerTask(ctx, req) if err != nil { msg := fmt.Sprintf("peer %s register is failed: %s", req.PeerId, err.Error()) logger.Error(msg) - return nil, dferrors.New(base.Code_SchedTaskStatusError, msg) + return nil, dferrors.New(commonv1.Code_SchedTaskStatusError, msg) } host := s.registerHost(ctx, req.PeerHost) peer := s.registerPeer(ctx, req.PeerId, task, host, req.UrlMeta.Tag) @@ -97,30 +98,30 @@ func (s *Service) RegisterPeerTask(ctx context.Context, req *rpcscheduler.PeerTa if task.FSM.Is(resource.TaskStateSucceeded) && err == nil { peer.Log.Info("task can be reused") switch sizeScope { - case base.SizeScope_TINY: + case commonv1.SizeScope_TINY: peer.Log.Info("task size scope is tiny and return piece content directly") if len(task.DirectPiece) > 0 && int64(len(task.DirectPiece)) == task.ContentLength.Load() { if err := peer.FSM.Event(resource.PeerEventRegisterTiny); err != nil { msg := fmt.Sprintf("peer %s register is failed: %s", req.PeerId, err.Error()) peer.Log.Error(msg) - return nil, dferrors.New(base.Code_SchedError, msg) + return nil, dferrors.New(commonv1.Code_SchedError, msg) } - return &rpcscheduler.RegisterResult{ + return &schedulerv1.RegisterResult{ TaskId: task.ID, TaskType: task.Type, - SizeScope: base.SizeScope_TINY, - DirectPiece: &rpcscheduler.RegisterResult_PieceContent{ + SizeScope: commonv1.SizeScope_TINY, + DirectPiece: &schedulerv1.RegisterResult_PieceContent{ PieceContent: task.DirectPiece, }, }, nil } - // Fallback to base.SizeScope_SMALL. + // Fallback to commonv1.SizeScope_SMALL. peer.Log.Warnf("task size scope is tiny, length of direct piece is %d and content length is %d. 
fall through to size scope small", len(task.DirectPiece), task.ContentLength.Load()) fallthrough - case base.SizeScope_SMALL: + case commonv1.SizeScope_SMALL: peer.Log.Info("task size scope is small") // There is no need to build a tree, just find the parent and return. parent, ok := s.scheduler.FindParent(ctx, peer, set.NewSafeSet[string]()) @@ -129,13 +130,13 @@ func (s *Service) RegisterPeerTask(ctx context.Context, req *rpcscheduler.PeerTa if err := peer.FSM.Event(resource.PeerEventRegisterNormal); err != nil { msg := fmt.Sprintf("peer %s register is failed: %s", req.PeerId, err.Error()) peer.Log.Error(msg) - return nil, dferrors.New(base.Code_SchedError, msg) + return nil, dferrors.New(commonv1.Code_SchedError, msg) } - return &rpcscheduler.RegisterResult{ + return &schedulerv1.RegisterResult{ TaskId: task.ID, TaskType: task.Type, - SizeScope: base.SizeScope_NORMAL, + SizeScope: commonv1.SizeScope_NORMAL, }, nil } @@ -146,13 +147,13 @@ func (s *Service) RegisterPeerTask(ctx context.Context, req *rpcscheduler.PeerTa if err := peer.FSM.Event(resource.PeerEventRegisterNormal); err != nil { msg := fmt.Sprintf("peer %s register is failed: %s", req.PeerId, err.Error()) peer.Log.Error(msg) - return nil, dferrors.New(base.Code_SchedError, msg) + return nil, dferrors.New(commonv1.Code_SchedError, msg) } - return &rpcscheduler.RegisterResult{ + return &schedulerv1.RegisterResult{ TaskId: task.ID, TaskType: task.Type, - SizeScope: base.SizeScope_NORMAL, + SizeScope: commonv1.SizeScope_NORMAL, }, nil } @@ -162,13 +163,13 @@ func (s *Service) RegisterPeerTask(ctx context.Context, req *rpcscheduler.PeerTa if err := peer.FSM.Event(resource.PeerEventRegisterNormal); err != nil { msg := fmt.Sprintf("peer %s register is failed: %s", req.PeerId, err.Error()) peer.Log.Error(msg) - return nil, dferrors.New(base.Code_SchedError, msg) + return nil, dferrors.New(commonv1.Code_SchedError, msg) } - return &rpcscheduler.RegisterResult{ + return &schedulerv1.RegisterResult{ TaskId: 
task.ID, TaskType: task.Type, - SizeScope: base.SizeScope_NORMAL, + SizeScope: commonv1.SizeScope_NORMAL, }, nil } @@ -176,7 +177,7 @@ func (s *Service) RegisterPeerTask(ctx context.Context, req *rpcscheduler.PeerTa if err := task.DeletePeerInEdges(peer.ID); err != nil { msg := fmt.Sprintf("peer deletes inedges failed: %s", err.Error()) peer.Log.Error(msg) - return nil, dferrors.New(base.Code_SchedError, msg) + return nil, dferrors.New(commonv1.Code_SchedError, msg) } // Add edges between parent and peer. @@ -185,27 +186,27 @@ func (s *Service) RegisterPeerTask(ctx context.Context, req *rpcscheduler.PeerTa if err := peer.FSM.Event(resource.PeerEventRegisterNormal); err != nil { msg := fmt.Sprintf("peer %s register is failed: %s", req.PeerId, err.Error()) peer.Log.Error(msg) - return nil, dferrors.New(base.Code_SchedError, msg) + return nil, dferrors.New(commonv1.Code_SchedError, msg) } - return &rpcscheduler.RegisterResult{ + return &schedulerv1.RegisterResult{ TaskId: task.ID, TaskType: task.Type, - SizeScope: base.SizeScope_NORMAL, + SizeScope: commonv1.SizeScope_NORMAL, }, nil } if err := peer.FSM.Event(resource.PeerEventRegisterSmall); err != nil { msg := fmt.Sprintf("peer %s register is failed: %s", req.PeerId, err.Error()) peer.Log.Error(msg) - return nil, dferrors.New(base.Code_SchedError, msg) + return nil, dferrors.New(commonv1.Code_SchedError, msg) } peer.Log.Infof("schedule parent successful, replace parent to %s ", parent.ID) - singlePiece := &rpcscheduler.SinglePiece{ + singlePiece := &schedulerv1.SinglePiece{ DstPid: parent.ID, DstAddr: fmt.Sprintf("%s:%d", parent.Host.IP, parent.Host.DownloadPort), - PieceInfo: &base.PieceInfo{ + PieceInfo: &commonv1.PieceInfo{ PieceNum: firstPiece.PieceNum, RangeStart: firstPiece.RangeStart, RangeSize: firstPiece.RangeSize, @@ -216,11 +217,11 @@ func (s *Service) RegisterPeerTask(ctx context.Context, req *rpcscheduler.PeerTa } peer.Log.Infof("task size scope is small and return single piece: %#v %#v", singlePiece, 
singlePiece.PieceInfo) - return &rpcscheduler.RegisterResult{ + return &schedulerv1.RegisterResult{ TaskId: task.ID, TaskType: task.Type, - SizeScope: base.SizeScope_SMALL, - DirectPiece: &rpcscheduler.RegisterResult_SinglePiece{ + SizeScope: commonv1.SizeScope_SMALL, + DirectPiece: &schedulerv1.RegisterResult_SinglePiece{ SinglePiece: singlePiece, }, }, nil @@ -229,13 +230,13 @@ func (s *Service) RegisterPeerTask(ctx context.Context, req *rpcscheduler.PeerTa if err := peer.FSM.Event(resource.PeerEventRegisterNormal); err != nil { msg := fmt.Sprintf("peer %s register is failed: %s", req.PeerId, err.Error()) peer.Log.Error(msg) - return nil, dferrors.New(base.Code_SchedError, msg) + return nil, dferrors.New(commonv1.Code_SchedError, msg) } - return &rpcscheduler.RegisterResult{ + return &schedulerv1.RegisterResult{ TaskId: task.ID, TaskType: task.Type, - SizeScope: base.SizeScope_NORMAL, + SizeScope: commonv1.SizeScope_NORMAL, }, nil } } @@ -245,18 +246,18 @@ func (s *Service) RegisterPeerTask(ctx context.Context, req *rpcscheduler.PeerTa if err := peer.FSM.Event(resource.PeerEventRegisterNormal); err != nil { msg := fmt.Sprintf("peer %s register is failed: %s", req.PeerId, err.Error()) peer.Log.Error(msg) - return nil, dferrors.New(base.Code_SchedError, msg) + return nil, dferrors.New(commonv1.Code_SchedError, msg) } - return &rpcscheduler.RegisterResult{ + return &schedulerv1.RegisterResult{ TaskId: task.ID, TaskType: task.Type, - SizeScope: base.SizeScope_NORMAL, + SizeScope: commonv1.SizeScope_NORMAL, }, nil } // ReportPieceResult handles the piece information reported by dfdaemon. 
-func (s *Service) ReportPieceResult(stream rpcscheduler.Scheduler_ReportPieceResultServer) error { +func (s *Service) ReportPieceResult(stream schedulerv1.Scheduler_ReportPieceResultServer) error { ctx := stream.Context() var ( peer *resource.Peer @@ -289,7 +290,7 @@ func (s *Service) ReportPieceResult(stream rpcscheduler.Scheduler_ReportPieceRes if !ok { msg := fmt.Sprintf("peer %s not found", piece.SrcPid) logger.Error(msg) - return dferrors.New(base.Code_SchedPeerNotFound, msg) + return dferrors.New(commonv1.Code_SchedPeerNotFound, msg) } // Peer setting stream. @@ -338,8 +339,8 @@ func (s *Service) ReportPieceResult(stream rpcscheduler.Scheduler_ReportPieceRes } // Handle piece download code. - if piece.Code != base.Code_Success { - if piece.Code == base.Code_ClientWaitPieceReady { + if piece.Code != commonv1.Code_Success { + if piece.Code == commonv1.Code_ClientWaitPieceReady { peer.Log.Debugf("receive piece code %d and wait for dfdaemon piece ready", piece.Code) continue } @@ -355,12 +356,12 @@ func (s *Service) ReportPieceResult(stream rpcscheduler.Scheduler_ReportPieceRes } // ReportPeerResult handles peer result reported by dfdaemon. -func (s *Service) ReportPeerResult(ctx context.Context, req *rpcscheduler.PeerResult) error { +func (s *Service) ReportPeerResult(ctx context.Context, req *schedulerv1.PeerResult) error { peer, ok := s.resource.PeerManager().Load(req.PeerId) if !ok { msg := fmt.Sprintf("report peer result and peer %s is not exists", req.PeerId) logger.Error(msg) - return dferrors.New(base.Code_SchedPeerNotFound, msg) + return dferrors.New(commonv1.Code_SchedPeerNotFound, msg) } metrics.DownloadCount.WithLabelValues(peer.Tag).Inc() @@ -397,16 +398,16 @@ func (s *Service) ReportPeerResult(ctx context.Context, req *rpcscheduler.PeerRe } // StatTask checks the current state of the task. 
-func (s *Service) StatTask(ctx context.Context, req *rpcscheduler.StatTaskRequest) (*rpcscheduler.Task, error) { +func (s *Service) StatTask(ctx context.Context, req *schedulerv1.StatTaskRequest) (*schedulerv1.Task, error) { task, loaded := s.resource.TaskManager().Load(req.TaskId) if !loaded { msg := fmt.Sprintf("task %s not found", req.TaskId) logger.Info(msg) - return nil, dferrors.New(base.Code_PeerTaskNotFound, msg) + return nil, dferrors.New(commonv1.Code_PeerTaskNotFound, msg) } task.Log.Debug("task has been found") - return &rpcscheduler.Task{ + return &schedulerv1.Task{ Id: task.ID, Type: task.Type, ContentLength: task.ContentLength.Load(), @@ -418,7 +419,7 @@ func (s *Service) StatTask(ctx context.Context, req *rpcscheduler.StatTaskReques } // AnnounceTask informs scheduler a peer has completed task. -func (s *Service) AnnounceTask(ctx context.Context, req *rpcscheduler.AnnounceTaskRequest) error { +func (s *Service) AnnounceTask(ctx context.Context, req *schedulerv1.AnnounceTaskRequest) error { taskID := req.TaskId peerID := req.PiecePacket.DstPid @@ -435,7 +436,7 @@ func (s *Service) AnnounceTask(ctx context.Context, req *rpcscheduler.AnnounceTa if err := task.FSM.Event(resource.TaskEventDownload); err != nil { msg := fmt.Sprintf("task fsm event failed: %s", err.Error()) peer.Log.Error(msg) - return dferrors.New(base.Code_SchedError, msg) + return dferrors.New(commonv1.Code_SchedError, msg) } } @@ -446,7 +447,7 @@ func (s *Service) AnnounceTask(ctx context.Context, req *rpcscheduler.AnnounceTa task.StorePiece(pieceInfo) } - s.handleTaskSuccess(ctx, task, &rpcscheduler.PeerResult{ + s.handleTaskSuccess(ctx, task, &schedulerv1.PeerResult{ TotalPieceCount: req.PiecePacket.TotalPiece, ContentLength: req.PiecePacket.ContentLength, }) @@ -459,7 +460,7 @@ func (s *Service) AnnounceTask(ctx context.Context, req *rpcscheduler.AnnounceTa if err := peer.FSM.Event(resource.PeerEventRegisterNormal); err != nil { msg := fmt.Sprintf("peer fsm event failed: %s", 
err.Error()) peer.Log.Error(msg) - return dferrors.New(base.Code_SchedError, msg) + return dferrors.New(commonv1.Code_SchedError, msg) } } @@ -469,7 +470,7 @@ func (s *Service) AnnounceTask(ctx context.Context, req *rpcscheduler.AnnounceTa if err := peer.FSM.Event(resource.PeerEventDownload); err != nil { msg := fmt.Sprintf("peer fsm event failed: %s", err.Error()) peer.Log.Error(msg) - return dferrors.New(base.Code_SchedError, msg) + return dferrors.New(commonv1.Code_SchedError, msg) } s.handlePeerSuccess(ctx, peer) @@ -480,12 +481,12 @@ func (s *Service) AnnounceTask(ctx context.Context, req *rpcscheduler.AnnounceTa } // LeaveTask makes the peer unschedulable. -func (s *Service) LeaveTask(ctx context.Context, req *rpcscheduler.PeerTarget) error { +func (s *Service) LeaveTask(ctx context.Context, req *schedulerv1.PeerTarget) error { peer, ok := s.resource.PeerManager().Load(req.PeerId) if !ok { msg := fmt.Sprintf("leave task and peer %s is not exists", req.PeerId) logger.Error(msg) - return dferrors.New(base.Code_SchedPeerNotFound, msg) + return dferrors.New(commonv1.Code_SchedPeerNotFound, msg) } metrics.LeaveTaskCount.WithLabelValues(peer.Tag).Inc() @@ -495,7 +496,7 @@ func (s *Service) LeaveTask(ctx context.Context, req *rpcscheduler.PeerTarget) e msg := fmt.Sprintf("peer fsm event failed: %s", err.Error()) peer.Log.Error(msg) - return dferrors.New(base.Code_SchedTaskStatusError, msg) + return dferrors.New(commonv1.Code_SchedTaskStatusError, msg) } // Reschedule a new parent to children of peer to exclude the current leave peer. @@ -509,8 +510,8 @@ func (s *Service) LeaveTask(ctx context.Context, req *rpcscheduler.PeerTarget) e } // registerTask creates a new task or reuses a previous task. 
-func (s *Service) registerTask(ctx context.Context, req *rpcscheduler.PeerTaskRequest) (*resource.Task, bool, error) { - task := resource.NewTask(req.TaskId, req.Url, base.TaskType_Normal, req.UrlMeta, resource.WithBackToSourceLimit(int32(s.config.Scheduler.BackSourceCount))) +func (s *Service) registerTask(ctx context.Context, req *schedulerv1.PeerTaskRequest) (*resource.Task, bool, error) { + task := resource.NewTask(req.TaskId, req.Url, commonv1.TaskType_Normal, req.UrlMeta, resource.WithBackToSourceLimit(int32(s.config.Scheduler.BackSourceCount))) task, loaded := s.resource.TaskManager().LoadOrStore(task) if loaded && !task.FSM.Is(resource.TaskStateFailed) { task.Log.Infof("task state is %s", task.FSM.Current()) @@ -543,7 +544,7 @@ func (s *Service) registerTask(ctx context.Context, req *rpcscheduler.PeerTaskRe } // registerHost creates a new host or reuses a previous host. -func (s *Service) registerHost(ctx context.Context, rawHost *rpcscheduler.PeerHost) *resource.Host { +func (s *Service) registerHost(ctx context.Context, rawHost *schedulerv1.PeerHost) *resource.Host { host, ok := s.resource.HostManager().Load(rawHost.Id) if !ok { // Get scheduler cluster client config by manager. @@ -636,7 +637,7 @@ func (s *Service) handleBeginOfPiece(ctx context.Context, peer *resource.Peer) { func (s *Service) handleEndOfPiece(ctx context.Context, peer *resource.Peer) {} // handlePieceSuccess handles successful piece. -func (s *Service) handlePieceSuccess(ctx context.Context, peer *resource.Peer, piece *rpcscheduler.PieceResult) { +func (s *Service) handlePieceSuccess(ctx context.Context, peer *resource.Peer, piece *schedulerv1.PieceResult) { // Update peer piece info peer.Pieces.Set(uint(piece.PieceInfo.PieceNum)) peer.AppendPieceCost(pkgtime.SubNano(int64(piece.EndTime), int64(piece.BeginTime)).Milliseconds()) @@ -649,7 +650,7 @@ func (s *Service) handlePieceSuccess(ctx context.Context, peer *resource.Peer, p } // handlePieceFail handles failed piece. 
-func (s *Service) handlePieceFail(ctx context.Context, peer *resource.Peer, piece *rpcscheduler.PieceResult) { +func (s *Service) handlePieceFail(ctx context.Context, peer *resource.Peer, piece *schedulerv1.PieceResult) { // Failed to download piece back-to-source. if peer.FSM.Is(resource.PeerStateBackToSource) { return @@ -667,12 +668,12 @@ func (s *Service) handlePieceFail(ctx context.Context, peer *resource.Peer, piec // It’s not a case of back-to-source downloading failed, // to help peer to reschedule the parent node. switch piece.Code { - case base.Code_PeerTaskNotFound: + case commonv1.Code_PeerTaskNotFound: if err := parent.FSM.Event(resource.PeerEventDownloadFailed); err != nil { peer.Log.Errorf("peer fsm event failed: %s", err.Error()) break } - case base.Code_ClientPieceNotFound: + case commonv1.Code_ClientPieceNotFound: // Dfdaemon downloading piece data from parent returns http error code 404. // If the parent is not a seed peer, reschedule parent for peer. // If the parent is a seed peer, scheduler need to trigger seed peer to download again. @@ -717,7 +718,7 @@ func (s *Service) handlePeerSuccess(ctx context.Context, peer *resource.Peer) { // If the peer type is tiny and back-to-source, // it need to directly download the tiny file and store the data in task DirectPiece. - if sizeScope == base.SizeScope_TINY && len(peer.Task.DirectPiece) == 0 { + if sizeScope == commonv1.SizeScope_TINY && len(peer.Task.DirectPiece) == 0 { data, err := peer.DownloadTinyFile() if err != nil { peer.Log.Errorf("download tiny task failed: %s", err.Error()) @@ -769,7 +770,7 @@ func (s *Service) handleLegacySeedPeer(ctx context.Context, peer *resource.Peer) // 1. Seed peer downloads the resource successfully. // 2. Dfdaemon back-to-source to download successfully. // 3. Peer announces it has the task. 
-func (s *Service) handleTaskSuccess(ctx context.Context, task *resource.Task, result *rpcscheduler.PeerResult) { +func (s *Service) handleTaskSuccess(ctx context.Context, task *resource.Task, result *schedulerv1.PeerResult) { if task.FSM.Is(resource.TaskStateSucceeded) { return } @@ -787,15 +788,15 @@ func (s *Service) handleTaskSuccess(ctx context.Context, task *resource.Task, re // Conditions for the task to switch to the TaskStateSucceeded are: // 1. Seed peer downloads the resource falied. // 2. Dfdaemon back-to-source to download failed. -func (s *Service) handleTaskFail(ctx context.Context, task *resource.Task, backToSourceErr *errordetails.SourceError, seedPeerErr error) { +func (s *Service) handleTaskFail(ctx context.Context, task *resource.Task, backToSourceErr *errordetailsv1.SourceError, seedPeerErr error) { // If peer back-to-source fails due to an unrecoverable error, // notify other peers of the failure, // and return the source metadata to peer. if backToSourceErr != nil { if !backToSourceErr.Temporary { - task.NotifyPeers(&rpcscheduler.PeerPacket{ - Code: base.Code_BackToSourceAborted, - ErrorDetail: &rpcscheduler.PeerPacket_SourceError{ + task.NotifyPeers(&schedulerv1.PeerPacket{ + Code: commonv1.Code_BackToSourceAborted, + Errordetails: &schedulerv1.PeerPacket_SourceError{ SourceError: backToSourceErr, }, }, resource.PeerEventDownloadFailed) @@ -808,11 +809,11 @@ func (s *Service) handleTaskFail(ctx context.Context, task *resource.Task, backT if st, ok := status.FromError(seedPeerErr); ok { for _, detail := range st.Details() { switch d := detail.(type) { - case *errordetails.SourceError: + case *errordetailsv1.SourceError: if !d.Temporary { - task.NotifyPeers(&rpcscheduler.PeerPacket{ - Code: base.Code_BackToSourceAborted, - ErrorDetail: &rpcscheduler.PeerPacket_SourceError{ + task.NotifyPeers(&schedulerv1.PeerPacket{ + Code: commonv1.Code_BackToSourceAborted, + Errordetails: &schedulerv1.PeerPacket_SourceError{ SourceError: d, }, }, 
resource.PeerEventDownloadFailed) @@ -824,8 +825,8 @@ func (s *Service) handleTaskFail(ctx context.Context, task *resource.Task, backT } else if task.PeerFailedCount.Load() > resource.FailedPeerCountLimit { // If the number of failed peers in the task is greater than FailedPeerCountLimit, // then scheduler notifies running peers of failure. - task.NotifyPeers(&rpcscheduler.PeerPacket{ - Code: base.Code_SchedTaskStatusError, + task.NotifyPeers(&schedulerv1.PeerPacket{ + Code: commonv1.Code_SchedTaskStatusError, }, resource.PeerEventDownloadFailed) task.PeerFailedCount.Store(0) } @@ -841,7 +842,7 @@ func (s *Service) handleTaskFail(ctx context.Context, task *resource.Task, backT } // createRecord stores peer download records. -func (s *Service) createRecord(peer *resource.Peer, peerState int, req *rpcscheduler.PeerResult) { +func (s *Service) createRecord(peer *resource.Peer, peerState int, req *schedulerv1.PeerResult) { record := storage.Record{ ID: peer.ID, IP: peer.Host.IP, diff --git a/scheduler/service/service_test.go b/scheduler/service/service_test.go index 060037375..0bbced9a0 100644 --- a/scheduler/service/service_test.go +++ b/scheduler/service/service_test.go @@ -35,15 +35,16 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + commonv1 "d7y.io/api/pkg/apis/common/v1" + errordetailsv1 "d7y.io/api/pkg/apis/errordetails/v1" + schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1" + schedulerv1mocks "d7y.io/api/pkg/apis/scheduler/v1/mocks" + "d7y.io/dragonfly/v2/internal/dferrors" "d7y.io/dragonfly/v2/manager/types" "d7y.io/dragonfly/v2/pkg/container/set" "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/base" - "d7y.io/dragonfly/v2/pkg/rpc/base/common" - "d7y.io/dragonfly/v2/pkg/rpc/errordetails" - rpcscheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler" - rpcschedulermocks "d7y.io/dragonfly/v2/pkg/rpc/scheduler/mocks" + "d7y.io/dragonfly/v2/pkg/rpc/common" "d7y.io/dragonfly/v2/scheduler/config" configmocks 
"d7y.io/dragonfly/v2/scheduler/config/mocks" "d7y.io/dragonfly/v2/scheduler/resource" @@ -60,7 +61,7 @@ var ( BackSourceCount: int(mockTaskBackToSourceLimit), } - mockRawHost = &rpcscheduler.PeerHost{ + mockRawHost = &schedulerv1.PeerHost{ Id: idgen.HostID("hostname", 8003), Ip: "127.0.0.1", RpcPort: 8003, @@ -72,7 +73,7 @@ var ( NetTopology: "net_topology", } - mockRawSeedHost = &rpcscheduler.PeerHost{ + mockRawSeedHost = &schedulerv1.PeerHost{ Id: idgen.HostID("hostname_seed", 8003), Ip: "127.0.0.1", RpcPort: 8003, @@ -84,7 +85,7 @@ var ( NetTopology: "net_topology", } - mockTaskURLMeta = &base.UrlMeta{ + mockTaskURLMeta = &commonv1.UrlMeta{ Digest: "digest", Tag: "tag", Range: "range", @@ -132,21 +133,21 @@ func TestService_New(t *testing.T) { func TestService_RegisterPeerTask(t *testing.T) { tests := []struct { name string - req *rpcscheduler.PeerTaskRequest + req *schedulerv1.PeerTaskRequest mock func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) - expect func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) + expect func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) }{ { name: "task register failed", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, + req: &schedulerv1.PeerTaskRequest{ + UrlMeta: &commonv1.UrlMeta{}, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, 
mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { @@ -156,24 +157,24 @@ func TestService_RegisterPeerTask(t *testing.T) { mt.LoadOrStore(gomock.Any()).Return(mockPeer.Task, false).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) { assert := assert.New(t) dferr, ok := err.(*dferrors.DfError) assert.True(ok) - assert.Equal(dferr.Code, base.Code_SchedTaskStatusError) + assert.Equal(dferr.Code, commonv1.Code_SchedTaskStatusError) assert.Equal(peer.NeedBackToSource.Load(), false) }, }, { name: "task state is TaskStateFailed", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + req: &schedulerv1.PeerTaskRequest{ + UrlMeta: &commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { @@ -188,24 +189,24 @@ func TestService_RegisterPeerTask(t *testing.T) { mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1), ) }, - expect: func(t *testing.T, peer 
*resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) { assert := assert.New(t) assert.NoError(err) assert.Equal(result.TaskId, peer.Task.ID) - assert.Equal(result.SizeScope, base.SizeScope_NORMAL) + assert.Equal(result.SizeScope, commonv1.SizeScope_NORMAL) assert.Equal(peer.NeedBackToSource.Load(), false) }, }, { name: "task state is TaskStateFailed and peer state is PeerStateFailed", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + req: &schedulerv1.PeerTaskRequest{ + UrlMeta: &commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { @@ -221,24 +222,24 @@ func TestService_RegisterPeerTask(t *testing.T) { mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) { assert := assert.New(t) dferr, ok := err.(*dferrors.DfError) assert.True(ok) - assert.Equal(dferr.Code, base.Code_SchedError) + assert.Equal(dferr.Code, commonv1.Code_SchedError) assert.Equal(peer.NeedBackToSource.Load(), false) }, }, { name: "get task scope size failed", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + 
req: &schedulerv1.PeerTaskRequest{ + UrlMeta: &commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { @@ -254,25 +255,25 @@ func TestService_RegisterPeerTask(t *testing.T) { mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) { assert := assert.New(t) assert.NoError(err) assert.Equal(result.TaskId, peer.Task.ID) - assert.Equal(result.SizeScope, base.SizeScope_NORMAL) + assert.Equal(result.SizeScope, commonv1.SizeScope_NORMAL) assert.True(peer.FSM.Is(resource.PeerStateReceivedNormal)) assert.Equal(peer.NeedBackToSource.Load(), false) }, }, { name: "task scope size is SizeScope_TINY", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + req: &schedulerv1.PeerTaskRequest{ + UrlMeta: &commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms 
*mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { @@ -290,12 +291,12 @@ func TestService_RegisterPeerTask(t *testing.T) { mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) { assert := assert.New(t) assert.NoError(err) assert.Equal(result.TaskId, peer.Task.ID) - assert.Equal(result.SizeScope, base.SizeScope_TINY) - assert.Equal(result.DirectPiece, &rpcscheduler.RegisterResult_PieceContent{ + assert.Equal(result.SizeScope, commonv1.SizeScope_TINY) + assert.Equal(result.DirectPiece, &schedulerv1.RegisterResult_PieceContent{ PieceContent: peer.Task.DirectPiece, }) assert.Equal(peer.NeedBackToSource.Load(), false) @@ -303,14 +304,14 @@ func TestService_RegisterPeerTask(t *testing.T) { }, { name: "task scope size is SizeScope_TINY and direct piece content is empty", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + req: &schedulerv1.PeerTaskRequest{ + UrlMeta: &commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { @@ -329,24 +330,24 @@ func TestService_RegisterPeerTask(t *testing.T) { 
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) { assert := assert.New(t) dferr, ok := err.(*dferrors.DfError) assert.True(ok) - assert.Equal(dferr.Code, base.Code_SchedError) + assert.Equal(dferr.Code, commonv1.Code_SchedError) assert.Equal(peer.NeedBackToSource.Load(), false) }, }, { name: "task scope size is SizeScope_TINY and direct piece content is error, peer state is PeerStateFailed", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + req: &schedulerv1.PeerTaskRequest{ + UrlMeta: &commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { @@ -365,24 +366,24 @@ func TestService_RegisterPeerTask(t *testing.T) { ms.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, false).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) { assert := assert.New(t) dferr, ok := err.(*dferrors.DfError) assert.True(ok) - assert.Equal(dferr.Code, base.Code_SchedError) + assert.Equal(dferr.Code, commonv1.Code_SchedError) assert.Equal(peer.NeedBackToSource.Load(), false) }, }, { 
name: "task scope size is SizeScope_TINY and direct piece content is error", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + req: &schedulerv1.PeerTaskRequest{ + UrlMeta: &commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { @@ -400,25 +401,25 @@ func TestService_RegisterPeerTask(t *testing.T) { ms.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, false).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) { assert := assert.New(t) assert.NoError(err) assert.Equal(result.TaskId, peer.Task.ID) - assert.Equal(result.SizeScope, base.SizeScope_NORMAL) + assert.Equal(result.SizeScope, commonv1.SizeScope_NORMAL) assert.True(peer.FSM.Is(resource.PeerStateReceivedNormal)) assert.Equal(peer.NeedBackToSource.Load(), false) }, }, { name: "task scope size is SizeScope_SMALL and load piece error, parent state is PeerStateRunning", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + req: &schedulerv1.PeerTaskRequest{ + UrlMeta: &commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req 
*schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { @@ -426,7 +427,7 @@ func TestService_RegisterPeerTask(t *testing.T) { mockPeer.Task.StorePeer(mockPeer) mockPeer.Task.StorePeer(mockSeedPeer) mockPeer.Task.ContentLength.Store(129) - mockPeer.Task.StorePiece(&base.PieceInfo{ + mockPeer.Task.StorePiece(&commonv1.PieceInfo{ PieceNum: 0, }) mockPeer.Task.TotalPieceCount.Store(1) @@ -443,25 +444,25 @@ func TestService_RegisterPeerTask(t *testing.T) { ms.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockSeedPeer, true).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) { assert := assert.New(t) assert.NoError(err) assert.Equal(result.TaskId, peer.Task.ID) - assert.Equal(result.SizeScope, base.SizeScope_NORMAL) + assert.Equal(result.SizeScope, commonv1.SizeScope_NORMAL) assert.True(peer.FSM.Is(resource.PeerStateReceivedNormal)) assert.Equal(peer.NeedBackToSource.Load(), false) }, }, { name: "task scope size is SizeScope_SMALL and load piece error, peer state is PeerStateFailed", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + req: &schedulerv1.PeerTaskRequest{ + UrlMeta: &commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler 
scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { @@ -469,7 +470,7 @@ func TestService_RegisterPeerTask(t *testing.T) { mockPeer.Task.StorePeer(mockPeer) mockPeer.Task.StorePeer(mockSeedPeer) mockPeer.Task.ContentLength.Store(129) - mockPeer.Task.StorePiece(&base.PieceInfo{ + mockPeer.Task.StorePiece(&commonv1.PieceInfo{ PieceNum: 0, }) mockPeer.Task.TotalPieceCount.Store(1) @@ -486,24 +487,24 @@ func TestService_RegisterPeerTask(t *testing.T) { ms.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockSeedPeer, true).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) { assert := assert.New(t) dferr, ok := err.(*dferrors.DfError) assert.True(ok) - assert.Equal(dferr.Code, base.Code_SchedError) + assert.Equal(dferr.Code, commonv1.Code_SchedError) assert.Equal(peer.NeedBackToSource.Load(), false) }, }, { name: "task scope size is SizeScope_SMALL and peer state is PeerStateFailed", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + req: &schedulerv1.PeerTaskRequest{ + UrlMeta: &commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr 
*resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { @@ -511,7 +512,7 @@ func TestService_RegisterPeerTask(t *testing.T) { mockPeer.Task.StorePeer(mockPeer) mockPeer.Task.StorePeer(mockSeedPeer) mockPeer.Task.ContentLength.Store(129) - mockPeer.Task.StorePiece(&base.PieceInfo{ + mockPeer.Task.StorePiece(&commonv1.PieceInfo{ PieceNum: 0, }) mockPeer.Task.TotalPieceCount.Store(1) @@ -528,31 +529,31 @@ func TestService_RegisterPeerTask(t *testing.T) { ms.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockSeedPeer, true).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) { assert := assert.New(t) dferr, ok := err.(*dferrors.DfError) assert.True(ok) - assert.Equal(dferr.Code, base.Code_SchedError) + assert.Equal(dferr.Code, commonv1.Code_SchedError) assert.Equal(peer.NeedBackToSource.Load(), false) }, }, { name: "task scope size is SizeScope_SMALL and vetex not found", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + req: &schedulerv1.PeerTaskRequest{ + UrlMeta: &commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { 
mockPeer.Task.FSM.SetState(resource.TaskStateSucceeded) mockPeer.Task.StorePeer(mockSeedPeer) mockPeer.Task.ContentLength.Store(129) - mockPeer.Task.StorePiece(&base.PieceInfo{ + mockPeer.Task.StorePiece(&commonv1.PieceInfo{ PieceNum: 0, }) mockPeer.Task.TotalPieceCount.Store(1) @@ -568,30 +569,30 @@ func TestService_RegisterPeerTask(t *testing.T) { ms.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockSeedPeer, true).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) { assert := assert.New(t) dferr, ok := err.(*dferrors.DfError) assert.True(ok) - assert.Equal(dferr.Code, base.Code_SchedError) + assert.Equal(dferr.Code, commonv1.Code_SchedError) }, }, { name: "task scope size is SizeScope_SMALL and add edge failed", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + req: &schedulerv1.PeerTaskRequest{ + UrlMeta: &commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { mockPeer.Task.FSM.SetState(resource.TaskStateSucceeded) mockPeer.Task.StorePeer(mockPeer) mockPeer.Task.ContentLength.Store(129) - mockPeer.Task.StorePiece(&base.PieceInfo{ + mockPeer.Task.StorePiece(&commonv1.PieceInfo{ PieceNum: 0, }) mockPeer.Task.TotalPieceCount.Store(1) @@ -607,7 +608,7 @@ func 
TestService_RegisterPeerTask(t *testing.T) { ms.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockSeedPeer, true).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) { assert := assert.New(t) assert.NoError(err) assert.Equal(result.TaskId, peer.Task.ID) @@ -617,14 +618,14 @@ func TestService_RegisterPeerTask(t *testing.T) { }, { name: "task scope size is SizeScope_SMALL", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + req: &schedulerv1.PeerTaskRequest{ + UrlMeta: &commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { @@ -632,7 +633,7 @@ func TestService_RegisterPeerTask(t *testing.T) { mockPeer.Task.StorePeer(mockPeer) mockPeer.Task.StorePeer(mockSeedPeer) mockPeer.Task.ContentLength.Store(129) - mockPeer.Task.StorePiece(&base.PieceInfo{ + mockPeer.Task.StorePiece(&commonv1.PieceInfo{ PieceNum: 0, }) mockPeer.Task.TotalPieceCount.Store(1) @@ -648,25 +649,25 @@ func TestService_RegisterPeerTask(t *testing.T) { ms.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockSeedPeer, true).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result 
*schedulerv1.RegisterResult, err error) { assert := assert.New(t) assert.NoError(err) assert.Equal(result.TaskId, peer.Task.ID) - assert.Equal(result.SizeScope, base.SizeScope_SMALL) + assert.Equal(result.SizeScope, commonv1.SizeScope_SMALL) assert.True(peer.FSM.Is(resource.PeerStateReceivedSmall)) assert.Equal(peer.NeedBackToSource.Load(), false) }, }, { name: "task scope size is SizeScope_NORMAL and peer state is PeerStateFailed", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + req: &schedulerv1.PeerTaskRequest{ + UrlMeta: &commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { @@ -684,24 +685,24 @@ func TestService_RegisterPeerTask(t *testing.T) { mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) { assert := assert.New(t) dferr, ok := err.(*dferrors.DfError) assert.True(ok) - assert.Equal(dferr.Code, base.Code_SchedError) + assert.Equal(dferr.Code, commonv1.Code_SchedError) assert.Equal(peer.NeedBackToSource.Load(), false) }, }, { name: "task scope size is SizeScope_NORMAL", - req: &rpcscheduler.PeerTaskRequest{ - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + req: &schedulerv1.PeerTaskRequest{ + UrlMeta: 
&commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, }, mock: func( - req *rpcscheduler.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, + req *schedulerv1.PeerTaskRequest, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, scheduler scheduler.Scheduler, res resource.Resource, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder, ) { @@ -718,11 +719,11 @@ func TestService_RegisterPeerTask(t *testing.T) { mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1), ) }, - expect: func(t *testing.T, peer *resource.Peer, result *rpcscheduler.RegisterResult, err error) { + expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) { assert := assert.New(t) assert.NoError(err) assert.Equal(result.TaskId, peer.Task.ID) - assert.Equal(result.SizeScope, base.SizeScope_NORMAL) + assert.Equal(result.SizeScope, commonv1.SizeScope_NORMAL) assert.True(peer.FSM.Is(resource.PeerStateReceivedNormal)) assert.Equal(peer.NeedBackToSource.Load(), false) }, @@ -743,7 +744,7 @@ func TestService_RegisterPeerTask(t *testing.T) { svc := New(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig, storage) mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost) mockSeedHost := resource.NewHost(mockRawSeedHost) mockSeedPeer := resource.NewPeer(mockSeedPeerID, mockTask, mockSeedHost) @@ 
-765,7 +766,7 @@ func TestService_ReportPieceResult(t *testing.T) { mock func( mockPeer *resource.Peer, res resource.Resource, peerManager resource.PeerManager, - mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, + mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *schedulerv1mocks.MockScheduler_ReportPieceResultServerMockRecorder, ) expect func(t *testing.T, peer *resource.Peer, err error) }{ @@ -774,7 +775,7 @@ func TestService_ReportPieceResult(t *testing.T) { mock: func( mockPeer *resource.Peer, res resource.Resource, peerManager resource.PeerManager, - mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, + mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *schedulerv1mocks.MockScheduler_ReportPieceResultServerMockRecorder, ) { ctx, cancel := context.WithCancel(context.Background()) @@ -793,7 +794,7 @@ func TestService_ReportPieceResult(t *testing.T) { mock: func( mockPeer *resource.Peer, res resource.Resource, peerManager resource.PeerManager, - mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, + mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *schedulerv1mocks.MockScheduler_ReportPieceResultServerMockRecorder, ) { gomock.InOrder( @@ -811,7 +812,7 @@ func TestService_ReportPieceResult(t *testing.T) { mock: func( mockPeer *resource.Peer, res resource.Resource, peerManager resource.PeerManager, - mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, + mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms 
*schedulerv1mocks.MockScheduler_ReportPieceResultServerMockRecorder, ) { gomock.InOrder( @@ -829,12 +830,12 @@ func TestService_ReportPieceResult(t *testing.T) { mock: func( mockPeer *resource.Peer, res resource.Resource, peerManager resource.PeerManager, - mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, + mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *schedulerv1mocks.MockScheduler_ReportPieceResultServerMockRecorder, ) { gomock.InOrder( ms.Context().Return(context.Background()).Times(1), - ms.Recv().Return(&rpcscheduler.PieceResult{ + ms.Recv().Return(&schedulerv1.PieceResult{ SrcPid: mockPeerID, }, nil).Times(1), mr.PeerManager().Return(peerManager).Times(1), @@ -845,7 +846,7 @@ func TestService_ReportPieceResult(t *testing.T) { assert := assert.New(t) dferr, ok := err.(*dferrors.DfError) assert.True(ok) - assert.Equal(dferr.Code, base.Code_SchedPeerNotFound) + assert.Equal(dferr.Code, commonv1.Code_SchedPeerNotFound) }, }, { @@ -853,15 +854,15 @@ func TestService_ReportPieceResult(t *testing.T) { mock: func( mockPeer *resource.Peer, res resource.Resource, peerManager resource.PeerManager, - mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, + mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *schedulerv1mocks.MockScheduler_ReportPieceResultServerMockRecorder, ) { mockPeer.FSM.SetState(resource.PeerStateBackToSource) gomock.InOrder( ms.Context().Return(context.Background()).Times(1), - ms.Recv().Return(&rpcscheduler.PieceResult{ + ms.Recv().Return(&schedulerv1.PieceResult{ SrcPid: mockPeerID, - PieceInfo: &base.PieceInfo{ + PieceInfo: &commonv1.PieceInfo{ PieceNum: common.BeginOfPiece, }, }, nil).Times(1), @@ -882,14 +883,14 @@ func TestService_ReportPieceResult(t *testing.T) { mock: 
func( mockPeer *resource.Peer, res resource.Resource, peerManager resource.PeerManager, - mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, + mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *schedulerv1mocks.MockScheduler_ReportPieceResultServerMockRecorder, ) { gomock.InOrder( ms.Context().Return(context.Background()).Times(1), - ms.Recv().Return(&rpcscheduler.PieceResult{ + ms.Recv().Return(&schedulerv1.PieceResult{ SrcPid: mockPeerID, - PieceInfo: &base.PieceInfo{ + PieceInfo: &commonv1.PieceInfo{ PieceNum: common.EndOfPiece, }, }, nil).Times(1), @@ -910,15 +911,15 @@ func TestService_ReportPieceResult(t *testing.T) { mock: func( mockPeer *resource.Peer, res resource.Resource, peerManager resource.PeerManager, - mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, + mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *schedulerv1mocks.MockScheduler_ReportPieceResultServerMockRecorder, ) { gomock.InOrder( ms.Context().Return(context.Background()).Times(1), - ms.Recv().Return(&rpcscheduler.PieceResult{ + ms.Recv().Return(&schedulerv1.PieceResult{ SrcPid: mockPeerID, Success: true, - PieceInfo: &base.PieceInfo{ + PieceInfo: &commonv1.PieceInfo{ PieceNum: 1, }, }, nil).Times(1), @@ -939,14 +940,14 @@ func TestService_ReportPieceResult(t *testing.T) { mock: func( mockPeer *resource.Peer, res resource.Resource, peerManager resource.PeerManager, - mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, + mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *schedulerv1mocks.MockScheduler_ReportPieceResultServerMockRecorder, ) { gomock.InOrder( 
ms.Context().Return(context.Background()).Times(1), - ms.Recv().Return(&rpcscheduler.PieceResult{ + ms.Recv().Return(&schedulerv1.PieceResult{ SrcPid: mockPeerID, - Code: base.Code_ClientWaitPieceReady, + Code: commonv1.Code_ClientWaitPieceReady, }, nil).Times(1), mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(mockPeerID)).Return(mockPeer, true).Times(1), @@ -965,15 +966,15 @@ func TestService_ReportPieceResult(t *testing.T) { mock: func( mockPeer *resource.Peer, res resource.Resource, peerManager resource.PeerManager, - mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *rpcschedulermocks.MockScheduler_ReportPieceResultServerMockRecorder, + mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, ms *schedulerv1mocks.MockScheduler_ReportPieceResultServerMockRecorder, ) { mockPeer.FSM.SetState(resource.PeerStateBackToSource) gomock.InOrder( ms.Context().Return(context.Background()).Times(1), - ms.Recv().Return(&rpcscheduler.PieceResult{ + ms.Recv().Return(&schedulerv1.PieceResult{ SrcPid: mockPeerID, - Code: base.Code_PeerTaskNotFound, + Code: commonv1.Code_PeerTaskNotFound, }, nil).Times(1), mr.PeerManager().Return(peerManager).Times(1), mp.Load(gomock.Eq(mockPeerID)).Return(mockPeer, true).Times(1), @@ -998,11 +999,11 @@ func TestService_ReportPieceResult(t *testing.T) { dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) peerManager := resource.NewMockPeerManager(ctl) - stream := rpcschedulermocks.NewMockScheduler_ReportPieceResultServer(ctl) + stream := schedulerv1mocks.NewMockScheduler_ReportPieceResultServer(ctl) svc := New(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig, storage) mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := 
resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost) tc.mock(mockPeer, res, peerManager, res.EXPECT(), peerManager.EXPECT(), stream.EXPECT()) tc.expect(t, mockPeer, svc.ReportPieceResult(stream)) @@ -1013,7 +1014,7 @@ func TestService_ReportPieceResult(t *testing.T) { func TestService_ReportPeerResult(t *testing.T) { tests := []struct { name string - req *rpcscheduler.PeerResult + req *schedulerv1.PeerResult mock func( mockPeer *resource.Peer, res resource.Resource, peerManager resource.PeerManager, @@ -1023,7 +1024,7 @@ func TestService_ReportPeerResult(t *testing.T) { }{ { name: "peer not found", - req: &rpcscheduler.PeerResult{ + req: &schedulerv1.PeerResult{ PeerId: mockPeerID, }, mock: func( @@ -1040,12 +1041,12 @@ func TestService_ReportPeerResult(t *testing.T) { assert := assert.New(t) dferr, ok := err.(*dferrors.DfError) assert.True(ok) - assert.Equal(dferr.Code, base.Code_SchedPeerNotFound) + assert.Equal(dferr.Code, commonv1.Code_SchedPeerNotFound) }, }, { name: "receive peer failed", - req: &rpcscheduler.PeerResult{ + req: &schedulerv1.PeerResult{ Success: false, PeerId: mockPeerID, }, @@ -1068,7 +1069,7 @@ func TestService_ReportPeerResult(t *testing.T) { }, { name: "receive peer failed and peer state is PeerStateBackToSource", - req: &rpcscheduler.PeerResult{ + req: &schedulerv1.PeerResult{ Success: false, PeerId: mockPeerID, }, @@ -1091,7 +1092,7 @@ func TestService_ReportPeerResult(t *testing.T) { }, { name: "receive peer success", - req: &rpcscheduler.PeerResult{ + req: &schedulerv1.PeerResult{ Success: true, PeerId: mockPeerID, }, @@ -1114,7 +1115,7 @@ func TestService_ReportPeerResult(t *testing.T) { }, { name: "receive peer success, and peer state is PeerStateBackToSource", - req: &rpcscheduler.PeerResult{ + req: &schedulerv1.PeerResult{ Success: true, PeerId: mockPeerID, }, @@ -1137,7 +1138,7 @@ 
func TestService_ReportPeerResult(t *testing.T) { }, { name: "receive peer success, and peer state is PeerStateBackToSource", - req: &rpcscheduler.PeerResult{ + req: &schedulerv1.PeerResult{ Success: true, PeerId: mockPeerID, }, @@ -1160,7 +1161,7 @@ func TestService_ReportPeerResult(t *testing.T) { }, { name: "receive peer success and create record failed", - req: &rpcscheduler.PeerResult{ + req: &schedulerv1.PeerResult{ Success: true, PeerId: mockPeerID, }, @@ -1195,7 +1196,7 @@ func TestService_ReportPeerResult(t *testing.T) { svc := New(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig, storage) mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost) tc.mock(mockPeer, res, peerManager, res.EXPECT(), peerManager.EXPECT(), storage.EXPECT()) tc.expect(t, mockPeer, svc.ReportPeerResult(context.Background(), tc.req)) @@ -1207,7 +1208,7 @@ func TestService_StatTask(t *testing.T) { tests := []struct { name string mock func(mockTask *resource.Task, taskManager resource.TaskManager, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder) - expect func(t *testing.T, task *rpcscheduler.Task, err error) + expect func(t *testing.T, task *schedulerv1.Task, err error) }{ { name: "task not found", @@ -1217,7 +1218,7 @@ func TestService_StatTask(t *testing.T) { mt.Load(gomock.Any()).Return(nil, false).Times(1), ) }, - expect: func(t *testing.T, task *rpcscheduler.Task, err error) { + expect: func(t *testing.T, task *schedulerv1.Task, err error) { assert := assert.New(t) assert.Error(err) }, @@ -1230,12 +1231,12 @@ func TestService_StatTask(t *testing.T) { 
mt.Load(gomock.Any()).Return(mockTask, true).Times(1), ) }, - expect: func(t *testing.T, task *rpcscheduler.Task, err error) { + expect: func(t *testing.T, task *schedulerv1.Task, err error) { assert := assert.New(t) assert.NoError(err) - assert.EqualValues(task, &rpcscheduler.Task{ + assert.EqualValues(task, &schedulerv1.Task{ Id: mockTaskID, - Type: base.TaskType_Normal, + Type: commonv1.TaskType_Normal, ContentLength: 0, TotalPieceCount: 0, State: resource.TaskStatePending, @@ -1256,10 +1257,10 @@ func TestService_StatTask(t *testing.T) { storage := storagemocks.NewMockStorage(ctl) taskManager := resource.NewMockTaskManager(ctl) svc := New(&config.Config{Scheduler: mockSchedulerConfig, Metrics: &config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig, storage) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) tc.mock(mockTask, taskManager, res.EXPECT(), taskManager.EXPECT()) - task, err := svc.StatTask(context.Background(), &rpcscheduler.StatTaskRequest{TaskId: mockTaskID}) + task, err := svc.StatTask(context.Background(), &schedulerv1.StatTaskRequest{TaskId: mockTaskID}) tc.expect(t, task, err) }) } @@ -1268,7 +1269,7 @@ func TestService_StatTask(t *testing.T) { func TestService_AnnounceTask(t *testing.T) { tests := []struct { name string - req *rpcscheduler.AnnounceTaskRequest + req *schedulerv1.AnnounceTaskRequest mock func(mockHost *resource.Host, mockTask *resource.Task, mockPeer *resource.Peer, hostManager resource.HostManager, taskManager resource.TaskManager, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, mt *resource.MockTaskManagerMockRecorder, mp *resource.MockPeerManagerMockRecorder) @@ -1276,15 +1277,15 @@ func 
TestService_AnnounceTask(t *testing.T) { }{ { name: "task state is TaskStateSucceeded and peer state is PeerStateSucceeded", - req: &rpcscheduler.AnnounceTaskRequest{ + req: &schedulerv1.AnnounceTaskRequest{ TaskId: mockTaskID, Url: mockURL, - UrlMeta: &base.UrlMeta{}, - PeerHost: &rpcscheduler.PeerHost{ + UrlMeta: &commonv1.UrlMeta{}, + PeerHost: &schedulerv1.PeerHost{ Id: mockRawHost.Id, }, - PiecePacket: &base.PiecePacket{ - PieceInfos: []*base.PieceInfo{{PieceNum: 1}}, + PiecePacket: &commonv1.PiecePacket{ + PieceInfos: []*commonv1.PieceInfo{{PieceNum: 1}}, TotalPiece: 1, }, }, @@ -1312,13 +1313,13 @@ func TestService_AnnounceTask(t *testing.T) { }, { name: "task state is TaskStatePending and peer state is PeerStateSucceeded", - req: &rpcscheduler.AnnounceTaskRequest{ + req: &schedulerv1.AnnounceTaskRequest{ TaskId: mockTaskID, Url: mockURL, - UrlMeta: &base.UrlMeta{}, + UrlMeta: &commonv1.UrlMeta{}, PeerHost: mockRawHost, - PiecePacket: &base.PiecePacket{ - PieceInfos: []*base.PieceInfo{{PieceNum: 1, DownloadCost: 1}}, + PiecePacket: &commonv1.PiecePacket{ + PieceInfos: []*commonv1.PieceInfo{{PieceNum: 1, DownloadCost: 1}}, TotalPiece: 1, ContentLength: 1000, }, @@ -1346,7 +1347,7 @@ func TestService_AnnounceTask(t *testing.T) { assert.Equal(mockTask.ContentLength.Load(), int64(1000)) piece, ok := mockTask.LoadPiece(1) assert.True(ok) - assert.EqualValues(piece, &base.PieceInfo{PieceNum: 1, DownloadCost: 1}) + assert.EqualValues(piece, &commonv1.PieceInfo{PieceNum: 1, DownloadCost: 1}) assert.Equal(mockPeer.Pieces.Count(), uint(1)) assert.Equal(mockPeer.PieceCosts()[0], int64(1*time.Millisecond)) @@ -1355,13 +1356,13 @@ func TestService_AnnounceTask(t *testing.T) { }, { name: "task state is TaskStateFailed and peer state is PeerStateSucceeded", - req: &rpcscheduler.AnnounceTaskRequest{ + req: &schedulerv1.AnnounceTaskRequest{ TaskId: mockTaskID, Url: mockURL, - UrlMeta: &base.UrlMeta{}, + UrlMeta: &commonv1.UrlMeta{}, PeerHost: mockRawHost, - PiecePacket: 
&base.PiecePacket{ - PieceInfos: []*base.PieceInfo{{PieceNum: 1, DownloadCost: 1}}, + PiecePacket: &commonv1.PiecePacket{ + PieceInfos: []*commonv1.PieceInfo{{PieceNum: 1, DownloadCost: 1}}, TotalPiece: 1, ContentLength: 1000, }, @@ -1390,7 +1391,7 @@ func TestService_AnnounceTask(t *testing.T) { piece, ok := mockTask.LoadPiece(1) assert.True(ok) - assert.EqualValues(piece, &base.PieceInfo{PieceNum: 1, DownloadCost: 1}) + assert.EqualValues(piece, &commonv1.PieceInfo{PieceNum: 1, DownloadCost: 1}) assert.Equal(mockPeer.Pieces.Count(), uint(1)) assert.Equal(mockPeer.PieceCosts()[0], int64(1*time.Millisecond)) assert.Equal(mockPeer.FSM.Current(), resource.PeerStateSucceeded) @@ -1398,13 +1399,13 @@ func TestService_AnnounceTask(t *testing.T) { }, { name: "task state is TaskStatePending and peer state is PeerStatePending", - req: &rpcscheduler.AnnounceTaskRequest{ + req: &schedulerv1.AnnounceTaskRequest{ TaskId: mockTaskID, Url: mockURL, - UrlMeta: &base.UrlMeta{}, + UrlMeta: &commonv1.UrlMeta{}, PeerHost: mockRawHost, - PiecePacket: &base.PiecePacket{ - PieceInfos: []*base.PieceInfo{{PieceNum: 1, DownloadCost: 1}}, + PiecePacket: &commonv1.PiecePacket{ + PieceInfos: []*commonv1.PieceInfo{{PieceNum: 1, DownloadCost: 1}}, TotalPiece: 1, ContentLength: 1000, }, @@ -1433,7 +1434,7 @@ func TestService_AnnounceTask(t *testing.T) { piece, ok := mockTask.LoadPiece(1) assert.True(ok) - assert.EqualValues(piece, &base.PieceInfo{PieceNum: 1, DownloadCost: 1}) + assert.EqualValues(piece, &commonv1.PieceInfo{PieceNum: 1, DownloadCost: 1}) assert.Equal(mockPeer.Pieces.Count(), uint(1)) assert.Equal(mockPeer.PieceCosts()[0], int64(1*time.Millisecond)) assert.Equal(mockPeer.FSM.Current(), resource.PeerStateSucceeded) @@ -1441,13 +1442,13 @@ func TestService_AnnounceTask(t *testing.T) { }, { name: "task state is TaskStatePending and peer state is PeerStateReceivedNormal", - req: &rpcscheduler.AnnounceTaskRequest{ + req: &schedulerv1.AnnounceTaskRequest{ TaskId: mockTaskID, Url: 
mockURL, - UrlMeta: &base.UrlMeta{}, + UrlMeta: &commonv1.UrlMeta{}, PeerHost: mockRawHost, - PiecePacket: &base.PiecePacket{ - PieceInfos: []*base.PieceInfo{{PieceNum: 1, DownloadCost: 1}}, + PiecePacket: &commonv1.PiecePacket{ + PieceInfos: []*commonv1.PieceInfo{{PieceNum: 1, DownloadCost: 1}}, TotalPiece: 1, ContentLength: 1000, }, @@ -1476,7 +1477,7 @@ func TestService_AnnounceTask(t *testing.T) { piece, ok := mockTask.LoadPiece(1) assert.True(ok) - assert.EqualValues(piece, &base.PieceInfo{PieceNum: 1, DownloadCost: 1}) + assert.EqualValues(piece, &commonv1.PieceInfo{PieceNum: 1, DownloadCost: 1}) assert.Equal(mockPeer.Pieces.Count(), uint(1)) assert.Equal(mockPeer.PieceCosts()[0], int64(1*time.Millisecond)) assert.Equal(mockPeer.FSM.Current(), resource.PeerStateSucceeded) @@ -1497,7 +1498,7 @@ func TestService_AnnounceTask(t *testing.T) { peerManager := resource.NewMockPeerManager(ctl) svc := New(&config.Config{Scheduler: mockSchedulerConfig, Metrics: &config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig, storage) mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost) tc.mock(mockHost, mockTask, mockPeer, hostManager, taskManager, peerManager, res.EXPECT(), hostManager.EXPECT(), taskManager.EXPECT(), peerManager.EXPECT()) @@ -1512,23 +1513,6 @@ func TestService_LeaveTask(t *testing.T) { mock func(peer *resource.Peer, child *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) expect func(t *testing.T, peer *resource.Peer, err error) }{ - // { - // name: "peer not found", - // 
mock: func(peer *resource.Peer, child *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { - // peer.FSM.SetState(resource.PeerStatePending) - // gomock.InOrder( - // mr.PeerManager().Return(peerManager).Times(1), - // mp.Load(gomock.Any()).Return(nil, false).Times(1), - // ) - // }, - // expect: func(t *testing.T, peer *resource.Peer, err error) { - // assert := assert.New(t) - // dferr, ok := err.(*dferrors.DfError) - // assert.True(ok) - // assert.Equal(dferr.Code, base.Code_SchedPeerNotFound) - // assert.True(peer.FSM.Is(resource.PeerStatePending)) - // }, - // }, { name: "peer state is PeerStatePending", mock: func(peer *resource.Peer, child *resource.Peer, peerManager resource.PeerManager, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { @@ -1637,7 +1621,7 @@ func TestService_LeaveTask(t *testing.T) { assert := assert.New(t) dferr, ok := err.(*dferrors.DfError) assert.True(ok) - assert.Equal(dferr.Code, base.Code_SchedTaskStatusError) + assert.Equal(dferr.Code, commonv1.Code_SchedTaskStatusError) assert.True(peer.FSM.Is(resource.PeerStateLeave)) }, }, @@ -1750,13 +1734,13 @@ func TestService_LeaveTask(t *testing.T) { storage := storagemocks.NewMockStorage(ctl) peerManager := resource.NewMockPeerManager(ctl) mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := resource.NewPeer(mockSeedPeerID, mockTask, mockHost) child := resource.NewPeer(mockPeerID, mockTask, mockHost) svc := New(&config.Config{Scheduler: mockSchedulerConfig, Metrics: &config.MetricsConfig{EnablePeerHost: 
true}}, res, scheduler, dynconfig, storage) tc.mock(peer, child, peerManager, scheduler.EXPECT(), res.EXPECT(), peerManager.EXPECT()) - tc.expect(t, peer, svc.LeaveTask(context.Background(), &rpcscheduler.PeerTarget{})) + tc.expect(t, peer, svc.LeaveTask(context.Background(), &schedulerv1.PeerTarget{})) }) } } @@ -1765,8 +1749,8 @@ func TestService_registerTask(t *testing.T) { tests := []struct { name string config *config.Config - req *rpcscheduler.PeerTaskRequest - run func(t *testing.T, svc *Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) + req *schedulerv1.PeerTaskRequest + run func(t *testing.T, svc *Service, req *schedulerv1.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) }{ { name: "task already exists and state is TaskStatePending", @@ -1776,11 +1760,11 @@ func TestService_registerTask(t *testing.T) { Enable: true, }, }, - req: &rpcscheduler.PeerTaskRequest{ + req: &schedulerv1.PeerTaskRequest{ Url: mockTaskURL, UrlMeta: mockTaskURLMeta, }, - run: func(t *testing.T, svc *Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, req 
*schedulerv1.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { mockTask.FSM.SetState(resource.TaskStateRunning) mockTask.StorePeer(mockPeer) mockPeer.FSM.SetState(resource.PeerStateRunning) @@ -1804,11 +1788,11 @@ func TestService_registerTask(t *testing.T) { Enable: true, }, }, - req: &rpcscheduler.PeerTaskRequest{ + req: &schedulerv1.PeerTaskRequest{ Url: mockTaskURL, UrlMeta: mockTaskURLMeta, }, - run: func(t *testing.T, svc *Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, req *schedulerv1.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { mockTask.FSM.SetState(resource.TaskStateRunning) mockTask.StorePeer(mockPeer) mockPeer.FSM.SetState(resource.PeerStateRunning) @@ -1832,11 +1816,11 @@ func TestService_registerTask(t *testing.T) { Enable: true, }, }, - req: &rpcscheduler.PeerTaskRequest{ + req: &schedulerv1.PeerTaskRequest{ Url: mockTaskURL, UrlMeta: mockTaskURLMeta, }, - run: func(t *testing.T, svc *Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer 
resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, req *schedulerv1.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { mockTask.FSM.SetState(resource.TaskStateSucceeded) mockTask.StorePeer(mockPeer) mockPeer.FSM.SetState(resource.PeerStateRunning) @@ -1860,14 +1844,14 @@ func TestService_registerTask(t *testing.T) { Enable: true, }, }, - req: &rpcscheduler.PeerTaskRequest{ + req: &schedulerv1.PeerTaskRequest{ Url: mockTaskURL, UrlMeta: mockTaskURLMeta, - PeerHost: &rpcscheduler.PeerHost{ + PeerHost: &schedulerv1.PeerHost{ Id: mockRawSeedHost.Id, }, }, - run: func(t *testing.T, svc *Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, req *schedulerv1.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { var wg sync.WaitGroup wg.Add(2) defer wg.Wait() @@ -1879,7 +1863,7 @@ func TestService_registerTask(t *testing.T) { mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Any()).Return(nil, false).Times(1), 
mr.SeedPeer().Do(func() { wg.Done() }).Return(seedPeer).Times(1), - mc.TriggerTask(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, task *resource.Task) { wg.Done() }).Return(mockPeer, &rpcscheduler.PeerResult{}, nil).Times(1), + mc.TriggerTask(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, task *resource.Task) { wg.Done() }).Return(mockPeer, &schedulerv1.PeerResult{}, nil).Times(1), ) task, needBackToSource, err := svc.registerTask(context.Background(), req) @@ -1897,14 +1881,14 @@ func TestService_registerTask(t *testing.T) { Enable: true, }, }, - req: &rpcscheduler.PeerTaskRequest{ + req: &schedulerv1.PeerTaskRequest{ Url: mockTaskURL, UrlMeta: mockTaskURLMeta, - PeerHost: &rpcscheduler.PeerHost{ + PeerHost: &schedulerv1.PeerHost{ Id: mockRawSeedHost.Id, }, }, - run: func(t *testing.T, svc *Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, req *schedulerv1.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { var wg sync.WaitGroup wg.Add(2) defer wg.Wait() @@ -1916,7 +1900,7 @@ func TestService_registerTask(t *testing.T) { mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Any()).Return(nil, false), mr.SeedPeer().Do(func() { wg.Done() }).Return(seedPeer).Times(1), - mc.TriggerTask(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, task *resource.Task) { wg.Done() }).Return(mockPeer, &rpcscheduler.PeerResult{}, 
nil).Times(1), + mc.TriggerTask(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, task *resource.Task) { wg.Done() }).Return(mockPeer, &schedulerv1.PeerResult{}, nil).Times(1), ) task, needBackToSource, err := svc.registerTask(context.Background(), req) @@ -1934,14 +1918,14 @@ func TestService_registerTask(t *testing.T) { Enable: true, }, }, - req: &rpcscheduler.PeerTaskRequest{ + req: &schedulerv1.PeerTaskRequest{ Url: mockTaskURL, UrlMeta: mockTaskURLMeta, - PeerHost: &rpcscheduler.PeerHost{ + PeerHost: &schedulerv1.PeerHost{ Id: mockRawSeedHost.Id, }, }, - run: func(t *testing.T, svc *Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, req *schedulerv1.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { mockHost := resource.NewHost(mockRawSeedHost, resource.WithHostType(resource.HostTypeSuperSeed)) mockTask.FSM.SetState(resource.TaskStateFailed) gomock.InOrder( @@ -1966,14 +1950,14 @@ func TestService_registerTask(t *testing.T) { Enable: true, }, }, - req: &rpcscheduler.PeerTaskRequest{ + req: &schedulerv1.PeerTaskRequest{ Url: mockTaskURL, UrlMeta: mockTaskURLMeta, - PeerHost: &rpcscheduler.PeerHost{ + PeerHost: &schedulerv1.PeerHost{ Id: mockRawSeedHost.Id, }, }, - run: func(t *testing.T, svc *Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager 
resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, req *schedulerv1.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { var wg sync.WaitGroup wg.Add(2) defer wg.Wait() @@ -1985,7 +1969,7 @@ func TestService_registerTask(t *testing.T) { mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Any()).Return(nil, false).Times(1), mr.SeedPeer().Do(func() { wg.Done() }).Return(seedPeer).Times(1), - mc.TriggerTask(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, task *resource.Task) { wg.Done() }).Return(mockPeer, &rpcscheduler.PeerResult{}, errors.New("foo")).Times(1), + mc.TriggerTask(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, task *resource.Task) { wg.Done() }).Return(mockPeer, &schedulerv1.PeerResult{}, errors.New("foo")).Times(1), ) task, needBackToSource, err := svc.registerTask(context.Background(), req) @@ -2003,14 +1987,14 @@ func TestService_registerTask(t *testing.T) { Enable: true, }, }, - req: &rpcscheduler.PeerTaskRequest{ + req: &schedulerv1.PeerTaskRequest{ Url: mockTaskURL, UrlMeta: mockTaskURLMeta, - PeerHost: &rpcscheduler.PeerHost{ + PeerHost: &schedulerv1.PeerHost{ Id: mockRawSeedHost.Id, }, }, - run: func(t *testing.T, svc *Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc 
*resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, req *schedulerv1.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { var wg sync.WaitGroup wg.Add(2) defer wg.Wait() @@ -2022,7 +2006,7 @@ func TestService_registerTask(t *testing.T) { mr.HostManager().Return(hostManager).Times(1), mh.Load(gomock.Any()).Return(nil, false).Times(1), mr.SeedPeer().Do(func() { wg.Done() }).Return(seedPeer).Times(1), - mc.TriggerTask(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, task *resource.Task) { wg.Done() }).Return(mockPeer, &rpcscheduler.PeerResult{}, errors.New("foo")).Times(1), + mc.TriggerTask(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, task *resource.Task) { wg.Done() }).Return(mockPeer, &schedulerv1.PeerResult{}, errors.New("foo")).Times(1), ) task, needBackToSource, err := svc.registerTask(context.Background(), req) @@ -2040,14 +2024,14 @@ func TestService_registerTask(t *testing.T) { Enable: false, }, }, - req: &rpcscheduler.PeerTaskRequest{ + req: &schedulerv1.PeerTaskRequest{ Url: mockTaskURL, UrlMeta: mockTaskURLMeta, - PeerHost: &rpcscheduler.PeerHost{ + PeerHost: &schedulerv1.PeerHost{ Id: mockRawSeedHost.Id, }, }, - run: func(t *testing.T, svc *Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, req *schedulerv1.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, 
hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { mockTask.FSM.SetState(resource.TaskStatePending) gomock.InOrder( mr.TaskManager().Return(taskManager).Times(1), @@ -2071,14 +2055,14 @@ func TestService_registerTask(t *testing.T) { Enable: false, }, }, - req: &rpcscheduler.PeerTaskRequest{ + req: &schedulerv1.PeerTaskRequest{ Url: mockTaskURL, UrlMeta: mockTaskURLMeta, - PeerHost: &rpcscheduler.PeerHost{ + PeerHost: &schedulerv1.PeerHost{ Id: mockRawSeedHost.Id, }, }, - run: func(t *testing.T, svc *Service, req *rpcscheduler.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, req *schedulerv1.PeerTaskRequest, mockTask *resource.Task, mockPeer *resource.Peer, taskManager resource.TaskManager, hostManager resource.HostManager, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder, mh *resource.MockHostManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { mockTask.FSM.SetState(resource.TaskStateFailed) gomock.InOrder( mr.TaskManager().Return(taskManager).Times(1), @@ -2109,7 +2093,7 @@ func TestService_registerTask(t *testing.T) { taskManager := resource.NewMockTaskManager(ctl) hostManager := resource.NewMockHostManager(ctl) mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, 
resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost) seedPeer := resource.NewMockSeedPeer(ctl) tc.run(t, svc, tc.req, mockTask, mockPeer, taskManager, hostManager, seedPeer, res.EXPECT(), taskManager.EXPECT(), hostManager.EXPECT(), seedPeer.EXPECT()) @@ -2120,13 +2104,13 @@ func TestService_registerTask(t *testing.T) { func TestService_registerHost(t *testing.T) { tests := []struct { name string - req *rpcscheduler.PeerTaskRequest + req *schedulerv1.PeerTaskRequest mock func(mockHost *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) expect func(t *testing.T, host *resource.Host) }{ { name: "host already exists", - req: &rpcscheduler.PeerTaskRequest{ + req: &schedulerv1.PeerTaskRequest{ Url: mockTaskURL, UrlMeta: mockTaskURLMeta, PeerHost: mockRawHost, @@ -2144,7 +2128,7 @@ func TestService_registerHost(t *testing.T) { }, { name: "host does not exist", - req: &rpcscheduler.PeerTaskRequest{ + req: &schedulerv1.PeerTaskRequest{ Url: mockTaskURL, UrlMeta: mockTaskURLMeta, PeerHost: mockRawHost, @@ -2166,7 +2150,7 @@ func TestService_registerHost(t *testing.T) { }, { name: "host does not exist and dynconfig get cluster client config failed", - req: &rpcscheduler.PeerTaskRequest{ + req: &schedulerv1.PeerTaskRequest{ Url: mockTaskURL, UrlMeta: mockTaskURLMeta, PeerHost: mockRawHost, @@ -2219,7 +2203,7 @@ func TestService_triggerSeedPeerTask(t *testing.T) { peer.FSM.SetState(resource.PeerStateRunning) gomock.InOrder( mr.SeedPeer().Return(seedPeer).Times(1), - mc.TriggerTask(gomock.Any(), gomock.Any()).Return(peer, &rpcscheduler.PeerResult{ + mc.TriggerTask(gomock.Any(), gomock.Any()).Return(peer, &schedulerv1.PeerResult{ TotalPieceCount: 3, ContentLength: 1024, }, nil).Times(1), @@ -2239,7 +2223,7 @@ func TestService_triggerSeedPeerTask(t *testing.T) { 
task.FSM.SetState(resource.TaskStateRunning) gomock.InOrder( mr.SeedPeer().Return(seedPeer).Times(1), - mc.TriggerTask(gomock.Any(), gomock.Any()).Return(peer, &rpcscheduler.PeerResult{}, errors.New("foo")).Times(1), + mc.TriggerTask(gomock.Any(), gomock.Any()).Return(peer, &schedulerv1.PeerResult{}, errors.New("foo")).Times(1), ) }, expect: func(t *testing.T, task *resource.Task, peer *resource.Peer) { @@ -2259,7 +2243,7 @@ func TestService_triggerSeedPeerTask(t *testing.T) { storage := storagemocks.NewMockStorage(ctl) seedPeer := resource.NewMockSeedPeer(ctl) mockHost := resource.NewHost(mockRawHost) - task := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + task := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := resource.NewPeer(mockPeerID, task, mockHost) svc := New(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig, storage) @@ -2338,7 +2322,7 @@ func TestService_handleBeginOfPiece(t *testing.T) { dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := resource.NewPeer(mockPeerID, mockTask, mockHost) svc := New(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig, storage) @@ -2352,15 +2336,15 @@ func TestService_handleBeginOfPiece(t *testing.T) { func TestService_registerPeer(t *testing.T) { tests := []struct { name string - req *rpcscheduler.PeerTaskRequest + req *schedulerv1.PeerTaskRequest mock func(mockPeer 
*resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) expect func(t *testing.T, peer *resource.Peer) }{ { name: "peer already exists", - req: &rpcscheduler.PeerTaskRequest{ + req: &schedulerv1.PeerTaskRequest{ PeerId: mockPeerID, - UrlMeta: &base.UrlMeta{}, + UrlMeta: &commonv1.UrlMeta{}, }, mock: func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { gomock.InOrder( @@ -2376,9 +2360,9 @@ func TestService_registerPeer(t *testing.T) { }, { name: "peer does not exists", - req: &rpcscheduler.PeerTaskRequest{ + req: &schedulerv1.PeerTaskRequest{ PeerId: mockPeerID, - UrlMeta: &base.UrlMeta{}, + UrlMeta: &commonv1.UrlMeta{}, }, mock: func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) { gomock.InOrder( @@ -2405,7 +2389,7 @@ func TestService_registerPeer(t *testing.T) { svc := New(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig, storage) peerManager := resource.NewMockPeerManager(ctl) mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost) tc.mock(mockPeer, peerManager, res.EXPECT(), peerManager.EXPECT()) @@ -2417,20 +2401,20 @@ func TestService_registerPeer(t *testing.T) { func TestService_handlePieceSuccess(t *testing.T) { mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := 
resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) now := time.Now() tests := []struct { name string - piece *rpcscheduler.PieceResult + piece *schedulerv1.PieceResult peer *resource.Peer mock func(peer *resource.Peer) expect func(t *testing.T, peer *resource.Peer) }{ { name: "piece success", - piece: &rpcscheduler.PieceResult{ - PieceInfo: &base.PieceInfo{ + piece: &schedulerv1.PieceResult{ + PieceInfo: &commonv1.PieceInfo{ PieceNum: 0, PieceMd5: "ac32345ef819f03710e2105c81106fdd", }, @@ -2449,8 +2433,8 @@ func TestService_handlePieceSuccess(t *testing.T) { }, { name: "piece state is PeerStateBackToSource", - piece: &rpcscheduler.PieceResult{ - PieceInfo: &base.PieceInfo{ + piece: &schedulerv1.PieceResult{ + PieceInfo: &commonv1.PieceInfo{ PieceNum: 0, PieceMd5: "ac32345ef819f03710e2105c81106fdd", }, @@ -2467,7 +2451,7 @@ func TestService_handlePieceSuccess(t *testing.T) { assert.Equal(peer.PieceCosts(), []int64{1}) piece, ok := peer.Task.LoadPiece(0) assert.True(ok) - assert.EqualValues(piece, &base.PieceInfo{ + assert.EqualValues(piece, &commonv1.PieceInfo{ PieceNum: 0, PieceMd5: "ac32345ef819f03710e2105c81106fdd", }) @@ -2494,15 +2478,15 @@ func TestService_handlePieceSuccess(t *testing.T) { func TestService_handlePieceFail(t *testing.T) { mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) tests := []struct { name string config *config.Config - piece *rpcscheduler.PieceResult + piece *schedulerv1.PieceResult peer *resource.Peer parent *resource.Peer - run func(t *testing.T, svc *Service, peer *resource.Peer, parent *resource.Peer, piece *rpcscheduler.PieceResult, 
peerManager resource.PeerManager, seedPeer resource.SeedPeer, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) + run func(t *testing.T, svc *Service, peer *resource.Peer, parent *resource.Peer, piece *schedulerv1.PieceResult, peerManager resource.PeerManager, seedPeer resource.SeedPeer, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) }{ { name: "peer state is PeerStateBackToSource", @@ -2511,10 +2495,10 @@ func TestService_handlePieceFail(t *testing.T) { SeedPeer: &config.SeedPeerConfig{Enable: true}, Metrics: &config.MetricsConfig{EnablePeerHost: true}, }, - piece: &rpcscheduler.PieceResult{}, + piece: &schedulerv1.PieceResult{}, peer: resource.NewPeer(mockPeerID, mockTask, mockHost), parent: resource.NewPeer(mockSeedPeerID, mockTask, mockHost), - run: func(t *testing.T, svc *Service, peer *resource.Peer, parent *resource.Peer, piece *rpcscheduler.PieceResult, peerManager resource.PeerManager, seedPeer resource.SeedPeer, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, peer *resource.Peer, parent *resource.Peer, piece *schedulerv1.PieceResult, peerManager resource.PeerManager, seedPeer resource.SeedPeer, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { peer.FSM.SetState(resource.PeerStateBackToSource) svc.handlePieceFail(context.Background(), peer, piece) @@ -2529,13 +2513,13 @@ func TestService_handlePieceFail(t *testing.T) { SeedPeer: &config.SeedPeerConfig{Enable: true}, Metrics: &config.MetricsConfig{EnablePeerHost: true}, }, - piece: &rpcscheduler.PieceResult{ - Code: 
base.Code_ClientWaitPieceReady, + piece: &schedulerv1.PieceResult{ + Code: commonv1.Code_ClientWaitPieceReady, DstPid: mockSeedPeerID, }, peer: resource.NewPeer(mockPeerID, mockTask, mockHost), parent: resource.NewPeer(mockSeedPeerID, mockTask, mockHost), - run: func(t *testing.T, svc *Service, peer *resource.Peer, parent *resource.Peer, piece *rpcscheduler.PieceResult, peerManager resource.PeerManager, seedPeer resource.SeedPeer, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, peer *resource.Peer, parent *resource.Peer, piece *schedulerv1.PieceResult, peerManager resource.PeerManager, seedPeer resource.SeedPeer, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) blocklist := set.NewSafeSet[string]() blocklist.Add(mockSeedPeerID) @@ -2557,13 +2541,13 @@ func TestService_handlePieceFail(t *testing.T) { SeedPeer: &config.SeedPeerConfig{Enable: true}, Metrics: &config.MetricsConfig{EnablePeerHost: true}, }, - piece: &rpcscheduler.PieceResult{ - Code: base.Code_PeerTaskNotFound, + piece: &schedulerv1.PieceResult{ + Code: commonv1.Code_PeerTaskNotFound, DstPid: mockSeedPeerID, }, peer: resource.NewPeer(mockPeerID, mockTask, mockHost), parent: resource.NewPeer(mockSeedPeerID, mockTask, mockHost), - run: func(t *testing.T, svc *Service, peer *resource.Peer, parent *resource.Peer, piece *rpcscheduler.PieceResult, peerManager resource.PeerManager, seedPeer resource.SeedPeer, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, peer *resource.Peer, parent *resource.Peer, piece *schedulerv1.PieceResult, peerManager 
resource.PeerManager, seedPeer resource.SeedPeer, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) parent.FSM.SetState(resource.PeerStateRunning) blocklist := set.NewSafeSet[string]() @@ -2587,13 +2571,13 @@ func TestService_handlePieceFail(t *testing.T) { SeedPeer: &config.SeedPeerConfig{Enable: true}, Metrics: &config.MetricsConfig{EnablePeerHost: true}, }, - piece: &rpcscheduler.PieceResult{ - Code: base.Code_ClientPieceNotFound, + piece: &schedulerv1.PieceResult{ + Code: commonv1.Code_ClientPieceNotFound, DstPid: mockSeedPeerID, }, peer: resource.NewPeer(mockPeerID, mockTask, mockHost), parent: resource.NewPeer(mockSeedPeerID, mockTask, mockHost), - run: func(t *testing.T, svc *Service, peer *resource.Peer, parent *resource.Peer, piece *rpcscheduler.PieceResult, peerManager resource.PeerManager, seedPeer resource.SeedPeer, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, peer *resource.Peer, parent *resource.Peer, piece *schedulerv1.PieceResult, peerManager resource.PeerManager, seedPeer resource.SeedPeer, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) peer.Host.Type = resource.HostTypeNormal blocklist := set.NewSafeSet[string]() @@ -2616,13 +2600,13 @@ func TestService_handlePieceFail(t *testing.T) { SeedPeer: &config.SeedPeerConfig{Enable: true}, Metrics: &config.MetricsConfig{EnablePeerHost: true}, }, - piece: &rpcscheduler.PieceResult{ - Code: base.Code_ClientPieceRequestFail, + piece: &schedulerv1.PieceResult{ + Code: commonv1.Code_ClientPieceRequestFail, DstPid: mockSeedPeerID, }, peer: 
resource.NewPeer(mockPeerID, mockTask, mockHost), parent: resource.NewPeer(mockSeedPeerID, mockTask, mockHost), - run: func(t *testing.T, svc *Service, peer *resource.Peer, parent *resource.Peer, piece *rpcscheduler.PieceResult, peerManager resource.PeerManager, seedPeer resource.SeedPeer, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, peer *resource.Peer, parent *resource.Peer, piece *schedulerv1.PieceResult, peerManager resource.PeerManager, seedPeer resource.SeedPeer, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) parent.FSM.SetState(resource.PeerStateRunning) blocklist := set.NewSafeSet[string]() @@ -2646,13 +2630,13 @@ func TestService_handlePieceFail(t *testing.T) { SeedPeer: &config.SeedPeerConfig{Enable: true}, Metrics: &config.MetricsConfig{EnablePeerHost: true}, }, - piece: &rpcscheduler.PieceResult{ - Code: base.Code_ClientPieceRequestFail, + piece: &schedulerv1.PieceResult{ + Code: commonv1.Code_ClientPieceRequestFail, DstPid: mockSeedPeerID, }, peer: resource.NewPeer(mockPeerID, mockTask, mockHost), parent: resource.NewPeer(mockSeedPeerID, mockTask, mockHost), - run: func(t *testing.T, svc *Service, peer *resource.Peer, parent *resource.Peer, piece *rpcscheduler.PieceResult, peerManager resource.PeerManager, seedPeer resource.SeedPeer, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { + run: func(t *testing.T, svc *Service, peer *resource.Peer, parent *resource.Peer, piece *schedulerv1.PieceResult, peerManager resource.PeerManager, seedPeer resource.SeedPeer, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp 
*resource.MockPeerManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) { peer.FSM.SetState(resource.PeerStateRunning) parent.FSM.SetState(resource.PeerStateRunning) blocklist := set.NewSafeSet[string]() @@ -2795,7 +2779,7 @@ func TestService_handlePeerSuccess(t *testing.T) { mockRawHost.Ip = ip mockRawHost.DownPort = int32(port) mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := resource.NewPeer(mockPeerID, mockTask, mockHost) svc := New(&config.Config{Scheduler: mockSchedulerConfig, Metrics: &config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig, storage) @@ -2874,7 +2858,7 @@ func TestService_handlePeerFail(t *testing.T) { storage := storagemocks.NewMockStorage(ctl) svc := New(&config.Config{Scheduler: mockSchedulerConfig, Metrics: &config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig, storage) mockHost := resource.NewHost(mockRawHost) - mockTask := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) peer := resource.NewPeer(mockSeedPeerID, mockTask, mockHost) child := resource.NewPeer(mockPeerID, mockTask, mockHost) @@ -2888,13 +2872,13 @@ func TestService_handlePeerFail(t *testing.T) { func TestService_handleTaskSuccess(t *testing.T) { tests := []struct { name string - result *rpcscheduler.PeerResult + result *schedulerv1.PeerResult mock func(task *resource.Task) expect func(t *testing.T, task *resource.Task) }{ { name: "task state is TaskStatePending", - result: 
&rpcscheduler.PeerResult{}, + result: &schedulerv1.PeerResult{}, mock: func(task *resource.Task) { task.FSM.SetState(resource.TaskStatePending) }, @@ -2905,7 +2889,7 @@ func TestService_handleTaskSuccess(t *testing.T) { }, { name: "task state is TaskStateSucceeded", - result: &rpcscheduler.PeerResult{}, + result: &schedulerv1.PeerResult{}, mock: func(task *resource.Task) { task.FSM.SetState(resource.TaskStateSucceeded) }, @@ -2916,7 +2900,7 @@ func TestService_handleTaskSuccess(t *testing.T) { }, { name: "task state is TaskStateRunning", - result: &rpcscheduler.PeerResult{ + result: &schedulerv1.PeerResult{ TotalPieceCount: 1, ContentLength: 1, }, @@ -2932,7 +2916,7 @@ func TestService_handleTaskSuccess(t *testing.T) { }, { name: "task state is TaskStateFailed", - result: &rpcscheduler.PeerResult{ + result: &schedulerv1.PeerResult{ TotalPieceCount: 1, ContentLength: 1, }, @@ -2957,7 +2941,7 @@ func TestService_handleTaskSuccess(t *testing.T) { dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) svc := New(&config.Config{Scheduler: mockSchedulerConfig, Metrics: &config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig, storage) - task := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + task := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) tc.mock(task) svc.handleTaskSuccess(context.Background(), task, tc.result) @@ -2968,20 +2952,20 @@ func TestService_handleTaskSuccess(t *testing.T) { func TestService_handleTaskFail(t *testing.T) { rst := status.Newf(codes.Aborted, "response is not valid") - st, err := rst.WithDetails(&errordetails.SourceError{Temporary: false}) + st, err := rst.WithDetails(&errordetailsv1.SourceError{Temporary: false}) if err != nil { t.Fatal(err) } rtst := status.Newf(codes.Aborted, "response is 
not valid") - tst, err := rtst.WithDetails(&errordetails.SourceError{Temporary: true}) + tst, err := rtst.WithDetails(&errordetailsv1.SourceError{Temporary: true}) if err != nil { t.Fatal(err) } tests := []struct { name string - backToSourceErr *errordetails.SourceError + backToSourceErr *errordetailsv1.SourceError seedPeerErr error mock func(task *resource.Task) expect func(t *testing.T, task *resource.Task) @@ -3028,7 +3012,7 @@ func TestService_handleTaskFail(t *testing.T) { }, { name: "peer back-to-source fails due to an unrecoverable error", - backToSourceErr: &errordetails.SourceError{Temporary: false}, + backToSourceErr: &errordetailsv1.SourceError{Temporary: false}, mock: func(task *resource.Task) { task.FSM.SetState(resource.TaskStateRunning) }, @@ -3040,7 +3024,7 @@ func TestService_handleTaskFail(t *testing.T) { }, { name: "peer back-to-source fails due to an temporary error", - backToSourceErr: &errordetails.SourceError{Temporary: true}, + backToSourceErr: &errordetailsv1.SourceError{Temporary: true}, mock: func(task *resource.Task) { task.FSM.SetState(resource.TaskStateRunning) }, @@ -3096,7 +3080,7 @@ func TestService_handleTaskFail(t *testing.T) { dynconfig := configmocks.NewMockDynconfigInterface(ctl) storage := storagemocks.NewMockStorage(ctl) svc := New(&config.Config{Scheduler: mockSchedulerConfig, Metrics: &config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig, storage) - task := resource.NewTask(mockTaskID, mockTaskURL, base.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) + task := resource.NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)) tc.mock(task) svc.handleTaskFail(context.Background(), task, tc.backToSourceErr, tc.seedPeerErr) diff --git a/test/e2e/manager/preheat.go b/test/e2e/manager/preheat.go index 686f2ce4c..d319bbff4 100644 --- a/test/e2e/manager/preheat.go +++ 
b/test/e2e/manager/preheat.go @@ -28,11 +28,12 @@ import ( . "github.com/onsi/ginkgo/v2" //nolint . "github.com/onsi/gomega" //nolint + commonv1 "d7y.io/api/pkg/apis/common/v1" + internaljob "d7y.io/dragonfly/v2/internal/job" "d7y.io/dragonfly/v2/manager/model" "d7y.io/dragonfly/v2/manager/types" "d7y.io/dragonfly/v2/pkg/idgen" - "d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/pkg/structure" "d7y.io/dragonfly/v2/test/e2e/e2eutil" ) @@ -79,7 +80,7 @@ var _ = Describe("Preheat with manager", func() { Expect(done).Should(BeTrue()) // generate task_id, also the filename - seedPeerTaskID := idgen.TaskID(url, &base.UrlMeta{}) + seedPeerTaskID := idgen.TaskID(url, &commonv1.UrlMeta{}) fmt.Println(seedPeerTaskID) sha256sum, err := checkPreheatResult(seedPeerPods, seedPeerTaskID)