feat: add announce proto of the cache task in scheduler (#327)

Signed-off-by: Gaius <gaius.qi@gmail.com>
This commit is contained in:
Gaius 2024-06-17 22:06:34 +08:00 committed by GitHub
parent 89446c47da
commit e1e59d1696
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
11 changed files with 5337 additions and 456 deletions

2
Cargo.lock generated
View File

@ -160,7 +160,7 @@ dependencies = [
[[package]]
name = "dragonfly-api"
version = "2.0.120"
version = "2.0.121"
dependencies = [
"prost 0.11.9",
"prost-types 0.12.6",

View File

@ -1,6 +1,6 @@
[package]
name = "dragonfly-api"
version = "2.0.120"
version = "2.0.121"
authors = ["Gaius <gaius.qi@gmail.com>"]
edition = "2021"
license = "Apache-2.0"

View File

@ -138,7 +138,7 @@ message DownloadCacheTaskRequest {
// Task piece length.
uint64 piece_length = 5 [(validate.rules).uint64.gte = 1];
// File path to be exported.
string output_path = 6 [(validate.rules).string = {min_len: 1}];
string output_path = 6 [(validate.rules).string.min_len = 1];
// Download timeout.
optional google.protobuf.Duration timeout = 7;
}

View File

@ -43,6 +43,26 @@ func (m *MockSchedulerClient) EXPECT() *MockSchedulerClientMockRecorder {
return m.recorder
}
// AnnounceCachePeer mocks base method.
// NOTE(review): this file follows mockgen output patterns throughout — prefer
// regenerating the mock over hand-editing.
func (m *MockSchedulerClient) AnnounceCachePeer(ctx context.Context, opts ...grpc.CallOption) (scheduler.Scheduler_AnnounceCachePeerClient, error) {
m.ctrl.T.Helper()
// Fold ctx and the variadic call options into one argument list for gomock.
varargs := []any{ctx}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "AnnounceCachePeer", varargs...)
// Type-assert the stubbed return values; zero values when unset.
ret0, _ := ret[0].(scheduler.Scheduler_AnnounceCachePeerClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// AnnounceCachePeer indicates an expected call of AnnounceCachePeer.
func (mr *MockSchedulerClientMockRecorder) AnnounceCachePeer(ctx any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AnnounceCachePeer", reflect.TypeOf((*MockSchedulerClient)(nil).AnnounceCachePeer), varargs...)
}
// AnnounceHost mocks base method.
func (m *MockSchedulerClient) AnnounceHost(ctx context.Context, in *scheduler.AnnounceHostRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
@ -83,6 +103,46 @@ func (mr *MockSchedulerClientMockRecorder) AnnouncePeer(ctx any, opts ...any) *g
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AnnouncePeer", reflect.TypeOf((*MockSchedulerClient)(nil).AnnouncePeer), varargs...)
}
// DeleteCachePeer mocks base method.
func (m *MockSchedulerClient) DeleteCachePeer(ctx context.Context, in *scheduler.DeleteCachePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
// Combine the fixed arguments with the variadic call options for the recorded call.
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "DeleteCachePeer", varargs...)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DeleteCachePeer indicates an expected call of DeleteCachePeer.
func (mr *MockSchedulerClientMockRecorder) DeleteCachePeer(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCachePeer", reflect.TypeOf((*MockSchedulerClient)(nil).DeleteCachePeer), varargs...)
}
// DeleteCacheTask mocks base method.
func (m *MockSchedulerClient) DeleteCacheTask(ctx context.Context, in *scheduler.DeleteCacheTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "DeleteCacheTask", varargs...)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DeleteCacheTask indicates an expected call of DeleteCacheTask.
func (mr *MockSchedulerClientMockRecorder) DeleteCacheTask(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCacheTask", reflect.TypeOf((*MockSchedulerClient)(nil).DeleteCacheTask), varargs...)
}
// DeleteHost mocks base method.
func (m *MockSchedulerClient) DeleteHost(ctx context.Context, in *scheduler.DeleteHostRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
@ -143,6 +203,46 @@ func (mr *MockSchedulerClientMockRecorder) DeleteTask(ctx, in any, opts ...any)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTask", reflect.TypeOf((*MockSchedulerClient)(nil).DeleteTask), varargs...)
}
// StatCachePeer mocks base method.
func (m *MockSchedulerClient) StatCachePeer(ctx context.Context, in *scheduler.StatCachePeerRequest, opts ...grpc.CallOption) (*common.CachePeer, error) {
m.ctrl.T.Helper()
// Combine the fixed arguments with the variadic call options for the recorded call.
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "StatCachePeer", varargs...)
ret0, _ := ret[0].(*common.CachePeer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StatCachePeer indicates an expected call of StatCachePeer.
func (mr *MockSchedulerClientMockRecorder) StatCachePeer(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatCachePeer", reflect.TypeOf((*MockSchedulerClient)(nil).StatCachePeer), varargs...)
}
// StatCacheTask mocks base method.
func (m *MockSchedulerClient) StatCacheTask(ctx context.Context, in *scheduler.StatCacheTaskRequest, opts ...grpc.CallOption) (*common.CacheTask, error) {
m.ctrl.T.Helper()
varargs := []any{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "StatCacheTask", varargs...)
ret0, _ := ret[0].(*common.CacheTask)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StatCacheTask indicates an expected call of StatCacheTask.
func (mr *MockSchedulerClientMockRecorder) StatCacheTask(ctx, in any, opts ...any) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]any{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatCacheTask", reflect.TypeOf((*MockSchedulerClient)(nil).StatCacheTask), varargs...)
}
// StatPeer mocks base method.
func (m *MockSchedulerClient) StatPeer(ctx context.Context, in *scheduler.StatPeerRequest, opts ...grpc.CallOption) (*common.Peer, error) {
m.ctrl.T.Helper()
@ -477,6 +577,143 @@ func (mr *MockScheduler_SyncProbesClientMockRecorder) Trailer() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockScheduler_SyncProbesClient)(nil).Trailer))
}
// MockScheduler_AnnounceCachePeerClient is a mock of Scheduler_AnnounceCachePeerClient interface.
// NOTE(review): mockgen-generated client-stream mock; regenerate rather than hand-edit.
type MockScheduler_AnnounceCachePeerClient struct {
ctrl *gomock.Controller
recorder *MockScheduler_AnnounceCachePeerClientMockRecorder
}
// MockScheduler_AnnounceCachePeerClientMockRecorder is the mock recorder for MockScheduler_AnnounceCachePeerClient.
type MockScheduler_AnnounceCachePeerClientMockRecorder struct {
mock *MockScheduler_AnnounceCachePeerClient
}
// NewMockScheduler_AnnounceCachePeerClient creates a new mock instance.
func NewMockScheduler_AnnounceCachePeerClient(ctrl *gomock.Controller) *MockScheduler_AnnounceCachePeerClient {
mock := &MockScheduler_AnnounceCachePeerClient{ctrl: ctrl}
mock.recorder = &MockScheduler_AnnounceCachePeerClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockScheduler_AnnounceCachePeerClient) EXPECT() *MockScheduler_AnnounceCachePeerClientMockRecorder {
return m.recorder
}
// CloseSend mocks base method.
func (m *MockScheduler_AnnounceCachePeerClient) CloseSend() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CloseSend")
ret0, _ := ret[0].(error)
return ret0
}
// CloseSend indicates an expected call of CloseSend.
func (mr *MockScheduler_AnnounceCachePeerClientMockRecorder) CloseSend() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockScheduler_AnnounceCachePeerClient)(nil).CloseSend))
}
// Context mocks base method.
func (m *MockScheduler_AnnounceCachePeerClient) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockScheduler_AnnounceCachePeerClientMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockScheduler_AnnounceCachePeerClient)(nil).Context))
}
// Header mocks base method.
func (m *MockScheduler_AnnounceCachePeerClient) Header() (metadata.MD, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Header")
ret0, _ := ret[0].(metadata.MD)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Header indicates an expected call of Header.
func (mr *MockScheduler_AnnounceCachePeerClientMockRecorder) Header() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockScheduler_AnnounceCachePeerClient)(nil).Header))
}
// Recv mocks base method.
func (m *MockScheduler_AnnounceCachePeerClient) Recv() (*scheduler.AnnounceCachePeerResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*scheduler.AnnounceCachePeerResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Recv indicates an expected call of Recv.
func (mr *MockScheduler_AnnounceCachePeerClientMockRecorder) Recv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockScheduler_AnnounceCachePeerClient)(nil).Recv))
}
// RecvMsg mocks base method.
// Receiver is named m_2 because the parameter m would shadow the usual receiver name.
func (m_2 *MockScheduler_AnnounceCachePeerClient) RecvMsg(m any) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockScheduler_AnnounceCachePeerClientMockRecorder) RecvMsg(m any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockScheduler_AnnounceCachePeerClient)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockScheduler_AnnounceCachePeerClient) Send(arg0 *scheduler.AnnounceCachePeerRequest) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Send indicates an expected call of Send.
func (mr *MockScheduler_AnnounceCachePeerClientMockRecorder) Send(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockScheduler_AnnounceCachePeerClient)(nil).Send), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockScheduler_AnnounceCachePeerClient) SendMsg(m any) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockScheduler_AnnounceCachePeerClientMockRecorder) SendMsg(m any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockScheduler_AnnounceCachePeerClient)(nil).SendMsg), m)
}
// Trailer mocks base method.
func (m *MockScheduler_AnnounceCachePeerClient) Trailer() metadata.MD {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Trailer")
ret0, _ := ret[0].(metadata.MD)
return ret0
}
// Trailer indicates an expected call of Trailer.
func (mr *MockScheduler_AnnounceCachePeerClientMockRecorder) Trailer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockScheduler_AnnounceCachePeerClient)(nil).Trailer))
}
// MockSchedulerServer is a mock of SchedulerServer interface.
type MockSchedulerServer struct {
ctrl *gomock.Controller
@ -500,6 +737,20 @@ func (m *MockSchedulerServer) EXPECT() *MockSchedulerServerMockRecorder {
return m.recorder
}
// AnnounceCachePeer mocks base method.
// Server-side streaming handler mock: takes the stream and returns only an error.
func (m *MockSchedulerServer) AnnounceCachePeer(arg0 scheduler.Scheduler_AnnounceCachePeerServer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AnnounceCachePeer", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// AnnounceCachePeer indicates an expected call of AnnounceCachePeer.
func (mr *MockSchedulerServerMockRecorder) AnnounceCachePeer(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AnnounceCachePeer", reflect.TypeOf((*MockSchedulerServer)(nil).AnnounceCachePeer), arg0)
}
// AnnounceHost mocks base method.
func (m *MockSchedulerServer) AnnounceHost(arg0 context.Context, arg1 *scheduler.AnnounceHostRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
@ -529,6 +780,36 @@ func (mr *MockSchedulerServerMockRecorder) AnnouncePeer(arg0 any) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AnnouncePeer", reflect.TypeOf((*MockSchedulerServer)(nil).AnnouncePeer), arg0)
}
// DeleteCachePeer mocks base method.
func (m *MockSchedulerServer) DeleteCachePeer(arg0 context.Context, arg1 *scheduler.DeleteCachePeerRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteCachePeer", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DeleteCachePeer indicates an expected call of DeleteCachePeer.
func (mr *MockSchedulerServerMockRecorder) DeleteCachePeer(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCachePeer", reflect.TypeOf((*MockSchedulerServer)(nil).DeleteCachePeer), arg0, arg1)
}
// DeleteCacheTask mocks base method.
func (m *MockSchedulerServer) DeleteCacheTask(arg0 context.Context, arg1 *scheduler.DeleteCacheTaskRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteCacheTask", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DeleteCacheTask indicates an expected call of DeleteCacheTask.
func (mr *MockSchedulerServerMockRecorder) DeleteCacheTask(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCacheTask", reflect.TypeOf((*MockSchedulerServer)(nil).DeleteCacheTask), arg0, arg1)
}
// DeleteHost mocks base method.
func (m *MockSchedulerServer) DeleteHost(arg0 context.Context, arg1 *scheduler.DeleteHostRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
@ -574,6 +855,36 @@ func (mr *MockSchedulerServerMockRecorder) DeleteTask(arg0, arg1 any) *gomock.Ca
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTask", reflect.TypeOf((*MockSchedulerServer)(nil).DeleteTask), arg0, arg1)
}
// StatCachePeer mocks base method.
func (m *MockSchedulerServer) StatCachePeer(arg0 context.Context, arg1 *scheduler.StatCachePeerRequest) (*common.CachePeer, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StatCachePeer", arg0, arg1)
ret0, _ := ret[0].(*common.CachePeer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StatCachePeer indicates an expected call of StatCachePeer.
func (mr *MockSchedulerServerMockRecorder) StatCachePeer(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatCachePeer", reflect.TypeOf((*MockSchedulerServer)(nil).StatCachePeer), arg0, arg1)
}
// StatCacheTask mocks base method.
func (m *MockSchedulerServer) StatCacheTask(arg0 context.Context, arg1 *scheduler.StatCacheTaskRequest) (*common.CacheTask, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StatCacheTask", arg0, arg1)
ret0, _ := ret[0].(*common.CacheTask)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StatCacheTask indicates an expected call of StatCacheTask.
func (mr *MockSchedulerServerMockRecorder) StatCacheTask(arg0, arg1 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatCacheTask", reflect.TypeOf((*MockSchedulerServer)(nil).StatCacheTask), arg0, arg1)
}
// StatPeer mocks base method.
func (m *MockSchedulerServer) StatPeer(arg0 context.Context, arg1 *scheduler.StatPeerRequest) (*common.Peer, error) {
m.ctrl.T.Helper()
@ -920,3 +1231,137 @@ func (mr *MockScheduler_SyncProbesServerMockRecorder) SetTrailer(arg0 any) *gomo
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockScheduler_SyncProbesServer)(nil).SetTrailer), arg0)
}
// MockScheduler_AnnounceCachePeerServer is a mock of Scheduler_AnnounceCachePeerServer interface.
// NOTE(review): mockgen-generated server-stream mock; regenerate rather than hand-edit.
type MockScheduler_AnnounceCachePeerServer struct {
ctrl *gomock.Controller
recorder *MockScheduler_AnnounceCachePeerServerMockRecorder
}
// MockScheduler_AnnounceCachePeerServerMockRecorder is the mock recorder for MockScheduler_AnnounceCachePeerServer.
type MockScheduler_AnnounceCachePeerServerMockRecorder struct {
mock *MockScheduler_AnnounceCachePeerServer
}
// NewMockScheduler_AnnounceCachePeerServer creates a new mock instance.
func NewMockScheduler_AnnounceCachePeerServer(ctrl *gomock.Controller) *MockScheduler_AnnounceCachePeerServer {
mock := &MockScheduler_AnnounceCachePeerServer{ctrl: ctrl}
mock.recorder = &MockScheduler_AnnounceCachePeerServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockScheduler_AnnounceCachePeerServer) EXPECT() *MockScheduler_AnnounceCachePeerServerMockRecorder {
return m.recorder
}
// Context mocks base method.
func (m *MockScheduler_AnnounceCachePeerServer) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockScheduler_AnnounceCachePeerServerMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockScheduler_AnnounceCachePeerServer)(nil).Context))
}
// Recv mocks base method.
// Server side receives AnnounceCachePeerRequest (mirror of the client mock's Recv).
func (m *MockScheduler_AnnounceCachePeerServer) Recv() (*scheduler.AnnounceCachePeerRequest, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*scheduler.AnnounceCachePeerRequest)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Recv indicates an expected call of Recv.
func (mr *MockScheduler_AnnounceCachePeerServerMockRecorder) Recv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockScheduler_AnnounceCachePeerServer)(nil).Recv))
}
// RecvMsg mocks base method.
// Receiver is named m_2 because the parameter m would shadow the usual receiver name.
func (m_2 *MockScheduler_AnnounceCachePeerServer) RecvMsg(m any) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockScheduler_AnnounceCachePeerServerMockRecorder) RecvMsg(m any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockScheduler_AnnounceCachePeerServer)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockScheduler_AnnounceCachePeerServer) Send(arg0 *scheduler.AnnounceCachePeerResponse) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Send indicates an expected call of Send.
func (mr *MockScheduler_AnnounceCachePeerServerMockRecorder) Send(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockScheduler_AnnounceCachePeerServer)(nil).Send), arg0)
}
// SendHeader mocks base method.
func (m *MockScheduler_AnnounceCachePeerServer) SendHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SendHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockScheduler_AnnounceCachePeerServerMockRecorder) SendHeader(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockScheduler_AnnounceCachePeerServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockScheduler_AnnounceCachePeerServer) SendMsg(m any) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockScheduler_AnnounceCachePeerServerMockRecorder) SendMsg(m any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockScheduler_AnnounceCachePeerServer)(nil).SendMsg), m)
}
// SetHeader mocks base method.
func (m *MockScheduler_AnnounceCachePeerServer) SetHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockScheduler_AnnounceCachePeerServerMockRecorder) SetHeader(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockScheduler_AnnounceCachePeerServer)(nil).SetHeader), arg0)
}
// SetTrailer mocks base method.
// Note: SetTrailer has no return value, matching the grpc.ServerStream interface.
func (m *MockScheduler_AnnounceCachePeerServer) SetTrailer(arg0 metadata.MD) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "SetTrailer", arg0)
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockScheduler_AnnounceCachePeerServerMockRecorder) SetTrailer(arg0 any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockScheduler_AnnounceCachePeerServer)(nil).SetTrailer), arg0)
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -43,8 +43,8 @@ message DownloadPeerBackToSourceStartedRequest {
optional string description = 1 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
// RescheduleRequest represents reschedule request of AnnouncePeerRequest.
message RescheduleRequest {
// ReschedulePeerRequest represents reschedule request of AnnouncePeerRequest.
message ReschedulePeerRequest {
// Candidate parent ids.
repeated common.v2.Peer candidate_parents = 1;
@ -129,7 +129,7 @@ message AnnouncePeerRequest {
RegisterPeerRequest register_peer_request = 4;
DownloadPeerStartedRequest download_peer_started_request = 5;
DownloadPeerBackToSourceStartedRequest download_peer_back_to_source_started_request = 6;
RescheduleRequest reschedule_request = 7;
ReschedulePeerRequest reschedule_peer_request = 7;
DownloadPeerFinishedRequest download_peer_finished_request = 8;
DownloadPeerBackToSourceFinishedRequest download_peer_back_to_source_finished_request = 9;
DownloadPeerFailedRequest download_peer_failed_request = 10;
@ -148,7 +148,7 @@ message EmptyTaskResponse {
// NormalTaskResponse represents normal task response of AnnouncePeerResponse.
message NormalTaskResponse {
// Candidate parents.
repeated common.v2.Peer candidate_parents = 1 [(validate.rules).repeated = {min_items: 1}];
repeated common.v2.Peer candidate_parents = 1 [(validate.rules).repeated.min_items = 1];
}
// NeedBackToSourceResponse represents need back-to-source response of AnnouncePeerResponse.
@ -191,7 +191,7 @@ message DeletePeerRequest {
// StatTaskRequest represents request of StatTask.
message StatTaskRequest {
// Task id.
string id = 1 [(validate.rules).string.min_len = 1];
string task_id = 1 [(validate.rules).string.min_len = 1];
}
// DeleteTaskRequest represents request of DeleteTask.
@ -211,7 +211,7 @@ message AnnounceHostRequest {
// DeleteHostRequest represents request of DeleteHost.
message DeleteHostRequest{
// Host id.
string id = 1 [(validate.rules).string.min_len = 1];
string host_id = 1 [(validate.rules).string.min_len = 1];
}
// ProbeStartedRequest represents started request of SyncProbesRequest.
@ -268,6 +268,125 @@ message SyncProbesResponse {
repeated common.v2.Host hosts = 1 [(validate.rules).repeated = {min_items: 1, ignore_empty: true}];
}
// RegisterCachePeerRequest represents cache peer registered request of AnnounceCachePeerRequest.
message RegisterCachePeerRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Tag is used to distinguish different cache tasks.
optional string tag = 3;
// Application of task.
optional string application = 4;
// Task piece length.
uint64 piece_length = 5 [(validate.rules).uint64.gte = 1];
// File path to be exported.
string output_path = 6 [(validate.rules).string.min_len = 1];
// Download timeout.
optional google.protobuf.Duration timeout = 7;
}
// DownloadCachePeerStartedRequest represents cache peer download started request of AnnounceCachePeerRequest.
// Intentionally empty: the event itself carries all needed information.
message DownloadCachePeerStartedRequest {
}
// RescheduleCachePeerRequest represents reschedule request of AnnounceCachePeerRequest.
message RescheduleCachePeerRequest {
// Candidate parent ids.
repeated common.v2.CachePeer candidate_parents = 1;
// The description of the reschedule reason.
optional string description = 2 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
// DownloadCachePeerFinishedRequest represents cache peer download finished request of AnnounceCachePeerRequest.
message DownloadCachePeerFinishedRequest {
// Total piece count.
uint32 piece_count = 1;
}
// DownloadCachePeerFailedRequest represents cache peer download failed request of AnnounceCachePeerRequest.
message DownloadCachePeerFailedRequest {
// The description of the download failed.
optional string description = 1 [(validate.rules).string = {min_len: 1, ignore_empty: true}];
}
// AnnounceCachePeerRequest represents request of AnnounceCachePeer.
message AnnounceCachePeerRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
oneof request {
option (validate.required) = true;
RegisterCachePeerRequest register_cache_peer_request = 4;
DownloadCachePeerStartedRequest download_cache_peer_started_request = 5;
RescheduleCachePeerRequest reschedule_cache_peer_request = 6;
DownloadCachePeerFinishedRequest download_cache_peer_finished_request = 7;
DownloadCachePeerFailedRequest download_cache_peer_failed_request = 8;
// Piece-level events reuse the download piece messages shared with the
// non-cache AnnouncePeerRequest flow.
DownloadPieceFinishedRequest download_piece_finished_request = 9;
DownloadPieceFailedRequest download_piece_failed_request = 10;
}
}
// EmptyCacheTaskResponse represents empty cache task response of AnnounceCachePeerResponse.
message EmptyCacheTaskResponse {
}
// NormalCacheTaskResponse represents normal cache task response of AnnounceCachePeerResponse.
message NormalCacheTaskResponse {
// Candidate parents.
repeated common.v2.CachePeer candidate_parents = 1 [(validate.rules).repeated.min_items = 1];
}
// AnnounceCachePeerResponse represents response of AnnounceCachePeer.
message AnnounceCachePeerResponse {
oneof response {
option (validate.required) = true;
// NOTE(review): field names drop the "cache_" prefix (empty_task_response)
// although the types are EmptyCacheTaskResponse/NormalCacheTaskResponse;
// mirrors AnnouncePeerResponse naming — confirm this is intended.
EmptyCacheTaskResponse empty_task_response = 1;
NormalCacheTaskResponse normal_task_response = 2;
}
}
// StatCachePeerRequest represents request of StatCachePeer.
message StatCachePeerRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
}
// DeleteCachePeerRequest represents request of DeleteCachePeer.
message DeleteCachePeerRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
}
// StatCacheTaskRequest represents request of StatCacheTask.
message StatCacheTaskRequest {
// Task id.
string task_id = 1 [(validate.rules).string.min_len = 1];
}
// DeleteCacheTaskRequest represents request of DeleteCacheTask.
message DeleteCacheTaskRequest {
// Host id.
string host_id = 1 [(validate.rules).string.min_len = 1];
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
}
// Scheduler RPC Service.
service Scheduler {
// AnnouncePeer announces peer to scheduler.
@ -293,4 +412,19 @@ service Scheduler {
// SyncProbes sync probes of the host.
rpc SyncProbes(stream SyncProbesRequest)returns(stream SyncProbesResponse);
// AnnounceCachePeer announces cache peer to scheduler.
rpc AnnounceCachePeer(stream AnnounceCachePeerRequest) returns(stream AnnounceCachePeerResponse);
// StatCachePeer checks information of cache peer.
rpc StatCachePeer(StatCachePeerRequest)returns(common.v2.CachePeer);
// DeleteCachePeer releases cache peer in scheduler.
rpc DeleteCachePeer(DeleteCachePeerRequest)returns(google.protobuf.Empty);
// StatCacheTask checks information of cache task.
rpc StatCacheTask(StatCacheTaskRequest)returns(common.v2.CacheTask);
// DeleteCacheTask releases cache task in scheduler.
rpc DeleteCacheTask(DeleteCacheTaskRequest)returns(google.protobuf.Empty);
}

View File

@ -40,6 +40,16 @@ type SchedulerClient interface {
DeleteHost(ctx context.Context, in *DeleteHostRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// SyncProbes sync probes of the host.
SyncProbes(ctx context.Context, opts ...grpc.CallOption) (Scheduler_SyncProbesClient, error)
// AnnounceCachePeer announces cache peer to scheduler.
AnnounceCachePeer(ctx context.Context, opts ...grpc.CallOption) (Scheduler_AnnounceCachePeerClient, error)
// Checks information of cache peer.
StatCachePeer(ctx context.Context, in *StatCachePeerRequest, opts ...grpc.CallOption) (*v2.CachePeer, error)
// DeleteCachePeer releases cache peer in scheduler.
DeleteCachePeer(ctx context.Context, in *DeleteCachePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Checks information of cache task.
StatCacheTask(ctx context.Context, in *StatCacheTaskRequest, opts ...grpc.CallOption) (*v2.CacheTask, error)
// DeleteCacheTask releases cache task in scheduler.
DeleteCacheTask(ctx context.Context, in *DeleteCacheTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}
type schedulerClient struct {
@ -166,6 +176,73 @@ func (x *schedulerSyncProbesClient) Recv() (*SyncProbesResponse, error) {
return m, nil
}
// AnnounceCachePeer opens the bidirectional AnnounceCachePeer stream
// (the third stream, Streams[2], in the service descriptor).
func (c *schedulerClient) AnnounceCachePeer(ctx context.Context, opts ...grpc.CallOption) (Scheduler_AnnounceCachePeerClient, error) {
stream, err := c.cc.NewStream(ctx, &Scheduler_ServiceDesc.Streams[2], "/scheduler.v2.Scheduler/AnnounceCachePeer", opts...)
if err != nil {
return nil, err
}
x := &schedulerAnnounceCachePeerClient{stream}
return x, nil
}
// Scheduler_AnnounceCachePeerClient is the client-side view of the
// AnnounceCachePeer bidirectional stream.
type Scheduler_AnnounceCachePeerClient interface {
Send(*AnnounceCachePeerRequest) error
Recv() (*AnnounceCachePeerResponse, error)
grpc.ClientStream
}
type schedulerAnnounceCachePeerClient struct {
grpc.ClientStream
}
func (x *schedulerAnnounceCachePeerClient) Send(m *AnnounceCachePeerRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *schedulerAnnounceCachePeerClient) Recv() (*AnnounceCachePeerResponse, error) {
m := new(AnnounceCachePeerResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// StatCachePeer invokes the unary StatCachePeer RPC.
func (c *schedulerClient) StatCachePeer(ctx context.Context, in *StatCachePeerRequest, opts ...grpc.CallOption) (*v2.CachePeer, error) {
out := new(v2.CachePeer)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/StatCachePeer", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// DeleteCachePeer invokes the unary DeleteCachePeer RPC.
func (c *schedulerClient) DeleteCachePeer(ctx context.Context, in *DeleteCachePeerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/DeleteCachePeer", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// StatCacheTask invokes the unary StatCacheTask RPC.
func (c *schedulerClient) StatCacheTask(ctx context.Context, in *StatCacheTaskRequest, opts ...grpc.CallOption) (*v2.CacheTask, error) {
out := new(v2.CacheTask)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/StatCacheTask", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// DeleteCacheTask invokes the unary DeleteCacheTask RPC.
func (c *schedulerClient) DeleteCacheTask(ctx context.Context, in *DeleteCacheTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/scheduler.v2.Scheduler/DeleteCacheTask", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// SchedulerServer is the server API for Scheduler service.
// All implementations should embed UnimplementedSchedulerServer
// for forward compatibility
@ -186,6 +263,16 @@ type SchedulerServer interface {
DeleteHost(context.Context, *DeleteHostRequest) (*emptypb.Empty, error)
// SyncProbes sync probes of the host.
SyncProbes(Scheduler_SyncProbesServer) error
// AnnounceCachePeer announces cache peer to scheduler.
AnnounceCachePeer(Scheduler_AnnounceCachePeerServer) error
// Checks information of cache peer.
StatCachePeer(context.Context, *StatCachePeerRequest) (*v2.CachePeer, error)
// DeleteCachePeer releases cache peer in scheduler.
DeleteCachePeer(context.Context, *DeleteCachePeerRequest) (*emptypb.Empty, error)
// Checks information of cache task.
StatCacheTask(context.Context, *StatCacheTaskRequest) (*v2.CacheTask, error)
// DeleteCacheTask releases cache task in scheduler.
DeleteCacheTask(context.Context, *DeleteCacheTaskRequest) (*emptypb.Empty, error)
}
// UnimplementedSchedulerServer should be embedded to have forward compatible implementations.
@ -216,6 +303,21 @@ func (UnimplementedSchedulerServer) DeleteHost(context.Context, *DeleteHostReque
func (UnimplementedSchedulerServer) SyncProbes(Scheduler_SyncProbesServer) error {
return status.Errorf(codes.Unimplemented, "method SyncProbes not implemented")
}
// Default stubs for the new cache RPCs: embedding UnimplementedSchedulerServer
// keeps existing server implementations forward compatible — each unimplemented
// method fails with codes.Unimplemented instead of breaking the build.
func (UnimplementedSchedulerServer) AnnounceCachePeer(Scheduler_AnnounceCachePeerServer) error {
	return status.Errorf(codes.Unimplemented, "method AnnounceCachePeer not implemented")
}
func (UnimplementedSchedulerServer) StatCachePeer(context.Context, *StatCachePeerRequest) (*v2.CachePeer, error) {
	return nil, status.Errorf(codes.Unimplemented, "method StatCachePeer not implemented")
}
func (UnimplementedSchedulerServer) DeleteCachePeer(context.Context, *DeleteCachePeerRequest) (*emptypb.Empty, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DeleteCachePeer not implemented")
}
func (UnimplementedSchedulerServer) StatCacheTask(context.Context, *StatCacheTaskRequest) (*v2.CacheTask, error) {
	return nil, status.Errorf(codes.Unimplemented, "method StatCacheTask not implemented")
}
func (UnimplementedSchedulerServer) DeleteCacheTask(context.Context, *DeleteCacheTaskRequest) (*emptypb.Empty, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DeleteCacheTask not implemented")
}
// UnsafeSchedulerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SchedulerServer will
@ -388,6 +490,104 @@ func (x *schedulerSyncProbesServer) Recv() (*SyncProbesRequest, error) {
return m, nil
}
// _Scheduler_AnnounceCachePeer_Handler dispatches an incoming bidirectional
// AnnounceCachePeer stream to the registered SchedulerServer implementation.
func _Scheduler_AnnounceCachePeer_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(SchedulerServer).AnnounceCachePeer(&schedulerAnnounceCachePeerServer{stream})
}

// Scheduler_AnnounceCachePeerServer is the server-side view of the
// bidirectional AnnounceCachePeer stream.
type Scheduler_AnnounceCachePeerServer interface {
	Send(*AnnounceCachePeerResponse) error
	Recv() (*AnnounceCachePeerRequest, error)
	grpc.ServerStream
}

// schedulerAnnounceCachePeerServer adapts a raw grpc.ServerStream to the
// typed Scheduler_AnnounceCachePeerServer interface.
type schedulerAnnounceCachePeerServer struct {
	grpc.ServerStream
}

// Send writes one AnnounceCachePeerResponse onto the stream.
func (x *schedulerAnnounceCachePeerServer) Send(m *AnnounceCachePeerResponse) error {
	return x.ServerStream.SendMsg(m)
}

// Recv reads the next AnnounceCachePeerRequest from the stream; it returns
// io.EOF when the client closes its send side.
func (x *schedulerAnnounceCachePeerServer) Recv() (*AnnounceCachePeerRequest, error) {
	m := new(AnnounceCachePeerRequest)
	if err := x.ServerStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
// _Scheduler_StatCachePeer_Handler decodes a StatCachePeerRequest and invokes
// StatCachePeer on the server, routing through the unary interceptor if set.
func _Scheduler_StatCachePeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(StatCachePeerRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(SchedulerServer).StatCachePeer(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/scheduler.v2.Scheduler/StatCachePeer",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SchedulerServer).StatCachePeer(ctx, req.(*StatCachePeerRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _Scheduler_DeleteCachePeer_Handler decodes a DeleteCachePeerRequest and
// invokes DeleteCachePeer on the server, routing through the interceptor if set.
func _Scheduler_DeleteCachePeer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DeleteCachePeerRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(SchedulerServer).DeleteCachePeer(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/scheduler.v2.Scheduler/DeleteCachePeer",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SchedulerServer).DeleteCachePeer(ctx, req.(*DeleteCachePeerRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _Scheduler_StatCacheTask_Handler decodes a StatCacheTaskRequest and invokes
// StatCacheTask on the server, routing through the interceptor if set.
func _Scheduler_StatCacheTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(StatCacheTaskRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(SchedulerServer).StatCacheTask(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/scheduler.v2.Scheduler/StatCacheTask",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SchedulerServer).StatCacheTask(ctx, req.(*StatCacheTaskRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _Scheduler_DeleteCacheTask_Handler decodes a DeleteCacheTaskRequest and
// invokes DeleteCacheTask on the server, routing through the interceptor if set.
func _Scheduler_DeleteCacheTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DeleteCacheTaskRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(SchedulerServer).DeleteCacheTask(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/scheduler.v2.Scheduler/DeleteCacheTask",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SchedulerServer).DeleteCacheTask(ctx, req.(*DeleteCacheTaskRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// Scheduler_ServiceDesc is the grpc.ServiceDesc for Scheduler service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@ -419,6 +619,22 @@ var Scheduler_ServiceDesc = grpc.ServiceDesc{
MethodName: "DeleteHost",
Handler: _Scheduler_DeleteHost_Handler,
},
{
MethodName: "StatCachePeer",
Handler: _Scheduler_StatCachePeer_Handler,
},
{
MethodName: "DeleteCachePeer",
Handler: _Scheduler_DeleteCachePeer_Handler,
},
{
MethodName: "StatCacheTask",
Handler: _Scheduler_StatCacheTask_Handler,
},
{
MethodName: "DeleteCacheTask",
Handler: _Scheduler_DeleteCacheTask_Handler,
},
},
Streams: []grpc.StreamDesc{
{
@ -433,6 +649,12 @@ var Scheduler_ServiceDesc = grpc.ServiceDesc{
ServerStreams: true,
ClientStreams: true,
},
{
StreamName: "AnnounceCachePeer",
Handler: _Scheduler_AnnounceCachePeer_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "pkg/apis/scheduler/v2/scheduler.proto",
}

View File

@ -40,11 +40,10 @@ message DownloadPeerBackToSourceStartedRequest {
optional string description = 1;
}
// RescheduleRequest represents reschedule request of AnnouncePeerRequest.
message RescheduleRequest {
// ReschedulePeerRequest represents reschedule request of AnnouncePeerRequest.
message ReschedulePeerRequest {
// Candidate parent ids.
repeated common.v2.Peer candidate_parents = 1;
// The description of the reschedule reason.
optional string description = 2;
}
@ -122,7 +121,7 @@ message AnnouncePeerRequest {
RegisterPeerRequest register_peer_request = 4;
DownloadPeerStartedRequest download_peer_started_request = 5;
DownloadPeerBackToSourceStartedRequest download_peer_back_to_source_started_request = 6;
RescheduleRequest reschedule_request = 7;
ReschedulePeerRequest reschedule_peer_request = 7;
DownloadPeerFinishedRequest download_peer_finished_request = 8;
DownloadPeerBackToSourceFinishedRequest download_peer_back_to_source_finished_request = 9;
DownloadPeerFailedRequest download_peer_failed_request = 10;
@ -182,7 +181,7 @@ message DeletePeerRequest {
// StatTaskRequest represents request of StatTask.
message StatTaskRequest {
// Task id.
string id = 1;
string task_id = 1;
}
// DeleteTaskRequest represents request of DeleteTask.
@ -202,7 +201,7 @@ message AnnounceHostRequest {
// DeleteHostRequest represents request of DeleteHost.
message DeleteHostRequest{
// Host id.
string id = 1;
string host_id = 1;
}
// ProbeStartedRequest represents started request of SyncProbesRequest.
@ -257,6 +256,121 @@ message SyncProbesResponse {
repeated common.v2.Host hosts = 1;
}
// RegisterCachePeerRequest represents cache peer registered request of AnnounceCachePeerRequest.
// NOTE(review): unlike DownloadCacheTaskRequest (which validates piece_length >= 1
// and output_path min_len 1), these fields carry no protoc-gen-validate rules —
// confirm whether validation is intentionally deferred to the server.
message RegisterCachePeerRequest {
  // Host id.
  string host_id = 1;
  // Task id.
  string task_id = 2;
  // Tag is used to distinguish different cache tasks.
  optional string tag = 3;
  // Application of task.
  optional string application = 4;
  // Task piece length.
  uint64 piece_length = 5;
  // File path to be exported.
  string output_path = 6;
  // Download timeout.
  optional google.protobuf.Duration timeout = 7;
}

// DownloadCachePeerStartedRequest represents cache peer download started request of AnnounceCachePeerRequest.
// Intentionally empty: the event itself carries the information.
message DownloadCachePeerStartedRequest {
}

// RescheduleCachePeerRequest represents reschedule request of AnnounceCachePeerRequest.
message RescheduleCachePeerRequest {
  // Candidate parent ids.
  repeated common.v2.CachePeer candidate_parents = 1;
  // The description of the reschedule reason.
  optional string description = 2;
}

// DownloadCachePeerFinishedRequest represents cache peer download finished request of AnnounceCachePeerRequest.
message DownloadCachePeerFinishedRequest {
  // Total piece count.
  uint32 piece_count = 1;
}

// DownloadCachePeerFailedRequest represents cache peer download failed request of AnnounceCachePeerRequest.
message DownloadCachePeerFailedRequest {
  // The description of the download failed.
  optional string description = 1;
}

// AnnounceCachePeerRequest represents request of AnnounceCachePeer.
message AnnounceCachePeerRequest {
  // Host id.
  string host_id = 1;
  // Task id.
  string task_id = 2;
  // Peer id.
  string peer_id = 3;
  // Event payload; piece events (tags 9-10) reuse the non-cache piece messages.
  oneof request {
    RegisterCachePeerRequest register_cache_peer_request = 4;
    DownloadCachePeerStartedRequest download_cache_peer_started_request = 5;
    RescheduleCachePeerRequest reschedule_cache_peer_request = 6;
    DownloadCachePeerFinishedRequest download_cache_peer_finished_request = 7;
    DownloadCachePeerFailedRequest download_cache_peer_failed_request = 8;
    DownloadPieceFinishedRequest download_piece_finished_request = 9;
    DownloadPieceFailedRequest download_piece_failed_request = 10;
  }
}

// EmptyCacheTaskResponse represents empty cache task response of AnnounceCachePeerResponse.
message EmptyCacheTaskResponse {
}

// NormalCacheTaskResponse represents normal cache task response of AnnounceCachePeerResponse.
message NormalCacheTaskResponse {
  // Candidate parents.
  repeated common.v2.CachePeer candidate_cache_parents = 1;
}

// AnnounceCachePeerResponse represents response of AnnounceCachePeer.
// NOTE(review): the oneof field names omit "cache" (empty_task_response,
// normal_task_response) while the message types include it — renaming later
// would break JSON field names, so confirm the naming now.
message AnnounceCachePeerResponse {
  oneof response {
    EmptyCacheTaskResponse empty_task_response = 1;
    NormalCacheTaskResponse normal_task_response = 2;
  }
}

// StatCachePeerRequest represents request of StatCachePeer.
message StatCachePeerRequest {
  // Host id.
  string host_id = 1;
  // Task id.
  string task_id = 2;
  // Peer id.
  string peer_id = 3;
}

// DeleteCachePeerRequest represents request of DeleteCachePeer.
message DeleteCachePeerRequest {
  // Host id.
  string host_id = 1;
  // Task id.
  string task_id = 2;
  // Peer id.
  string peer_id = 3;
}

// StatCacheTaskRequest represents request of StatCacheTask.
message StatCacheTaskRequest {
  // Task id.
  string task_id = 1;
}

// DeleteCacheTaskRequest represents request of DeleteCacheTask.
message DeleteCacheTaskRequest {
  // Host id.
  string host_id = 1;
  // Task id.
  string task_id = 2;
}
// Scheduler RPC Service.
service Scheduler{
// AnnouncePeer announces peer to scheduler.
@ -282,4 +396,19 @@ service Scheduler{
// SyncProbes sync probes of the host.
rpc SyncProbes(stream SyncProbesRequest)returns(stream SyncProbesResponse);
// AnnounceCachePeer announces cache peer to scheduler.
rpc AnnounceCachePeer(stream AnnounceCachePeerRequest) returns(stream AnnounceCachePeerResponse);
// Checks information of cache peer.
rpc StatCachePeer(StatCachePeerRequest)returns(common.v2.CachePeer);
// DeleteCachePeer releases cache peer in scheduler.
rpc DeleteCachePeer(DeleteCachePeerRequest)returns(google.protobuf.Empty);
// Checks information of cache task.
rpc StatCacheTask(StatCacheTaskRequest)returns(common.v2.CacheTask);
// DeleteCacheTask releases cache task in scheduler.
rpc DeleteCacheTask(DeleteCacheTaskRequest)returns(google.protobuf.Empty);
}

Binary file not shown.

View File

@ -21,11 +21,11 @@ pub struct DownloadPeerBackToSourceStartedRequest {
#[prost(string, optional, tag = "1")]
pub description: ::core::option::Option<::prost::alloc::string::String>,
}
/// RescheduleRequest represents reschedule request of AnnouncePeerRequest.
/// ReschedulePeerRequest represents reschedule request of AnnouncePeerRequest.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RescheduleRequest {
pub struct ReschedulePeerRequest {
/// Candidate parent ids.
#[prost(message, repeated, tag = "1")]
pub candidate_parents: ::prost::alloc::vec::Vec<super::super::common::v2::Peer>,
@ -169,7 +169,7 @@ pub mod announce_peer_request {
super::DownloadPeerBackToSourceStartedRequest,
),
#[prost(message, tag = "7")]
RescheduleRequest(super::RescheduleRequest),
ReschedulePeerRequest(super::ReschedulePeerRequest),
#[prost(message, tag = "8")]
DownloadPeerFinishedRequest(super::DownloadPeerFinishedRequest),
#[prost(message, tag = "9")]
@ -278,7 +278,7 @@ pub struct DeletePeerRequest {
pub struct StatTaskRequest {
/// Task id.
#[prost(string, tag = "1")]
pub id: ::prost::alloc::string::String,
pub task_id: ::prost::alloc::string::String,
}
/// DeleteTaskRequest represents request of DeleteTask.
#[derive(serde::Serialize, serde::Deserialize)]
@ -308,7 +308,7 @@ pub struct AnnounceHostRequest {
pub struct DeleteHostRequest {
/// Host id.
#[prost(string, tag = "1")]
pub id: ::prost::alloc::string::String,
pub host_id: ::prost::alloc::string::String,
}
/// ProbeStartedRequest represents started request of SyncProbesRequest.
#[derive(serde::Serialize, serde::Deserialize)]
@ -394,6 +394,197 @@ pub struct SyncProbesResponse {
#[prost(message, repeated, tag = "1")]
pub hosts: ::prost::alloc::vec::Vec<super::super::common::v2::Host>,
}
/// RegisterCachePeerRequest represents cache peer registered request of AnnounceCachePeerRequest.
///
/// Generated by prost-build; field tags mirror `scheduler.proto`.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RegisterCachePeerRequest {
    /// Host id.
    #[prost(string, tag = "1")]
    pub host_id: ::prost::alloc::string::String,
    /// Task id.
    #[prost(string, tag = "2")]
    pub task_id: ::prost::alloc::string::String,
    /// Tag is used to distinguish different cache tasks.
    #[prost(string, optional, tag = "3")]
    pub tag: ::core::option::Option<::prost::alloc::string::String>,
    /// Application of task.
    #[prost(string, optional, tag = "4")]
    pub application: ::core::option::Option<::prost::alloc::string::String>,
    /// Task piece length.
    #[prost(uint64, tag = "5")]
    pub piece_length: u64,
    /// File path to be exported.
    #[prost(string, tag = "6")]
    pub output_path: ::prost::alloc::string::String,
    /// Download timeout.
    #[prost(message, optional, tag = "7")]
    pub timeout: ::core::option::Option<::prost_wkt_types::Duration>,
}
/// DownloadCachePeerStartedRequest represents cache peer download started request of AnnounceCachePeerRequest.
/// Deliberately empty marker message.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DownloadCachePeerStartedRequest {}
/// RescheduleCachePeerRequest represents reschedule request of AnnounceCachePeerRequest.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RescheduleCachePeerRequest {
    /// Candidate parent ids.
    #[prost(message, repeated, tag = "1")]
    pub candidate_parents: ::prost::alloc::vec::Vec<super::super::common::v2::CachePeer>,
    /// The description of the reschedule reason.
    #[prost(string, optional, tag = "2")]
    pub description: ::core::option::Option<::prost::alloc::string::String>,
}
/// DownloadCachePeerFinishedRequest represents cache peer download finished request of AnnounceCachePeerRequest.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DownloadCachePeerFinishedRequest {
    /// Total piece count.
    #[prost(uint32, tag = "1")]
    pub piece_count: u32,
}
/// DownloadCachePeerFailedRequest represents cache peer download failed request of AnnounceCachePeerRequest.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DownloadCachePeerFailedRequest {
    /// The description of the download failed.
    #[prost(string, optional, tag = "1")]
    pub description: ::core::option::Option<::prost::alloc::string::String>,
}
/// AnnounceCachePeerRequest represents request of AnnounceCachePeer.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnnounceCachePeerRequest {
    /// Host id.
    #[prost(string, tag = "1")]
    pub host_id: ::prost::alloc::string::String,
    /// Task id.
    #[prost(string, tag = "2")]
    pub task_id: ::prost::alloc::string::String,
    /// Peer id.
    #[prost(string, tag = "3")]
    pub peer_id: ::prost::alloc::string::String,
    /// Event payload; exactly one variant is set per message.
    #[prost(
        oneof = "announce_cache_peer_request::Request",
        tags = "4, 5, 6, 7, 8, 9, 10"
    )]
    pub request: ::core::option::Option<announce_cache_peer_request::Request>,
}
/// Nested message and enum types in `AnnounceCachePeerRequest`.
pub mod announce_cache_peer_request {
    /// Oneof payload of `AnnounceCachePeerRequest`; piece-level events
    /// (tags 9-10) reuse the non-cache piece request messages.
    #[derive(serde::Serialize, serde::Deserialize)]
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Request {
        #[prost(message, tag = "4")]
        RegisterCachePeerRequest(super::RegisterCachePeerRequest),
        #[prost(message, tag = "5")]
        DownloadCachePeerStartedRequest(super::DownloadCachePeerStartedRequest),
        #[prost(message, tag = "6")]
        RescheduleCachePeerRequest(super::RescheduleCachePeerRequest),
        #[prost(message, tag = "7")]
        DownloadCachePeerFinishedRequest(super::DownloadCachePeerFinishedRequest),
        #[prost(message, tag = "8")]
        DownloadCachePeerFailedRequest(super::DownloadCachePeerFailedRequest),
        #[prost(message, tag = "9")]
        DownloadPieceFinishedRequest(super::DownloadPieceFinishedRequest),
        #[prost(message, tag = "10")]
        DownloadPieceFailedRequest(super::DownloadPieceFailedRequest),
    }
}
/// EmptyCacheTaskResponse represents empty cache task response of AnnounceCachePeerResponse.
/// Deliberately empty marker message.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EmptyCacheTaskResponse {}
/// NormalCacheTaskResponse represents normal cache task response of AnnounceCachePeerResponse.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct NormalCacheTaskResponse {
    /// Candidate parents.
    #[prost(message, repeated, tag = "1")]
    pub candidate_cache_parents: ::prost::alloc::vec::Vec<
        super::super::common::v2::CachePeer,
    >,
}
/// AnnounceCachePeerResponse represents response of AnnounceCachePeer.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnnounceCachePeerResponse {
    /// Exactly one of the response variants is set.
    #[prost(oneof = "announce_cache_peer_response::Response", tags = "1, 2")]
    pub response: ::core::option::Option<announce_cache_peer_response::Response>,
}
/// Nested message and enum types in `AnnounceCachePeerResponse`.
pub mod announce_cache_peer_response {
    /// Oneof payload of `AnnounceCachePeerResponse`.
    /// NOTE(review): variant names omit "Cache" (EmptyTaskResponse /
    /// NormalTaskResponse) while the inner types include it — mirrors the
    /// proto field names; confirm the naming is intended.
    #[derive(serde::Serialize, serde::Deserialize)]
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        #[prost(message, tag = "1")]
        EmptyTaskResponse(super::EmptyCacheTaskResponse),
        #[prost(message, tag = "2")]
        NormalTaskResponse(super::NormalCacheTaskResponse),
    }
}
/// StatCachePeerRequest represents request of StatCachePeer.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StatCachePeerRequest {
    /// Host id.
    #[prost(string, tag = "1")]
    pub host_id: ::prost::alloc::string::String,
    /// Task id.
    #[prost(string, tag = "2")]
    pub task_id: ::prost::alloc::string::String,
    /// Peer id.
    #[prost(string, tag = "3")]
    pub peer_id: ::prost::alloc::string::String,
}
/// DeleteCachePeerRequest represents request of DeleteCachePeer.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteCachePeerRequest {
    /// Host id.
    #[prost(string, tag = "1")]
    pub host_id: ::prost::alloc::string::String,
    /// Task id.
    #[prost(string, tag = "2")]
    pub task_id: ::prost::alloc::string::String,
    /// Peer id.
    #[prost(string, tag = "3")]
    pub peer_id: ::prost::alloc::string::String,
}
/// StatCacheTaskRequest represents request of StatCacheTask.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StatCacheTaskRequest {
    /// Task id.
    #[prost(string, tag = "1")]
    pub task_id: ::prost::alloc::string::String,
}
/// DeleteCacheTaskRequest represents request of DeleteCacheTask.
#[derive(serde::Serialize, serde::Deserialize)]
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteCacheTaskRequest {
    /// Host id.
    #[prost(string, tag = "1")]
    pub host_id: ::prost::alloc::string::String,
    /// Task id.
    #[prost(string, tag = "2")]
    pub task_id: ::prost::alloc::string::String,
}
/// Generated client implementations.
pub mod scheduler_client {
#![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
@ -678,6 +869,132 @@ pub mod scheduler_client {
.insert(GrpcMethod::new("scheduler.v2.Scheduler", "SyncProbes"));
self.inner.streaming(req, path, codec).await
}
/// AnnounceCachePeer announces cache peer to scheduler.
///
/// Bidirectional streaming RPC generated by tonic-build; errors from the
/// readiness check are surfaced as `tonic::Code::Unknown`.
pub async fn announce_cache_peer(
    &mut self,
    request: impl tonic::IntoStreamingRequest<
        Message = super::AnnounceCachePeerRequest,
    >,
) -> std::result::Result<
    tonic::Response<tonic::codec::Streaming<super::AnnounceCachePeerResponse>>,
    tonic::Status,
> {
    self.inner
        .ready()
        .await
        .map_err(|e| {
            tonic::Status::new(
                tonic::Code::Unknown,
                format!("Service was not ready: {}", e.into()),
            )
        })?;
    let codec = tonic::codec::ProstCodec::default();
    let path = http::uri::PathAndQuery::from_static(
        "/scheduler.v2.Scheduler/AnnounceCachePeer",
    );
    let mut req = request.into_streaming_request();
    req.extensions_mut()
        .insert(GrpcMethod::new("scheduler.v2.Scheduler", "AnnounceCachePeer"));
    self.inner.streaming(req, path, codec).await
}
/// Checks information of cache peer.
///
/// Unary RPC returning the peer's current `common.v2.CachePeer` state.
pub async fn stat_cache_peer(
    &mut self,
    request: impl tonic::IntoRequest<super::StatCachePeerRequest>,
) -> std::result::Result<
    tonic::Response<super::super::super::common::v2::CachePeer>,
    tonic::Status,
> {
    self.inner
        .ready()
        .await
        .map_err(|e| {
            tonic::Status::new(
                tonic::Code::Unknown,
                format!("Service was not ready: {}", e.into()),
            )
        })?;
    let codec = tonic::codec::ProstCodec::default();
    let path = http::uri::PathAndQuery::from_static(
        "/scheduler.v2.Scheduler/StatCachePeer",
    );
    let mut req = request.into_request();
    req.extensions_mut()
        .insert(GrpcMethod::new("scheduler.v2.Scheduler", "StatCachePeer"));
    self.inner.unary(req, path, codec).await
}
/// DeleteCachePeer releases cache peer in scheduler.
///
/// Unary RPC; the empty proto response maps to `()`.
pub async fn delete_cache_peer(
    &mut self,
    request: impl tonic::IntoRequest<super::DeleteCachePeerRequest>,
) -> std::result::Result<tonic::Response<()>, tonic::Status> {
    self.inner
        .ready()
        .await
        .map_err(|e| {
            tonic::Status::new(
                tonic::Code::Unknown,
                format!("Service was not ready: {}", e.into()),
            )
        })?;
    let codec = tonic::codec::ProstCodec::default();
    let path = http::uri::PathAndQuery::from_static(
        "/scheduler.v2.Scheduler/DeleteCachePeer",
    );
    let mut req = request.into_request();
    req.extensions_mut()
        .insert(GrpcMethod::new("scheduler.v2.Scheduler", "DeleteCachePeer"));
    self.inner.unary(req, path, codec).await
}
/// Checks information of cache task.
///
/// Unary RPC returning the task's current `common.v2.CacheTask` state.
pub async fn stat_cache_task(
    &mut self,
    request: impl tonic::IntoRequest<super::StatCacheTaskRequest>,
) -> std::result::Result<
    tonic::Response<super::super::super::common::v2::CacheTask>,
    tonic::Status,
> {
    self.inner
        .ready()
        .await
        .map_err(|e| {
            tonic::Status::new(
                tonic::Code::Unknown,
                format!("Service was not ready: {}", e.into()),
            )
        })?;
    let codec = tonic::codec::ProstCodec::default();
    let path = http::uri::PathAndQuery::from_static(
        "/scheduler.v2.Scheduler/StatCacheTask",
    );
    let mut req = request.into_request();
    req.extensions_mut()
        .insert(GrpcMethod::new("scheduler.v2.Scheduler", "StatCacheTask"));
    self.inner.unary(req, path, codec).await
}
/// DeleteCacheTask releases cache task in scheduler.
///
/// Unary RPC; the empty proto response maps to `()`.
pub async fn delete_cache_task(
    &mut self,
    request: impl tonic::IntoRequest<super::DeleteCacheTaskRequest>,
) -> std::result::Result<tonic::Response<()>, tonic::Status> {
    self.inner
        .ready()
        .await
        .map_err(|e| {
            tonic::Status::new(
                tonic::Code::Unknown,
                format!("Service was not ready: {}", e.into()),
            )
        })?;
    let codec = tonic::codec::ProstCodec::default();
    let path = http::uri::PathAndQuery::from_static(
        "/scheduler.v2.Scheduler/DeleteCacheTask",
    );
    let mut req = request.into_request();
    req.extensions_mut()
        .insert(GrpcMethod::new("scheduler.v2.Scheduler", "DeleteCacheTask"));
    self.inner.unary(req, path, codec).await
}
}
}
/// Generated server implementations.
@ -748,6 +1065,49 @@ pub mod scheduler_server {
&self,
request: tonic::Request<tonic::Streaming<super::SyncProbesRequest>>,
) -> std::result::Result<tonic::Response<Self::SyncProbesStream>, tonic::Status>;
/// Server streaming response type for the AnnounceCachePeer method.
///
/// Implementations choose any `Send + 'static` stream of responses.
type AnnounceCachePeerStream: futures_core::Stream<
    Item = std::result::Result<
        super::AnnounceCachePeerResponse,
        tonic::Status,
    >,
>
    + Send
    + 'static;
/// AnnounceCachePeer announces cache peer to scheduler.
///
/// Bidirectional streaming: receives `AnnounceCachePeerRequest` events and
/// replies on `Self::AnnounceCachePeerStream`.
async fn announce_cache_peer(
    &self,
    request: tonic::Request<tonic::Streaming<super::AnnounceCachePeerRequest>>,
) -> std::result::Result<
    tonic::Response<Self::AnnounceCachePeerStream>,
    tonic::Status,
>;
/// Checks information of cache peer.
async fn stat_cache_peer(
    &self,
    request: tonic::Request<super::StatCachePeerRequest>,
) -> std::result::Result<
    tonic::Response<super::super::super::common::v2::CachePeer>,
    tonic::Status,
>;
/// DeleteCachePeer releases cache peer in scheduler.
async fn delete_cache_peer(
    &self,
    request: tonic::Request<super::DeleteCachePeerRequest>,
) -> std::result::Result<tonic::Response<()>, tonic::Status>;
/// Checks information of cache task.
async fn stat_cache_task(
    &self,
    request: tonic::Request<super::StatCacheTaskRequest>,
) -> std::result::Result<
    tonic::Response<super::super::super::common::v2::CacheTask>,
    tonic::Status,
>;
/// DeleteCacheTask releases cache task in scheduler.
async fn delete_cache_task(
    &self,
    request: tonic::Request<super::DeleteCacheTaskRequest>,
) -> std::result::Result<tonic::Response<()>, tonic::Status>;
}
/// Scheduler RPC Service.
#[derive(Debug)]
@ -1191,6 +1551,239 @@ pub mod scheduler_server {
};
Box::pin(fut)
}
"/scheduler.v2.Scheduler/AnnounceCachePeer" => {
#[allow(non_camel_case_types)]
struct AnnounceCachePeerSvc<T: Scheduler>(pub Arc<T>);
impl<
T: Scheduler,
> tonic::server::StreamingService<super::AnnounceCachePeerRequest>
for AnnounceCachePeerSvc<T> {
type Response = super::AnnounceCachePeerResponse;
type ResponseStream = T::AnnounceCachePeerStream;
type Future = BoxFuture<
tonic::Response<Self::ResponseStream>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<
tonic::Streaming<super::AnnounceCachePeerRequest>,
>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
(*inner).announce_cache_peer(request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = AnnounceCachePeerSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.streaming(method, req).await;
Ok(res)
};
Box::pin(fut)
}
"/scheduler.v2.Scheduler/StatCachePeer" => {
#[allow(non_camel_case_types)]
struct StatCachePeerSvc<T: Scheduler>(pub Arc<T>);
impl<
T: Scheduler,
> tonic::server::UnaryService<super::StatCachePeerRequest>
for StatCachePeerSvc<T> {
type Response = super::super::super::common::v2::CachePeer;
type Future = BoxFuture<
tonic::Response<Self::Response>,
tonic::Status,
>;
fn call(
&mut self,
request: tonic::Request<super::StatCachePeerRequest>,
) -> Self::Future {
let inner = Arc::clone(&self.0);
let fut = async move {
(*inner).stat_cache_peer(request).await
};
Box::pin(fut)
}
}
let accept_compression_encodings = self.accept_compression_encodings;
let send_compression_encodings = self.send_compression_encodings;
let max_decoding_message_size = self.max_decoding_message_size;
let max_encoding_message_size = self.max_encoding_message_size;
let inner = self.inner.clone();
let fut = async move {
let inner = inner.0;
let method = StatCachePeerSvc(inner);
let codec = tonic::codec::ProstCodec::default();
let mut grpc = tonic::server::Grpc::new(codec)
.apply_compression_config(
accept_compression_encodings,
send_compression_encodings,
)
.apply_max_message_size_config(
max_decoding_message_size,
max_encoding_message_size,
);
let res = grpc.unary(method, req).await;
Ok(res)
};
Box::pin(fut)
}
                // Unary route for `/scheduler.v2.Scheduler/DeleteCachePeer`.
                // NOTE(review): this block appears to be tonic-build generated
                // dispatch code — prefer regenerating over hand-editing.
                "/scheduler.v2.Scheduler/DeleteCachePeer" => {
                    // Local adapter: wraps the user's `Scheduler` impl so tonic's
                    // unary machinery can drive `delete_cache_peer`.
                    #[allow(non_camel_case_types)]
                    struct DeleteCachePeerSvc<T: Scheduler>(pub Arc<T>);
                    impl<
                        T: Scheduler,
                    > tonic::server::UnaryService<super::DeleteCachePeerRequest>
                    for DeleteCachePeerSvc<T> {
                        // Empty response body: DeleteCachePeer returns no payload.
                        type Response = ();
                        type Future = BoxFuture<
                            tonic::Response<Self::Response>,
                            tonic::Status,
                        >;
                        fn call(
                            &mut self,
                            request: tonic::Request<super::DeleteCachePeerRequest>,
                        ) -> Self::Future {
                            // Cheap refcount bump so the async block can own a
                            // handle to the service implementation.
                            let inner = Arc::clone(&self.0);
                            let fut = async move {
                                (*inner).delete_cache_peer(request).await
                            };
                            Box::pin(fut)
                        }
                    }
                    // Capture the server's codec settings before moving them
                    // into the response future.
                    let accept_compression_encodings = self.accept_compression_encodings;
                    let send_compression_encodings = self.send_compression_encodings;
                    let max_decoding_message_size = self.max_decoding_message_size;
                    let max_encoding_message_size = self.max_encoding_message_size;
                    let inner = self.inner.clone();
                    let fut = async move {
                        let inner = inner.0;
                        let method = DeleteCachePeerSvc(inner);
                        let codec = tonic::codec::ProstCodec::default();
                        // Apply configured compression and message-size limits,
                        // then run the unary request/response exchange.
                        let mut grpc = tonic::server::Grpc::new(codec)
                            .apply_compression_config(
                                accept_compression_encodings,
                                send_compression_encodings,
                            )
                            .apply_max_message_size_config(
                                max_decoding_message_size,
                                max_encoding_message_size,
                            );
                        let res = grpc.unary(method, req).await;
                        Ok(res)
                    };
                    Box::pin(fut)
                }
                // Unary route for `/scheduler.v2.Scheduler/StatCacheTask`.
                // NOTE(review): this block appears to be tonic-build generated
                // dispatch code — prefer regenerating over hand-editing.
                "/scheduler.v2.Scheduler/StatCacheTask" => {
                    // Local adapter: wraps the user's `Scheduler` impl so tonic's
                    // unary machinery can drive `stat_cache_task`.
                    #[allow(non_camel_case_types)]
                    struct StatCacheTaskSvc<T: Scheduler>(pub Arc<T>);
                    impl<
                        T: Scheduler,
                    > tonic::server::UnaryService<super::StatCacheTaskRequest>
                    for StatCacheTaskSvc<T> {
                        // Responds with the current `common.v2.CacheTask` state.
                        type Response = super::super::super::common::v2::CacheTask;
                        type Future = BoxFuture<
                            tonic::Response<Self::Response>,
                            tonic::Status,
                        >;
                        fn call(
                            &mut self,
                            request: tonic::Request<super::StatCacheTaskRequest>,
                        ) -> Self::Future {
                            // Cheap refcount bump so the async block can own a
                            // handle to the service implementation.
                            let inner = Arc::clone(&self.0);
                            let fut = async move {
                                (*inner).stat_cache_task(request).await
                            };
                            Box::pin(fut)
                        }
                    }
                    // Capture the server's codec settings before moving them
                    // into the response future.
                    let accept_compression_encodings = self.accept_compression_encodings;
                    let send_compression_encodings = self.send_compression_encodings;
                    let max_decoding_message_size = self.max_decoding_message_size;
                    let max_encoding_message_size = self.max_encoding_message_size;
                    let inner = self.inner.clone();
                    let fut = async move {
                        let inner = inner.0;
                        let method = StatCacheTaskSvc(inner);
                        let codec = tonic::codec::ProstCodec::default();
                        // Apply configured compression and message-size limits,
                        // then run the unary request/response exchange.
                        let mut grpc = tonic::server::Grpc::new(codec)
                            .apply_compression_config(
                                accept_compression_encodings,
                                send_compression_encodings,
                            )
                            .apply_max_message_size_config(
                                max_decoding_message_size,
                                max_encoding_message_size,
                            );
                        let res = grpc.unary(method, req).await;
                        Ok(res)
                    };
                    Box::pin(fut)
                }
                // Unary route for `/scheduler.v2.Scheduler/DeleteCacheTask`.
                // NOTE(review): this block appears to be tonic-build generated
                // dispatch code — prefer regenerating over hand-editing.
                "/scheduler.v2.Scheduler/DeleteCacheTask" => {
                    // Local adapter: wraps the user's `Scheduler` impl so tonic's
                    // unary machinery can drive `delete_cache_task`.
                    #[allow(non_camel_case_types)]
                    struct DeleteCacheTaskSvc<T: Scheduler>(pub Arc<T>);
                    impl<
                        T: Scheduler,
                    > tonic::server::UnaryService<super::DeleteCacheTaskRequest>
                    for DeleteCacheTaskSvc<T> {
                        // Empty response body: DeleteCacheTask returns no payload.
                        type Response = ();
                        type Future = BoxFuture<
                            tonic::Response<Self::Response>,
                            tonic::Status,
                        >;
                        fn call(
                            &mut self,
                            request: tonic::Request<super::DeleteCacheTaskRequest>,
                        ) -> Self::Future {
                            // Cheap refcount bump so the async block can own a
                            // handle to the service implementation.
                            let inner = Arc::clone(&self.0);
                            let fut = async move {
                                (*inner).delete_cache_task(request).await
                            };
                            Box::pin(fut)
                        }
                    }
                    // Capture the server's codec settings before moving them
                    // into the response future.
                    let accept_compression_encodings = self.accept_compression_encodings;
                    let send_compression_encodings = self.send_compression_encodings;
                    let max_decoding_message_size = self.max_decoding_message_size;
                    let max_encoding_message_size = self.max_encoding_message_size;
                    let inner = self.inner.clone();
                    let fut = async move {
                        let inner = inner.0;
                        let method = DeleteCacheTaskSvc(inner);
                        let codec = tonic::codec::ProstCodec::default();
                        // Apply configured compression and message-size limits,
                        // then run the unary request/response exchange.
                        let mut grpc = tonic::server::Grpc::new(codec)
                            .apply_compression_config(
                                accept_compression_encodings,
                                send_compression_encodings,
                            )
                            .apply_max_message_size_config(
                                max_decoding_message_size,
                                max_encoding_message_size,
                            );
                        let res = grpc.unary(method, req).await;
                        Ok(res)
                    };
                    Box::pin(fut)
                }
_ => {
Box::pin(async move {
Ok(