commit d79f6405ff (parent a41a524f4c)
Author: Gaius <gaius.qi@gmail.com>
Date: 2022-08-02 17:31:39 +08:00
Signed-off-by: Gaius <gaius.qi@gmail.com>
Co-authored-by: Jim Ma <majinjing3@gmail.com>
GPG Key ID: 8B4E5D1290FA2FFB
127 changed files with 1696 additions and 18536 deletions
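
At a high level, this commit extracts the generated protobuf/gRPC stubs into the external d7y.io/api module: every import of d7y.io/dragonfly/v2/pkg/rpc/{base,manager,scheduler,errordetails} is swapped for the corresponding d7y.io/api package under a version-suffixed alias. A minimal sketch of the pattern applied across all 127 files (aliases taken from the hunks below):

    // Before: stubs generated into the main repository.
    //	"d7y.io/dragonfly/v2/pkg/rpc/base"
    //	"d7y.io/dragonfly/v2/pkg/rpc/manager"
    //	"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
    //
    // After: stubs consumed from the external, versioned API module.
    import (
        commonv1 "d7y.io/api/pkg/apis/common/v1"
        managerv1 "d7y.io/api/pkg/apis/manager/v1"
        schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
    )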

View File

@@ -9,6 +9,7 @@ linters-settings:
   sections:
     - standard
     - default
+    - prefix(d7y.io/api)
     - prefix(d7y.io/dragonfly/v2)
 issues:
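
The extra prefix(d7y.io/api) section tells gci to keep the new API module in its own import group, between third-party imports and in-repo imports. A hypothetical import block that the updated linter config accepts:

    import (
        "context" // standard

        "google.golang.org/grpc" // default (third party)

        managerv1 "d7y.io/api/pkg/apis/manager/v1" // prefix(d7y.io/api)

        logger "d7y.io/dragonfly/v2/internal/dflog" // prefix(d7y.io/dragonfly/v2)
    )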

View File

@@ -375,11 +375,6 @@ generate:
 	@go generate ${PKG_LIST}
 .PHONY: generate

-# Generate grpc protos
-protoc:
-	@./hack/protoc.sh
-.PHONY: protoc
-
 # Generate swagger files
 swag:
 	@swag init --parseDependency --parseInternal -g cmd/manager/main.go -o api/manager
@@ -441,7 +436,6 @@ help:
 	@echo "make lint run code lint"
 	@echo "make markdownlint run markdown lint"
 	@echo "make generate run go generate"
-	@echo "make protoc generate grpc protos"
 	@echo "make swag generate swagger api docs"
 	@echo "make changelog generate CHANGELOG.md"
 	@echo "make clean clean"

View File

@@ -26,10 +26,11 @@ import (
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"

+   managerv1 "d7y.io/api/pkg/apis/manager/v1"
+
    logger "d7y.io/dragonfly/v2/internal/dflog"
    internaldynconfig "d7y.io/dragonfly/v2/internal/dynconfig"
    "d7y.io/dragonfly/v2/manager/searcher"
-   "d7y.io/dragonfly/v2/pkg/rpc/manager"
    managerclient "d7y.io/dragonfly/v2/pkg/rpc/manager/client"
 )
@@ -42,16 +43,16 @@
 )

 type DynconfigData struct {
-   Schedulers    []*manager.Scheduler
-   ObjectStorage *manager.ObjectStorage
+   Schedulers    []*managerv1.Scheduler
+   ObjectStorage *managerv1.ObjectStorage
 }

 type Dynconfig interface {
    // Get the dynamic schedulers config from manager.
-   GetSchedulers() ([]*manager.Scheduler, error)
+   GetSchedulers() ([]*managerv1.Scheduler, error)
    // Get the dynamic object storage config from manager.
-   GetObjectStorage() (*manager.ObjectStorage, error)
+   GetObjectStorage() (*managerv1.ObjectStorage, error)
    // Get the dynamic config from manager.
    Get() (*DynconfigData, error)
@@ -104,7 +105,7 @@ func NewDynconfig(rawManagerClient managerclient.Client, cacheDir string, hostOp
    }, nil
 }

-func (d *dynconfig) GetSchedulers() ([]*manager.Scheduler, error) {
+func (d *dynconfig) GetSchedulers() ([]*managerv1.Scheduler, error) {
    data, err := d.Get()
    if err != nil {
        return nil, err
@@ -113,7 +114,7 @@ func (d *dynconfig) GetSchedulers() ([]*manager.Scheduler, error) {
    return data.Schedulers, nil
 }

-func (d *dynconfig) GetObjectStorage() (*manager.ObjectStorage, error) {
+func (d *dynconfig) GetObjectStorage() (*managerv1.ObjectStorage, error) {
    data, err := d.Get()
    if err != nil {
        return nil, err
@@ -200,8 +201,8 @@ func newManagerClient(client managerclient.Client, hostOption HostOption) intern
 }

 func (mc *managerClient) Get() (any, error) {
-   listSchedulersResp, err := mc.ListSchedulers(&manager.ListSchedulersRequest{
-       SourceType: manager.SourceType_PEER_SOURCE,
+   listSchedulersResp, err := mc.ListSchedulers(&managerv1.ListSchedulersRequest{
+       SourceType: managerv1.SourceType_PEER_SOURCE,
        HostName:   mc.hostOption.Hostname,
        Ip:         mc.hostOption.AdvertiseIP,
        HostInfo: map[string]string{
@@ -215,8 +216,8 @@ func (mc *managerClient) Get() (any, error) {
        return nil, err
    }

-   getObjectStorageResp, err := mc.GetObjectStorage(&manager.GetObjectStorageRequest{
-       SourceType: manager.SourceType_PEER_SOURCE,
+   getObjectStorageResp, err := mc.GetObjectStorage(&managerv1.GetObjectStorageRequest{
+       SourceType: managerv1.SourceType_PEER_SOURCE,
        HostName:   mc.hostOption.Hostname,
        Ip:         mc.hostOption.AdvertiseIP,
    })
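
Only the package alias changes for consumers; the Dynconfig method set is untouched. A minimal caller sketch against the new signatures (assumes a dynconfig instance already built with NewDynconfig):

    schedulers, err := dynconfig.GetSchedulers() // now []*managerv1.Scheduler
    if err != nil {
        return err
    }
    for _, s := range schedulers {
        logger.Infof("scheduler %s (%s:%d)", s.HostName, s.Ip, s.Port)
    }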

View File

@@ -28,7 +28,8 @@ import (
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"

-   "d7y.io/dragonfly/v2/pkg/rpc/manager"
+   managerv1 "d7y.io/api/pkg/apis/manager/v1"
+
    "d7y.io/dragonfly/v2/pkg/rpc/manager/client/mocks"
 )
@@ -57,8 +58,8 @@ func TestDynconfigNewDynconfig(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1),
                )
            },
            expect: func(t *testing.T, err error) {
@@ -77,8 +78,8 @@ func TestDynconfigNewDynconfig(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1),
                )
            },
            expect: func(t *testing.T, err error) {
@@ -106,7 +107,7 @@ func TestDynconfigNewDynconfig(t *testing.T) {
            cleanFileCache: func(t *testing.T) {},
            mock: func(m *mocks.MockClientMockRecorder) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
                    m.GetObjectStorage(gomock.Any()).Return(nil, errors.New("foo")).Times(1),
                )
            },
@@ -126,7 +127,7 @@ func TestDynconfigNewDynconfig(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
                    m.GetObjectStorage(gomock.Any()).Return(nil, status.Error(codes.NotFound, "")).Times(1),
                )
            },
@@ -185,12 +186,12 @@ func TestDynconfigGet(t *testing.T) {
                Hostname: "foo",
            },
            data: &DynconfigData{
-               Schedulers: []*manager.Scheduler{
+               Schedulers: []*managerv1.Scheduler{
                    {
                        HostName: "foo",
                    },
                },
-               ObjectStorage: &manager.ObjectStorage{
+               ObjectStorage: &managerv1.ObjectStorage{
                    Name: "foo",
                },
            },
@@ -202,14 +203,14 @@ func TestDynconfigGet(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{
-                       Schedulers: []*manager.Scheduler{
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{
+                       Schedulers: []*managerv1.Scheduler{
                            {
                                HostName: data.Schedulers[0].HostName,
                            },
                        },
                    }, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{
                        Name: data.ObjectStorage.Name,
                    }, nil).Times(1),
                )
@@ -228,12 +229,12 @@ func TestDynconfigGet(t *testing.T) {
                Hostname: "foo",
            },
            data: &DynconfigData{
-               Schedulers: []*manager.Scheduler{
+               Schedulers: []*managerv1.Scheduler{
                    {
                        HostName: "foo",
                    },
                },
-               ObjectStorage: &manager.ObjectStorage{
+               ObjectStorage: &managerv1.ObjectStorage{
                    Name: "foo",
                },
            },
@@ -247,16 +248,16 @@ func TestDynconfigGet(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1),
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{
-                       Schedulers: []*manager.Scheduler{
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{
+                       Schedulers: []*managerv1.Scheduler{
                            {
                                HostName: data.Schedulers[0].HostName,
                            },
                        },
                    }, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{
                        Name: data.ObjectStorage.Name,
                    }, nil).Times(1),
                )
@@ -275,12 +276,12 @@ func TestDynconfigGet(t *testing.T) {
                Hostname: "foo",
            },
            data: &DynconfigData{
-               Schedulers: []*manager.Scheduler{
+               Schedulers: []*managerv1.Scheduler{
                    {
                        HostName: "foo",
                    },
                },
-               ObjectStorage: &manager.ObjectStorage{
+               ObjectStorage: &managerv1.ObjectStorage{
                    Name: "foo",
                },
            },
@@ -294,14 +295,14 @@ func TestDynconfigGet(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{
-                       Schedulers: []*manager.Scheduler{
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{
+                       Schedulers: []*managerv1.Scheduler{
                            {
                                HostName: data.Schedulers[0].HostName,
                            },
                        },
                    }, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{
                        Name: data.ObjectStorage.Name,
                    }, nil).Times(1),
                    m.ListSchedulers(gomock.Any()).Return(nil, errors.New("foo")).Times(1),
@@ -321,12 +322,12 @@ func TestDynconfigGet(t *testing.T) {
                Hostname: "foo",
            },
            data: &DynconfigData{
-               Schedulers: []*manager.Scheduler{
+               Schedulers: []*managerv1.Scheduler{
                    {
                        HostName: "foo",
                    },
                },
-               ObjectStorage: &manager.ObjectStorage{
+               ObjectStorage: &managerv1.ObjectStorage{
                    Name: "foo",
                },
            },
@@ -340,17 +341,17 @@ func TestDynconfigGet(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{
-                       Schedulers: []*manager.Scheduler{
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{
+                       Schedulers: []*managerv1.Scheduler{
                            {
                                HostName: data.Schedulers[0].HostName,
                            },
                        },
                    }, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{
                        Name: data.ObjectStorage.Name,
                    }, nil).Times(1),
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
                    m.GetObjectStorage(gomock.Any()).Return(nil, errors.New("foo")).Times(1),
                )
            },
@@ -368,12 +369,12 @@ func TestDynconfigGet(t *testing.T) {
                Hostname: "foo",
            },
            data: &DynconfigData{
-               Schedulers: []*manager.Scheduler{
+               Schedulers: []*managerv1.Scheduler{
                    {
                        HostName: "foo",
                    },
                },
-               ObjectStorage: &manager.ObjectStorage{
+               ObjectStorage: &managerv1.ObjectStorage{
                    Name: "foo",
                },
            },
@@ -387,18 +388,18 @@ func TestDynconfigGet(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{
-                       Schedulers: []*manager.Scheduler{
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{
+                       Schedulers: []*managerv1.Scheduler{
                            {
                                HostName: data.Schedulers[0].HostName,
                            },
                        },
                    }, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{
                        Name: data.ObjectStorage.Name,
                    }, nil).Times(1),
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{
-                       Schedulers: []*manager.Scheduler{
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{
+                       Schedulers: []*managerv1.Scheduler{
                            {
                                HostName: data.Schedulers[0].HostName,
                            },
@@ -412,7 +413,7 @@ func TestDynconfigGet(t *testing.T) {
                result, err := dynconfig.Get()
                assert.NoError(err)
                assert.EqualValues(result, &DynconfigData{
-                   Schedulers: []*manager.Scheduler{
+                   Schedulers: []*managerv1.Scheduler{
                        {
                            HostName: data.Schedulers[0].HostName,
                        },
@@ -427,8 +428,8 @@ func TestDynconfigGet(t *testing.T) {
                Hostname: "foo",
            },
            data: &DynconfigData{
-               Schedulers:    []*manager.Scheduler(nil),
-               ObjectStorage: &manager.ObjectStorage{},
+               Schedulers:    []*managerv1.Scheduler(nil),
+               ObjectStorage: &managerv1.ObjectStorage{},
            },
            sleep: func() {
                time.Sleep(100 * time.Millisecond)
@@ -440,10 +441,10 @@ func TestDynconfigGet(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1),
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1),
                )
            },
            expect: func(t *testing.T, dynconfig Dynconfig, data *DynconfigData) {
@@ -495,7 +496,7 @@ func TestDynconfigGetSchedulers(t *testing.T) {
                Hostname: "foo",
            },
            data: &DynconfigData{
-               Schedulers: []*manager.Scheduler{
+               Schedulers: []*managerv1.Scheduler{
                    {
                        HostName: "foo",
                    },
@@ -509,14 +510,14 @@ func TestDynconfigGetSchedulers(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{
-                       Schedulers: []*manager.Scheduler{
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{
+                       Schedulers: []*managerv1.Scheduler{
                            {
                                HostName: data.Schedulers[0].HostName,
                            },
                        },
                    }, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1),
                )
            },
            expect: func(t *testing.T, dynconfig Dynconfig, data *DynconfigData) {
@@ -533,7 +534,7 @@ func TestDynconfigGetSchedulers(t *testing.T) {
                Hostname: "foo",
            },
            data: &DynconfigData{
-               Schedulers: []*manager.Scheduler{
+               Schedulers: []*managerv1.Scheduler{
                    {
                        HostName: "foo",
                    },
@@ -549,16 +550,16 @@ func TestDynconfigGetSchedulers(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1),
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{
-                       Schedulers: []*manager.Scheduler{
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{
+                       Schedulers: []*managerv1.Scheduler{
                            {
                                HostName: data.Schedulers[0].HostName,
                            },
                        },
                    }, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1),
                )
            },
            expect: func(t *testing.T, dynconfig Dynconfig, data *DynconfigData) {
@@ -575,7 +576,7 @@ func TestDynconfigGetSchedulers(t *testing.T) {
                Hostname: "foo",
            },
            data: &DynconfigData{
-               Schedulers: []*manager.Scheduler{
+               Schedulers: []*managerv1.Scheduler{
                    {
                        HostName: "foo",
                    },
@@ -591,14 +592,14 @@ func TestDynconfigGetSchedulers(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{
-                       Schedulers: []*manager.Scheduler{
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{
+                       Schedulers: []*managerv1.Scheduler{
                            {
                                HostName: data.Schedulers[0].HostName,
                            },
                        },
                    }, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1),
                    m.ListSchedulers(gomock.Any()).Return(nil, errors.New("foo")).Times(1),
                )
            },
@@ -616,7 +617,7 @@ func TestDynconfigGetSchedulers(t *testing.T) {
                Hostname: "foo",
            },
            data: &DynconfigData{
-               Schedulers: []*manager.Scheduler(nil),
+               Schedulers: []*managerv1.Scheduler(nil),
            },
            sleep: func() {
                time.Sleep(100 * time.Millisecond)
@@ -628,10 +629,10 @@ func TestDynconfigGetSchedulers(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1),
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1),
                )
            },
            expect: func(t *testing.T, dynconfig Dynconfig, data *DynconfigData) {
@@ -683,7 +684,7 @@ func TestDynconfigGetObjectStorage(t *testing.T) {
                Hostname: "foo",
            },
            data: &DynconfigData{
-               ObjectStorage: &manager.ObjectStorage{
+               ObjectStorage: &managerv1.ObjectStorage{
                    Name: "foo",
                },
            },
@@ -695,8 +696,8 @@ func TestDynconfigGetObjectStorage(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{
                        Name: data.ObjectStorage.Name,
                    }, nil).Times(1),
                )
@@ -715,7 +716,7 @@ func TestDynconfigGetObjectStorage(t *testing.T) {
                Hostname: "foo",
            },
            data: &DynconfigData{
-               ObjectStorage: &manager.ObjectStorage{
+               ObjectStorage: &managerv1.ObjectStorage{
                    Name: "foo",
                },
            },
@@ -729,10 +730,10 @@ func TestDynconfigGetObjectStorage(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1),
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{
                        Name: data.ObjectStorage.Name,
                    }, nil).Times(1),
                )
@@ -751,7 +752,7 @@ func TestDynconfigGetObjectStorage(t *testing.T) {
                Hostname: "foo",
            },
            data: &DynconfigData{
-               ObjectStorage: &manager.ObjectStorage{
+               ObjectStorage: &managerv1.ObjectStorage{
                    Name: "foo",
                },
            },
@@ -765,11 +766,11 @@ func TestDynconfigGetObjectStorage(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{
                        Name: data.ObjectStorage.Name,
                    }, nil).Times(1),
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
                    m.GetObjectStorage(gomock.Any()).Return(nil, errors.New("foo")).Times(1),
                )
            },
@@ -787,7 +788,7 @@ func TestDynconfigGetObjectStorage(t *testing.T) {
                Hostname: "foo",
            },
            data: &DynconfigData{
-               ObjectStorage: &manager.ObjectStorage{
+               ObjectStorage: &managerv1.ObjectStorage{
                    Name: "foo",
                },
            },
@@ -801,11 +802,11 @@ func TestDynconfigGetObjectStorage(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{
                        Name: data.ObjectStorage.Name,
                    }, nil).Times(1),
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
                    m.GetObjectStorage(gomock.Any()).Return(nil, status.Error(codes.NotFound, "")).Times(1),
                )
            },
@@ -813,7 +814,7 @@ func TestDynconfigGetObjectStorage(t *testing.T) {
                assert := assert.New(t)
                result, err := dynconfig.GetObjectStorage()
                assert.NoError(err)
-               assert.EqualValues(result, (*manager.ObjectStorage)(nil))
+               assert.EqualValues(result, (*managerv1.ObjectStorage)(nil))
            },
        },
        {
@@ -823,7 +824,7 @@ func TestDynconfigGetObjectStorage(t *testing.T) {
                Hostname: "foo",
            },
            data: &DynconfigData{
-               ObjectStorage: &manager.ObjectStorage{},
+               ObjectStorage: &managerv1.ObjectStorage{},
            },
            sleep: func() {
                time.Sleep(100 * time.Millisecond)
@@ -835,10 +836,10 @@ func TestDynconfigGetObjectStorage(t *testing.T) {
            },
            mock: func(m *mocks.MockClientMockRecorder, data *DynconfigData) {
                gomock.InOrder(
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1),
-                   m.ListSchedulers(gomock.Any()).Return(&manager.ListSchedulersResponse{}, nil).Times(1),
-                   m.GetObjectStorage(gomock.Any()).Return(&manager.ObjectStorage{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1),
+                   m.ListSchedulers(gomock.Any()).Return(&managerv1.ListSchedulersResponse{}, nil).Times(1),
+                   m.GetObjectStorage(gomock.Any()).Return(&managerv1.ObjectStorage{}, nil).Times(1),
                )
            },
            expect: func(t *testing.T, dynconfig Dynconfig, data *DynconfigData) {

View File

@@ -7,8 +7,8 @@ package mocks
 import (
    reflect "reflect"

+   v1 "d7y.io/api/pkg/apis/manager/v1"
    config "d7y.io/dragonfly/v2/client/config"
-   manager "d7y.io/dragonfly/v2/pkg/rpc/manager"
    gomock "github.com/golang/mock/gomock"
 )
@@ -63,10 +63,10 @@ func (mr *MockDynconfigMockRecorder) Get() *gomock.Call {
 }

 // GetObjectStorage mocks base method.
-func (m *MockDynconfig) GetObjectStorage() (*manager.ObjectStorage, error) {
+func (m *MockDynconfig) GetObjectStorage() (*v1.ObjectStorage, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetObjectStorage")
-   ret0, _ := ret[0].(*manager.ObjectStorage)
+   ret0, _ := ret[0].(*v1.ObjectStorage)
    ret1, _ := ret[1].(error)
    return ret0, ret1
 }
@@ -78,10 +78,10 @@ func (mr *MockDynconfigMockRecorder) GetObjectStorage() *gomock.Call {
 }

 // GetSchedulers mocks base method.
-func (m *MockDynconfig) GetSchedulers() ([]*manager.Scheduler, error) {
+func (m *MockDynconfig) GetSchedulers() ([]*v1.Scheduler, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetSchedulers")
-   ret0, _ := ret[0].([]*manager.Scheduler)
+   ret0, _ := ret[0].([]*v1.Scheduler)
    ret1, _ := ret[1].(error)
    return ret0, ret1
 }
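
Since the mock is regenerated rather than hand-edited, test code keeps the same gomock shape and only the returned types move to v1. A hedged sketch (NewMockDynconfig is the standard mockgen-generated constructor name):

    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    md := mocks.NewMockDynconfig(ctrl)
    md.EXPECT().GetSchedulers().Return([]*v1.Scheduler{{HostName: "foo"}}, nil)

    schedulers, err := md.GetSchedulers() // []*v1.Scheduler with HostName "foo", nil error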

View File

@@ -33,12 +33,13 @@ import (
    "gopkg.in/yaml.v3"

+   commonv1 "d7y.io/api/pkg/apis/common/v1"
+
    "d7y.io/dragonfly/v2/client/util"
    "d7y.io/dragonfly/v2/cmd/dependency/base"
    logger "d7y.io/dragonfly/v2/internal/dflog"
    "d7y.io/dragonfly/v2/pkg/dfnet"
    netip "d7y.io/dragonfly/v2/pkg/net/ip"
-   rpcbase "d7y.io/dragonfly/v2/pkg/rpc/base"
    "d7y.io/dragonfly/v2/pkg/unit"
 )
@@ -156,18 +157,18 @@ func (p *DaemonOption) Validate() error {
    return nil
 }

-func ConvertPattern(p string, defaultPattern rpcbase.Pattern) rpcbase.Pattern {
+func ConvertPattern(p string, defaultPattern commonv1.Pattern) commonv1.Pattern {
    switch p {
    case PatternP2P:
-       return rpcbase.Pattern_P2P
+       return commonv1.Pattern_P2P
    case PatternSeedPeer:
-       return rpcbase.Pattern_SEED_PEER
+       return commonv1.Pattern_SEED_PEER
    case PatternSource:
-       return rpcbase.Pattern_SOURCE
+       return commonv1.Pattern_SOURCE
    case "":
        return defaultPattern
    }

-   logger.Warnf("unknown pattern, use default pattern: %s", rpcbase.Pattern_name[int32(defaultPattern)])
+   logger.Warnf("unknown pattern, use default pattern: %s", commonv1.Pattern_name[int32(defaultPattern)])
    return defaultPattern
 }
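
ConvertPattern itself behaves exactly as before: it maps the daemon's string-valued pattern onto the proto enum and falls back to the supplied default for an empty or unrecognized value. Usage as wired up in daemon.New later in this commit:

    // Empty config value: fall back to the supplied default.
    pattern := config.ConvertPattern("", commonv1.Pattern_P2P) // commonv1.Pattern_P2P

    // Known value: mapped to the matching enum constant.
    pattern = config.ConvertPattern(config.PatternSeedPeer, commonv1.Pattern_P2P) // commonv1.Pattern_SEED_PEER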

View File

@@ -37,6 +37,10 @@ import (
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials"

+   commonv1 "d7y.io/api/pkg/apis/common/v1"
+   managerv1 "d7y.io/api/pkg/apis/manager/v1"
+   schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
+
    "d7y.io/dragonfly/v2/client/config"
    "d7y.io/dragonfly/v2/client/daemon/gc"
    "d7y.io/dragonfly/v2/client/daemon/metrics"
@@ -54,10 +58,7 @@ import (
    "d7y.io/dragonfly/v2/pkg/idgen"
    "d7y.io/dragonfly/v2/pkg/reachable"
    "d7y.io/dragonfly/v2/pkg/rpc"
-   "d7y.io/dragonfly/v2/pkg/rpc/base"
-   "d7y.io/dragonfly/v2/pkg/rpc/manager"
    managerclient "d7y.io/dragonfly/v2/pkg/rpc/manager/client"
-   "d7y.io/dragonfly/v2/pkg/rpc/scheduler"
    schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client"
    "d7y.io/dragonfly/v2/pkg/source"
 )
@@ -68,15 +69,15 @@ type Daemon interface {
    // ExportTaskManager returns the underlay peer.TaskManager for downloading when embed dragonfly in custom binary
    ExportTaskManager() peer.TaskManager
-   // ExportPeerHost returns the underlay scheduler.PeerHost for scheduling
-   ExportPeerHost() *scheduler.PeerHost
+   // ExportPeerHost returns the underlay schedulerv1.PeerHost for scheduling
+   ExportPeerHost() *schedulerv1.PeerHost
 }

 type clientDaemon struct {
    once *sync.Once
    done chan bool

-   schedPeerHost *scheduler.PeerHost
+   schedPeerHost *schedulerv1.PeerHost

    Option config.DaemonOption
@@ -92,7 +93,7 @@ type clientDaemon struct {
    dynconfig       config.Dynconfig
    dfpath          dfpath.Dfpath
-   schedulers      []*manager.Scheduler
+   schedulers      []*managerv1.Scheduler
    managerClient   managerclient.Client
    schedulerClient schedulerclient.Client
 }
@@ -101,7 +102,7 @@ func New(opt *config.DaemonOption, d dfpath.Dfpath) (Daemon, error) {
    // update plugin directory
    source.UpdatePluginDir(d.PluginDir())

-   host := &scheduler.PeerHost{
+   host := &schedulerv1.PeerHost{
        Id:      idgen.HostID(opt.Host.Hostname, int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start)),
        Ip:      opt.Host.AdvertiseIP,
        RpcPort: int32(opt.Download.PeerGRPC.TCPListen.PortRange.Start),
@@ -115,10 +116,10 @@ func New(opt *config.DaemonOption, d dfpath.Dfpath) (Daemon, error) {
    var (
        addrs          []dfnet.NetAddr
-       schedulers     []*manager.Scheduler
+       schedulers     []*managerv1.Scheduler
        dynconfig      config.Dynconfig
        managerClient  managerclient.Client
-       defaultPattern = config.ConvertPattern(opt.Download.DefaultPattern, base.Pattern_P2P)
+       defaultPattern = config.ConvertPattern(opt.Download.DefaultPattern, commonv1.Pattern_P2P)
    )

    if opt.Scheduler.Manager.Enable {
@@ -162,7 +163,7 @@ func New(opt *config.DaemonOption, d dfpath.Dfpath) (Daemon, error) {
    // Storage.Option.DataPath is same with Daemon DataDir
    opt.Storage.DataPath = d.DataDir()
    gcCallback := func(request storage.CommonTaskRequest) {
-       er := sched.LeaveTask(context.Background(), &scheduler.PeerTarget{
+       er := sched.LeaveTask(context.Background(), &schedulerv1.PeerTarget{
            TaskId: request.TaskID,
            PeerId: request.PeerID,
        })
@@ -524,8 +525,8 @@ func (cd *clientDaemon) Serve() error {
        g.Go(func() error {
            logger.Info("keepalive to manager")
-           cd.managerClient.KeepAlive(cd.Option.Scheduler.Manager.SeedPeer.KeepAlive.Interval, &manager.KeepAliveRequest{
-               SourceType: manager.SourceType_SEED_PEER_SOURCE,
+           cd.managerClient.KeepAlive(cd.Option.Scheduler.Manager.SeedPeer.KeepAlive.Interval, &managerv1.KeepAliveRequest{
+               SourceType: managerv1.SourceType_SEED_PEER_SOURCE,
                HostName:   cd.Option.Host.Hostname,
                Ip:         cd.Option.Host.AdvertiseIP,
                ClusterId:  uint64(cd.Option.Scheduler.Manager.SeedPeer.ClusterID),
@@ -564,7 +565,7 @@
    }

    // serve dynconfig service
-   if cd.dynconfig != nil {
+   if cd.Option.Scheduler.Manager.Enable {
        // dynconfig register client daemon
        cd.dynconfig.Register(cd)
@@ -662,7 +663,7 @@ func (cd *clientDaemon) Stop() {
            cd.StorageManager.CleanUp()
        }

-       if cd.dynconfig != nil {
+       if cd.Option.Scheduler.Manager.Enable {
            if err := cd.dynconfig.Stop(); err != nil {
                logger.Errorf("dynconfig client closed failed %s", err)
            }
@@ -697,7 +698,7 @@ func (cd *clientDaemon) OnNotify(data *config.DynconfigData) {
 }

 // getSchedulerIPs gets ips by schedulers.
-func getSchedulerIPs(schedulers []*manager.Scheduler) []string {
+func getSchedulerIPs(schedulers []*managerv1.Scheduler) []string {
    ips := []string{}
    for _, scheduler := range schedulers {
        ips = append(ips, scheduler.Ip)
@@ -706,8 +707,8 @@ func getSchedulerIPs(schedulers []*manager.Scheduler) []string {
    return ips
 }

-// schedulersToAvailableNetAddrs coverts []*manager.Scheduler to available []dfnet.NetAddr.
-func schedulersToAvailableNetAddrs(schedulers []*manager.Scheduler) []dfnet.NetAddr {
+// schedulersToAvailableNetAddrs coverts []*managerv1.Scheduler to available []dfnet.NetAddr.
+func schedulersToAvailableNetAddrs(schedulers []*managerv1.Scheduler) []dfnet.NetAddr {
    var schedulerClusterID uint64
    netAddrs := make([]dfnet.NetAddr, 0, len(schedulers))
    for _, scheduler := range schedulers {
@@ -752,8 +753,8 @@ func (cd *clientDaemon) announceSeedPeer() error {
        objectStoragePort = int32(cd.Option.ObjectStorage.TCPListen.PortRange.Start)
    }

-   if _, err := cd.managerClient.UpdateSeedPeer(&manager.UpdateSeedPeerRequest{
-       SourceType: manager.SourceType_SEED_PEER_SOURCE,
+   if _, err := cd.managerClient.UpdateSeedPeer(&managerv1.UpdateSeedPeerRequest{
+       SourceType: managerv1.SourceType_SEED_PEER_SOURCE,
        HostName:   cd.Option.Host.Hostname,
        Type:       cd.Option.Scheduler.Manager.SeedPeer.Type,
        Idc:        cd.Option.Host.IDC,
@@ -775,6 +776,6 @@ func (cd *clientDaemon) ExportTaskManager() peer.TaskManager {
    return cd.PeerTaskManager
 }

-func (cd *clientDaemon) ExportPeerHost() *scheduler.PeerHost {
+func (cd *clientDaemon) ExportPeerHost() *schedulerv1.PeerHost {
    return cd.schedPeerHost
 }
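
Both helpers keep their behavior under the new types: getSchedulerIPs collects raw IPs, while schedulersToAvailableNetAddrs probes reachability and, judging by the cluster-ID bookkeeping and the tests below, only returns addresses belonging to a single scheduler cluster. A minimal call-site sketch:

    schedulers := []*managerv1.Scheduler{
        {Ip: "127.0.0.1", HostName: "foo", Port: int32(3000)},
    }
    ips := getSchedulerIPs(schedulers)                 // []string{"127.0.0.1"}
    addrs := schedulersToAvailableNetAddrs(schedulers) // reachable []dfnet.NetAddr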

View File

@@ -22,8 +22,9 @@ import (
    "github.com/stretchr/testify/assert"

+   managerv1 "d7y.io/api/pkg/apis/manager/v1"
+
    "d7y.io/dragonfly/v2/pkg/dfnet"
-   "d7y.io/dragonfly/v2/pkg/rpc/manager"
 )

 func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) {
@@ -35,12 +36,12 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) {
    tests := []struct {
        name       string
-       schedulers []*manager.Scheduler
+       schedulers []*managerv1.Scheduler
        expect     func(t *testing.T, addrs []dfnet.NetAddr)
    }{
        {
            name: "available ip",
-           schedulers: []*manager.Scheduler{
+           schedulers: []*managerv1.Scheduler{
                {
                    Ip:   "127.0.0.1",
                    Port: int32(3000),
@@ -59,7 +60,7 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) {
        },
        {
            name: "available host",
-           schedulers: []*manager.Scheduler{
+           schedulers: []*managerv1.Scheduler{
                {
                    Ip:       "foo",
                    HostName: "localhost",
@@ -80,7 +81,7 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) {
        },
        {
            name: "available ip and host",
-           schedulers: []*manager.Scheduler{
+           schedulers: []*managerv1.Scheduler{
                {
                    Ip:       "foo",
                    HostName: "localhost",
@@ -122,7 +123,7 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) {
        },
        {
            name: "unreachable",
-           schedulers: []*manager.Scheduler{
+           schedulers: []*managerv1.Scheduler{
                {
                    Ip:       "foo",
                    HostName: "localhost",
@@ -143,7 +144,7 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) {
        },
        {
            name:       "empty schedulers",
-           schedulers: []*manager.Scheduler{},
+           schedulers: []*managerv1.Scheduler{},
            expect: func(t *testing.T, addrs []dfnet.NetAddr) {
                assert := assert.New(t)
                assert.EqualValues(addrs, []dfnet.NetAddr{})
@@ -151,7 +152,7 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) {
        },
        {
            name: "available ip with different scheduler cluster",
-           schedulers: []*manager.Scheduler{
+           schedulers: []*managerv1.Scheduler{
                {
                    Ip:       "127.0.0.1",
                    HostName: "foo",
@@ -181,7 +182,7 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) {
        },
        {
            name: "available host with different scheduler cluster",
-           schedulers: []*manager.Scheduler{
+           schedulers: []*managerv1.Scheduler{
                {
                    Ip:       "127.0.0.1",
                    HostName: "foo",
@@ -211,7 +212,7 @@ func TestDaemonSchedulersToAvailableNetAddrs(t *testing.T) {
        },
        {
            name: "available host and ip with different scheduler cluster",
-           schedulers: []*manager.Scheduler{
+           schedulers: []*managerv1.Scheduler{
                {
                    Ip:       "foo",
                    HostName: "localhost",

View File

@@ -40,6 +40,8 @@ import (
    ginprometheus "github.com/mcuadros/go-gin-prometheus"
    "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"

+   commonv1 "d7y.io/api/pkg/apis/common/v1"
+
    "d7y.io/dragonfly/v2/client/config"
    "d7y.io/dragonfly/v2/client/daemon/peer"
    "d7y.io/dragonfly/v2/client/daemon/storage"
@@ -48,7 +50,6 @@ import (
    "d7y.io/dragonfly/v2/pkg/digest"
    "d7y.io/dragonfly/v2/pkg/idgen"
    "d7y.io/dragonfly/v2/pkg/objectstorage"
-   "d7y.io/dragonfly/v2/pkg/rpc/base"
    pkgstrings "d7y.io/dragonfly/v2/pkg/strings"
 )
@@ -251,7 +252,7 @@ func (o *objectStorage) getObject(ctx *gin.Context) {
    )

    // Initialize filter field.
-   urlMeta := &base.UrlMeta{Filter: o.config.ObjectStorage.Filter}
+   urlMeta := &commonv1.UrlMeta{Filter: o.config.ObjectStorage.Filter}
    if filter != "" {
        urlMeta.Filter = filter
    }
@@ -387,7 +388,7 @@ func (o *objectStorage) putObject(ctx *gin.Context) {
    }

    // Initialize url meta.
-   urlMeta := &base.UrlMeta{Filter: o.config.ObjectStorage.Filter}
+   urlMeta := &commonv1.UrlMeta{Filter: o.config.ObjectStorage.Filter}
    dgst := o.md5FromFileHeader(fileHeader)
    urlMeta.Digest = dgst.String()
    if filter != "" {
@@ -419,7 +420,7 @@ func (o *objectStorage) putObject(ctx *gin.Context) {
    if err := o.peerTaskManager.AnnouncePeerTask(ctx, storage.PeerTaskMetadata{
        TaskID: taskID,
        PeerID: peerID,
-   }, signURL, base.TaskType_DfStore, urlMeta); err != nil {
+   }, signURL, commonv1.TaskType_DfStore, urlMeta); err != nil {
        log.Error(err)
        ctx.JSON(http.StatusInternalServerError, gin.H{"errors": err.Error()})
        return
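
Both handlers build the request metadata the same way under the renamed package; a condensed sketch of the shared pattern (surrounding variables as in the hunks above):

    // Filter defaults from the daemon config and may be overridden per request.
    urlMeta := &commonv1.UrlMeta{Filter: o.config.ObjectStorage.Filter}
    if filter != "" {
        urlMeta.Filter = filter
    }
    // putObject additionally records the upload digest before announcing
    // the task to peers with the DfStore task type:
    //	urlMeta.Digest = dgst.String()
    //	o.peerTaskManager.AnnouncePeerTask(ctx, meta, signURL, commonv1.TaskType_DfStore, urlMeta)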

View File

@@ -33,6 +33,10 @@ import (
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"

+   commonv1 "d7y.io/api/pkg/apis/common/v1"
+   errordetailsv1 "d7y.io/api/pkg/apis/errordetails/v1"
+   schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
+
    "d7y.io/dragonfly/v2/client/config"
    "d7y.io/dragonfly/v2/client/daemon/metrics"
    "d7y.io/dragonfly/v2/client/daemon/storage"
@@ -41,9 +45,6 @@ import (
    logger "d7y.io/dragonfly/v2/internal/dflog"
    "d7y.io/dragonfly/v2/pkg/digest"
    "d7y.io/dragonfly/v2/pkg/idgen"
-   "d7y.io/dragonfly/v2/pkg/rpc/base"
-   "d7y.io/dragonfly/v2/pkg/rpc/errordetails"
-   "d7y.io/dragonfly/v2/pkg/rpc/scheduler"
    schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client"
    "d7y.io/dragonfly/v2/pkg/source"
 )
@@ -76,9 +77,9 @@ type peerTaskConductor struct {
    pieceDownloadCancel context.CancelFunc

    // host info about current host
-   host *scheduler.PeerHost
+   host *schedulerv1.PeerHost
    // request is the original PeerTaskRequest
-   request *scheduler.PeerTaskRequest
+   request *schedulerv1.PeerTaskRequest

    // needBackSource indicates downloading resource from instead of other peers
    needBackSource *atomic.Bool
@@ -107,15 +108,15 @@ type peerTaskConductor struct {
    broker *pieceBroker

-   sizeScope   base.SizeScope
-   singlePiece *scheduler.SinglePiece
+   sizeScope   commonv1.SizeScope
+   singlePiece *schedulerv1.SinglePiece
    tinyData    *TinyData

    // peerPacketStream stands schedulerclient.PeerPacketStream from scheduler
-   peerPacketStream scheduler.Scheduler_ReportPieceResultClient
+   peerPacketStream schedulerv1.Scheduler_ReportPieceResultClient
    // peerPacket is the latest available peers from peerPacketCh
    // Deprecated: remove in future release
-   peerPacket      atomic.Value // *scheduler.PeerPacket
+   peerPacket      atomic.Value // *schedulerv1.PeerPacket
    legacyPeerCount *atomic.Int64
    // peerPacketReady will receive a ready signal for peerPacket ready
    peerPacketReady chan bool
@@ -141,7 +142,7 @@ type peerTaskConductor struct {
    // failedReason will be set when peer task failed
    failedReason string
    // failedReason will be set when peer task failed
-   failedCode base.Code
+   failedCode commonv1.Code

    // readyPieces stands all downloaded pieces
    readyPieces *Bitmap
@@ -171,7 +172,7 @@ type peerTaskConductor struct {
 func (ptm *peerTaskManager) newPeerTaskConductor(
    ctx context.Context,
-   request *scheduler.PeerTaskRequest,
+   request *schedulerv1.PeerTaskRequest,
    limit rate.Limit,
    parent *peerTaskConductor,
    rg *util.Range,
@@ -229,7 +230,7 @@ func (ptm *peerTaskManager) newPeerTaskConductor(
        requestedPieces: NewBitmap(),
        failedPieceCh:   make(chan int32, config.DefaultPieceChanSize),
        failedReason:    failedReasonNotSet,
-       failedCode:      base.Code_UnknownError,
+       failedCode:      commonv1.Code_UnknownError,
        contentLength:   atomic.NewInt64(-1),
        totalPiece:      atomic.NewInt32(-1),
        digest:          atomic.NewString(""),
@@ -265,8 +266,8 @@ func (pt *peerTaskConductor) register() error {
    var (
        needBackSource bool
-       sizeScope      base.SizeScope
-       singlePiece    *scheduler.SinglePiece
+       sizeScope      commonv1.SizeScope
+       singlePiece    *schedulerv1.SinglePiece
        tinyData       *TinyData
    )
@@ -287,35 +288,35 @@
            pt.peerPacketStream = &dummyPeerPacketStream{}
            pt.Errorf("register peer task failed: %s, peer id: %s, auto back source disabled", err, pt.request.PeerId)
            pt.span.RecordError(err)
-           pt.cancel(base.Code_SchedError, err.Error())
+           pt.cancel(commonv1.Code_SchedError, err.Error())
            return err
        }
        needBackSource = true
        // can not detect source or scheduler error, create a new dummy scheduler client
        pt.schedulerClient = &dummySchedulerClient{}
-       result = &scheduler.RegisterResult{TaskId: pt.taskID}
+       result = &schedulerv1.RegisterResult{TaskId: pt.taskID}
        pt.Warnf("register peer task failed: %s, peer id: %s, try to back source", err, pt.request.PeerId)
    } else {
-       pt.Infof("register task success, SizeScope: %s", base.SizeScope_name[int32(result.SizeScope)])
+       pt.Infof("register task success, SizeScope: %s", commonv1.SizeScope_name[int32(result.SizeScope)])
    }

    var header map[string]string
    if !needBackSource {
        sizeScope = result.SizeScope
        switch result.SizeScope {
-       case base.SizeScope_NORMAL:
+       case commonv1.SizeScope_NORMAL:
            pt.span.SetAttributes(config.AttributePeerTaskSizeScope.String("normal"))
-       case base.SizeScope_SMALL:
+       case commonv1.SizeScope_SMALL:
            pt.span.SetAttributes(config.AttributePeerTaskSizeScope.String("small"))
-           if piece, ok := result.DirectPiece.(*scheduler.RegisterResult_SinglePiece); ok {
+           if piece, ok := result.DirectPiece.(*schedulerv1.RegisterResult_SinglePiece); ok {
                singlePiece = piece.SinglePiece
            }
            if result.ExtendAttribute != nil {
                header = result.ExtendAttribute.Header
            }
-       case base.SizeScope_TINY:
+       case commonv1.SizeScope_TINY:
            pt.span.SetAttributes(config.AttributePeerTaskSizeScope.String("tiny"))
-           if piece, ok := result.DirectPiece.(*scheduler.RegisterResult_PieceContent); ok {
+           if piece, ok := result.DirectPiece.(*schedulerv1.RegisterResult_PieceContent); ok {
                tinyData = &TinyData{
                    TaskID: result.TaskId,
                    PeerID: pt.request.PeerId,
@@ -327,7 +328,7 @@
                pt.peerPacketStream = &dummyPeerPacketStream{}
                pt.span.RecordError(err)
                pt.Errorf("%s", err)
-               pt.cancel(base.Code_SchedError, err.Error())
+               pt.cancel(commonv1.Code_SchedError, err.Error())
                return err
            }
            if result.ExtendAttribute != nil {
@@ -342,7 +343,7 @@ func (pt *peerTaskConductor) register() error {
        // when peer register failed, some actions need to do with peerPacketStream
        pt.peerPacketStream = &dummyPeerPacketStream{}
        pt.span.RecordError(err)
-       pt.cancel(base.Code_SchedError, err.Error())
+       pt.cancel(commonv1.Code_SchedError, err.Error())
        return err
    }
@@ -363,7 +364,7 @@ func (pt *peerTaskConductor) start() error {
    if pt.seed {
        pt.peerPacketStream = &dummyPeerPacketStream{}
        pt.schedulerClient = &dummySchedulerClient{}
-       pt.sizeScope = base.SizeScope_NORMAL
+       pt.sizeScope = commonv1.SizeScope_NORMAL
        pt.needBackSource = atomic.NewBool(true)
    } else {
        // register to scheduler
@@ -449,7 +450,7 @@ func (pt *peerTaskConductor) UpdateSourceErrorStatus(st *status.Status) {
    pt.sourceErrorStatus = st
 }

-func (pt *peerTaskConductor) cancel(code base.Code, reason string) {
+func (pt *peerTaskConductor) cancel(code commonv1.Code, reason string) {
    pt.statusOnce.Do(func() {
        pt.failedCode = code
        pt.failedReason = reason
@@ -463,19 +464,19 @@ func (pt *peerTaskConductor) markBackSource() {
    // when close peerPacketReady, pullPiecesFromPeers will invoke backSource
    close(pt.peerPacketReady)
    // let legacy mode exit
-   pt.peerPacket.Store(&scheduler.PeerPacket{
+   pt.peerPacket.Store(&schedulerv1.PeerPacket{
        TaskId:        pt.taskID,
        SrcPid:        pt.peerID,
        ParallelCount: 1,
        MainPeer:      nil,
-       CandidatePeers: []*scheduler.PeerPacket_DestPeer{
+       CandidatePeers: []*schedulerv1.PeerPacket_DestPeer{
            {
                Ip:      pt.host.Ip,
                RpcPort: pt.host.RpcPort,
                PeerId:  pt.peerID,
            },
        },
-       Code: base.Code_SchedNeedBackSource,
+       Code: commonv1.Code_SchedNeedBackSource,
    })
 }
@@ -501,9 +502,9 @@ func (pt *peerTaskConductor) backSource() {
        span.SetAttributes(config.AttributePeerTaskSuccess.Bool(false))
        span.RecordError(err)
        if isBackSourceError(err) {
-           pt.cancel(base.Code_ClientBackSourceError, err.Error())
+           pt.cancel(commonv1.Code_ClientBackSourceError, err.Error())
        } else {
-           pt.cancel(base.Code_ClientError, err.Error())
+           pt.cancel(commonv1.Code_ClientError, err.Error())
        }
        span.End()
        return
@@ -521,14 +522,14 @@ func (pt *peerTaskConductor) pullPieces() {
        return
    }

    switch pt.sizeScope {
-   case base.SizeScope_TINY:
+   case commonv1.SizeScope_TINY:
        pt.storeTinyPeerTask()
-   case base.SizeScope_SMALL:
+   case commonv1.SizeScope_SMALL:
        pt.pullSinglePiece()
-   case base.SizeScope_NORMAL:
+   case commonv1.SizeScope_NORMAL:
        pt.pullPiecesWithP2P()
    default:
-       pt.cancel(base.Code_ClientError, fmt.Sprintf("unknown size scope: %d", pt.sizeScope))
+       pt.cancel(commonv1.Code_ClientError, fmt.Sprintf("unknown size scope: %d", pt.sizeScope))
    }
 }
@@ -571,7 +572,7 @@ func (pt *peerTaskConductor) storeTinyPeerTask() {
    pt.storage = storageDriver
    if err != nil {
        pt.Errorf("register tiny data storage failed: %s", err)
-       pt.cancel(base.Code_ClientError, err.Error())
+       pt.cancel(commonv1.Code_ClientError, err.Error())
        return
    }
    n, err := pt.GetStorage().WritePiece(ctx,
@@ -598,19 +599,19 @@
        })
    if err != nil {
        pt.Errorf("write tiny data storage failed: %s", err)
-       pt.cancel(base.Code_ClientError, err.Error())
+       pt.cancel(commonv1.Code_ClientError, err.Error())
        return
    }
    if n != contentLength {
        pt.Errorf("write tiny data storage failed, want: %d, wrote: %d", contentLength, n)
-       pt.cancel(base.Code_ClientError, err.Error())
+       pt.cancel(commonv1.Code_ClientError, err.Error())
        return
    }

    err = pt.UpdateStorage()
    if err != nil {
        pt.Errorf("update tiny data storage failed: %s", err)
-       pt.cancel(base.Code_ClientError, err.Error())
+       pt.cancel(commonv1.Code_ClientError, err.Error())
        return
    }
@@ -621,7 +622,7 @@ func (pt *peerTaskConductor) storeTinyPeerTask() {
 func (pt *peerTaskConductor) receivePeerPacket(pieceRequestCh chan *DownloadPieceRequest) {
    var (
        lastNotReadyPiece   int32 = 0
-       peerPacket          *scheduler.PeerPacket
+       peerPacket          *schedulerv1.PeerPacket
        err                 error
        firstPacketReceived bool
    )
@@ -669,8 +670,8 @@ loop:
        }
        pt.Debugf("receive peerPacket %v", peerPacket)
-       if peerPacket.Code != base.Code_Success {
-           if peerPacket.Code == base.Code_SchedNeedBackSource {
+       if peerPacket.Code != commonv1.Code_Success {
+           if peerPacket.Code == commonv1.Code_SchedNeedBackSource {
                pt.markBackSource()
                pt.Infof("receive back source code")
                return
@@ -737,8 +738,8 @@ loop:
    }
 }

-// updateSynchronizer will convert peers to synchronizer, if failed, will update failed peers to scheduler.PeerPacket
-func (pt *peerTaskConductor) updateSynchronizer(lastNum int32, p *scheduler.PeerPacket) int32 {
+// updateSynchronizer will convert peers to synchronizer, if failed, will update failed peers to schedulerv1.PeerPacket
+func (pt *peerTaskConductor) updateSynchronizer(lastNum int32, p *schedulerv1.PeerPacket) int32 {
    desiredPiece, ok := pt.getNextNotReadyPieceNum(lastNum)
    if !ok {
        pt.Infof("all pieces is ready, peer task completed, skip to synchronize")
@@ -746,7 +747,7 @@ func (pt *peerTaskConductor) updateSynchronizer(lastNum int32, p *scheduler.Peer
        p.CandidatePeers = nil
        return desiredPiece
    }

-   var peers = []*scheduler.PeerPacket_DestPeer{p.MainPeer}
+   var peers = []*schedulerv1.PeerPacket_DestPeer{p.MainPeer}
    peers = append(peers, p.CandidatePeers...)

    legacyPeers := pt.pieceTaskSyncManager.newMultiPieceTaskSynchronizer(peers, desiredPiece)
@@ -765,15 +766,15 @@ func (pt *peerTaskConductor) confirmReceivePeerPacketError(err error) {
    default:
    }
    var (
-       failedCode   = base.Code_UnknownError
+       failedCode   = commonv1.Code_UnknownError
        failedReason string
    )
    de, ok := err.(*dferrors.DfError)
-   if ok && de.Code == base.Code_SchedNeedBackSource {
+   if ok && de.Code == commonv1.Code_SchedNeedBackSource {
        pt.markBackSource()
        pt.Infof("receive back source code")
        return
-   } else if ok && de.Code != base.Code_SchedNeedBackSource {
+   } else if ok && de.Code != commonv1.Code_SchedNeedBackSource {
        failedCode = de.Code
failedReason = de.Message failedReason = de.Message
pt.Errorf("receive peer packet failed: %s", pt.failedReason) pt.Errorf("receive peer packet failed: %s", pt.failedReason)
@ -784,29 +785,29 @@ func (pt *peerTaskConductor) confirmReceivePeerPacketError(err error) {
return return
} }
func (pt *peerTaskConductor) isExitPeerPacketCode(pp *scheduler.PeerPacket) bool { func (pt *peerTaskConductor) isExitPeerPacketCode(pp *schedulerv1.PeerPacket) bool {
switch pp.Code { switch pp.Code {
case base.Code_ResourceLacked, base.Code_BadRequest, case commonv1.Code_ResourceLacked, commonv1.Code_BadRequest,
base.Code_PeerTaskNotFound, base.Code_UnknownError, base.Code_RequestTimeOut: commonv1.Code_PeerTaskNotFound, commonv1.Code_UnknownError, commonv1.Code_RequestTimeOut:
// 1xxx // 1xxx
pt.failedCode = pp.Code pt.failedCode = pp.Code
pt.failedReason = fmt.Sprintf("receive exit peer packet with code %d", pp.Code) pt.failedReason = fmt.Sprintf("receive exit peer packet with code %d", pp.Code)
return true return true
case base.Code_SchedError, base.Code_SchedTaskStatusError, base.Code_SchedPeerNotFound: case commonv1.Code_SchedError, commonv1.Code_SchedTaskStatusError, commonv1.Code_SchedPeerNotFound:
// 5xxx // 5xxx
pt.failedCode = pp.Code pt.failedCode = pp.Code
pt.failedReason = fmt.Sprintf("receive exit peer packet with code %d", pp.Code) pt.failedReason = fmt.Sprintf("receive exit peer packet with code %d", pp.Code)
return true return true
case base.Code_SchedPeerGone: case commonv1.Code_SchedPeerGone:
pt.failedReason = reasonPeerGoneFromScheduler pt.failedReason = reasonPeerGoneFromScheduler
pt.failedCode = base.Code_SchedPeerGone pt.failedCode = commonv1.Code_SchedPeerGone
return true return true
case base.Code_CDNTaskRegistryFail: case commonv1.Code_CDNTaskRegistryFail:
// 6xxx // 6xxx
pt.failedCode = pp.Code pt.failedCode = pp.Code
pt.failedReason = fmt.Sprintf("receive exit peer packet with code %d", pp.Code) pt.failedReason = fmt.Sprintf("receive exit peer packet with code %d", pp.Code)
return true return true
case base.Code_BackToSourceAborted: case commonv1.Code_BackToSourceAborted:
st := status.Newf(codes.Aborted, "response is not valid") st := status.Newf(codes.Aborted, "response is not valid")
st, err := st.WithDetails(pp.GetSourceError()) st, err := st.WithDetails(pp.GetSourceError())
if err != nil { if err != nil {
@ -877,7 +878,7 @@ func (pt *peerTaskConductor) pullPiecesFromPeers(pieceRequestCh chan *DownloadPi
) )
// ensure first peer packet is not nil // ensure first peer packet is not nil
peerPacket, ok := pt.peerPacket.Load().(*scheduler.PeerPacket) peerPacket, ok := pt.peerPacket.Load().(*schedulerv1.PeerPacket)
if !ok { if !ok {
pt.Warn("pull pieces canceled") pt.Warn("pull pieces canceled")
return return
@ -912,7 +913,7 @@ loop:
// 2, try to get pieces // 2, try to get pieces
pt.Debugf("try to get pieces, number: %d, limit: %d", num, limit) pt.Debugf("try to get pieces, number: %d, limit: %d", num, limit)
piecePacket, err := pt.pieceTaskPoller.preparePieceTasks( piecePacket, err := pt.pieceTaskPoller.preparePieceTasks(
&base.PieceTaskRequest{ &commonv1.PieceTaskRequest{
TaskId: pt.taskID, TaskId: pt.taskID,
SrcPid: pt.peerID, SrcPid: pt.peerID,
StartNum: uint32(num), StartNum: uint32(num),
@ -953,7 +954,7 @@ loop:
} }
} }
func (pt *peerTaskConductor) updateMetadata(piecePacket *base.PiecePacket) { func (pt *peerTaskConductor) updateMetadata(piecePacket *commonv1.PiecePacket) {
// update total piece // update total piece
var metadataChanged bool var metadataChanged bool
if piecePacket.TotalPiece > pt.GetTotalPieces() { if piecePacket.TotalPiece > pt.GetTotalPieces() {
@ -1013,17 +1014,17 @@ func (pt *peerTaskConductor) waitFirstPeerPacket() (done bool, backSource bool)
if ok { if ok {
// preparePieceTasksByPeer func already send piece result with error // preparePieceTasksByPeer func already send piece result with error
pt.Infof("new peer client ready, scheduler time cost: %dus, peer count: %d", pt.Infof("new peer client ready, scheduler time cost: %dus, peer count: %d",
time.Since(pt.startTime).Microseconds(), len(pt.peerPacket.Load().(*scheduler.PeerPacket).CandidatePeers)) time.Since(pt.startTime).Microseconds(), len(pt.peerPacket.Load().(*schedulerv1.PeerPacket).CandidatePeers))
return true, false return true, false
} }
// when scheduler says base.Code_SchedNeedBackSource, receivePeerPacket will close pt.peerPacketReady // when scheduler says commonv1.Code_SchedNeedBackSource, receivePeerPacket will close pt.peerPacketReady
pt.Infof("start download from source due to base.Code_SchedNeedBackSource") pt.Infof("start download from source due to commonv1.Code_SchedNeedBackSource")
pt.span.AddEvent("back source due to scheduler says need back source") pt.span.AddEvent("back source due to scheduler says need back source")
pt.backSource() pt.backSource()
return false, true return false, true
case <-time.After(pt.schedulerOption.ScheduleTimeout.Duration): case <-time.After(pt.schedulerOption.ScheduleTimeout.Duration):
if pt.schedulerOption.DisableAutoBackSource { if pt.schedulerOption.DisableAutoBackSource {
pt.cancel(base.Code_ClientScheduleTimeout, reasonBackSourceDisabled) pt.cancel(commonv1.Code_ClientScheduleTimeout, reasonBackSourceDisabled)
err := fmt.Errorf("%s, auto back source disabled", pt.failedReason) err := fmt.Errorf("%s, auto back source disabled", pt.failedReason)
pt.span.RecordError(err) pt.span.RecordError(err)
pt.Errorf(err.Error()) pt.Errorf(err.Error())
@ -1048,12 +1049,12 @@ func (pt *peerTaskConductor) waitAvailablePeerPacket() (int32, bool) {
case _, ok := <-pt.peerPacketReady: case _, ok := <-pt.peerPacketReady:
if ok { if ok {
// preparePieceTasksByPeer func already send piece result with error // preparePieceTasksByPeer func already send piece result with error
pt.Infof("new peer client ready, peer count: %d", len(pt.peerPacket.Load().(*scheduler.PeerPacket).CandidatePeers)) pt.Infof("new peer client ready, peer count: %d", len(pt.peerPacket.Load().(*schedulerv1.PeerPacket).CandidatePeers))
// research from piece 0 // research from piece 0
return 0, true return 0, true
} }
// when scheduler says base.Code_SchedNeedBackSource, receivePeerPacket will close pt.peerPacketReady // when scheduler says commonv1.Code_SchedNeedBackSource, receivePeerPacket will close pt.peerPacketReady
pt.Infof("start download from source due to base.Code_SchedNeedBackSource") pt.Infof("start download from source due to commonv1.Code_SchedNeedBackSource")
pt.span.AddEvent("back source due to scheduler says need back source ") pt.span.AddEvent("back source due to scheduler says need back source ")
// TODO optimize back source when already downloaded some pieces // TODO optimize back source when already downloaded some pieces
pt.backSource() pt.backSource()
@ -1062,7 +1063,7 @@ func (pt *peerTaskConductor) waitAvailablePeerPacket() (int32, bool) {
} }
// Deprecated // Deprecated
func (pt *peerTaskConductor) dispatchPieceRequest(pieceRequestCh chan *DownloadPieceRequest, piecePacket *base.PiecePacket) { func (pt *peerTaskConductor) dispatchPieceRequest(pieceRequestCh chan *DownloadPieceRequest, piecePacket *commonv1.PiecePacket) {
pieceCount := len(piecePacket.PieceInfos) pieceCount := len(piecePacket.PieceInfos)
pt.Debugf("dispatch piece request, piece count: %d", pieceCount) pt.Debugf("dispatch piece request, piece count: %d", pieceCount)
// fix cdn return zero piece info, but with total piece count and content length // fix cdn return zero piece info, but with total piece count and content length
@ -1122,8 +1123,8 @@ wait:
pt.Infof("new peer client ready, but all pieces are already downloading, just wait failed pieces") pt.Infof("new peer client ready, but all pieces are already downloading, just wait failed pieces")
goto wait goto wait
} }
// when scheduler says base.Code_SchedNeedBackSource, receivePeerPacket will close pt.peerPacketReady // when scheduler says commonv1.Code_SchedNeedBackSource, receivePeerPacket will close pt.peerPacketReady
pt.Infof("start download from source due to base.Code_SchedNeedBackSource") pt.Infof("start download from source due to commonv1.Code_SchedNeedBackSource")
pt.span.AddEvent("back source due to scheduler says need back source") pt.span.AddEvent("back source due to scheduler says need back source")
pt.backSource() pt.backSource()
return -1, false return -1, false
@ -1199,7 +1200,7 @@ func (pt *peerTaskConductor) downloadPiece(workerID int32, request *DownloadPiec
return return
} }
attempt, success := pt.pieceTaskSyncManager.acquire( attempt, success := pt.pieceTaskSyncManager.acquire(
&base.PieceTaskRequest{ &commonv1.PieceTaskRequest{
Limit: 1, Limit: 1,
TaskId: pt.taskID, TaskId: pt.taskID,
SrcPid: pt.peerID, SrcPid: pt.peerID,
@ -1249,13 +1250,13 @@ func (pt *peerTaskConductor) waitLimit(ctx context.Context, request *DownloadPie
waitSpan.End() waitSpan.End()
// send error piece result // send error piece result
sendError := pt.sendPieceResult(&scheduler.PieceResult{ sendError := pt.sendPieceResult(&schedulerv1.PieceResult{
TaskId: pt.GetTaskID(), TaskId: pt.GetTaskID(),
SrcPid: pt.GetPeerID(), SrcPid: pt.GetPeerID(),
DstPid: request.DstPid, DstPid: request.DstPid,
PieceInfo: request.piece, PieceInfo: request.piece,
Success: false, Success: false,
Code: base.Code_ClientRequestLimitFail, Code: commonv1.Code_ClientRequestLimitFail,
HostLoad: nil, HostLoad: nil,
FinishedCount: 0, // update by peer task FinishedCount: 0, // update by peer task
}) })
@ -1263,7 +1264,7 @@ func (pt *peerTaskConductor) waitLimit(ctx context.Context, request *DownloadPie
pt.Errorf("report piece result failed %s", err) pt.Errorf("report piece result failed %s", err)
} }
pt.cancel(base.Code_ClientRequestLimitFail, err.Error()) pt.cancel(commonv1.Code_ClientRequestLimitFail, err.Error())
return false return false
} }
@ -1334,13 +1335,13 @@ func (pt *peerTaskConductor) ReportPieceResult(request *DownloadPieceRequest, re
pt.reportSuccessResult(request, result) pt.reportSuccessResult(request, result)
return return
} }
code := base.Code_ClientPieceDownloadFail code := commonv1.Code_ClientPieceDownloadFail
if isConnectionError(err) { if isConnectionError(err) {
code = base.Code_ClientConnectionError code = commonv1.Code_ClientConnectionError
} else if isPieceNotFound(err) { } else if isPieceNotFound(err) {
code = base.Code_ClientPieceNotFound code = commonv1.Code_ClientPieceNotFound
} else if isBackSourceError(err) { } else if isBackSourceError(err) {
code = base.Code_ClientBackSourceError code = commonv1.Code_ClientBackSourceError
} }
pt.reportFailResult(request, result, code) pt.reportFailResult(request, result, code)
} }
@ -1351,7 +1352,7 @@ func (pt *peerTaskConductor) reportSuccessResult(request *DownloadPieceRequest,
span.SetAttributes(config.AttributeWritePieceSuccess.Bool(true)) span.SetAttributes(config.AttributeWritePieceSuccess.Bool(true))
err := pt.sendPieceResult( err := pt.sendPieceResult(
&scheduler.PieceResult{ &schedulerv1.PieceResult{
TaskId: pt.GetTaskID(), TaskId: pt.GetTaskID(),
SrcPid: pt.GetPeerID(), SrcPid: pt.GetPeerID(),
DstPid: request.DstPid, DstPid: request.DstPid,
@ -1359,7 +1360,7 @@ func (pt *peerTaskConductor) reportSuccessResult(request *DownloadPieceRequest,
BeginTime: uint64(result.BeginTime), BeginTime: uint64(result.BeginTime),
EndTime: uint64(result.FinishTime), EndTime: uint64(result.FinishTime),
Success: true, Success: true,
Code: base.Code_Success, Code: commonv1.Code_Success,
HostLoad: nil, // TODO(jim): update host load HostLoad: nil, // TODO(jim): update host load
FinishedCount: pt.readyPieces.Settled(), FinishedCount: pt.readyPieces.Settled(),
// TODO range_start, range_size, piece_md5, piece_offset, piece_style // TODO range_start, range_size, piece_md5, piece_offset, piece_style
@ -1372,12 +1373,12 @@ func (pt *peerTaskConductor) reportSuccessResult(request *DownloadPieceRequest,
span.End() span.End()
} }
func (pt *peerTaskConductor) reportFailResult(request *DownloadPieceRequest, result *DownloadPieceResult, code base.Code) { func (pt *peerTaskConductor) reportFailResult(request *DownloadPieceRequest, result *DownloadPieceResult, code commonv1.Code) {
metrics.PieceTaskFailedCount.Add(1) metrics.PieceTaskFailedCount.Add(1)
_, span := tracer.Start(pt.ctx, config.SpanReportPieceResult) _, span := tracer.Start(pt.ctx, config.SpanReportPieceResult)
span.SetAttributes(config.AttributeWritePieceSuccess.Bool(false)) span.SetAttributes(config.AttributeWritePieceSuccess.Bool(false))
err := pt.sendPieceResult(&scheduler.PieceResult{ err := pt.sendPieceResult(&schedulerv1.PieceResult{
TaskId: pt.GetTaskID(), TaskId: pt.GetTaskID(),
SrcPid: pt.GetPeerID(), SrcPid: pt.GetPeerID(),
DstPid: request.DstPid, DstPid: request.DstPid,
@ -1466,7 +1467,7 @@ func (pt *peerTaskConductor) done() {
var ( var (
cost = time.Since(pt.startTime).Milliseconds() cost = time.Since(pt.startTime).Milliseconds()
success = true success = true
code = base.Code_Success code = commonv1.Code_Success
) )
pt.Log().Infof("peer task done, cost: %dms", cost) pt.Log().Infof("peer task done, cost: %dms", cost)
// TODO merge error handle // TODO merge error handle
@ -1479,8 +1480,8 @@ func (pt *peerTaskConductor) done() {
} else { } else {
close(pt.failCh) close(pt.failCh)
success = false success = false
code = base.Code_ClientError code = commonv1.Code_ClientError
pt.failedCode = base.Code_ClientError pt.failedCode = commonv1.Code_ClientError
pt.failedReason = err.Error() pt.failedReason = err.Error()
pt.span.SetAttributes(config.AttributePeerTaskSuccess.Bool(false)) pt.span.SetAttributes(config.AttributePeerTaskSuccess.Bool(false))
@ -1492,8 +1493,8 @@ func (pt *peerTaskConductor) done() {
} else { } else {
close(pt.failCh) close(pt.failCh)
success = false success = false
code = base.Code_ClientError code = commonv1.Code_ClientError
pt.failedCode = base.Code_ClientError pt.failedCode = commonv1.Code_ClientError
pt.failedReason = err.Error() pt.failedReason = err.Error()
pt.span.SetAttributes(config.AttributePeerTaskSuccess.Bool(false)) pt.span.SetAttributes(config.AttributePeerTaskSuccess.Bool(false))
@ -1517,7 +1518,7 @@ func (pt *peerTaskConductor) done() {
err = pt.schedulerClient.ReportPeerResult( err = pt.schedulerClient.ReportPeerResult(
peerResultCtx, peerResultCtx,
&scheduler.PeerResult{ &schedulerv1.PeerResult{
TaskId: pt.GetTaskID(), TaskId: pt.GetTaskID(),
PeerId: pt.GetPeerID(), PeerId: pt.GetPeerID(),
SrcIp: pt.host.Ip, SrcIp: pt.host.Ip,
@ -1544,7 +1545,7 @@ func (pt *peerTaskConductor) Fail() {
} }
func (pt *peerTaskConductor) fail() { func (pt *peerTaskConductor) fail() {
if pt.failedCode == base.Code_ClientBackSourceError { if pt.failedCode == commonv1.Code_ClientBackSourceError {
metrics.PeerTaskFailedCount.WithLabelValues(metrics.FailTypeBackSource).Add(1) metrics.PeerTaskFailedCount.WithLabelValues(metrics.FailTypeBackSource).Add(1)
} else { } else {
metrics.PeerTaskFailedCount.WithLabelValues(metrics.FailTypeP2P).Add(1) metrics.PeerTaskFailedCount.WithLabelValues(metrics.FailTypeP2P).Add(1)
@ -1574,16 +1575,16 @@ func (pt *peerTaskConductor) fail() {
peerResultCtx, peerResultSpan := tracer.Start(ctx, config.SpanReportPeerResult) peerResultCtx, peerResultSpan := tracer.Start(ctx, config.SpanReportPeerResult)
defer peerResultSpan.End() defer peerResultSpan.End()
var sourceError *errordetails.SourceError var sourceError *errordetailsv1.SourceError
if pt.sourceErrorStatus != nil { if pt.sourceErrorStatus != nil {
for _, detail := range pt.sourceErrorStatus.Details() { for _, detail := range pt.sourceErrorStatus.Details() {
switch d := detail.(type) { switch d := detail.(type) {
case *errordetails.SourceError: case *errordetailsv1.SourceError:
sourceError = d sourceError = d
} }
} }
} }
peerResult := &scheduler.PeerResult{ peerResult := &schedulerv1.PeerResult{
TaskId: pt.GetTaskID(), TaskId: pt.GetTaskID(),
PeerId: pt.GetPeerID(), PeerId: pt.GetPeerID(),
SrcIp: pt.peerTaskManager.host.Ip, SrcIp: pt.peerTaskManager.host.Ip,
@ -1598,7 +1599,7 @@ func (pt *peerTaskConductor) fail() {
Code: pt.failedCode, Code: pt.failedCode,
} }
if sourceError != nil { if sourceError != nil {
peerResult.ErrorDetail = &scheduler.PeerResult_SourceError{ peerResult.Errordetails = &schedulerv1.PeerResult_SourceError{
SourceError: sourceError, SourceError: sourceError,
} }
} }
@ -1672,7 +1673,7 @@ func (pt *peerTaskConductor) PublishPieceInfo(pieceNum int32, size uint32) {
}) })
} }
func (pt *peerTaskConductor) sendPieceResult(pr *scheduler.PieceResult) error { func (pt *peerTaskConductor) sendPieceResult(pr *schedulerv1.PieceResult) error {
pt.sendPieceResultLock.Lock() pt.sendPieceResultLock.Lock()
err := pt.peerPacketStream.Send(pr) err := pt.peerPacketStream.Send(pr)
pt.sendPieceResultLock.Unlock() pt.sendPieceResultLock.Unlock()
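
Note: the hunks above are a purely mechanical package migration; control flow is unchanged. A minimal sketch of the new import convention, using only the aliases, types, and constants that appear in the diff (module path as shown in the added imports):

package main

import (
	"fmt"

	commonv1 "d7y.io/api/pkg/apis/common/v1"
	schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
)

func main() {
	// The versioned aliases (commonv1, schedulerv1) replace the old
	// unversioned package names (base, scheduler) used throughout the client.
	pp := &schedulerv1.PeerPacket{Code: commonv1.Code_SchedNeedBackSource}
	fmt.Println(pp.Code == commonv1.Code_SchedNeedBackSource) // true
}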

View File

@@ -21,37 +21,38 @@ import (
"google.golang.org/grpc"
+commonv1 "d7y.io/api/pkg/apis/common/v1"
+schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/internal/dferrors"
"d7y.io/dragonfly/v2/pkg/dfnet"
-"d7y.io/dragonfly/v2/pkg/rpc/base"
-"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
)
// when scheduler is not available, use dummySchedulerClient to back source
type dummySchedulerClient struct {
}
-func (d *dummySchedulerClient) RegisterPeerTask(ctx context.Context, request *scheduler.PeerTaskRequest, option ...grpc.CallOption) (*scheduler.RegisterResult, error) {
+func (d *dummySchedulerClient) RegisterPeerTask(ctx context.Context, request *schedulerv1.PeerTaskRequest, option ...grpc.CallOption) (*schedulerv1.RegisterResult, error) {
panic("should not call this function")
}
-func (d *dummySchedulerClient) ReportPieceResult(ctx context.Context, request *scheduler.PeerTaskRequest, option ...grpc.CallOption) (scheduler.Scheduler_ReportPieceResultClient, error) {
+func (d *dummySchedulerClient) ReportPieceResult(ctx context.Context, request *schedulerv1.PeerTaskRequest, option ...grpc.CallOption) (schedulerv1.Scheduler_ReportPieceResultClient, error) {
return &dummyPeerPacketStream{}, nil
}
-func (d *dummySchedulerClient) ReportPeerResult(ctx context.Context, result *scheduler.PeerResult, option ...grpc.CallOption) error {
+func (d *dummySchedulerClient) ReportPeerResult(ctx context.Context, result *schedulerv1.PeerResult, option ...grpc.CallOption) error {
return nil
}
-func (d *dummySchedulerClient) LeaveTask(ctx context.Context, target *scheduler.PeerTarget, option ...grpc.CallOption) error {
+func (d *dummySchedulerClient) LeaveTask(ctx context.Context, target *schedulerv1.PeerTarget, option ...grpc.CallOption) error {
return nil
}
-func (d *dummySchedulerClient) StatTask(ctx context.Context, request *scheduler.StatTaskRequest, option ...grpc.CallOption) (*scheduler.Task, error) {
+func (d *dummySchedulerClient) StatTask(ctx context.Context, request *schedulerv1.StatTaskRequest, option ...grpc.CallOption) (*schedulerv1.Task, error) {
panic("should not call this function")
}
-func (d *dummySchedulerClient) AnnounceTask(ctx context.Context, request *scheduler.AnnounceTaskRequest, option ...grpc.CallOption) error {
+func (d *dummySchedulerClient) AnnounceTask(ctx context.Context, request *schedulerv1.AnnounceTaskRequest, option ...grpc.CallOption) error {
panic("should not call this function")
}
@@ -70,12 +71,12 @@ type dummyPeerPacketStream struct {
grpc.ClientStream
}
-func (d *dummyPeerPacketStream) Recv() (*scheduler.PeerPacket, error) {
-// TODO set base.Code_SchedNeedBackSource in *scheduler.PeerPacket instead of error
-return nil, dferrors.New(base.Code_SchedNeedBackSource, "")
+func (d *dummyPeerPacketStream) Recv() (*schedulerv1.PeerPacket, error) {
+// TODO set commonv1.Code_SchedNeedBackSource in *scheduler.PeerPacket instead of error
+return nil, dferrors.New(commonv1.Code_SchedNeedBackSource, "")
}
-func (d *dummyPeerPacketStream) Send(pr *scheduler.PieceResult) error {
+func (d *dummyPeerPacketStream) Send(pr *schedulerv1.PieceResult) error {
return nil
}
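
The dummy stream signals back-source through the error path rather than through a packet: Recv returns a dferrors.DfError carrying Code_SchedNeedBackSource, which confirmReceivePeerPacketError (in the conductor diff earlier in this commit) turns into markBackSource. A minimal sketch of that decision, assuming only dferrors.New and the code constant shown in the hunks, and that the returned value satisfies error as the diff implies:

package main

import (
	"fmt"

	commonv1 "d7y.io/api/pkg/apis/common/v1"

	"d7y.io/dragonfly/v2/internal/dferrors"
)

func main() {
	var err error = dferrors.New(commonv1.Code_SchedNeedBackSource, "")

	// Mirrors the type assertion in confirmReceivePeerPacketError: a DfError
	// with Code_SchedNeedBackSource means "download from the origin source".
	if de, ok := err.(*dferrors.DfError); ok && de.Code == commonv1.Code_SchedNeedBackSource {
		fmt.Println("scheduler asked the peer to back-source:", de.Code)
	}
}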

View File

@@ -23,18 +23,19 @@ import (
"go.opentelemetry.io/otel/trace"
"golang.org/x/time/rate"
+commonv1 "d7y.io/api/pkg/apis/common/v1"
+schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/metrics"
"d7y.io/dragonfly/v2/client/daemon/storage"
"d7y.io/dragonfly/v2/client/util"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/idgen"
-"d7y.io/dragonfly/v2/pkg/rpc/base"
-"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
)
type FileTaskRequest struct {
-scheduler.PeerTaskRequest
+schedulerv1.PeerTaskRequest
Output string
Limit float64
DisableBackSource bool
@@ -68,7 +69,7 @@ type fileTask struct {
type ProgressState struct {
Success bool
-Code base.Code
+Code commonv1.Code
Msg string
}
@@ -142,7 +143,7 @@ func (f *fileTask) syncProgress() {
pg := &FileTaskProgress{
State: &ProgressState{
Success: true,
-Code: base.Code_Success,
+Code: commonv1.Code_Success,
Msg: "downloading",
},
TaskID: f.peerTaskConductor.GetTaskID(),
@@ -178,7 +179,7 @@ func (f *fileTask) storeToOutput() {
OriginalOffset: f.request.KeepOriginalOffset,
})
if err != nil {
-f.sendFailProgress(base.Code_ClientError, err.Error())
+f.sendFailProgress(commonv1.Code_ClientError, err.Error())
return
}
f.sendSuccessProgress()
@@ -189,7 +190,7 @@ func (f *fileTask) sendSuccessProgress() {
pg := &FileTaskProgress{
State: &ProgressState{
Success: true,
-Code: base.Code_Success,
+Code: commonv1.Code_Success,
Msg: "done",
},
TaskID: f.peerTaskConductor.GetTaskID(),
@@ -223,7 +224,7 @@ func (f *fileTask) sendSuccessProgress() {
}
}
-func (f *fileTask) sendFailProgress(code base.Code, msg string) {
+func (f *fileTask) sendFailProgress(code commonv1.Code, msg string) {
var progressDone bool
pg := &FileTaskProgress{
State: &ProgressState{
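
ProgressState now carries the versioned commonv1.Code. A small sketch of how a consumer might check a progress update; ProgressState is redeclared locally here only so the snippet compiles on its own:

package main

import (
	"fmt"

	commonv1 "d7y.io/api/pkg/apis/common/v1"
)

// ProgressState mirrors the struct in the hunk above; redeclared so the
// sketch stays self-contained.
type ProgressState struct {
	Success bool
	Code    commonv1.Code
	Msg     string
}

func main() {
	st := &ProgressState{Success: true, Code: commonv1.Code_Success, Msg: "done"}
	if st.Success && st.Code == commonv1.Code_Success {
		fmt.Println("file task finished:", st.Msg)
	}
}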

View File

@@ -32,14 +32,15 @@ import (
"golang.org/x/time/rate"
"google.golang.org/grpc/status"
+commonv1 "d7y.io/api/pkg/apis/common/v1"
+schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/metrics"
"d7y.io/dragonfly/v2/client/daemon/storage"
"d7y.io/dragonfly/v2/client/util"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/idgen"
-"d7y.io/dragonfly/v2/pkg/rpc/base"
-"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client"
)
@@ -57,15 +58,15 @@ type TaskManager interface {
StartSeedTask(ctx context.Context, req *SeedTaskRequest) (
seedTaskResult *SeedTaskResponse, reuse bool, err error)
-Subscribe(request *base.PieceTaskRequest) (*SubscribeResponse, bool)
+Subscribe(request *commonv1.PieceTaskRequest) (*SubscribeResponse, bool)
IsPeerTaskRunning(taskID string) (Task, bool)
// StatTask checks whether the given task exists in P2P network
-StatTask(ctx context.Context, taskID string) (*scheduler.Task, error)
+StatTask(ctx context.Context, taskID string) (*schedulerv1.Task, error)
// AnnouncePeerTask announces peer task info to P2P network
-AnnouncePeerTask(ctx context.Context, meta storage.PeerTaskMetadata, url string, taskType base.TaskType, urlMeta *base.UrlMeta) error
+AnnouncePeerTask(ctx context.Context, meta storage.PeerTaskMetadata, url string, taskType commonv1.TaskType, urlMeta *commonv1.UrlMeta) error
GetPieceManager() PieceManager
@@ -119,7 +120,7 @@ func init() {
}
type peerTaskManager struct {
-host *scheduler.PeerHost
+host *schedulerv1.PeerHost
schedulerClient schedulerclient.Client
schedulerOption config.SchedulerOption
pieceManager PieceManager
@@ -143,7 +144,7 @@ type peerTaskManager struct {
}
func NewPeerTaskManager(
-host *scheduler.PeerHost,
+host *schedulerv1.PeerHost,
pieceManager PieceManager,
storageManager storage.Manager,
schedulerClient schedulerclient.Client,
@@ -183,7 +184,7 @@ func (ptm *peerTaskManager) findPeerTaskConductor(taskID string) (*peerTaskCondu
func (ptm *peerTaskManager) getPeerTaskConductor(ctx context.Context,
taskID string,
-request *scheduler.PeerTaskRequest,
+request *schedulerv1.PeerTaskRequest,
limit rate.Limit,
parent *peerTaskConductor,
rg *util.Range,
@@ -206,7 +207,7 @@ func (ptm *peerTaskManager) getPeerTaskConductor(ctx context.Context,
func (ptm *peerTaskManager) getOrCreatePeerTaskConductor(
ctx context.Context,
taskID string,
-request *scheduler.PeerTaskRequest,
+request *schedulerv1.PeerTaskRequest,
limit rate.Limit,
parent *peerTaskConductor,
rg *util.Range,
@@ -238,7 +239,7 @@ func (ptm *peerTaskManager) getOrCreatePeerTaskConductor(
err := ptc.initStorage(desiredLocation)
if err != nil {
ptc.Errorf("init storage error: %s", err)
-ptc.cancel(base.Code_ClientError, err.Error())
+ptc.cancel(commonv1.Code_ClientError, err.Error())
return nil, false, err
}
return ptc, true, nil
@@ -248,15 +249,15 @@ func (ptm *peerTaskManager) enabledPrefetch(rg *util.Range) bool {
return ptm.enablePrefetch && rg != nil
}
-func (ptm *peerTaskManager) prefetchParentTask(request *scheduler.PeerTaskRequest, desiredLocation string) *peerTaskConductor {
-req := &scheduler.PeerTaskRequest{
+func (ptm *peerTaskManager) prefetchParentTask(request *schedulerv1.PeerTaskRequest, desiredLocation string) *peerTaskConductor {
+req := &schedulerv1.PeerTaskRequest{
Url: request.Url,
PeerId: request.PeerId,
PeerHost: ptm.host,
HostLoad: request.HostLoad,
IsMigrating: request.IsMigrating,
Pattern: request.Pattern,
-UrlMeta: &base.UrlMeta{
+UrlMeta: &commonv1.UrlMeta{
Digest: request.UrlMeta.Digest,
Tag: request.UrlMeta.Tag,
Filter: request.UrlMeta.Filter,
@@ -320,7 +321,7 @@ func (ptm *peerTaskManager) StartFileTask(ctx context.Context, req *FileTaskRequ
}
func (ptm *peerTaskManager) StartStreamTask(ctx context.Context, req *StreamTaskRequest) (io.ReadCloser, map[string]string, error) {
-peerTaskRequest := &scheduler.PeerTaskRequest{
+peerTaskRequest := &schedulerv1.PeerTaskRequest{
Url: req.URL,
UrlMeta: req.URLMeta,
PeerId: req.PeerID,
@@ -379,7 +380,7 @@ type SubscribeResponse struct {
FailReason func() error
}
-func (ptm *peerTaskManager) Subscribe(request *base.PieceTaskRequest) (*SubscribeResponse, bool) {
+func (ptm *peerTaskManager) Subscribe(request *commonv1.PieceTaskRequest) (*SubscribeResponse, bool) {
ptc, ok := ptm.findPeerTaskConductor(request.TaskId)
if !ok {
return nil, false
@@ -413,8 +414,8 @@ func (ptm *peerTaskManager) IsPeerTaskRunning(taskID string) (Task, bool) {
return nil, ok
}
-func (ptm *peerTaskManager) StatTask(ctx context.Context, taskID string) (*scheduler.Task, error) {
-req := &scheduler.StatTaskRequest{
+func (ptm *peerTaskManager) StatTask(ctx context.Context, taskID string) (*schedulerv1.Task, error) {
+req := &schedulerv1.StatTaskRequest{
TaskId: taskID,
}
@@ -425,7 +426,7 @@ func (ptm *peerTaskManager) GetPieceManager() PieceManager {
return ptm.pieceManager
}
-func (ptm *peerTaskManager) AnnouncePeerTask(ctx context.Context, meta storage.PeerTaskMetadata, url string, taskType base.TaskType, urlMeta *base.UrlMeta) error {
+func (ptm *peerTaskManager) AnnouncePeerTask(ctx context.Context, meta storage.PeerTaskMetadata, url string, taskType commonv1.TaskType, urlMeta *commonv1.UrlMeta) error {
// Check if the given task is completed in local storageManager.
if ptm.storageManager.FindCompletedTask(meta.TaskID) == nil {
return errors.New("task not found in local storage")
@@ -437,7 +438,7 @@ func (ptm *peerTaskManager) AnnouncePeerTask(ctx context.Context, meta storage.P
return err
}
-piecePacket, err := ptm.storageManager.GetPieces(ctx, &base.PieceTaskRequest{
+piecePacket, err := ptm.storageManager.GetPieces(ctx, &commonv1.PieceTaskRequest{
TaskId: meta.TaskID,
DstPid: meta.PeerID,
StartNum: 0,
@@ -449,7 +450,7 @@ func (ptm *peerTaskManager) AnnouncePeerTask(ctx context.Context, meta storage.P
piecePacket.DstAddr = fmt.Sprintf("%s:%d", ptm.host.Ip, ptm.host.DownPort)
// Announce peer task to scheduler
-if err := ptm.schedulerClient.AnnounceTask(ctx, &scheduler.AnnounceTaskRequest{
+if err := ptm.schedulerClient.AnnounceTask(ctx, &schedulerv1.AnnounceTaskRequest{
TaskId: meta.TaskID,
TaskType: taskType,
Url: url,
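
Request construction is unchanged apart from the package rename. A minimal sketch of building a schedulerv1.PeerTaskRequest with the fields used by StartStreamTask and prefetchParentTask above; the concrete URL, tag, and peer ID values are placeholders for illustration only:

package main

import (
	"fmt"

	commonv1 "d7y.io/api/pkg/apis/common/v1"
	schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
)

func main() {
	// Field names come straight from the hunks above.
	req := &schedulerv1.PeerTaskRequest{
		Url:     "http://localhost/test/data",
		UrlMeta: &commonv1.UrlMeta{Tag: "d7y-test"},
		PeerId:  "example-peer",
	}
	fmt.Println(req.Url, req.UrlMeta.Tag, req.PeerId)
}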

View File

@@ -9,10 +9,10 @@ import (
io "io"
reflect "reflect"
+v1 "d7y.io/api/pkg/apis/common/v1"
+v10 "d7y.io/api/pkg/apis/scheduler/v1"
storage "d7y.io/dragonfly/v2/client/daemon/storage"
dflog "d7y.io/dragonfly/v2/internal/dflog"
-base "d7y.io/dragonfly/v2/pkg/rpc/base"
-scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler"
gomock "github.com/golang/mock/gomock"
status "google.golang.org/grpc/status"
)
@@ -41,7 +41,7 @@ func (m *MockTaskManager) EXPECT() *MockTaskManagerMockRecorder {
}
// AnnouncePeerTask mocks base method.
-func (m *MockTaskManager) AnnouncePeerTask(ctx context.Context, meta storage.PeerTaskMetadata, url string, taskType base.TaskType, urlMeta *base.UrlMeta) error {
+func (m *MockTaskManager) AnnouncePeerTask(ctx context.Context, meta storage.PeerTaskMetadata, url string, taskType v1.TaskType, urlMeta *v1.UrlMeta) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AnnouncePeerTask", ctx, meta, url, taskType, urlMeta)
ret0, _ := ret[0].(error)
@@ -132,10 +132,10 @@ func (mr *MockTaskManagerMockRecorder) StartStreamTask(ctx, req interface{}) *go
}
// StatTask mocks base method.
-func (m *MockTaskManager) StatTask(ctx context.Context, taskID string) (*scheduler.Task, error) {
+func (m *MockTaskManager) StatTask(ctx context.Context, taskID string) (*v10.Task, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StatTask", ctx, taskID)
-ret0, _ := ret[0].(*scheduler.Task)
+ret0, _ := ret[0].(*v10.Task)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -161,7 +161,7 @@ func (mr *MockTaskManagerMockRecorder) Stop(ctx interface{}) *gomock.Call {
}
// Subscribe mocks base method.
-func (m *MockTaskManager) Subscribe(request *base.PieceTaskRequest) (*SubscribeResponse, bool) {
+func (m *MockTaskManager) Subscribe(request *v1.PieceTaskRequest) (*SubscribeResponse, bool) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Subscribe", request)
ret0, _ := ret[0].(*SubscribeResponse)
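
The regenerated mock keeps the standard gomock shape; only the type references moved to the versioned packages. A hypothetical test fragment wiring MockTaskManager, assuming the usual gomock-generated NewMockTaskManager constructor (not shown in the hunk) and that the mock lives in the same package, as the unqualified SubscribeResponse above suggests:

package peer

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
)

func TestMockTaskManagerStatTask(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// NewMockTaskManager is the constructor gomock conventionally generates
	// alongside MockTaskManager (assumed here).
	m := NewMockTaskManager(ctrl)
	m.EXPECT().StatTask(gomock.Any(), "task-id").Return(&schedulerv1.Task{}, nil)

	if _, err := m.StatTask(context.Background(), "task-id"); err != nil {
		t.Fatal(err)
	}
}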

View File

@ -42,6 +42,11 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
commonv1 "d7y.io/api/pkg/apis/common/v1"
dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
schedulerv1mocks "d7y.io/api/pkg/apis/scheduler/v1/mocks"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/storage"
"d7y.io/dragonfly/v2/client/daemon/test" "d7y.io/dragonfly/v2/client/daemon/test"
@ -52,14 +57,10 @@ import (
"d7y.io/dragonfly/v2/pkg/digest" "d7y.io/dragonfly/v2/pkg/digest"
"d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/idgen"
"d7y.io/dragonfly/v2/pkg/rpc" "d7y.io/dragonfly/v2/pkg/rpc"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
daemonserver "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server" daemonserver "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server"
servermocks "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server/mocks" servermocks "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server/mocks"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client" schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client"
mock_scheduler_client "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client/mocks" schedulerclientmocks "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client/mocks"
mock_scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler/mocks"
"d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/source/clients/httpprotocol" "d7y.io/dragonfly/v2/pkg/source/clients/httpprotocol"
sourcemocks "d7y.io/dragonfly/v2/pkg/source/mocks" sourcemocks "d7y.io/dragonfly/v2/pkg/source/mocks"
@ -79,7 +80,7 @@ type componentsOption struct {
sourceClient source.ResourceClient sourceClient source.ResourceClient
peerPacketDelay []time.Duration peerPacketDelay []time.Duration
backSource bool backSource bool
scope base.SizeScope scope commonv1.SizeScope
content []byte content []byte
getPieceTasks bool getPieceTasks bool
} }
@ -97,8 +98,8 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio
pieces[i] = digest.MD5FromReader(io.LimitReader(r, int64(opt.pieceSize))) pieces[i] = digest.MD5FromReader(io.LimitReader(r, int64(opt.pieceSize)))
} }
totalDigests := digest.SHA256FromStrings(pieces...) totalDigests := digest.SHA256FromStrings(pieces...)
genPiecePacket := func(request *base.PieceTaskRequest) *base.PiecePacket { genPiecePacket := func(request *commonv1.PieceTaskRequest) *commonv1.PiecePacket {
var tasks []*base.PieceInfo var tasks []*commonv1.PieceInfo
for i := uint32(0); i < request.Limit; i++ { for i := uint32(0); i < request.Limit; i++ {
start := opt.pieceSize * (request.StartNum + i) start := opt.pieceSize * (request.StartNum + i)
if int64(start)+1 > opt.contentLength { if int64(start)+1 > opt.contentLength {
@ -109,7 +110,7 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio
size = uint32(opt.contentLength) - start size = uint32(opt.contentLength) - start
} }
tasks = append(tasks, tasks = append(tasks,
&base.PieceInfo{ &commonv1.PieceInfo{
PieceNum: int32(request.StartNum + i), PieceNum: int32(request.StartNum + i),
RangeStart: uint64(start), RangeStart: uint64(start),
RangeSize: size, RangeSize: size,
@ -118,7 +119,7 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio
PieceStyle: 0, PieceStyle: 0,
}) })
} }
return &base.PiecePacket{ return &commonv1.PiecePacket{
TaskId: request.TaskId, TaskId: request.TaskId,
DstPid: "peer-x", DstPid: "peer-x",
PieceInfos: tasks, PieceInfos: tasks,
@ -129,18 +130,18 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio
} }
if opt.getPieceTasks { if opt.getPieceTasks {
daemon.EXPECT().GetPieceTasks(gomock.Any(), gomock.Any()).AnyTimes(). daemon.EXPECT().GetPieceTasks(gomock.Any(), gomock.Any()).AnyTimes().
DoAndReturn(func(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error) { DoAndReturn(func(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
return genPiecePacket(request), nil return genPiecePacket(request), nil
}) })
daemon.EXPECT().SyncPieceTasks(gomock.Any()).AnyTimes().DoAndReturn(func(arg0 dfdaemon.Daemon_SyncPieceTasksServer) error { daemon.EXPECT().SyncPieceTasks(gomock.Any()).AnyTimes().DoAndReturn(func(arg0 dfdaemonv1.Daemon_SyncPieceTasksServer) error {
return status.Error(codes.Unimplemented, "TODO") return status.Error(codes.Unimplemented, "TODO")
}) })
} else { } else {
daemon.EXPECT().GetPieceTasks(gomock.Any(), gomock.Any()).AnyTimes(). daemon.EXPECT().GetPieceTasks(gomock.Any(), gomock.Any()).AnyTimes().
DoAndReturn(func(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error) { DoAndReturn(func(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
return nil, status.Error(codes.Unimplemented, "TODO") return nil, status.Error(codes.Unimplemented, "TODO")
}) })
daemon.EXPECT().SyncPieceTasks(gomock.Any()).AnyTimes().DoAndReturn(func(s dfdaemon.Daemon_SyncPieceTasksServer) error { daemon.EXPECT().SyncPieceTasks(gomock.Any()).AnyTimes().DoAndReturn(func(s dfdaemonv1.Daemon_SyncPieceTasksServer) error {
request, err := s.Recv() request, err := s.Recv()
if err != nil { if err != nil {
return err return err
@ -177,9 +178,9 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
// 2. setup a scheduler // 2. setup a scheduler
pps := mock_scheduler.NewMockScheduler_ReportPieceResultClient(ctrl) pps := schedulerv1mocks.NewMockScheduler_ReportPieceResultClient(ctrl)
pps.EXPECT().Send(gomock.Any()).AnyTimes().DoAndReturn( pps.EXPECT().Send(gomock.Any()).AnyTimes().DoAndReturn(
func(pr *scheduler.PieceResult) error { func(pr *schedulerv1.PieceResult) error {
return nil return nil
}) })
var ( var (
@ -188,7 +189,7 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio
) )
sent <- struct{}{} sent <- struct{}{}
pps.EXPECT().Recv().AnyTimes().DoAndReturn( pps.EXPECT().Recv().AnyTimes().DoAndReturn(
func() (*scheduler.PeerPacket, error) { func() (*schedulerv1.PeerPacket, error) {
if len(opt.peerPacketDelay) > delayCount { if len(opt.peerPacketDelay) > delayCount {
if delay := opt.peerPacketDelay[delayCount]; delay > 0 { if delay := opt.peerPacketDelay[delayCount]; delay > 0 {
time.Sleep(delay) time.Sleep(delay)
@ -197,14 +198,14 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio
} }
<-sent <-sent
if opt.backSource { if opt.backSource {
return nil, dferrors.Newf(base.Code_SchedNeedBackSource, "fake back source error") return nil, dferrors.Newf(commonv1.Code_SchedNeedBackSource, "fake back source error")
} }
return &scheduler.PeerPacket{ return &schedulerv1.PeerPacket{
Code: base.Code_Success, Code: commonv1.Code_Success,
TaskId: opt.taskID, TaskId: opt.taskID,
SrcPid: "127.0.0.1", SrcPid: "127.0.0.1",
ParallelCount: opt.pieceParallelCount, ParallelCount: opt.pieceParallelCount,
MainPeer: &scheduler.PeerPacket_DestPeer{ MainPeer: &schedulerv1.PeerPacket_DestPeer{
Ip: "127.0.0.1", Ip: "127.0.0.1",
RpcPort: port, RpcPort: port,
PeerId: "peer-x", PeerId: "peer-x",
@ -214,27 +215,27 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio
}) })
pps.EXPECT().CloseSend().AnyTimes() pps.EXPECT().CloseSend().AnyTimes()
sched := mock_scheduler_client.NewMockClient(ctrl) sched := schedulerclientmocks.NewMockClient(ctrl)
sched.EXPECT().RegisterPeerTask(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( sched.EXPECT().RegisterPeerTask(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, ptr *scheduler.PeerTaskRequest, opts ...grpc.CallOption) (*scheduler.RegisterResult, error) { func(ctx context.Context, ptr *schedulerv1.PeerTaskRequest, opts ...grpc.CallOption) (*schedulerv1.RegisterResult, error) {
switch opt.scope { switch opt.scope {
case base.SizeScope_TINY: case commonv1.SizeScope_TINY:
return &scheduler.RegisterResult{ return &schedulerv1.RegisterResult{
TaskId: opt.taskID, TaskId: opt.taskID,
SizeScope: base.SizeScope_TINY, SizeScope: commonv1.SizeScope_TINY,
DirectPiece: &scheduler.RegisterResult_PieceContent{ DirectPiece: &schedulerv1.RegisterResult_PieceContent{
PieceContent: opt.content, PieceContent: opt.content,
}, },
}, nil }, nil
case base.SizeScope_SMALL: case commonv1.SizeScope_SMALL:
return &scheduler.RegisterResult{ return &schedulerv1.RegisterResult{
TaskId: opt.taskID, TaskId: opt.taskID,
SizeScope: base.SizeScope_SMALL, SizeScope: commonv1.SizeScope_SMALL,
DirectPiece: &scheduler.RegisterResult_SinglePiece{ DirectPiece: &schedulerv1.RegisterResult_SinglePiece{
SinglePiece: &scheduler.SinglePiece{ SinglePiece: &schedulerv1.SinglePiece{
DstPid: "fake-pid", DstPid: "fake-pid",
DstAddr: "fake-addr", DstAddr: "fake-addr",
PieceInfo: &base.PieceInfo{ PieceInfo: &commonv1.PieceInfo{
PieceNum: 0, PieceNum: 0,
RangeStart: 0, RangeStart: 0,
RangeSize: uint32(opt.contentLength), RangeSize: uint32(opt.contentLength),
@ -246,19 +247,19 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio
}, },
}, nil }, nil
} }
return &scheduler.RegisterResult{ return &schedulerv1.RegisterResult{
TaskId: opt.taskID, TaskId: opt.taskID,
SizeScope: base.SizeScope_NORMAL, SizeScope: commonv1.SizeScope_NORMAL,
DirectPiece: nil, DirectPiece: nil,
}, nil }, nil
}) })
sched.EXPECT().ReportPieceResult(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( sched.EXPECT().ReportPieceResult(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, ptr *scheduler.PeerTaskRequest, opts ...grpc.CallOption) ( func(ctx context.Context, ptr *schedulerv1.PeerTaskRequest, opts ...grpc.CallOption) (
scheduler.Scheduler_ReportPieceResultClient, error) { schedulerv1.Scheduler_ReportPieceResultClient, error) {
return pps, nil return pps, nil
}) })
sched.EXPECT().ReportPeerResult(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( sched.EXPECT().ReportPeerResult(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, pr *scheduler.PeerResult, opts ...grpc.CallOption) error { func(ctx context.Context, pr *schedulerv1.PeerResult, opts ...grpc.CallOption) error {
return nil return nil
}) })
tempDir, _ := os.MkdirTemp("", "d7y-test-*") tempDir, _ := os.MkdirTemp("", "d7y-test-*")
@ -295,7 +296,7 @@ func setupMockManager(ctrl *gomock.Controller, ts *testSpec, opt componentsOptio
} }
ptm := &peerTaskManager{ ptm := &peerTaskManager{
calculateDigest: true, calculateDigest: true,
host: &scheduler.PeerHost{ host: &schedulerv1.PeerHost{
Ip: "127.0.0.1", Ip: "127.0.0.1",
}, },
conductorLock: &sync.Mutex{}, conductorLock: &sync.Mutex{},
@ -336,7 +337,7 @@ type testSpec struct {
httpRange *util.Range // only used in back source cases httpRange *util.Range // only used in back source cases
pieceParallelCount int32 pieceParallelCount int32
pieceSize int pieceSize int
sizeScope base.SizeScope sizeScope commonv1.SizeScope
peerID string peerID string
url string url string
legacyFeature bool legacyFeature bool
@ -386,7 +387,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) {
pieceSize: 1024, pieceSize: 1024,
peerID: "normal-size-peer", peerID: "normal-size-peer",
url: "http://localhost/test/data", url: "http://localhost/test/data",
sizeScope: base.SizeScope_NORMAL, sizeScope: commonv1.SizeScope_NORMAL,
mockPieceDownloader: commonPieceDownloader, mockPieceDownloader: commonPieceDownloader,
mockHTTPSourceClient: nil, mockHTTPSourceClient: nil,
}, },
@ -397,7 +398,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) {
pieceSize: 16384, pieceSize: 16384,
peerID: "small-size-peer", peerID: "small-size-peer",
url: "http://localhost/test/data", url: "http://localhost/test/data",
sizeScope: base.SizeScope_SMALL, sizeScope: commonv1.SizeScope_SMALL,
mockPieceDownloader: commonPieceDownloader, mockPieceDownloader: commonPieceDownloader,
mockHTTPSourceClient: nil, mockHTTPSourceClient: nil,
}, },
@ -408,7 +409,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) {
pieceSize: 1024, pieceSize: 1024,
peerID: "tiny-size-peer", peerID: "tiny-size-peer",
url: "http://localhost/test/data", url: "http://localhost/test/data",
sizeScope: base.SizeScope_TINY, sizeScope: commonv1.SizeScope_TINY,
mockPieceDownloader: nil, mockPieceDownloader: nil,
mockHTTPSourceClient: nil, mockHTTPSourceClient: nil,
}, },
@ -421,7 +422,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) {
peerID: "normal-size-peer-back-source", peerID: "normal-size-peer-back-source",
backSource: true, backSource: true,
url: "http://localhost/test/data", url: "http://localhost/test/data",
sizeScope: base.SizeScope_NORMAL, sizeScope: commonv1.SizeScope_NORMAL,
mockPieceDownloader: nil, mockPieceDownloader: nil,
mockHTTPSourceClient: func(t *testing.T, ctrl *gomock.Controller, rg *util.Range, taskData []byte, url string) source.ResourceClient { mockHTTPSourceClient: func(t *testing.T, ctrl *gomock.Controller, rg *util.Range, taskData []byte, url string) source.ResourceClient {
sourceClient := sourcemocks.NewMockResourceClient(ctrl) sourceClient := sourcemocks.NewMockResourceClient(ctrl)
@ -445,7 +446,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) {
peerID: "normal-size-peer-range-back-source", peerID: "normal-size-peer-range-back-source",
backSource: true, backSource: true,
url: "http://localhost/test/data", url: "http://localhost/test/data",
sizeScope: base.SizeScope_NORMAL, sizeScope: commonv1.SizeScope_NORMAL,
httpRange: &util.Range{ httpRange: &util.Range{
Start: 0, Start: 0,
Length: 4096, Length: 4096,
@ -487,7 +488,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) {
peerID: "normal-size-peer-back-source-no-length", peerID: "normal-size-peer-back-source-no-length",
backSource: true, backSource: true,
url: "http://localhost/test/data", url: "http://localhost/test/data",
sizeScope: base.SizeScope_NORMAL, sizeScope: commonv1.SizeScope_NORMAL,
mockPieceDownloader: nil, mockPieceDownloader: nil,
mockHTTPSourceClient: func(t *testing.T, ctrl *gomock.Controller, rg *util.Range, taskData []byte, url string) source.ResourceClient { mockHTTPSourceClient: func(t *testing.T, ctrl *gomock.Controller, rg *util.Range, taskData []byte, url string) source.ResourceClient {
sourceClient := sourcemocks.NewMockResourceClient(ctrl) sourceClient := sourcemocks.NewMockResourceClient(ctrl)
@ -511,7 +512,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) {
peerID: "normal-size-peer-back-source-aligning-no-length", peerID: "normal-size-peer-back-source-aligning-no-length",
backSource: true, backSource: true,
url: "http://localhost/test/data", url: "http://localhost/test/data",
sizeScope: base.SizeScope_NORMAL, sizeScope: commonv1.SizeScope_NORMAL,
mockPieceDownloader: nil, mockPieceDownloader: nil,
mockHTTPSourceClient: func(t *testing.T, ctrl *gomock.Controller, rg *util.Range, taskData []byte, url string) source.ResourceClient { mockHTTPSourceClient: func(t *testing.T, ctrl *gomock.Controller, rg *util.Range, taskData []byte, url string) source.ResourceClient {
sourceClient := sourcemocks.NewMockResourceClient(ctrl) sourceClient := sourcemocks.NewMockResourceClient(ctrl)
@ -546,7 +547,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) {
}) })
return server.URL return server.URL
}, },
sizeScope: base.SizeScope_NORMAL, sizeScope: commonv1.SizeScope_NORMAL,
mockPieceDownloader: nil, mockPieceDownloader: nil,
mockHTTPSourceClient: nil, mockHTTPSourceClient: nil,
}, },
@ -573,7 +574,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) {
defer ctrl.Finish() defer ctrl.Finish()
mockContentLength := len(tc.taskData) mockContentLength := len(tc.taskData)
urlMeta := &base.UrlMeta{ urlMeta := &commonv1.UrlMeta{
Tag: "d7y-test", Tag: "d7y-test",
} }
@ -636,7 +637,7 @@ func TestPeerTaskManager_TaskSuite(t *testing.T) {
} }
} }
func (ts *testSpec) run(assert *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *base.UrlMeta) { func (ts *testSpec) run(assert *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *commonv1.UrlMeta) {
switch ts.taskType { switch ts.taskType {
case taskTypeFile: case taskTypeFile:
ts.runFileTaskTest(assert, require, mm, urlMeta) ts.runFileTaskTest(assert, require, mm, urlMeta)
@ -651,7 +652,7 @@ func (ts *testSpec) run(assert *testifyassert.Assertions, require *testifyrequir
} }
} }
func (ts *testSpec) runFileTaskTest(assert *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *base.UrlMeta) { func (ts *testSpec) runFileTaskTest(assert *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *commonv1.UrlMeta) {
var output = "../test/testdata/test.output" var output = "../test/testdata/test.output"
defer func() { defer func() {
assert.Nil(os.Remove(output)) assert.Nil(os.Remove(output))
@ -659,11 +660,11 @@ func (ts *testSpec) runFileTaskTest(assert *testifyassert.Assertions, require *t
progress, _, err := mm.peerTaskManager.StartFileTask( progress, _, err := mm.peerTaskManager.StartFileTask(
context.Background(), context.Background(),
&FileTaskRequest{ &FileTaskRequest{
PeerTaskRequest: scheduler.PeerTaskRequest{ PeerTaskRequest: schedulerv1.PeerTaskRequest{
Url: ts.url, Url: ts.url,
UrlMeta: urlMeta, UrlMeta: urlMeta,
PeerId: ts.peerID, PeerId: ts.peerID,
PeerHost: &scheduler.PeerHost{}, PeerHost: &schedulerv1.PeerHost{},
}, },
Output: output, Output: output,
}) })
@ -685,7 +686,7 @@ func (ts *testSpec) runFileTaskTest(assert *testifyassert.Assertions, require *t
require.Equal(ts.taskData, outputBytes, "output and desired output must match") require.Equal(ts.taskData, outputBytes, "output and desired output must match")
} }
func (ts *testSpec) runStreamTaskTest(_ *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *base.UrlMeta) { func (ts *testSpec) runStreamTaskTest(_ *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *commonv1.UrlMeta) {
r, _, err := mm.peerTaskManager.StartStreamTask( r, _, err := mm.peerTaskManager.StartStreamTask(
context.Background(), context.Background(),
&StreamTaskRequest{ &StreamTaskRequest{
@ -700,15 +701,15 @@ func (ts *testSpec) runStreamTaskTest(_ *testifyassert.Assertions, require *test
require.Equal(ts.taskData, outputBytes, "output and desired output must match") require.Equal(ts.taskData, outputBytes, "output and desired output must match")
} }
func (ts *testSpec) runSeedTaskTest(_ *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *base.UrlMeta) { func (ts *testSpec) runSeedTaskTest(_ *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *commonv1.UrlMeta) {
r, _, err := mm.peerTaskManager.StartSeedTask( r, _, err := mm.peerTaskManager.StartSeedTask(
context.Background(), context.Background(),
&SeedTaskRequest{ &SeedTaskRequest{
PeerTaskRequest: scheduler.PeerTaskRequest{ PeerTaskRequest: schedulerv1.PeerTaskRequest{
Url: ts.url, Url: ts.url,
UrlMeta: urlMeta, UrlMeta: urlMeta,
PeerId: ts.peerID, PeerId: ts.peerID,
PeerHost: &scheduler.PeerHost{}, PeerHost: &schedulerv1.PeerHost{},
HostLoad: nil, HostLoad: nil,
IsMigrating: false, IsMigrating: false,
}, },
@ -746,7 +747,7 @@ loop:
require.True(success, "seed task should success") require.True(success, "seed task should success")
} }
func (ts *testSpec) runConductorTest(assert *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *base.UrlMeta) { func (ts *testSpec) runConductorTest(assert *testifyassert.Assertions, require *testifyrequire.Assertions, mm *mockManager, urlMeta *commonv1.UrlMeta) {
var ( var (
ptm = mm.peerTaskManager ptm = mm.peerTaskManager
pieceSize = ts.pieceSize pieceSize = ts.pieceSize
@ -757,11 +758,11 @@ func (ts *testSpec) runConductorTest(assert *testifyassert.Assertions, require *
assert.Nil(os.Remove(output)) assert.Nil(os.Remove(output))
}() }()
peerTaskRequest := &scheduler.PeerTaskRequest{ peerTaskRequest := &schedulerv1.PeerTaskRequest{
Url: ts.url, Url: ts.url,
UrlMeta: urlMeta, UrlMeta: urlMeta,
PeerId: ts.peerID, PeerId: ts.peerID,
PeerHost: &scheduler.PeerHost{}, PeerHost: &schedulerv1.PeerHost{},
} }
ptc, created, err := ptm.getOrCreatePeerTaskConductor( ptc, created, err := ptm.getOrCreatePeerTaskConductor(
@ -803,11 +804,11 @@ func (ts *testSpec) runConductorTest(assert *testifyassert.Assertions, require *
} }
for i := 0; i < ptcCount; i++ { for i := 0; i < ptcCount; i++ {
request := &scheduler.PeerTaskRequest{ request := &schedulerv1.PeerTaskRequest{
Url: ts.url, Url: ts.url,
UrlMeta: urlMeta, UrlMeta: urlMeta,
PeerId: fmt.Sprintf("should-not-use-peer-%d", i), PeerId: fmt.Sprintf("should-not-use-peer-%d", i),
PeerHost: &scheduler.PeerHost{}, PeerHost: &schedulerv1.PeerHost{},
} }
p, created, err := ptm.getOrCreatePeerTaskConductor( p, created, err := ptm.getOrCreatePeerTaskConductor(
context.Background(), taskID, request, rate.Limit(pieceSize*3), nil, nil, "", false) context.Background(), taskID, request, rate.Limit(pieceSize*3), nil, nil, "", false)
@ -820,9 +821,9 @@ func (ts *testSpec) runConductorTest(assert *testifyassert.Assertions, require *
require.Nil(ptc.start(), "peerTaskConductor start should be ok") require.Nil(ptc.start(), "peerTaskConductor start should be ok")
switch ts.sizeScope { switch ts.sizeScope {
case base.SizeScope_TINY: case commonv1.SizeScope_TINY:
require.NotNil(ptc.tinyData) require.NotNil(ptc.tinyData)
case base.SizeScope_SMALL: case commonv1.SizeScope_SMALL:
require.NotNil(ptc.singlePiece) require.NotNil(ptc.singlePiece)
} }
@ -880,11 +881,11 @@ func (ts *testSpec) runConductorTest(assert *testifyassert.Assertions, require *
progress, ok := ptm.tryReuseFilePeerTask( progress, ok := ptm.tryReuseFilePeerTask(
context.Background(), context.Background(),
&FileTaskRequest{ &FileTaskRequest{
PeerTaskRequest: scheduler.PeerTaskRequest{ PeerTaskRequest: schedulerv1.PeerTaskRequest{
Url: ts.url, Url: ts.url,
UrlMeta: urlMeta, UrlMeta: urlMeta,
PeerId: ts.peerID, PeerId: ts.peerID,
PeerHost: &scheduler.PeerHost{}, PeerHost: &schedulerv1.PeerHost{},
}, },
Output: output, Output: output,
}) })
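
Note: across this commit the request literals change only their package aliases (base -> commonv1, scheduler -> schedulerv1); the field sets stay identical. A minimal sketch of the post-migration construction, assuming the d7y.io/api module is available (aliases as imported in the hunks above):

package main

import (
	"fmt"

	commonv1 "d7y.io/api/pkg/apis/common/v1"
	schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
)

func main() {
	// same shape as the test requests above, only the aliases changed
	req := &schedulerv1.PeerTaskRequest{
		Url:      "http://example.com/1",
		UrlMeta:  &commonv1.UrlMeta{Tag: "d7y-test"},
		PeerId:   "peer-0",
		PeerHost: &schedulerv1.PeerHost{},
	}
	fmt.Println(req.Url, req.UrlMeta.Tag)
}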

View File

@ -25,12 +25,13 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
commonv1 "d7y.io/api/pkg/apis/common/v1"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/internal/dferrors" "d7y.io/dragonfly/v2/internal/dferrors"
"d7y.io/dragonfly/v2/pkg/retry" "d7y.io/dragonfly/v2/pkg/retry"
"d7y.io/dragonfly/v2/pkg/rpc/base"
dfclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client" dfclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
) )
type pieceTaskPoller struct { type pieceTaskPoller struct {
@ -39,14 +40,14 @@ type pieceTaskPoller struct {
getPiecesMaxRetry int getPiecesMaxRetry int
} }
func (poller *pieceTaskPoller) preparePieceTasks(request *base.PieceTaskRequest) (pp *base.PiecePacket, err error) { func (poller *pieceTaskPoller) preparePieceTasks(request *commonv1.PieceTaskRequest) (pp *commonv1.PiecePacket, err error) {
ptc := poller.peerTaskConductor ptc := poller.peerTaskConductor
defer ptc.recoverFromPanic() defer ptc.recoverFromPanic()
var retryCount int var retryCount int
prepare: prepare:
retryCount++ retryCount++
poller.peerTaskConductor.Debugf("prepare piece tasks, retry count: %d", retryCount) poller.peerTaskConductor.Debugf("prepare piece tasks, retry count: %d", retryCount)
peerPacket := ptc.peerPacket.Load().(*scheduler.PeerPacket) peerPacket := ptc.peerPacket.Load().(*schedulerv1.PeerPacket)
if poller.peerTaskConductor.needBackSource.Load() { if poller.peerTaskConductor.needBackSource.Load() {
return nil, fmt.Errorf("need back source") return nil, fmt.Errorf("need back source")
@ -74,8 +75,8 @@ prepare:
} }
func (poller *pieceTaskPoller) preparePieceTasksByPeer( func (poller *pieceTaskPoller) preparePieceTasksByPeer(
curPeerPacket *scheduler.PeerPacket, curPeerPacket *schedulerv1.PeerPacket,
peer *scheduler.PeerPacket_DestPeer, request *base.PieceTaskRequest) (*base.PiecePacket, error) { peer *schedulerv1.PeerPacket_DestPeer, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
ptc := poller.peerTaskConductor ptc := poller.peerTaskConductor
if peer == nil { if peer == nil {
return nil, fmt.Errorf("empty peer") return nil, fmt.Errorf("empty peer")
@ -89,7 +90,7 @@ func (poller *pieceTaskPoller) preparePieceTasksByPeer(
defer span.End() defer span.End()
var maxRetries = 60 var maxRetries = 60
// when cdn returns base.Code_CDNTaskNotFound, report it to the scheduler and wait for cdn to download it. // when cdn returns commonv1.Code_CDNTaskNotFound, report it to the scheduler and wait for cdn to download it.
retry: retry:
ptc.Debugf("try get piece task from peer %s, piece num: %d, limit: %d\"", peer.PeerId, request.StartNum, request.Limit) ptc.Debugf("try get piece task from peer %s, piece num: %d, limit: %d\"", peer.PeerId, request.StartNum, request.Limit)
p, err := poller.getPieceTasksByPeer(span, curPeerPacket, peer, request) p, err := poller.getPieceTasksByPeer(span, curPeerPacket, peer, request)
@ -115,18 +116,18 @@ retry:
return nil, err return nil, err
} }
} }
code := base.Code_ClientPieceRequestFail code := commonv1.Code_ClientPieceRequestFail
// not a grpc error // not a grpc error
if de, ok := err.(*dferrors.DfError); ok && uint32(de.Code) > uint32(codes.Unauthenticated) { if de, ok := err.(*dferrors.DfError); ok && uint32(de.Code) > uint32(codes.Unauthenticated) {
ptc.Debugf("get piece task from peer %s with df error, code: %d", peer.PeerId, de.Code) ptc.Debugf("get piece task from peer %s with df error, code: %d", peer.PeerId, de.Code)
code = de.Code code = de.Code
} }
ptc.Errorf("get piece task from peer %s error: %s, code: %d", peer.PeerId, err, code) ptc.Errorf("get piece task from peer %s error: %s, code: %d", peer.PeerId, err, code)
sendError := ptc.sendPieceResult(&scheduler.PieceResult{ sendError := ptc.sendPieceResult(&schedulerv1.PieceResult{
TaskId: ptc.taskID, TaskId: ptc.taskID,
SrcPid: ptc.peerID, SrcPid: ptc.peerID,
DstPid: peer.PeerId, DstPid: peer.PeerId,
PieceInfo: &base.PieceInfo{}, PieceInfo: &commonv1.PieceInfo{},
Success: false, Success: false,
Code: code, Code: code,
HostLoad: nil, HostLoad: nil,
@ -134,14 +135,14 @@ retry:
}) })
// the error code should be sent to the scheduler so that it can schedule a new peer // the error code should be sent to the scheduler so that it can schedule a new peer
if sendError != nil { if sendError != nil {
ptc.cancel(base.Code_SchedError, sendError.Error()) ptc.cancel(commonv1.Code_SchedError, sendError.Error())
span.RecordError(sendError) span.RecordError(sendError)
ptc.Errorf("send piece result error: %s, code to send: %d", sendError, code) ptc.Errorf("send piece result error: %s, code to send: %d", sendError, code)
return nil, sendError return nil, sendError
} }
// currently cdn does not notify the scheduler before gc-ing tasks, so when cdn complains Code_CDNTaskNotFound, retry // currently cdn does not notify the scheduler before gc-ing tasks, so when cdn complains Code_CDNTaskNotFound, retry
if maxRetries > 0 && code == base.Code_CDNTaskNotFound && curPeerPacket == ptc.peerPacket.Load().(*scheduler.PeerPacket) { if maxRetries > 0 && code == commonv1.Code_CDNTaskNotFound && curPeerPacket == ptc.peerPacket.Load().(*schedulerv1.PeerPacket) {
span.AddEvent("retry for CdnTaskNotFound") span.AddEvent("retry for CdnTaskNotFound")
time.Sleep(time.Second) time.Sleep(time.Second)
maxRetries-- maxRetries--
@ -152,9 +153,9 @@ retry:
func (poller *pieceTaskPoller) getPieceTasksByPeer( func (poller *pieceTaskPoller) getPieceTasksByPeer(
span trace.Span, span trace.Span,
curPeerPacket *scheduler.PeerPacket, curPeerPacket *schedulerv1.PeerPacket,
peer *scheduler.PeerPacket_DestPeer, peer *schedulerv1.PeerPacket_DestPeer,
request *base.PieceTaskRequest) (*base.PiecePacket, error) { request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
var ( var (
peerPacketChanged bool peerPacketChanged bool
count int count int
@ -174,7 +175,7 @@ func (poller *pieceTaskPoller) getPieceTasksByPeer(
if de, ok := getError.(*dferrors.DfError); ok { if de, ok := getError.(*dferrors.DfError); ok {
ptc.Debugf("get piece task with grpc error, code: %d", de.Code) ptc.Debugf("get piece task with grpc error, code: %d", de.Code)
// bad request, like invalid piece num, just exit // bad request, like invalid piece num, just exit
if de.Code == base.Code_BadRequest { if de.Code == commonv1.Code_BadRequest {
span.AddEvent("bad request") span.AddEvent("bad request")
ptc.Warnf("get piece task from peer %s canceled: %s", peer.PeerId, getError) ptc.Warnf("get piece task from peer %s canceled: %s", peer.PeerId, getError)
return nil, true, getError return nil, true, getError
@ -182,7 +183,7 @@ func (poller *pieceTaskPoller) getPieceTasksByPeer(
} }
// fast way 2 to exit retry // fast way 2 to exit retry
lastPeerPacket := ptc.peerPacket.Load().(*scheduler.PeerPacket) lastPeerPacket := ptc.peerPacket.Load().(*schedulerv1.PeerPacket)
if curPeerPacket.CandidatePeers[0].PeerId != lastPeerPacket.CandidatePeers[0].PeerId { if curPeerPacket.CandidatePeers[0].PeerId != lastPeerPacket.CandidatePeers[0].PeerId {
ptc.Warnf("get piece tasks with error: %s, but peer packet changed, switch to new peer packet, current destPeer %s, new destPeer %s", getError, ptc.Warnf("get piece tasks with error: %s, but peer packet changed, switch to new peer packet, current destPeer %s, new destPeer %s", getError,
curPeerPacket.CandidatePeers[0].PeerId, lastPeerPacket.CandidatePeers[0].PeerId) curPeerPacket.CandidatePeers[0].PeerId, lastPeerPacket.CandidatePeers[0].PeerId)
@ -206,24 +207,24 @@ func (poller *pieceTaskPoller) getPieceTasksByPeer(
} }
// by santong: when the peer returns empty, retry later // by santong: when the peer returns empty, retry later
sendError := ptc.sendPieceResult(&scheduler.PieceResult{ sendError := ptc.sendPieceResult(&schedulerv1.PieceResult{
TaskId: ptc.taskID, TaskId: ptc.taskID,
SrcPid: ptc.peerID, SrcPid: ptc.peerID,
DstPid: peer.PeerId, DstPid: peer.PeerId,
PieceInfo: &base.PieceInfo{}, PieceInfo: &commonv1.PieceInfo{},
Success: false, Success: false,
Code: base.Code_ClientWaitPieceReady, Code: commonv1.Code_ClientWaitPieceReady,
HostLoad: nil, HostLoad: nil,
FinishedCount: ptc.readyPieces.Settled(), FinishedCount: ptc.readyPieces.Settled(),
}) })
if sendError != nil { if sendError != nil {
ptc.cancel(base.Code_ClientPieceRequestFail, sendError.Error()) ptc.cancel(commonv1.Code_ClientPieceRequestFail, sendError.Error())
span.RecordError(sendError) span.RecordError(sendError)
ptc.Errorf("send piece result with base.Code_ClientWaitPieceReady error: %s", sendError) ptc.Errorf("send piece result with commonv1.Code_ClientWaitPieceReady error: %s", sendError)
return nil, true, sendError return nil, true, sendError
} }
// fast way to exit retry // fast way to exit retry
lastPeerPacket := ptc.peerPacket.Load().(*scheduler.PeerPacket) lastPeerPacket := ptc.peerPacket.Load().(*schedulerv1.PeerPacket)
if curPeerPacket.CandidatePeers[0].PeerId != lastPeerPacket.CandidatePeers[0].PeerId { if curPeerPacket.CandidatePeers[0].PeerId != lastPeerPacket.CandidatePeers[0].PeerId {
ptc.Warnf("get empty pieces and peer packet changed, switch to new peer packet, current destPeer %s, new destPeer %s", ptc.Warnf("get empty pieces and peer packet changed, switch to new peer packet, current destPeer %s, new destPeer %s",
curPeerPacket.CandidatePeers[0].PeerId, lastPeerPacket.CandidatePeers[0].PeerId) curPeerPacket.CandidatePeers[0].PeerId, lastPeerPacket.CandidatePeers[0].PeerId)
@ -241,7 +242,7 @@ func (poller *pieceTaskPoller) getPieceTasksByPeer(
} }
if err == nil { if err == nil {
return p.(*base.PiecePacket), nil return p.(*commonv1.PiecePacket), nil
} }
return nil, err return nil, err
} }
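
Note: preparePieceTasks and preparePieceTasksByPeer above drive their retries with function-scope labels (prepare:, retry:) and goto rather than a for loop. A dependency-free sketch of that control flow, with the piece-task RPC replaced by a stub:

package main

import (
	"errors"
	"fmt"
)

var errNotReady = errors.New("peer not ready")

// callPeer stands in for the real piece-task RPC.
func callPeer(attempt int) (string, error) {
	if attempt < 3 {
		return "", errNotReady
	}
	return "piece packet", nil
}

func main() {
	var retryCount int
prepare:
	retryCount++
	result, err := callPeer(retryCount)
	if err != nil && retryCount < 5 {
		// same shape as the poller: log, then jump back to the label
		goto prepare
	}
	fmt.Println(result, err, "retries:", retryCount)
}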

View File

@ -29,12 +29,13 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
commonv1 "d7y.io/api/pkg/apis/common/v1"
dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
dfclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client" dfclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
) )
type pieceTaskSyncManager struct { type pieceTaskSyncManager struct {
@ -50,8 +51,8 @@ type pieceTaskSyncManager struct {
type pieceTaskSynchronizer struct { type pieceTaskSynchronizer struct {
*logger.SugaredLoggerOnWith *logger.SugaredLoggerOnWith
span trace.Span span trace.Span
client dfdaemon.Daemon_SyncPieceTasksClient client dfdaemonv1.Daemon_SyncPieceTasksClient
dstPeer *scheduler.PeerPacket_DestPeer dstPeer *schedulerv1.PeerPacket_DestPeer
error atomic.Value error atomic.Value
peerTaskConductor *peerTaskConductor peerTaskConductor *peerTaskConductor
pieceRequestCh chan *DownloadPieceRequest pieceRequestCh chan *DownloadPieceRequest
@ -59,7 +60,7 @@ type pieceTaskSynchronizer struct {
type synchronizerWatchdog struct { type synchronizerWatchdog struct {
done chan struct{} done chan struct{}
mainPeer atomic.Value // save *scheduler.PeerPacket_DestPeer mainPeer atomic.Value // save *schedulerv1.PeerPacket_DestPeer
syncSuccess *atomic.Bool syncSuccess *atomic.Bool
peerTaskConductor *peerTaskConductor peerTaskConductor *peerTaskConductor
} }
@ -69,7 +70,7 @@ type pieceTaskSynchronizerError struct {
} }
// FIXME for compatibility, sync will be called after dfclient.GetPieceTasks is deprecated and the pieceTaskPoller is removed // FIXME for compatibility, sync will be called after dfclient.GetPieceTasks is deprecated and the pieceTaskPoller is removed
func (s *pieceTaskSyncManager) sync(pp *scheduler.PeerPacket, desiredPiece int32) error { func (s *pieceTaskSyncManager) sync(pp *schedulerv1.PeerPacket, desiredPiece int32) error {
var ( var (
peers = map[string]bool{} peers = map[string]bool{}
errs []error errs []error
@ -116,7 +117,7 @@ func (s *pieceTaskSyncManager) sync(pp *scheduler.PeerPacket, desiredPiece int32
return nil return nil
} }
func (s *pieceTaskSyncManager) cleanStaleWorker(destPeers []*scheduler.PeerPacket_DestPeer) { func (s *pieceTaskSyncManager) cleanStaleWorker(destPeers []*schedulerv1.PeerPacket_DestPeer) {
var ( var (
peers = map[string]bool{} peers = map[string]bool{}
) )
@ -141,9 +142,9 @@ func (s *pieceTaskSyncManager) cleanStaleWorker(destPeers []*scheduler.PeerPacke
func (s *pieceTaskSyncManager) newPieceTaskSynchronizer( func (s *pieceTaskSyncManager) newPieceTaskSynchronizer(
ctx context.Context, ctx context.Context,
dstPeer *scheduler.PeerPacket_DestPeer, dstPeer *schedulerv1.PeerPacket_DestPeer,
desiredPiece int32) error { desiredPiece int32) error {
request := &base.PieceTaskRequest{ request := &commonv1.PieceTaskRequest{
TaskId: s.peerTaskConductor.taskID, TaskId: s.peerTaskConductor.taskID,
SrcPid: s.peerTaskConductor.peerID, SrcPid: s.peerTaskConductor.peerID,
DstPid: dstPeer.PeerId, DstPid: dstPeer.PeerId,
@ -197,8 +198,8 @@ func (s *pieceTaskSyncManager) newPieceTaskSynchronizer(
} }
func (s *pieceTaskSyncManager) newMultiPieceTaskSynchronizer( func (s *pieceTaskSyncManager) newMultiPieceTaskSynchronizer(
destPeers []*scheduler.PeerPacket_DestPeer, destPeers []*schedulerv1.PeerPacket_DestPeer,
desiredPiece int32) (legacyPeers []*scheduler.PeerPacket_DestPeer) { desiredPiece int32) (legacyPeers []*schedulerv1.PeerPacket_DestPeer) {
s.Lock() s.Lock()
defer func() { defer func() {
if s.peerTaskConductor.ptm.watchdogTimeout > 0 { if s.peerTaskConductor.ptm.watchdogTimeout > 0 {
@ -225,11 +226,11 @@ func (s *pieceTaskSyncManager) newMultiPieceTaskSynchronizer(
// other errors, report to scheduler // other errors, report to scheduler
if errors.Is(err, context.DeadlineExceeded) { if errors.Is(err, context.DeadlineExceeded) {
// connect timeout error, report to scheduler to get more available peers // connect timeout error, report to scheduler to get more available peers
s.reportInvalidPeer(peer, base.Code_ClientConnectionError) s.reportInvalidPeer(peer, commonv1.Code_ClientConnectionError)
s.peerTaskConductor.Infof("connect to peer %s with error: %s, peer is invalid, skip legacy grpc", peer.PeerId, err) s.peerTaskConductor.Infof("connect to peer %s with error: %s, peer is invalid, skip legacy grpc", peer.PeerId, err)
} else { } else {
// other errors, report to scheduler to get more available peers // other errors, report to scheduler to get more available peers
s.reportInvalidPeer(peer, base.Code_ClientPieceRequestFail) s.reportInvalidPeer(peer, commonv1.Code_ClientPieceRequestFail)
s.peerTaskConductor.Errorf("connect peer %s error: %s, not codes.Unimplemented", peer.PeerId, err) s.peerTaskConductor.Errorf("connect peer %s error: %s, not codes.Unimplemented", peer.PeerId, err)
} }
} }
@ -237,7 +238,7 @@ func (s *pieceTaskSyncManager) newMultiPieceTaskSynchronizer(
return legacyPeers return legacyPeers
} }
func (s *pieceTaskSyncManager) resetWatchdog(mainPeer *scheduler.PeerPacket_DestPeer) { func (s *pieceTaskSyncManager) resetWatchdog(mainPeer *schedulerv1.PeerPacket_DestPeer) {
if s.watchdog != nil { if s.watchdog != nil {
close(s.watchdog.done) close(s.watchdog.done)
s.peerTaskConductor.Debugf("close old watchdog") s.peerTaskConductor.Debugf("close old watchdog")
@ -253,12 +254,12 @@ func (s *pieceTaskSyncManager) resetWatchdog(mainPeer *scheduler.PeerPacket_Dest
go s.watchdog.watch(s.peerTaskConductor.ptm.watchdogTimeout) go s.watchdog.watch(s.peerTaskConductor.ptm.watchdogTimeout)
} }
func compositePieceResult(peerTaskConductor *peerTaskConductor, destPeer *scheduler.PeerPacket_DestPeer, code base.Code) *scheduler.PieceResult { func compositePieceResult(peerTaskConductor *peerTaskConductor, destPeer *schedulerv1.PeerPacket_DestPeer, code commonv1.Code) *schedulerv1.PieceResult {
return &scheduler.PieceResult{ return &schedulerv1.PieceResult{
TaskId: peerTaskConductor.taskID, TaskId: peerTaskConductor.taskID,
SrcPid: peerTaskConductor.peerID, SrcPid: peerTaskConductor.peerID,
DstPid: destPeer.PeerId, DstPid: destPeer.PeerId,
PieceInfo: &base.PieceInfo{}, PieceInfo: &commonv1.PieceInfo{},
Success: false, Success: false,
Code: code, Code: code,
HostLoad: nil, HostLoad: nil,
@ -266,18 +267,18 @@ func compositePieceResult(peerTaskConductor *peerTaskConductor, destPeer *schedu
} }
} }
func (s *pieceTaskSyncManager) reportInvalidPeer(destPeer *scheduler.PeerPacket_DestPeer, code base.Code) { func (s *pieceTaskSyncManager) reportInvalidPeer(destPeer *schedulerv1.PeerPacket_DestPeer, code commonv1.Code) {
sendError := s.peerTaskConductor.sendPieceResult(compositePieceResult(s.peerTaskConductor, destPeer, code)) sendError := s.peerTaskConductor.sendPieceResult(compositePieceResult(s.peerTaskConductor, destPeer, code))
if sendError != nil { if sendError != nil {
s.peerTaskConductor.Errorf("connect peer %s failed and send piece result with error: %s", destPeer.PeerId, sendError) s.peerTaskConductor.Errorf("connect peer %s failed and send piece result with error: %s", destPeer.PeerId, sendError)
go s.peerTaskConductor.cancel(base.Code_SchedError, sendError.Error()) go s.peerTaskConductor.cancel(commonv1.Code_SchedError, sendError.Error())
} else { } else {
s.peerTaskConductor.Debugf("report invalid peer %s/%d to scheduler", destPeer.PeerId, code) s.peerTaskConductor.Debugf("report invalid peer %s/%d to scheduler", destPeer.PeerId, code)
} }
} }
// acquire sends the target piece request to other peers // acquire sends the target piece request to other peers
func (s *pieceTaskSyncManager) acquire(request *base.PieceTaskRequest) (attempt int, success int) { func (s *pieceTaskSyncManager) acquire(request *commonv1.PieceTaskRequest) (attempt int, success int) {
s.RLock() s.RLock()
for _, p := range s.workers { for _, p := range s.workers {
attempt++ attempt++
@ -308,7 +309,7 @@ func (s *pieceTaskSynchronizer) close() {
s.span.End() s.span.End()
} }
func (s *pieceTaskSynchronizer) dispatchPieceRequest(piecePacket *base.PiecePacket) { func (s *pieceTaskSynchronizer) dispatchPieceRequest(piecePacket *commonv1.PiecePacket) {
s.peerTaskConductor.updateMetadata(piecePacket) s.peerTaskConductor.updateMetadata(piecePacket)
pieceCount := len(piecePacket.PieceInfos) pieceCount := len(piecePacket.PieceInfos)
@ -350,7 +351,7 @@ func (s *pieceTaskSynchronizer) dispatchPieceRequest(piecePacket *base.PiecePack
} }
} }
func (s *pieceTaskSynchronizer) receive(piecePacket *base.PiecePacket) { func (s *pieceTaskSynchronizer) receive(piecePacket *commonv1.PiecePacket) {
var err error var err error
for { for {
s.dispatchPieceRequest(piecePacket) s.dispatchPieceRequest(piecePacket)
@ -373,7 +374,7 @@ func (s *pieceTaskSynchronizer) receive(piecePacket *base.PiecePacket) {
} }
} }
func (s *pieceTaskSynchronizer) acquire(request *base.PieceTaskRequest) error { func (s *pieceTaskSynchronizer) acquire(request *commonv1.PieceTaskRequest) error {
if s.error.Load() != nil { if s.error.Load() != nil {
err := s.error.Load().(*pieceTaskSynchronizerError).err err := s.error.Load().(*pieceTaskSynchronizerError).err
s.Debugf("synchronizer already error %s, skip acquire more pieces", err) s.Debugf("synchronizer already error %s, skip acquire more pieces", err)
@ -393,10 +394,10 @@ func (s *pieceTaskSynchronizer) acquire(request *base.PieceTaskRequest) error {
func (s *pieceTaskSynchronizer) reportError(err error) { func (s *pieceTaskSynchronizer) reportError(err error) {
s.span.RecordError(err) s.span.RecordError(err)
sendError := s.peerTaskConductor.sendPieceResult(compositePieceResult(s.peerTaskConductor, s.dstPeer, base.Code_ClientPieceRequestFail)) sendError := s.peerTaskConductor.sendPieceResult(compositePieceResult(s.peerTaskConductor, s.dstPeer, commonv1.Code_ClientPieceRequestFail))
if sendError != nil { if sendError != nil {
s.Errorf("sync piece info failed and send piece result with error: %s", sendError) s.Errorf("sync piece info failed and send piece result with error: %s", sendError)
go s.peerTaskConductor.cancel(base.Code_SchedError, sendError.Error()) go s.peerTaskConductor.cancel(commonv1.Code_SchedError, sendError.Error())
} else { } else {
s.Debugf("report sync piece error to scheduler") s.Debugf("report sync piece error to scheduler")
} }
@ -438,10 +439,10 @@ func (s *synchronizerWatchdog) watch(timeout time.Duration) {
func (s *synchronizerWatchdog) reportWatchFailed() { func (s *synchronizerWatchdog) reportWatchFailed() {
sendError := s.peerTaskConductor.sendPieceResult(compositePieceResult( sendError := s.peerTaskConductor.sendPieceResult(compositePieceResult(
s.peerTaskConductor, s.mainPeer.Load().(*scheduler.PeerPacket_DestPeer), base.Code_ClientPieceRequestFail)) s.peerTaskConductor, s.mainPeer.Load().(*schedulerv1.PeerPacket_DestPeer), commonv1.Code_ClientPieceRequestFail))
if sendError != nil { if sendError != nil {
s.peerTaskConductor.Errorf("watchdog sync piece info failed and send piece result with error: %s", sendError) s.peerTaskConductor.Errorf("watchdog sync piece info failed and send piece result with error: %s", sendError)
go s.peerTaskConductor.cancel(base.Code_SchedError, sendError.Error()) go s.peerTaskConductor.cancel(commonv1.Code_SchedError, sendError.Error())
} else { } else {
s.peerTaskConductor.Debugf("report watchdog sync piece error to scheduler") s.peerTaskConductor.Debugf("report watchdog sync piece error to scheduler")
} }
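
Note: the conductor publishes the latest packet through an atomic.Value and every reader type-asserts on Load, e.g. ptc.peerPacket.Load().(*schedulerv1.PeerPacket); the watchdog stores its main peer the same way. A stdlib-only sketch of the pattern, with the proto types replaced by local stand-ins:

package main

import (
	"fmt"
	"sync/atomic"
)

// destPeer stands in for schedulerv1.PeerPacket_DestPeer.
type destPeer struct{ PeerId string }

// peerPacket stands in for schedulerv1.PeerPacket.
type peerPacket struct{ MainPeer *destPeer }

func main() {
	var v atomic.Value
	v.Store(&peerPacket{MainPeer: &destPeer{PeerId: "peer-a"}})

	// readers must assert to the concrete pointer type; a mismatched
	// type would panic, so every Store must use the same type
	pp := v.Load().(*peerPacket)
	fmt.Println(pp.MainPeer.PeerId)

	// swapping in a new packet is a plain Store
	v.Store(&peerPacket{MainPeer: &destPeer{PeerId: "peer-b"}})
	fmt.Println(v.Load().(*peerPacket).MainPeer.PeerId)
}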

View File

@ -25,9 +25,10 @@ import (
testifyassert "github.com/stretchr/testify/assert" testifyassert "github.com/stretchr/testify/assert"
"go.uber.org/atomic" "go.uber.org/atomic"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/api/pkg/apis/scheduler/v1/mocks"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler/mocks"
) )
func Test_watchdog(t *testing.T) { func Test_watchdog(t *testing.T) {
@ -53,7 +54,7 @@ func Test_watchdog(t *testing.T) {
for _, tt := range testCases { for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
peer := &scheduler.PeerPacket_DestPeer{} peer := &schedulerv1.PeerPacket_DestPeer{}
pps := mocks.NewMockScheduler_ReportPieceResultClient(ctrl) pps := mocks.NewMockScheduler_ReportPieceResultClient(ctrl)
watchdog := &synchronizerWatchdog{ watchdog := &synchronizerWatchdog{
done: make(chan struct{}), done: make(chan struct{}),
@ -71,7 +72,7 @@ func Test_watchdog(t *testing.T) {
if tt.ok { if tt.ok {
watchdog.peerTaskConductor.readyPieces.Set(0) watchdog.peerTaskConductor.readyPieces.Set(0)
} else { } else {
pps.EXPECT().Send(gomock.Any()).DoAndReturn(func(pr *scheduler.PieceResult) error { pps.EXPECT().Send(gomock.Any()).DoAndReturn(func(pr *schedulerv1.PieceResult) error {
assert.Equal(peer.PeerId, pr.DstPid) assert.Equal(peer.PeerId, pr.DstPid)
return nil return nil
}) })
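
Note: Test_watchdog uses the standard table-driven layout (for _, tt := range testCases { t.Run(tt.name, ...) }). A standalone skeleton of that structure; watchOK is a placeholder, not the real watchdog logic:

package main

import "testing"

// watchOK is a placeholder for the behavior under test: the real
// watchdog reports success when at least one piece is ready.
func watchOK(pieceReady bool) bool { return pieceReady }

func TestWatchdogSkeleton(t *testing.T) {
	testCases := []struct {
		name string
		ok   bool
	}{
		{name: "watch ok", ok: true},
		{name: "watch failed", ok: false},
	}

	for _, tt := range testCases {
		tt := tt // capture for the closure (pre-Go 1.22 habit)
		t.Run(tt.name, func(t *testing.T) {
			// the real test also asserts on the PieceResult sent to
			// the scheduler in the failure case
			if got := watchOK(tt.ok); got != tt.ok {
				t.Fatalf("%s: got %v, want %v", tt.name, got, tt.ok)
			}
		})
	}
}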

View File

@ -27,12 +27,13 @@ import (
semconv "go.opentelemetry.io/otel/semconv/v1.7.0" semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
commonv1 "d7y.io/api/pkg/apis/common/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/storage"
"d7y.io/dragonfly/v2/client/util" "d7y.io/dragonfly/v2/client/util"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/idgen"
"d7y.io/dragonfly/v2/pkg/rpc/base"
) )
var _ *logger.SugaredLoggerOnWith // pin this package for no log code generation var _ *logger.SugaredLoggerOnWith // pin this package for no log code generation
@ -141,7 +142,7 @@ func (ptm *peerTaskManager) tryReuseFilePeerTask(ctx context.Context,
pg := &FileTaskProgress{ pg := &FileTaskProgress{
State: &ProgressState{ State: &ProgressState{
Success: true, Success: true,
Code: base.Code_Success, Code: commonv1.Code_Success,
Msg: "Success", Msg: "Success",
}, },
TaskID: taskID, TaskID: taskID,

View File

@ -29,12 +29,13 @@ import (
"github.com/golang/mock/gomock" "github.com/golang/mock/gomock"
testifyassert "github.com/stretchr/testify/assert" testifyassert "github.com/stretchr/testify/assert"
commonv1 "d7y.io/api/pkg/apis/common/v1"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/storage"
"d7y.io/dragonfly/v2/client/daemon/storage/mocks" "d7y.io/dragonfly/v2/client/daemon/storage/mocks"
"d7y.io/dragonfly/v2/client/daemon/test" "d7y.io/dragonfly/v2/client/daemon/test"
"d7y.io/dragonfly/v2/client/util" "d7y.io/dragonfly/v2/client/util"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
) )
func TestReuseFilePeerTask(t *testing.T) { func TestReuseFilePeerTask(t *testing.T) {
@ -56,10 +57,10 @@ func TestReuseFilePeerTask(t *testing.T) {
{ {
name: "normal completed task found", name: "normal completed task found",
request: &FileTaskRequest{ request: &FileTaskRequest{
PeerTaskRequest: scheduler.PeerTaskRequest{ PeerTaskRequest: schedulerv1.PeerTaskRequest{
PeerId: "", PeerId: "",
Url: "http://example.com/1", Url: "http://example.com/1",
UrlMeta: &base.UrlMeta{ UrlMeta: &commonv1.UrlMeta{
Digest: "", Digest: "",
Tag: "", Tag: "",
Range: "", Range: "",
@ -100,10 +101,10 @@ func TestReuseFilePeerTask(t *testing.T) {
{ {
name: "normal completed task not found", name: "normal completed task not found",
request: &FileTaskRequest{ request: &FileTaskRequest{
PeerTaskRequest: scheduler.PeerTaskRequest{ PeerTaskRequest: schedulerv1.PeerTaskRequest{
PeerId: "", PeerId: "",
Url: "http://example.com/1", Url: "http://example.com/1",
UrlMeta: &base.UrlMeta{ UrlMeta: &commonv1.UrlMeta{
Digest: "", Digest: "",
Tag: "", Tag: "",
Range: "", Range: "",
@ -137,10 +138,10 @@ func TestReuseFilePeerTask(t *testing.T) {
{ {
name: "normal completed subtask found", name: "normal completed subtask found",
request: &FileTaskRequest{ request: &FileTaskRequest{
PeerTaskRequest: scheduler.PeerTaskRequest{ PeerTaskRequest: schedulerv1.PeerTaskRequest{
PeerId: "", PeerId: "",
Url: "http://example.com/1", Url: "http://example.com/1",
UrlMeta: &base.UrlMeta{ UrlMeta: &commonv1.UrlMeta{
Digest: "", Digest: "",
Tag: "", Tag: "",
Range: "", Range: "",
@ -181,10 +182,10 @@ func TestReuseFilePeerTask(t *testing.T) {
{ {
name: "normal completed subtask not found", name: "normal completed subtask not found",
request: &FileTaskRequest{ request: &FileTaskRequest{
PeerTaskRequest: scheduler.PeerTaskRequest{ PeerTaskRequest: schedulerv1.PeerTaskRequest{
PeerId: "", PeerId: "",
Url: "http://example.com/1", Url: "http://example.com/1",
UrlMeta: &base.UrlMeta{ UrlMeta: &commonv1.UrlMeta{
Digest: "", Digest: "",
Tag: "", Tag: "",
Range: "", Range: "",
@ -214,10 +215,10 @@ func TestReuseFilePeerTask(t *testing.T) {
{ {
name: "partial task found", name: "partial task found",
request: &FileTaskRequest{ request: &FileTaskRequest{
PeerTaskRequest: scheduler.PeerTaskRequest{ PeerTaskRequest: schedulerv1.PeerTaskRequest{
PeerId: "", PeerId: "",
Url: "http://example.com/1", Url: "http://example.com/1",
UrlMeta: &base.UrlMeta{ UrlMeta: &commonv1.UrlMeta{
Digest: "", Digest: "",
Tag: "", Tag: "",
Range: "", Range: "",
@ -263,10 +264,10 @@ func TestReuseFilePeerTask(t *testing.T) {
{ {
name: "partial task found - out of range", name: "partial task found - out of range",
request: &FileTaskRequest{ request: &FileTaskRequest{
PeerTaskRequest: scheduler.PeerTaskRequest{ PeerTaskRequest: schedulerv1.PeerTaskRequest{
PeerId: "", PeerId: "",
Url: "http://example.com/1", Url: "http://example.com/1",
UrlMeta: &base.UrlMeta{ UrlMeta: &commonv1.UrlMeta{
Digest: "", Digest: "",
Tag: "", Tag: "",
Range: "", Range: "",
@ -317,7 +318,7 @@ func TestReuseFilePeerTask(t *testing.T) {
sm := mocks.NewMockManager(ctrl) sm := mocks.NewMockManager(ctrl)
tc.storageManager(sm) tc.storageManager(sm)
ptm := &peerTaskManager{ ptm := &peerTaskManager{
host: &scheduler.PeerHost{}, host: &schedulerv1.PeerHost{},
enablePrefetch: tc.enablePrefetch, enablePrefetch: tc.enablePrefetch,
storageManager: sm, storageManager: sm,
} }
@ -344,7 +345,7 @@ func TestReuseStreamPeerTask(t *testing.T) {
name: "normal completed task found", name: "normal completed task found",
request: &StreamTaskRequest{ request: &StreamTaskRequest{
URL: "http://example.com/1", URL: "http://example.com/1",
URLMeta: &base.UrlMeta{ URLMeta: &commonv1.UrlMeta{
Digest: "", Digest: "",
Tag: "", Tag: "",
Range: "", Range: "",
@ -376,8 +377,8 @@ func TestReuseStreamPeerTask(t *testing.T) {
}) })
sm.EXPECT().GetExtendAttribute(gomock.Any(), sm.EXPECT().GetExtendAttribute(gomock.Any(),
gomock.Any()).AnyTimes().DoAndReturn( gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { func(ctx context.Context, req *storage.PeerTaskMetadata) (*commonv1.ExtendAttribute, error) {
return &base.ExtendAttribute{ return &commonv1.ExtendAttribute{
Header: map[string]string{ Header: map[string]string{
"Test": "test", "Test": "test",
}, },
@ -397,7 +398,7 @@ func TestReuseStreamPeerTask(t *testing.T) {
name: "normal completed task not found", name: "normal completed task not found",
request: &StreamTaskRequest{ request: &StreamTaskRequest{
URL: "http://example.com/1", URL: "http://example.com/1",
URLMeta: &base.UrlMeta{ URLMeta: &commonv1.UrlMeta{
Digest: "", Digest: "",
Tag: "", Tag: "",
Range: "", Range: "",
@ -432,7 +433,7 @@ func TestReuseStreamPeerTask(t *testing.T) {
name: "normal completed subtask found", name: "normal completed subtask found",
request: &StreamTaskRequest{ request: &StreamTaskRequest{
URL: "http://example.com/1", URL: "http://example.com/1",
URLMeta: &base.UrlMeta{ URLMeta: &commonv1.UrlMeta{
Digest: "", Digest: "",
Tag: "", Tag: "",
Range: "", Range: "",
@ -464,8 +465,8 @@ func TestReuseStreamPeerTask(t *testing.T) {
}) })
sm.EXPECT().GetExtendAttribute(gomock.Any(), sm.EXPECT().GetExtendAttribute(gomock.Any(),
gomock.Any()).AnyTimes().DoAndReturn( gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { func(ctx context.Context, req *storage.PeerTaskMetadata) (*commonv1.ExtendAttribute, error) {
return &base.ExtendAttribute{ return &commonv1.ExtendAttribute{
Header: map[string]string{ Header: map[string]string{
"Test": "test", "Test": "test",
}, },
@ -484,7 +485,7 @@ func TestReuseStreamPeerTask(t *testing.T) {
name: "normal completed subtask not found", name: "normal completed subtask not found",
request: &StreamTaskRequest{ request: &StreamTaskRequest{
URL: "http://example.com/1", URL: "http://example.com/1",
URLMeta: &base.UrlMeta{ URLMeta: &commonv1.UrlMeta{
Digest: "", Digest: "",
Tag: "", Tag: "",
Range: "", Range: "",
@ -515,7 +516,7 @@ func TestReuseStreamPeerTask(t *testing.T) {
name: "partial task found", name: "partial task found",
request: &StreamTaskRequest{ request: &StreamTaskRequest{
URL: "http://example.com/1", URL: "http://example.com/1",
URLMeta: &base.UrlMeta{ URLMeta: &commonv1.UrlMeta{
Digest: "", Digest: "",
Tag: "", Tag: "",
Range: "", Range: "",
@ -551,8 +552,8 @@ func TestReuseStreamPeerTask(t *testing.T) {
}) })
sm.EXPECT().GetExtendAttribute(gomock.Any(), sm.EXPECT().GetExtendAttribute(gomock.Any(),
gomock.Any()).AnyTimes().DoAndReturn( gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { func(ctx context.Context, req *storage.PeerTaskMetadata) (*commonv1.ExtendAttribute, error) {
return &base.ExtendAttribute{ return &commonv1.ExtendAttribute{
Header: map[string]string{ Header: map[string]string{
"Test": "test", "Test": "test",
}, },
@ -571,7 +572,7 @@ func TestReuseStreamPeerTask(t *testing.T) {
name: "partial task found - 2", name: "partial task found - 2",
request: &StreamTaskRequest{ request: &StreamTaskRequest{
URL: "http://example.com/1", URL: "http://example.com/1",
URLMeta: &base.UrlMeta{ URLMeta: &commonv1.UrlMeta{
Digest: "", Digest: "",
Tag: "", Tag: "",
Range: "", Range: "",
@ -607,8 +608,8 @@ func TestReuseStreamPeerTask(t *testing.T) {
}) })
sm.EXPECT().GetExtendAttribute(gomock.Any(), sm.EXPECT().GetExtendAttribute(gomock.Any(),
gomock.Any()).AnyTimes().DoAndReturn( gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { func(ctx context.Context, req *storage.PeerTaskMetadata) (*commonv1.ExtendAttribute, error) {
return &base.ExtendAttribute{ return &commonv1.ExtendAttribute{
Header: map[string]string{ Header: map[string]string{
"Test": "test", "Test": "test",
}, },
@ -631,7 +632,7 @@ func TestReuseStreamPeerTask(t *testing.T) {
name: "partial task found - out of range", name: "partial task found - out of range",
request: &StreamTaskRequest{ request: &StreamTaskRequest{
URL: "http://example.com/1", URL: "http://example.com/1",
URLMeta: &base.UrlMeta{ URLMeta: &commonv1.UrlMeta{
Digest: "", Digest: "",
Tag: "", Tag: "",
Range: "", Range: "",
@ -667,8 +668,8 @@ func TestReuseStreamPeerTask(t *testing.T) {
}) })
sm.EXPECT().GetExtendAttribute(gomock.Any(), sm.EXPECT().GetExtendAttribute(gomock.Any(),
gomock.Any()).AnyTimes().DoAndReturn( gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { func(ctx context.Context, req *storage.PeerTaskMetadata) (*commonv1.ExtendAttribute, error) {
return &base.ExtendAttribute{ return &commonv1.ExtendAttribute{
Header: map[string]string{ Header: map[string]string{
"Test": "test", "Test": "test",
}, },
@ -694,7 +695,7 @@ func TestReuseStreamPeerTask(t *testing.T) {
sm := mocks.NewMockManager(ctrl) sm := mocks.NewMockManager(ctrl)
tc.storageManager(sm) tc.storageManager(sm)
ptm := &peerTaskManager{ ptm := &peerTaskManager{
host: &scheduler.PeerHost{}, host: &schedulerv1.PeerHost{},
enablePrefetch: tc.enablePrefetch, enablePrefetch: tc.enablePrefetch,
storageManager: sm, storageManager: sm,
} }
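
Note: TestReuseStreamPeerTask stubs GetExtendAttribute via gomock's DoAndReturn to hand back a canned commonv1.ExtendAttribute. The same idea with a hand-rolled fake, so the sketch needs no generated mocks; the interface below is a trimmed stand-in, not the real storage.Manager:

package main

import (
	"context"
	"fmt"

	commonv1 "d7y.io/api/pkg/apis/common/v1"
)

// extendAttributeGetter is a trimmed stand-in for the storage interface.
type extendAttributeGetter interface {
	GetExtendAttribute(ctx context.Context, taskID string) (*commonv1.ExtendAttribute, error)
}

// fakeStorage returns the same canned attribute the mocks above return.
type fakeStorage struct{}

func (fakeStorage) GetExtendAttribute(context.Context, string) (*commonv1.ExtendAttribute, error) {
	return &commonv1.ExtendAttribute{
		Header: map[string]string{"Test": "test"},
	}, nil
}

func main() {
	var s extendAttributeGetter = fakeStorage{}
	ea, _ := s.GetExtendAttribute(context.Background(), "task-0")
	fmt.Println(ea.Header["Test"])
}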

View File

@ -22,14 +22,15 @@ import (
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
"golang.org/x/time/rate" "golang.org/x/time/rate"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/util" "d7y.io/dragonfly/v2/client/util"
"d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/idgen"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
) )
type SeedTaskRequest struct { type SeedTaskRequest struct {
scheduler.PeerTaskRequest schedulerv1.PeerTaskRequest
Limit float64 Limit float64
Callsystem string Callsystem string
Range *util.Range Range *util.Range
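
Note: SeedTaskRequest, like FileTaskRequest earlier in this diff, embeds schedulerv1.PeerTaskRequest, so the shared fields are set through the embedded literal (see the seed-task test above). A trimmed sketch of that layout; the local type mirrors the struct here minus the Range field, to stay dependency-free:

package main

import (
	"fmt"

	commonv1 "d7y.io/api/pkg/apis/common/v1"
	schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
)

// seedTaskRequest mirrors the struct above, minus the Range field.
type seedTaskRequest struct {
	schedulerv1.PeerTaskRequest
	Limit      float64
	Callsystem string
}

func main() {
	req := &seedTaskRequest{
		PeerTaskRequest: schedulerv1.PeerTaskRequest{
			Url:     "http://example.com/1",
			UrlMeta: &commonv1.UrlMeta{Tag: "d7y-test"},
			PeerId:  "seed-peer-0",
		},
		Limit: 1024,
	}
	// promoted fields are reachable directly through the embedding
	fmt.Println(req.Url, req.Limit)
}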

View File

@ -25,27 +25,28 @@ import (
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
"golang.org/x/time/rate" "golang.org/x/time/rate"
commonv1 "d7y.io/api/pkg/apis/common/v1"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/metrics" "d7y.io/dragonfly/v2/client/daemon/metrics"
"d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/storage"
"d7y.io/dragonfly/v2/client/util" "d7y.io/dragonfly/v2/client/util"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/idgen"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
) )
type StreamTaskRequest struct { type StreamTaskRequest struct {
// universal resource locator for different kinds of storage // universal resource locator for different kinds of storage
URL string URL string
// url meta info // url meta info
URLMeta *base.UrlMeta URLMeta *commonv1.UrlMeta
// http range // http range
Range *util.Range Range *util.Range
// peer's id, which must be globally unique // peer's id, which must be globally unique
PeerID string PeerID string
// Pattern to register with the scheduler // Pattern to register with the scheduler
Pattern base.Pattern Pattern commonv1.Pattern
} }
// StreamTask represents a peer task with stream io for reading directly without an extra disk io // StreamTask represents a peer task with stream io for reading directly without an extra disk io
@ -66,7 +67,7 @@ type streamTask struct {
func (ptm *peerTaskManager) newStreamTask( func (ptm *peerTaskManager) newStreamTask(
ctx context.Context, ctx context.Context,
request *scheduler.PeerTaskRequest, request *schedulerv1.PeerTaskRequest,
rg *util.Range) (*streamTask, error) { rg *util.Range) (*streamTask, error) {
metrics.StreamTaskCount.Add(1) metrics.StreamTaskCount.Add(1)
var limit = rate.Inf var limit = rate.Inf
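
Note: newStreamTask starts from limit = rate.Inf, i.e. an unlimited token bucket, and only tightens it when a positive per-peer limit is configured. A small sketch of that default with golang.org/x/time/rate; newLimiter is a hypothetical helper, not part of this package:

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func newLimiter(requested float64) *rate.Limiter {
	limit := rate.Inf // default: no throttling
	if requested > 0 {
		limit = rate.Limit(requested)
	}
	// with rate.Inf the burst value is ignored and Allow always succeeds
	return rate.NewLimiter(limit, 1)
}

func main() {
	fmt.Println(newLimiter(0).Allow())  // true, unlimited
	fmt.Println(newLimiter(10).Limit()) // 10
}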

View File

@ -38,6 +38,11 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
commonv1 "d7y.io/api/pkg/apis/common/v1"
dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
schedulerv1mocks "d7y.io/api/pkg/apis/scheduler/v1/mocks"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/storage"
"d7y.io/dragonfly/v2/client/daemon/test" "d7y.io/dragonfly/v2/client/daemon/test"
@ -46,14 +51,10 @@ import (
"d7y.io/dragonfly/v2/pkg/dfnet" "d7y.io/dragonfly/v2/pkg/dfnet"
"d7y.io/dragonfly/v2/pkg/digest" "d7y.io/dragonfly/v2/pkg/digest"
"d7y.io/dragonfly/v2/pkg/rpc" "d7y.io/dragonfly/v2/pkg/rpc"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
daemonserver "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server" daemonserver "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server"
servermocks "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server/mocks" servermocks "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server/mocks"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client" schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client"
mock_scheduler_client "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client/mocks" clientmocks "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client/mocks"
mock_scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler/mocks"
"d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/source/clients/httpprotocol" "d7y.io/dragonfly/v2/pkg/source/clients/httpprotocol"
sourcemocks "d7y.io/dragonfly/v2/pkg/source/mocks" sourcemocks "d7y.io/dragonfly/v2/pkg/source/mocks"
@ -74,12 +75,12 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte,
piecesMd5 = append(piecesMd5, digest.MD5FromBytes(testBytes[int(i)*int(opt.pieceSize):int(i+1)*int(opt.pieceSize)])) piecesMd5 = append(piecesMd5, digest.MD5FromBytes(testBytes[int(i)*int(opt.pieceSize):int(i+1)*int(opt.pieceSize)]))
} }
} }
daemon.EXPECT().GetPieceTasks(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(func(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error) { daemon.EXPECT().GetPieceTasks(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(func(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
var tasks []*base.PieceInfo var tasks []*commonv1.PieceInfo
// only return first piece // only return first piece
if request.StartNum == 0 { if request.StartNum == 0 {
tasks = append(tasks, tasks = append(tasks,
&base.PieceInfo{ &commonv1.PieceInfo{
PieceNum: int32(request.StartNum), PieceNum: int32(request.StartNum),
RangeStart: uint64(0), RangeStart: uint64(0),
RangeSize: opt.pieceSize, RangeSize: opt.pieceSize,
@ -88,7 +89,7 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte,
PieceStyle: 0, PieceStyle: 0,
}) })
} }
return &base.PiecePacket{ return &commonv1.PiecePacket{
PieceMd5Sign: digest.SHA256FromStrings(piecesMd5...), PieceMd5Sign: digest.SHA256FromStrings(piecesMd5...),
TaskId: request.TaskId, TaskId: request.TaskId,
DstPid: "peer-x", DstPid: "peer-x",
@ -97,7 +98,7 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte,
TotalPiece: pieceCount, TotalPiece: pieceCount,
}, nil }, nil
}) })
daemon.EXPECT().SyncPieceTasks(gomock.Any()).AnyTimes().DoAndReturn(func(arg0 dfdaemon.Daemon_SyncPieceTasksServer) error { daemon.EXPECT().SyncPieceTasks(gomock.Any()).AnyTimes().DoAndReturn(func(arg0 dfdaemonv1.Daemon_SyncPieceTasksServer) error {
return status.Error(codes.Unimplemented, "TODO") return status.Error(codes.Unimplemented, "TODO")
}) })
ln, _ := rpc.Listen(dfnet.NetAddr{ ln, _ := rpc.Listen(dfnet.NetAddr{
@ -112,7 +113,7 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte,
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
// 2. setup a scheduler // 2. setup a scheduler
pps := mock_scheduler.NewMockScheduler_ReportPieceResultClient(ctrl) pps := schedulerv1mocks.NewMockScheduler_ReportPieceResultClient(ctrl)
var ( var (
wg = sync.WaitGroup{} wg = sync.WaitGroup{}
backSourceSent = atomic.Bool{} backSourceSent = atomic.Bool{}
@ -120,7 +121,7 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte,
wg.Add(1) wg.Add(1)
pps.EXPECT().Send(gomock.Any()).AnyTimes().DoAndReturn( pps.EXPECT().Send(gomock.Any()).AnyTimes().DoAndReturn(
func(pr *scheduler.PieceResult) error { func(pr *schedulerv1.PieceResult) error {
if pr.PieceInfo.PieceNum == 0 && pr.Success { if pr.PieceInfo.PieceNum == 0 && pr.Success {
if !backSourceSent.Load() { if !backSourceSent.Load() {
wg.Done() wg.Done()
@ -134,7 +135,7 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte,
schedPeerPacket bool schedPeerPacket bool
) )
pps.EXPECT().Recv().AnyTimes().DoAndReturn( pps.EXPECT().Recv().AnyTimes().DoAndReturn(
func() (*scheduler.PeerPacket, error) { func() (*schedulerv1.PeerPacket, error) {
if len(opt.peerPacketDelay) > delayCount { if len(opt.peerPacketDelay) > delayCount {
if delay := opt.peerPacketDelay[delayCount]; delay > 0 { if delay := opt.peerPacketDelay[delayCount]; delay > 0 {
time.Sleep(delay) time.Sleep(delay)
@ -144,15 +145,15 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte,
if schedPeerPacket { if schedPeerPacket {
// send back source after piece 0 is done // send back source after piece 0 is done
wg.Wait() wg.Wait()
return nil, dferrors.New(base.Code_SchedNeedBackSource, "") return nil, dferrors.New(commonv1.Code_SchedNeedBackSource, "")
} }
schedPeerPacket = true schedPeerPacket = true
return &scheduler.PeerPacket{ return &schedulerv1.PeerPacket{
Code: base.Code_Success, Code: commonv1.Code_Success,
TaskId: opt.taskID, TaskId: opt.taskID,
SrcPid: "127.0.0.1", SrcPid: "127.0.0.1",
ParallelCount: opt.pieceParallelCount, ParallelCount: opt.pieceParallelCount,
MainPeer: &scheduler.PeerPacket_DestPeer{ MainPeer: &schedulerv1.PeerPacket_DestPeer{
Ip: "127.0.0.1", Ip: "127.0.0.1",
RpcPort: port, RpcPort: port,
PeerId: "peer-x", PeerId: "peer-x",
@ -161,21 +162,21 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte,
}, nil }, nil
}) })
pps.EXPECT().CloseSend().AnyTimes() pps.EXPECT().CloseSend().AnyTimes()
sched := mock_scheduler_client.NewMockClient(ctrl) sched := clientmocks.NewMockClient(ctrl)
sched.EXPECT().RegisterPeerTask(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( sched.EXPECT().RegisterPeerTask(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, ptr *scheduler.PeerTaskRequest, opts ...grpc.CallOption) (*scheduler.RegisterResult, error) { func(ctx context.Context, ptr *schedulerv1.PeerTaskRequest, opts ...grpc.CallOption) (*schedulerv1.RegisterResult, error) {
return &scheduler.RegisterResult{ return &schedulerv1.RegisterResult{
TaskId: opt.taskID, TaskId: opt.taskID,
SizeScope: base.SizeScope_NORMAL, SizeScope: commonv1.SizeScope_NORMAL,
DirectPiece: nil, DirectPiece: nil,
}, nil }, nil
}) })
sched.EXPECT().ReportPieceResult(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( sched.EXPECT().ReportPieceResult(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, ptr *scheduler.PeerTaskRequest, opts ...grpc.CallOption) (scheduler.Scheduler_ReportPieceResultClient, error) { func(ctx context.Context, ptr *schedulerv1.PeerTaskRequest, opts ...grpc.CallOption) (schedulerv1.Scheduler_ReportPieceResultClient, error) {
return pps, nil return pps, nil
}) })
sched.EXPECT().ReportPeerResult(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn( sched.EXPECT().ReportPeerResult(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, pr *scheduler.PeerResult, opts ...grpc.CallOption) error { func(ctx context.Context, pr *schedulerv1.PeerResult, opts ...grpc.CallOption) error {
return nil return nil
}) })
tempDir, _ := os.MkdirTemp("", "d7y-test-*") tempDir, _ := os.MkdirTemp("", "d7y-test-*")
@ -250,7 +251,7 @@ func TestStreamPeerTask_BackSource_Partial_WithContentLength(t *testing.T) {
} }
ptm := &peerTaskManager{ ptm := &peerTaskManager{
calculateDigest: true, calculateDigest: true,
host: &scheduler.PeerHost{ host: &schedulerv1.PeerHost{
Ip: "127.0.0.1", Ip: "127.0.0.1",
}, },
conductorLock: &sync.Mutex{}, conductorLock: &sync.Mutex{},
@ -262,13 +263,13 @@ func TestStreamPeerTask_BackSource_Partial_WithContentLength(t *testing.T) {
ScheduleTimeout: util.Duration{Duration: 10 * time.Minute}, ScheduleTimeout: util.Duration{Duration: 10 * time.Minute},
}, },
} }
req := &scheduler.PeerTaskRequest{ req := &schedulerv1.PeerTaskRequest{
Url: url, Url: url,
UrlMeta: &base.UrlMeta{ UrlMeta: &commonv1.UrlMeta{
Tag: "d7y-test", Tag: "d7y-test",
}, },
PeerId: peerID, PeerId: peerID,
PeerHost: &scheduler.PeerHost{}, PeerHost: &schedulerv1.PeerHost{},
} }
ctx := context.Background() ctx := context.Background()
pt, err := ptm.newStreamTask(ctx, req, nil) pt, err := ptm.newStreamTask(ctx, req, nil)
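
Note: the mock stream above returns one normal PeerPacket and then fails with Code_SchedNeedBackSource, which is what pushes the task back to source. A dependency-free sketch of such a "one packet, then error" Recv stub using a closure counter; the local type and error stand in for the schedulerv1/dferrors ones:

package main

import (
	"errors"
	"fmt"
)

type peerPacket struct{ TaskId string } // stands in for schedulerv1.PeerPacket

var errNeedBackSource = errors.New("sched need back source") // stands in for Code_SchedNeedBackSource

// makeRecv returns a Recv-style func: the first call yields a packet,
// later calls tell the client to back-source, like the mock above.
func makeRecv(taskID string) func() (*peerPacket, error) {
	sent := false
	return func() (*peerPacket, error) {
		if sent {
			return nil, errNeedBackSource
		}
		sent = true
		return &peerPacket{TaskId: taskID}, nil
	}
}

func main() {
	recv := makeRecv("task-0")
	for i := 0; i < 2; i++ {
		pp, err := recv()
		fmt.Println(pp, err)
	}
}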

View File

@ -29,15 +29,16 @@ import (
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
commonv1 "d7y.io/api/pkg/apis/common/v1"
"d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/storage"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/digest" "d7y.io/dragonfly/v2/pkg/digest"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source"
) )
type DownloadPieceRequest struct { type DownloadPieceRequest struct {
piece *base.PieceInfo piece *commonv1.PieceInfo
log *logger.SugaredLoggerOnWith log *logger.SugaredLoggerOnWith
storage storage.TaskStorageDriver storage storage.TaskStorageDriver
TaskID string TaskID string
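
Note: DownloadPieceRequest now carries a *commonv1.PieceInfo; the downloader test in the next file fills exactly the fields below. A minimal construction, assuming the d7y.io/api module; the md5 literal is only an example value:

package main

import (
	"fmt"

	commonv1 "d7y.io/api/pkg/apis/common/v1"
)

func main() {
	piece := &commonv1.PieceInfo{
		PieceNum:    0,
		RangeStart:  0,
		RangeSize:   1024,
		PieceMd5:    "d41d8cd98f00b204e9800998ecf8427e", // md5 of the piece body
		PieceOffset: 0,
		PieceStyle:  commonv1.PieceStyle_PLAIN,
	}
	fmt.Println(piece.PieceNum, piece.RangeSize, piece.PieceStyle)
}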

View File

@ -34,10 +34,11 @@ import (
testifyassert "github.com/stretchr/testify/assert" testifyassert "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
commonv1 "d7y.io/api/pkg/apis/common/v1"
"d7y.io/dragonfly/v2/client/daemon/test" "d7y.io/dragonfly/v2/client/daemon/test"
"d7y.io/dragonfly/v2/client/util" "d7y.io/dragonfly/v2/client/util"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/source/clients/httpprotocol" "d7y.io/dragonfly/v2/pkg/source/clients/httpprotocol"
) )
@ -138,13 +139,13 @@ func TestPieceDownloader_DownloadPiece(t *testing.T) {
DstPid: "", DstPid: "",
DstAddr: addr.Host, DstAddr: addr.Host,
CalcDigest: true, CalcDigest: true,
piece: &base.PieceInfo{ piece: &commonv1.PieceInfo{
PieceNum: 0, PieceNum: 0,
RangeStart: tt.rangeStart, RangeStart: tt.rangeStart,
RangeSize: tt.rangeSize, RangeSize: tt.rangeSize,
PieceMd5: digest, PieceMd5: digest,
PieceOffset: tt.rangeStart, PieceOffset: tt.rangeStart,
PieceStyle: base.PieceStyle_PLAIN, PieceStyle: commonv1.PieceStyle_PLAIN,
}, },
log: logger.With("test", "test"), log: logger.With("test", "test"),
}) })

View File

@ -34,6 +34,11 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
commonv1 "d7y.io/api/pkg/apis/common/v1"
dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"
errordetailsv1 "d7y.io/api/pkg/apis/errordetails/v1"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/storage"
clientutil "d7y.io/dragonfly/v2/client/util" clientutil "d7y.io/dragonfly/v2/client/util"
@ -41,17 +46,13 @@ import (
"d7y.io/dragonfly/v2/internal/util" "d7y.io/dragonfly/v2/internal/util"
"d7y.io/dragonfly/v2/pkg/digest" "d7y.io/dragonfly/v2/pkg/digest"
"d7y.io/dragonfly/v2/pkg/retry" "d7y.io/dragonfly/v2/pkg/retry"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
"d7y.io/dragonfly/v2/pkg/rpc/errordetails"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
"d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source"
) )
type PieceManager interface { type PieceManager interface {
DownloadSource(ctx context.Context, pt Task, request *scheduler.PeerTaskRequest, parsedRange *clientutil.Range) error DownloadSource(ctx context.Context, pt Task, request *schedulerv1.PeerTaskRequest, parsedRange *clientutil.Range) error
DownloadPiece(ctx context.Context, request *DownloadPieceRequest) (*DownloadPieceResult, error) DownloadPiece(ctx context.Context, request *DownloadPieceRequest) (*DownloadPieceResult, error)
ImportFile(ctx context.Context, ptm storage.PeerTaskMetadata, tsd storage.TaskStorageDriver, req *dfdaemon.ImportTaskRequest) error ImportFile(ctx context.Context, ptm storage.PeerTaskMetadata, tsd storage.TaskStorageDriver, req *dfdaemonv1.ImportTaskRequest) error
Import(ctx context.Context, ptm storage.PeerTaskMetadata, tsd storage.TaskStorageDriver, contentLength int64, reader io.Reader) error Import(ctx context.Context, ptm storage.PeerTaskMetadata, tsd storage.TaskStorageDriver, contentLength int64, reader io.Reader) error
} }
@ -271,9 +272,9 @@ func (pm *pieceManager) processPieceFromSource(pt Task,
return return
} }
func (pm *pieceManager) DownloadSource(ctx context.Context, pt Task, peerTaskRequest *scheduler.PeerTaskRequest, parsedRange *clientutil.Range) error { func (pm *pieceManager) DownloadSource(ctx context.Context, pt Task, peerTaskRequest *schedulerv1.PeerTaskRequest, parsedRange *clientutil.Range) error {
if peerTaskRequest.UrlMeta == nil { if peerTaskRequest.UrlMeta == nil {
peerTaskRequest.UrlMeta = &base.UrlMeta{ peerTaskRequest.UrlMeta = &commonv1.UrlMeta{
Header: map[string]string{}, Header: map[string]string{},
} }
} else if peerTaskRequest.UrlMeta.Header == nil { } else if peerTaskRequest.UrlMeta.Header == nil {
@ -362,9 +363,9 @@ singleDownload:
hdr[k] = response.Header.Get(k) hdr[k] = response.Header.Get(k)
} }
} }
srcErr := &errordetails.SourceError{ srcErr := &errordetailsv1.SourceError{
Temporary: response.Temporary != nil && response.Temporary(), Temporary: response.Temporary != nil && response.Temporary(),
Metadata: &base.ExtendAttribute{ Metadata: &commonv1.ExtendAttribute{
Header: hdr, Header: hdr,
StatusCode: int32(response.StatusCode), StatusCode: int32(response.StatusCode),
Status: response.Status, Status: response.Status,
@ -422,7 +423,7 @@ singleDownload:
return pm.downloadKnownLengthSource(ctx, pt, contentLength, pieceSize, reader, response, peerTaskRequest, parsedRange, metadata, supportConcurrent, targetContentLength) return pm.downloadKnownLengthSource(ctx, pt, contentLength, pieceSize, reader, response, peerTaskRequest, parsedRange, metadata, supportConcurrent, targetContentLength)
} }
func (pm *pieceManager) downloadKnownLengthSource(ctx context.Context, pt Task, contentLength int64, pieceSize uint32, reader io.Reader, response *source.Response, peerTaskRequest *scheduler.PeerTaskRequest, parsedRange *clientutil.Range, metadata *source.Metadata, supportConcurrent bool, targetContentLength int64) error { func (pm *pieceManager) downloadKnownLengthSource(ctx context.Context, pt Task, contentLength int64, pieceSize uint32, reader io.Reader, response *source.Response, peerTaskRequest *schedulerv1.PeerTaskRequest, parsedRange *clientutil.Range, metadata *source.Metadata, supportConcurrent bool, targetContentLength int64) error {
log := pt.Log() log := pt.Log()
maxPieceNum := util.ComputePieceCount(contentLength, pieceSize) maxPieceNum := util.ComputePieceCount(contentLength, pieceSize)
pt.SetContentLength(contentLength) pt.SetContentLength(contentLength)
@ -445,7 +446,7 @@ func (pm *pieceManager) downloadKnownLengthSource(ctx context.Context, pt Task,
request := &DownloadPieceRequest{ request := &DownloadPieceRequest{
TaskID: pt.GetTaskID(), TaskID: pt.GetTaskID(),
PeerID: pt.GetPeerID(), PeerID: pt.GetPeerID(),
piece: &base.PieceInfo{ piece: &commonv1.PieceInfo{
PieceNum: pieceNum, PieceNum: pieceNum,
RangeStart: offset, RangeStart: offset,
RangeSize: uint32(result.Size), RangeSize: uint32(result.Size),
@ -525,7 +526,7 @@ func (pm *pieceManager) downloadUnknownLengthSource(pt Task, pieceSize uint32, r
request := &DownloadPieceRequest{ request := &DownloadPieceRequest{
TaskID: pt.GetTaskID(), TaskID: pt.GetTaskID(),
PeerID: pt.GetPeerID(), PeerID: pt.GetPeerID(),
piece: &base.PieceInfo{ piece: &commonv1.PieceInfo{
PieceNum: pieceNum, PieceNum: pieceNum,
RangeStart: offset, RangeStart: offset,
RangeSize: uint32(result.Size), RangeSize: uint32(result.Size),
@ -617,7 +618,7 @@ func (pm *pieceManager) processPieceFromFile(ctx context.Context, ptm storage.Pe
return n, nil return n, nil
} }
func (pm *pieceManager) ImportFile(ctx context.Context, ptm storage.PeerTaskMetadata, tsd storage.TaskStorageDriver, req *dfdaemon.ImportTaskRequest) error { func (pm *pieceManager) ImportFile(ctx context.Context, ptm storage.PeerTaskMetadata, tsd storage.TaskStorageDriver, req *dfdaemonv1.ImportTaskRequest) error {
log := logger.With("function", "ImportFile", "URL", req.Url, "taskID", ptm.TaskID) log := logger.With("function", "ImportFile", "URL", req.Url, "taskID", ptm.TaskID)
// get file size and compute piece size and piece count // get file size and compute piece size and piece count
stat, err := os.Stat(req.Path) stat, err := os.Stat(req.Path)
@ -747,7 +748,7 @@ func (pm *pieceManager) Import(ctx context.Context, ptm storage.PeerTaskMetadata
return nil return nil
} }
func (pm *pieceManager) concurrentDownloadSource(ctx context.Context, pt Task, peerTaskRequest *scheduler.PeerTaskRequest, parsedRange *clientutil.Range, metadata *source.Metadata, startPieceNum int32) error { func (pm *pieceManager) concurrentDownloadSource(ctx context.Context, pt Task, peerTaskRequest *schedulerv1.PeerTaskRequest, parsedRange *clientutil.Range, metadata *source.Metadata, startPieceNum int32) error {
// parsedRange always exists // parsedRange always exists
pieceSize := pm.computePieceSize(parsedRange.Length) pieceSize := pm.computePieceSize(parsedRange.Length)
pieceCount := util.ComputePieceCount(parsedRange.Length, pieceSize) pieceCount := util.ComputePieceCount(parsedRange.Length, pieceSize)
@ -836,7 +837,7 @@ func (pm *pieceManager) concurrentDownloadSource(ctx context.Context, pt Task, p
func (pm *pieceManager) downloadPieceFromSource(ctx context.Context, func (pm *pieceManager) downloadPieceFromSource(ctx context.Context,
pt Task, log *logger.SugaredLoggerOnWith, pt Task, log *logger.SugaredLoggerOnWith,
peerTaskRequest *scheduler.PeerTaskRequest, peerTaskRequest *schedulerv1.PeerTaskRequest,
pieceSize uint32, num int32, pieceSize uint32, num int32,
parsedRange *clientutil.Range, parsedRange *clientutil.Range,
pieceCount int32, pieceCount int32,
@ -881,7 +882,7 @@ func (pm *pieceManager) downloadPieceFromSource(ctx context.Context,
request := &DownloadPieceRequest{ request := &DownloadPieceRequest{
TaskID: pt.GetTaskID(), TaskID: pt.GetTaskID(),
PeerID: pt.GetPeerID(), PeerID: pt.GetPeerID(),
piece: &base.PieceInfo{ piece: &commonv1.PieceInfo{
PieceNum: num, PieceNum: num,
RangeStart: offset, RangeStart: offset,
RangeSize: uint32(result.Size), RangeSize: uint32(result.Size),

View File

@ -36,15 +36,16 @@ import (
"go.uber.org/atomic" "go.uber.org/atomic"
"golang.org/x/time/rate" "golang.org/x/time/rate"
commonv1 "d7y.io/api/pkg/apis/common/v1"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/storage"
"d7y.io/dragonfly/v2/client/daemon/test" "d7y.io/dragonfly/v2/client/daemon/test"
clientutil "d7y.io/dragonfly/v2/client/util" clientutil "d7y.io/dragonfly/v2/client/util"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/internal/util" "d7y.io/dragonfly/v2/internal/util"
"d7y.io/dragonfly/v2/pkg/rpc/base"
_ "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server" _ "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
"d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/source/clients/httpprotocol" "d7y.io/dragonfly/v2/pkg/source/clients/httpprotocol"
) )
@ -443,9 +444,9 @@ func TestPieceManager_DownloadSource(t *testing.T) {
return tc.pieceSize return tc.pieceSize
} }
request := &scheduler.PeerTaskRequest{ request := &schedulerv1.PeerTaskRequest{
Url: ts.URL, Url: ts.URL,
UrlMeta: &base.UrlMeta{ UrlMeta: &commonv1.UrlMeta{
Digest: "", Digest: "",
Range: "", Range: "",
Header: nil, Header: nil,
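As the test hunk above shows, a PeerTaskRequest pairs the URL with a commonv1.UrlMeta; DownloadSource fills in an empty meta when it is nil. A hedged sketch of that shape, with a placeholder URL:

package main

import (
	"fmt"

	commonv1 "d7y.io/api/pkg/apis/common/v1"
	schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
)

func main() {
	// Shape mirrored from the test above; the URL is a placeholder.
	request := &schedulerv1.PeerTaskRequest{
		Url: "http://127.0.0.1/test",
		UrlMeta: &commonv1.UrlMeta{
			Digest: "",
			Range:  "",
			Header: nil,
		},
	}
	fmt.Println(request.GetUrl())
}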

View File

@ -39,13 +39,14 @@ import (
"go.uber.org/atomic" "go.uber.org/atomic"
"golang.org/x/sync/semaphore" "golang.org/x/sync/semaphore"
commonv1 "d7y.io/api/pkg/apis/common/v1"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/metrics" "d7y.io/dragonfly/v2/client/daemon/metrics"
"d7y.io/dragonfly/v2/client/daemon/peer" "d7y.io/dragonfly/v2/client/daemon/peer"
"d7y.io/dragonfly/v2/client/daemon/transport" "d7y.io/dragonfly/v2/client/daemon/transport"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
pkgstrings "d7y.io/dragonfly/v2/pkg/strings" pkgstrings "d7y.io/dragonfly/v2/pkg/strings"
) )
@ -85,7 +86,7 @@ type Proxy struct {
peerTaskManager peer.TaskManager peerTaskManager peer.TaskManager
// peerHost is the peer host info // peerHost is the peer host info
peerHost *scheduler.PeerHost peerHost *schedulerv1.PeerHost
// whiteList is the proxy white list // whiteList is the proxy white list
whiteList []*config.WhiteList whiteList []*config.WhiteList
@ -100,7 +101,7 @@ type Proxy struct {
defaultTag string defaultTag string
// defaultPattern is the default pattern used when registering stream tasks // defaultPattern is the default pattern used when registering stream tasks
defaultPattern base.Pattern defaultPattern commonv1.Pattern
// tracer is used for telemetry // tracer is used for telemetry
tracer trace.Tracer tracer trace.Tracer
@ -116,8 +117,8 @@ type Proxy struct {
// Option is a functional option for configuring the proxy // Option is a functional option for configuring the proxy
type Option func(p *Proxy) *Proxy type Option func(p *Proxy) *Proxy
// WithPeerHost sets the *scheduler.PeerHost // WithPeerHost sets the *schedulerv1.PeerHost
func WithPeerHost(peerHost *scheduler.PeerHost) Option { func WithPeerHost(peerHost *schedulerv1.PeerHost) Option {
return func(p *Proxy) *Proxy { return func(p *Proxy) *Proxy {
p.peerHost = peerHost p.peerHost = peerHost
return p return p
@ -226,7 +227,7 @@ func WithDefaultTag(t string) Option {
} }
// WithDefaultPattern sets the default pattern for downloading // WithDefaultPattern sets the default pattern for downloading
func WithDefaultPattern(pattern base.Pattern) Option { func WithDefaultPattern(pattern commonv1.Pattern) Option {
return func(p *Proxy) *Proxy { return func(p *Proxy) *Proxy {
p.defaultPattern = pattern p.defaultPattern = pattern
return p return p
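The proxy is configured through functional options of the form Option func(p *Proxy) *Proxy, as in WithPeerHost and WithDefaultPattern above. A compact sketch of that pattern with hypothetical stand-in types, not the daemon's real constructor:

package main

import "fmt"

// Hypothetical stand-ins to illustrate the option style used above.
type Proxy struct{ defaultPattern string }

type Option func(p *Proxy) *Proxy

func WithDefaultPattern(pattern string) Option {
	return func(p *Proxy) *Proxy {
		p.defaultPattern = pattern
		return p
	}
}

func NewProxy(opts ...Option) *Proxy {
	p := &Proxy{}
	for _, opt := range opts {
		p = opt(p)
	}
	return p
}

func main() {
	p := NewProxy(WithDefaultPattern("p2p"))
	fmt.Println(p.defaultPattern)
}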

View File

@ -32,11 +32,12 @@ import (
"github.com/spf13/viper" "github.com/spf13/viper"
"gopkg.in/yaml.v3" "gopkg.in/yaml.v3"
commonv1 "d7y.io/api/pkg/apis/common/v1"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/peer" "d7y.io/dragonfly/v2/client/daemon/peer"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
) )
type Manager interface { type Manager interface {
@ -59,7 +60,7 @@ type proxyManager struct {
var _ Manager = (*proxyManager)(nil) var _ Manager = (*proxyManager)(nil)
func NewProxyManager(peerHost *scheduler.PeerHost, peerTaskManager peer.TaskManager, defaultPattern base.Pattern, proxyOption *config.ProxyOption) (Manager, error) { func NewProxyManager(peerHost *schedulerv1.PeerHost, peerTaskManager peer.TaskManager, defaultPattern commonv1.Pattern, proxyOption *config.ProxyOption) (Manager, error) {
// proxy is optional; when nil, just disable it // proxy is optional; when nil, just disable it
if proxyOption == nil { if proxyOption == nil {
logger.Infof("proxy config is empty, disabled") logger.Infof("proxy config is empty, disabled")
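NewProxyManager treats a nil ProxyOption as "proxy disabled" rather than as an error, per the comment above. A sketch of that convention with hypothetical stand-in types (Manager and the two implementations here are not the daemon's real ones):

package main

import "fmt"

type Manager interface{ Enabled() bool }

type disabledManager struct{}

func (disabledManager) Enabled() bool { return false }

type enabledManager struct{}

func (enabledManager) Enabled() bool { return true }

type ProxyOption struct{}

// newManager mirrors the nil-means-disabled convention above.
func newManager(opt *ProxyOption) (Manager, error) {
	if opt == nil {
		return disabledManager{}, nil
	}
	return enabledManager{}, nil
}

func main() {
	m, _ := newManager(nil)
	fmt.Println(m.Enabled()) // false: proxy disabled, no error
}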

View File

@ -35,6 +35,11 @@ import (
healthpb "google.golang.org/grpc/health/grpc_health_v1" healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1"
commonv1 "d7y.io/api/pkg/apis/common/v1"
dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/peer" "d7y.io/dragonfly/v2/client/daemon/peer"
"d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/storage"
@ -43,11 +48,7 @@ import (
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/idgen"
"d7y.io/dragonfly/v2/pkg/net/http" "d7y.io/dragonfly/v2/pkg/net/http"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
dfdaemongrpc "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
dfdaemonserver "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server" dfdaemonserver "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
"d7y.io/dragonfly/v2/pkg/safe" "d7y.io/dragonfly/v2/pkg/safe"
"d7y.io/dragonfly/v2/scheduler/resource" "d7y.io/dragonfly/v2/scheduler/resource"
) )
@ -61,18 +62,18 @@ type Server interface {
type server struct { type server struct {
util.KeepAlive util.KeepAlive
peerHost *scheduler.PeerHost peerHost *schedulerv1.PeerHost
peerTaskManager peer.TaskManager peerTaskManager peer.TaskManager
storageManager storage.Manager storageManager storage.Manager
defaultPattern base.Pattern defaultPattern commonv1.Pattern
downloadServer *grpc.Server downloadServer *grpc.Server
peerServer *grpc.Server peerServer *grpc.Server
uploadAddr string uploadAddr string
} }
func New(peerHost *scheduler.PeerHost, peerTaskManager peer.TaskManager, func New(peerHost *schedulerv1.PeerHost, peerTaskManager peer.TaskManager,
storageManager storage.Manager, defaultPattern base.Pattern, storageManager storage.Manager, defaultPattern commonv1.Pattern,
downloadOpts []grpc.ServerOption, peerOpts []grpc.ServerOption) (Server, error) { downloadOpts []grpc.ServerOption, peerOpts []grpc.ServerOption) (Server, error) {
s := &server{ s := &server{
KeepAlive: util.NewKeepAlive("rpc server"), KeepAlive: util.NewKeepAlive("rpc server"),
@ -92,7 +93,7 @@ func New(peerHost *scheduler.PeerHost, peerTaskManager peer.TaskManager,
s.peerServer = dfdaemonserver.New(s, peerOpts...) s.peerServer = dfdaemonserver.New(s, peerOpts...)
healthpb.RegisterHealthServer(s.peerServer, health.NewServer()) healthpb.RegisterHealthServer(s.peerServer, health.NewServer())
cdnsystem.RegisterSeederServer(s.peerServer, sd) cdnsystemv1.RegisterSeederServer(s.peerServer, sd)
return s, nil return s, nil
} }
@ -110,13 +111,13 @@ func (s *server) Stop() {
s.downloadServer.GracefulStop() s.downloadServer.GracefulStop()
} }
func (s *server) GetPieceTasks(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error) { func (s *server) GetPieceTasks(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
s.Keep() s.Keep()
p, err := s.storageManager.GetPieces(ctx, request) p, err := s.storageManager.GetPieces(ctx, request)
if err != nil { if err != nil {
code := base.Code_UnknownError code := commonv1.Code_UnknownError
if err == dferrors.ErrInvalidArgument { if err == dferrors.ErrInvalidArgument {
code = base.Code_BadRequest code = commonv1.Code_BadRequest
} }
if err != storage.ErrTaskNotFound { if err != storage.ErrTaskNotFound {
logger.Errorf("get piece tasks error: %s, task id: %s, src peer: %s, dst peer: %s, piece num: %d, limit: %d", logger.Errorf("get piece tasks error: %s, task id: %s, src peer: %s, dst peer: %s, piece num: %d, limit: %d",
@ -126,7 +127,7 @@ func (s *server) GetPieceTasks(ctx context.Context, request *base.PieceTaskReque
// dst peer is not running // dst peer is not running
task, ok := s.peerTaskManager.IsPeerTaskRunning(request.TaskId) task, ok := s.peerTaskManager.IsPeerTaskRunning(request.TaskId)
if !ok { if !ok {
code = base.Code_PeerTaskNotFound code = commonv1.Code_PeerTaskNotFound
logger.Errorf("get piece tasks error: target peer task not found, task id: %s, src peer: %s, dst peer: %s, piece num: %d, limit: %d", logger.Errorf("get piece tasks error: target peer task not found, task id: %s, src peer: %s, dst peer: %s, piece num: %d, limit: %d",
request.TaskId, request.SrcPid, request.DstPid, request.StartNum, request.Limit) request.TaskId, request.SrcPid, request.DstPid, request.StartNum, request.Limit)
return nil, dferrors.New(code, err.Error()) return nil, dferrors.New(code, err.Error())
@ -134,7 +135,7 @@ func (s *server) GetPieceTasks(ctx context.Context, request *base.PieceTaskReque
if task.GetPeerID() != request.GetDstPid() { if task.GetPeerID() != request.GetDstPid() {
// there is only one running task at a time, redirect the request to the running peer task // there is only one running task at a time, redirect the request to the running peer task
r := base.PieceTaskRequest{ r := commonv1.PieceTaskRequest{
TaskId: request.TaskId, TaskId: request.TaskId,
SrcPid: request.SrcPid, SrcPid: request.SrcPid,
DstPid: task.GetPeerID(), // replace to running task peer id DstPid: task.GetPeerID(), // replace to running task peer id
@ -148,7 +149,7 @@ func (s *server) GetPieceTasks(ctx context.Context, request *base.PieceTaskReque
p.DstAddr = s.uploadAddr p.DstAddr = s.uploadAddr
return p, nil return p, nil
} }
code = base.Code_PeerTaskNotFound code = commonv1.Code_PeerTaskNotFound
logger.Errorf("get piece tasks error: target peer task and replaced peer task storage not found wit error: %s, task id: %s, src peer: %s, dst peer: %s, piece num: %d, limit: %d", logger.Errorf("get piece tasks error: target peer task and replaced peer task storage not found wit error: %s, task id: %s, src peer: %s, dst peer: %s, piece num: %d, limit: %d",
err, request.TaskId, request.SrcPid, request.DstPid, request.StartNum, request.Limit) err, request.TaskId, request.SrcPid, request.DstPid, request.StartNum, request.Limit)
return nil, dferrors.New(code, err.Error()) return nil, dferrors.New(code, err.Error())
@ -160,7 +161,7 @@ func (s *server) GetPieceTasks(ctx context.Context, request *base.PieceTaskReque
"task id: %s, src peer: %s, dst peer: %s, piece num: %d, limit: %d", "task id: %s, src peer: %s, dst peer: %s, piece num: %d, limit: %d",
request.TaskId, request.SrcPid, request.DstPid, request.StartNum, request.Limit) request.TaskId, request.SrcPid, request.DstPid, request.StartNum, request.Limit)
// dst peer is running, send empty result, src peer will retry later // dst peer is running, send empty result, src peer will retry later
return &base.PiecePacket{ return &commonv1.PiecePacket{
TaskId: request.TaskId, TaskId: request.TaskId,
DstPid: request.DstPid, DstPid: request.DstPid,
DstAddr: s.uploadAddr, DstAddr: s.uploadAddr,
@ -180,9 +181,9 @@ func (s *server) GetPieceTasks(ctx context.Context, request *base.PieceTaskReque
// sendExistPieces sends as many pieces as possible // sendExistPieces sends as many pieces as possible
func (s *server) sendExistPieces( func (s *server) sendExistPieces(
log *logger.SugaredLoggerOnWith, log *logger.SugaredLoggerOnWith,
request *base.PieceTaskRequest, request *commonv1.PieceTaskRequest,
sync dfdaemongrpc.Daemon_SyncPieceTasksServer, sync dfdaemonv1.Daemon_SyncPieceTasksServer,
get func(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error), get func(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error),
sentMap map[int32]struct{}) (total int32, err error) { sentMap map[int32]struct{}) (total int32, err error) {
return sendExistPieces(sync.Context(), log, get, request, sync, sentMap, true) return sendExistPieces(sync.Context(), log, get, request, sync, sentMap, true)
} }
@ -190,14 +191,14 @@ func (s *server) sendExistPieces(
// sendFirstPieceTasks sends as many pieces as possible, even if no pieces are available // sendFirstPieceTasks sends as many pieces as possible, even if no pieces are available
func (s *server) sendFirstPieceTasks( func (s *server) sendFirstPieceTasks(
log *logger.SugaredLoggerOnWith, log *logger.SugaredLoggerOnWith,
request *base.PieceTaskRequest, request *commonv1.PieceTaskRequest,
sync dfdaemongrpc.Daemon_SyncPieceTasksServer, sync dfdaemonv1.Daemon_SyncPieceTasksServer,
get func(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error), get func(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error),
sentMap map[int32]struct{}) (total int32, err error) { sentMap map[int32]struct{}) (total int32, err error) {
return sendExistPieces(sync.Context(), log, get, request, sync, sentMap, false) return sendExistPieces(sync.Context(), log, get, request, sync, sentMap, false)
} }
func (s *server) SyncPieceTasks(sync dfdaemongrpc.Daemon_SyncPieceTasksServer) error { func (s *server) SyncPieceTasks(sync dfdaemonv1.Daemon_SyncPieceTasksServer) error {
request, err := sync.Recv() request, err := sync.Recv()
if err != nil { if err != nil {
logger.Errorf("receive first sync piece tasks request error: %s", err.Error()) logger.Errorf("receive first sync piece tasks request error: %s", err.Error())
@ -212,7 +213,7 @@ func (s *server) SyncPieceTasks(sync dfdaemongrpc.Daemon_SyncPieceTasksServer) e
attributeSent bool attributeSent bool
) )
getPieces := func(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error) { getPieces := func(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
p, e := s.GetPieceTasks(ctx, request) p, e := s.GetPieceTasks(ctx, request)
if e != nil { if e != nil {
return nil, e return nil, e
@ -305,15 +306,15 @@ func (s *server) CheckHealth(context.Context) error {
} }
func (s *server) Download(ctx context.Context, func (s *server) Download(ctx context.Context,
req *dfdaemongrpc.DownRequest, results chan<- *dfdaemongrpc.DownResult) error { req *dfdaemonv1.DownRequest, results chan<- *dfdaemonv1.DownResult) error {
s.Keep() s.Keep()
return s.doDownload(ctx, req, results, "") return s.doDownload(ctx, req, results, "")
} }
func (s *server) doDownload(ctx context.Context, req *dfdaemongrpc.DownRequest, func (s *server) doDownload(ctx context.Context, req *dfdaemonv1.DownRequest,
results chan<- *dfdaemongrpc.DownResult, peerID string) error { results chan<- *dfdaemonv1.DownResult, peerID string) error {
if req.UrlMeta == nil { if req.UrlMeta == nil {
req.UrlMeta = &base.UrlMeta{} req.UrlMeta = &commonv1.UrlMeta{}
} }
// init peer task request; the peer uses a different peer id for every request // init peer task request; the peer uses a different peer id for every request
@ -322,7 +323,7 @@ func (s *server) doDownload(ctx context.Context, req *dfdaemongrpc.DownRequest,
peerID = idgen.PeerID(s.peerHost.Ip) peerID = idgen.PeerID(s.peerHost.Ip)
} }
peerTask := &peer.FileTaskRequest{ peerTask := &peer.FileTaskRequest{
PeerTaskRequest: scheduler.PeerTaskRequest{ PeerTaskRequest: schedulerv1.PeerTaskRequest{
Url: req.Url, Url: req.Url,
UrlMeta: req.UrlMeta, UrlMeta: req.UrlMeta,
PeerId: peerID, PeerId: peerID,
@ -350,10 +351,10 @@ func (s *server) doDownload(ctx context.Context, req *dfdaemongrpc.DownRequest,
peerTaskProgress, tiny, err := s.peerTaskManager.StartFileTask(ctx, peerTask) peerTaskProgress, tiny, err := s.peerTaskManager.StartFileTask(ctx, peerTask)
if err != nil { if err != nil {
return dferrors.New(base.Code_UnknownError, fmt.Sprintf("%s", err)) return dferrors.New(commonv1.Code_UnknownError, fmt.Sprintf("%s", err))
} }
if tiny != nil { if tiny != nil {
results <- &dfdaemongrpc.DownResult{ results <- &dfdaemonv1.DownResult{
TaskId: tiny.TaskID, TaskId: tiny.TaskID,
PeerId: tiny.PeerID, PeerId: tiny.PeerID,
CompletedLength: uint64(len(tiny.Content)), CompletedLength: uint64(len(tiny.Content)),
@ -375,13 +376,13 @@ func (s *server) doDownload(ctx context.Context, req *dfdaemongrpc.DownRequest,
if !ok { if !ok {
err = errors.New("progress closed unexpected") err = errors.New("progress closed unexpected")
log.Errorf(err.Error()) log.Errorf(err.Error())
return dferrors.New(base.Code_UnknownError, err.Error()) return dferrors.New(commonv1.Code_UnknownError, err.Error())
} }
if !p.State.Success { if !p.State.Success {
log.Errorf("task %s/%s failed: %d/%s", p.PeerID, p.TaskID, p.State.Code, p.State.Msg) log.Errorf("task %s/%s failed: %d/%s", p.PeerID, p.TaskID, p.State.Code, p.State.Msg)
return dferrors.New(p.State.Code, p.State.Msg) return dferrors.New(p.State.Code, p.State.Msg)
} }
results <- &dfdaemongrpc.DownResult{ results <- &dfdaemonv1.DownResult{
TaskId: p.TaskID, TaskId: p.TaskID,
PeerId: p.PeerID, PeerId: p.PeerID,
CompletedLength: uint64(p.CompletedLength), CompletedLength: uint64(p.CompletedLength),
@ -401,7 +402,7 @@ func (s *server) doDownload(ctx context.Context, req *dfdaemongrpc.DownRequest,
return nil return nil
} }
case <-ctx.Done(): case <-ctx.Done():
results <- &dfdaemongrpc.DownResult{ results <- &dfdaemonv1.DownResult{
CompletedLength: 0, CompletedLength: 0,
Done: true, Done: true,
} }
@ -411,7 +412,7 @@ func (s *server) doDownload(ctx context.Context, req *dfdaemongrpc.DownRequest,
} }
} }
func (s *server) StatTask(ctx context.Context, req *dfdaemongrpc.StatTaskRequest) error { func (s *server) StatTask(ctx context.Context, req *dfdaemonv1.StatTaskRequest) error {
s.Keep() s.Keep()
taskID := idgen.TaskID(req.Url, req.UrlMeta) taskID := idgen.TaskID(req.Url, req.UrlMeta)
log := logger.With("function", "StatTask", "URL", req.Url, "Tag", req.UrlMeta.Tag, "taskID", taskID, "LocalOnly", req.LocalOnly) log := logger.With("function", "StatTask", "URL", req.Url, "Tag", req.UrlMeta.Tag, "taskID", taskID, "LocalOnly", req.LocalOnly)
@ -426,7 +427,7 @@ func (s *server) StatTask(ctx context.Context, req *dfdaemongrpc.StatTaskRequest
if req.LocalOnly { if req.LocalOnly {
msg := "task not found in local cache" msg := "task not found in local cache"
log.Info(msg) log.Info(msg)
return dferrors.New(base.Code_PeerTaskNotFound, msg) return dferrors.New(commonv1.Code_PeerTaskNotFound, msg)
} }
// Check with the scheduler whether other peers hold the task // Check with the scheduler whether other peers hold the task
@ -440,14 +441,14 @@ func (s *server) StatTask(ctx context.Context, req *dfdaemongrpc.StatTaskRequest
} }
msg := fmt.Sprintf("task found but not available for download, state %s, has available peer %t", task.State, task.HasAvailablePeer) msg := fmt.Sprintf("task found but not available for download, state %s, has available peer %t", task.State, task.HasAvailablePeer)
log.Info(msg) log.Info(msg)
return dferrors.New(base.Code_PeerTaskNotFound, msg) return dferrors.New(commonv1.Code_PeerTaskNotFound, msg)
} }
func (s *server) isTaskCompleted(taskID string) bool { func (s *server) isTaskCompleted(taskID string) bool {
return s.storageManager.FindCompletedTask(taskID) != nil return s.storageManager.FindCompletedTask(taskID) != nil
} }
func (s *server) ImportTask(ctx context.Context, req *dfdaemongrpc.ImportTaskRequest) error { func (s *server) ImportTask(ctx context.Context, req *dfdaemonv1.ImportTaskRequest) error {
s.Keep() s.Keep()
peerID := idgen.PeerID(s.peerHost.Ip) peerID := idgen.PeerID(s.peerHost.Ip)
taskID := idgen.TaskID(req.Url, req.UrlMeta) taskID := idgen.TaskID(req.Url, req.UrlMeta)
@ -509,7 +510,7 @@ func (s *server) ImportTask(ctx context.Context, req *dfdaemongrpc.ImportTaskReq
return nil return nil
} }
func (s *server) ExportTask(ctx context.Context, req *dfdaemongrpc.ExportTaskRequest) error { func (s *server) ExportTask(ctx context.Context, req *dfdaemonv1.ExportTaskRequest) error {
s.Keep() s.Keep()
taskID := idgen.TaskID(req.Url, req.UrlMeta) taskID := idgen.TaskID(req.Url, req.UrlMeta)
log := logger.With("function", "ExportTask", "URL", req.Url, "Tag", req.UrlMeta.Tag, "taskID", taskID, "destination", req.Output) log := logger.With("function", "ExportTask", "URL", req.Url, "Tag", req.UrlMeta.Tag, "taskID", taskID, "destination", req.Output)
@ -521,7 +522,7 @@ func (s *server) ExportTask(ctx context.Context, req *dfdaemongrpc.ExportTaskReq
if req.LocalOnly { if req.LocalOnly {
msg := "task not found in local storage" msg := "task not found in local storage"
log.Info(msg) log.Info(msg)
return dferrors.New(base.Code_PeerTaskNotFound, msg) return dferrors.New(commonv1.Code_PeerTaskNotFound, msg)
} }
log.Info("task not found, try from peers") log.Info("task not found, try from peers")
return s.exportFromPeers(ctx, log, req) return s.exportFromPeers(ctx, log, req)
@ -534,7 +535,7 @@ func (s *server) ExportTask(ctx context.Context, req *dfdaemongrpc.ExportTaskReq
return nil return nil
} }
func (s *server) exportFromLocal(ctx context.Context, req *dfdaemongrpc.ExportTaskRequest, peerID string) error { func (s *server) exportFromLocal(ctx context.Context, req *dfdaemonv1.ExportTaskRequest, peerID string) error {
return s.storageManager.Store(ctx, &storage.StoreRequest{ return s.storageManager.Store(ctx, &storage.StoreRequest{
CommonTaskRequest: storage.CommonTaskRequest{ CommonTaskRequest: storage.CommonTaskRequest{
PeerID: peerID, PeerID: peerID,
@ -545,13 +546,13 @@ func (s *server) exportFromLocal(ctx context.Context, req *dfdaemongrpc.ExportTa
}) })
} }
func (s *server) exportFromPeers(ctx context.Context, log *logger.SugaredLoggerOnWith, req *dfdaemongrpc.ExportTaskRequest) error { func (s *server) exportFromPeers(ctx context.Context, log *logger.SugaredLoggerOnWith, req *dfdaemonv1.ExportTaskRequest) error {
peerID := idgen.PeerID(s.peerHost.Ip) peerID := idgen.PeerID(s.peerHost.Ip)
taskID := idgen.TaskID(req.Url, req.UrlMeta) taskID := idgen.TaskID(req.Url, req.UrlMeta)
task, err := s.peerTaskManager.StatTask(ctx, taskID) task, err := s.peerTaskManager.StatTask(ctx, taskID)
if err != nil { if err != nil {
if dferrors.CheckError(err, base.Code_PeerTaskNotFound) { if dferrors.CheckError(err, commonv1.Code_PeerTaskNotFound) {
log.Info("task not found in P2P network") log.Info("task not found in P2P network")
} else { } else {
msg := fmt.Sprintf("failed to StatTask from peers: %s", err) msg := fmt.Sprintf("failed to StatTask from peers: %s", err)
@ -562,18 +563,18 @@ func (s *server) exportFromPeers(ctx context.Context, log *logger.SugaredLoggerO
if task.State != resource.TaskStateSucceeded || !task.HasAvailablePeer { if task.State != resource.TaskStateSucceeded || !task.HasAvailablePeer {
msg := fmt.Sprintf("task found but not available for download, state %s, has available peer %t", task.State, task.HasAvailablePeer) msg := fmt.Sprintf("task found but not available for download, state %s, has available peer %t", task.State, task.HasAvailablePeer)
log.Info(msg) log.Info(msg)
return dferrors.New(base.Code_PeerTaskNotFound, msg) return dferrors.New(commonv1.Code_PeerTaskNotFound, msg)
} }
// Task exists in peers // Task exists in peers
var ( var (
start = time.Now() start = time.Now()
drc = make(chan *dfdaemongrpc.DownResult, 1) drc = make(chan *dfdaemonv1.DownResult, 1)
errChan = make(chan error, 3) errChan = make(chan error, 3)
result *dfdaemongrpc.DownResult result *dfdaemonv1.DownResult
downError error downError error
) )
downRequest := &dfdaemongrpc.DownRequest{ downRequest := &dfdaemonv1.DownRequest{
Url: req.Url, Url: req.Url,
Output: req.Output, Output: req.Output,
Timeout: req.Timeout, Timeout: req.Timeout,
@ -609,7 +610,7 @@ func (s *server) exportFromPeers(ctx context.Context, log *logger.SugaredLoggerO
return nil return nil
} }
func call(ctx context.Context, peerID string, drc chan *dfdaemongrpc.DownResult, s *server, req *dfdaemongrpc.DownRequest, errChan chan error) { func call(ctx context.Context, peerID string, drc chan *dfdaemonv1.DownResult, s *server, req *dfdaemonv1.DownRequest, errChan chan error) {
err := safe.Call(func() { err := safe.Call(func() {
if err := s.doDownload(ctx, req, drc, peerID); err != nil { if err := s.doDownload(ctx, req, drc, peerID); err != nil {
errChan <- err errChan <- err
@ -621,7 +622,7 @@ func call(ctx context.Context, peerID string, drc chan *dfdaemongrpc.DownResult,
} }
} }
func (s *server) DeleteTask(ctx context.Context, req *dfdaemongrpc.DeleteTaskRequest) error { func (s *server) DeleteTask(ctx context.Context, req *dfdaemonv1.DeleteTaskRequest) error {
s.Keep() s.Keep()
taskID := idgen.TaskID(req.Url, req.UrlMeta) taskID := idgen.TaskID(req.Url, req.UrlMeta)
log := logger.With("function", "DeleteTask", "URL", req.Url, "Tag", req.UrlMeta.Tag, "taskID", taskID) log := logger.With("function", "DeleteTask", "URL", req.Url, "Tag", req.UrlMeta.Tag, "taskID", taskID)
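Throughout this file the error handling follows one pattern: a storage or task failure is classified into a commonv1 code and wrapped with dferrors.New before crossing the gRPC boundary. A condensed sketch of that classification, assuming only the three codes used above; the sentinel errors are stand-ins for dferrors.ErrInvalidArgument and storage.ErrTaskNotFound:

package main

import (
	"errors"
	"fmt"

	commonv1 "d7y.io/api/pkg/apis/common/v1"
)

var (
	errInvalidArgument = errors.New("invalid argument")
	errTaskNotFound    = errors.New("task not found")
)

// classify condenses the mapping used by GetPieceTasks above.
func classify(err error) commonv1.Code {
	switch {
	case errors.Is(err, errInvalidArgument):
		return commonv1.Code_BadRequest
	case errors.Is(err, errTaskNotFound):
		return commonv1.Code_PeerTaskNotFound
	default:
		return commonv1.Code_UnknownError
	}
}

func main() {
	fmt.Println(classify(errTaskNotFound))
}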

View File

@ -30,6 +30,10 @@ import (
"github.com/phayes/freeport" "github.com/phayes/freeport"
testifyassert "github.com/stretchr/testify/assert" testifyassert "github.com/stretchr/testify/assert"
commonv1 "d7y.io/api/pkg/apis/common/v1"
dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/client/daemon/peer" "d7y.io/dragonfly/v2/client/daemon/peer"
"d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/storage"
"d7y.io/dragonfly/v2/client/daemon/storage/mocks" "d7y.io/dragonfly/v2/client/daemon/storage/mocks"
@ -37,11 +41,8 @@ import (
"d7y.io/dragonfly/v2/pkg/dfnet" "d7y.io/dragonfly/v2/pkg/dfnet"
"d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/idgen"
"d7y.io/dragonfly/v2/pkg/net/ip" "d7y.io/dragonfly/v2/pkg/net/ip"
"d7y.io/dragonfly/v2/pkg/rpc/base"
dfdaemongrpc "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
dfclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client" dfclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client"
dfdaemonserver "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server" dfdaemonserver "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
) )
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
@ -77,17 +78,17 @@ func Test_ServeDownload(t *testing.T) {
}) })
m := &server{ m := &server{
KeepAlive: util.NewKeepAlive("test"), KeepAlive: util.NewKeepAlive("test"),
peerHost: &scheduler.PeerHost{}, peerHost: &schedulerv1.PeerHost{},
peerTaskManager: mockPeerTaskManager, peerTaskManager: mockPeerTaskManager,
} }
m.downloadServer = dfdaemonserver.New(m) m.downloadServer = dfdaemonserver.New(m)
_, client := setupPeerServerAndClient(t, m, assert, m.ServeDownload) _, client := setupPeerServerAndClient(t, m, assert, m.ServeDownload)
request := &dfdaemongrpc.DownRequest{ request := &dfdaemonv1.DownRequest{
Uuid: uuid.Generate().String(), Uuid: uuid.Generate().String(),
Url: "http://localhost/test", Url: "http://localhost/test",
Output: "./testdata/file1", Output: "./testdata/file1",
DisableBackSource: false, DisableBackSource: false,
UrlMeta: &base.UrlMeta{ UrlMeta: &commonv1.UrlMeta{
Tag: "unit test", Tag: "unit test",
}, },
Pattern: "p2p", Pattern: "p2p",
@ -97,8 +98,8 @@ func Test_ServeDownload(t *testing.T) {
assert.Nil(err, "client download grpc call should be ok") assert.Nil(err, "client download grpc call should be ok")
var ( var (
lastResult *dfdaemongrpc.DownResult lastResult *dfdaemonv1.DownResult
curResult *dfdaemongrpc.DownResult curResult *dfdaemonv1.DownResult
) )
for { for {
curResult, err = down.Recv() curResult, err = down.Recv()
@ -119,22 +120,22 @@ func Test_ServePeer(t *testing.T) {
var maxPieceNum uint32 = 10 var maxPieceNum uint32 = 10
mockStorageManger := mocks.NewMockManager(ctrl) mockStorageManger := mocks.NewMockManager(ctrl)
mockStorageManger.EXPECT().GetPieces(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(func(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { mockStorageManger.EXPECT().GetPieces(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(func(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
var ( var (
pieces []*base.PieceInfo pieces []*commonv1.PieceInfo
pieceSize = uint32(1024) pieceSize = uint32(1024)
) )
for i := req.StartNum; i < req.Limit+req.StartNum && i < maxPieceNum; i++ { for i := req.StartNum; i < req.Limit+req.StartNum && i < maxPieceNum; i++ {
pieces = append(pieces, &base.PieceInfo{ pieces = append(pieces, &commonv1.PieceInfo{
PieceNum: int32(i), PieceNum: int32(i),
RangeStart: uint64(i * pieceSize), RangeStart: uint64(i * pieceSize),
RangeSize: pieceSize, RangeSize: pieceSize,
PieceMd5: "", PieceMd5: "",
PieceOffset: uint64(i * pieceSize), PieceOffset: uint64(i * pieceSize),
PieceStyle: base.PieceStyle_PLAIN, PieceStyle: commonv1.PieceStyle_PLAIN,
}) })
} }
return &base.PiecePacket{ return &commonv1.PiecePacket{
TaskId: "", TaskId: "",
DstPid: "", DstPid: "",
DstAddr: "", DstAddr: "",
@ -146,7 +147,7 @@ func Test_ServePeer(t *testing.T) {
}) })
s := &server{ s := &server{
KeepAlive: util.NewKeepAlive("test"), KeepAlive: util.NewKeepAlive("test"),
peerHost: &scheduler.PeerHost{}, peerHost: &schedulerv1.PeerHost{},
storageManager: mockStorageManger, storageManager: mockStorageManger,
} }
s.peerServer = dfdaemonserver.New(s) s.peerServer = dfdaemonserver.New(s)
@ -154,12 +155,12 @@ func Test_ServePeer(t *testing.T) {
defer s.peerServer.GracefulStop() defer s.peerServer.GracefulStop()
var tests = []struct { var tests = []struct {
request *base.PieceTaskRequest request *commonv1.PieceTaskRequest
responsePieceSize int responsePieceSize int
}{ }{
{ {
request: &base.PieceTaskRequest{ request: &commonv1.PieceTaskRequest{
TaskId: idgen.TaskID("http://www.test.com", &base.UrlMeta{}), TaskId: idgen.TaskID("http://www.test.com", &commonv1.UrlMeta{}),
SrcPid: idgen.PeerID(ip.IPv4), SrcPid: idgen.PeerID(ip.IPv4),
DstPid: idgen.PeerID(ip.IPv4), DstPid: idgen.PeerID(ip.IPv4),
StartNum: 0, StartNum: 0,
@ -169,8 +170,8 @@ func Test_ServePeer(t *testing.T) {
responsePieceSize: 1, responsePieceSize: 1,
}, },
{ {
request: &base.PieceTaskRequest{ request: &commonv1.PieceTaskRequest{
TaskId: idgen.TaskID("http://www.test.com", &base.UrlMeta{}), TaskId: idgen.TaskID("http://www.test.com", &commonv1.UrlMeta{}),
SrcPid: idgen.PeerID(ip.IPv4), SrcPid: idgen.PeerID(ip.IPv4),
DstPid: idgen.PeerID(ip.IPv4), DstPid: idgen.PeerID(ip.IPv4),
StartNum: 0, StartNum: 0,
@ -180,8 +181,8 @@ func Test_ServePeer(t *testing.T) {
responsePieceSize: 4, responsePieceSize: 4,
}, },
{ {
request: &base.PieceTaskRequest{ request: &commonv1.PieceTaskRequest{
TaskId: idgen.TaskID("http://www.test.com", &base.UrlMeta{}), TaskId: idgen.TaskID("http://www.test.com", &commonv1.UrlMeta{}),
SrcPid: idgen.PeerID(ip.IPv4), SrcPid: idgen.PeerID(ip.IPv4),
DstPid: idgen.PeerID(ip.IPv4), DstPid: idgen.PeerID(ip.IPv4),
StartNum: 8, StartNum: 8,
@ -191,8 +192,8 @@ func Test_ServePeer(t *testing.T) {
responsePieceSize: 1, responsePieceSize: 1,
}, },
{ {
request: &base.PieceTaskRequest{ request: &commonv1.PieceTaskRequest{
TaskId: idgen.TaskID("http://www.test.com", &base.UrlMeta{}), TaskId: idgen.TaskID("http://www.test.com", &commonv1.UrlMeta{}),
SrcPid: idgen.PeerID(ip.IPv4), SrcPid: idgen.PeerID(ip.IPv4),
DstPid: idgen.PeerID(ip.IPv4), DstPid: idgen.PeerID(ip.IPv4),
StartNum: 8, StartNum: 8,
@ -373,11 +374,11 @@ func Test_SyncPieceTasks(t *testing.T) {
} }
var ( var (
totalPieces []*base.PieceInfo totalPieces []*commonv1.PieceInfo
lock sync.Mutex lock sync.Mutex
) )
var addedPieces = make(map[uint32]*base.PieceInfo) var addedPieces = make(map[uint32]*commonv1.PieceInfo)
for _, p := range tc.existPieces { for _, p := range tc.existPieces {
if p.end == 0 { if p.end == 0 {
p.end = p.start p.end = p.start
@ -386,12 +387,12 @@ func Test_SyncPieceTasks(t *testing.T) {
if _, ok := addedPieces[uint32(i)]; ok { if _, ok := addedPieces[uint32(i)]; ok {
continue continue
} }
piece := &base.PieceInfo{ piece := &commonv1.PieceInfo{
PieceNum: int32(i), PieceNum: int32(i),
RangeStart: uint64(i) * uint64(pieceSize), RangeStart: uint64(i) * uint64(pieceSize),
RangeSize: pieceSize, RangeSize: pieceSize,
PieceOffset: uint64(i) * uint64(pieceSize), PieceOffset: uint64(i) * uint64(pieceSize),
PieceStyle: base.PieceStyle_PLAIN, PieceStyle: commonv1.PieceStyle_PLAIN,
} }
totalPieces = append(totalPieces, piece) totalPieces = append(totalPieces, piece)
addedPieces[uint32(i)] = piece addedPieces[uint32(i)] = piece
@ -400,8 +401,8 @@ func Test_SyncPieceTasks(t *testing.T) {
mockStorageManger.EXPECT().GetPieces(gomock.Any(), mockStorageManger.EXPECT().GetPieces(gomock.Any(),
gomock.Any()).AnyTimes().DoAndReturn( gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { func(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
var pieces []*base.PieceInfo var pieces []*commonv1.PieceInfo
lock.Lock() lock.Lock()
for i := req.StartNum; i < tc.totalPieces; i++ { for i := req.StartNum; i < tc.totalPieces; i++ {
if piece, ok := addedPieces[i]; ok { if piece, ok := addedPieces[i]; ok {
@ -411,7 +412,7 @@ func Test_SyncPieceTasks(t *testing.T) {
} }
} }
lock.Unlock() lock.Unlock()
return &base.PiecePacket{ return &commonv1.PiecePacket{
TaskId: req.TaskId, TaskId: req.TaskId,
DstPid: req.DstPid, DstPid: req.DstPid,
DstAddr: "", DstAddr: "",
@ -423,8 +424,8 @@ func Test_SyncPieceTasks(t *testing.T) {
}) })
mockStorageManger.EXPECT().GetExtendAttribute(gomock.Any(), mockStorageManger.EXPECT().GetExtendAttribute(gomock.Any(),
gomock.Any()).AnyTimes().DoAndReturn( gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { func(ctx context.Context, req *storage.PeerTaskMetadata) (*commonv1.ExtendAttribute, error) {
return &base.ExtendAttribute{ return &commonv1.ExtendAttribute{
Header: map[string]string{ Header: map[string]string{
"Test": "test", "Test": "test",
}, },
@ -432,7 +433,7 @@ func Test_SyncPieceTasks(t *testing.T) {
}) })
mockTaskManager := peer.NewMockTaskManager(ctrl) mockTaskManager := peer.NewMockTaskManager(ctrl)
mockTaskManager.EXPECT().Subscribe(gomock.Any()).AnyTimes().DoAndReturn( mockTaskManager.EXPECT().Subscribe(gomock.Any()).AnyTimes().DoAndReturn(
func(request *base.PieceTaskRequest) (*peer.SubscribeResponse, bool) { func(request *commonv1.PieceTaskRequest) (*peer.SubscribeResponse, bool) {
ch := make(chan *peer.PieceInfo) ch := make(chan *peer.PieceInfo)
success := make(chan struct{}) success := make(chan struct{})
fail := make(chan struct{}) fail := make(chan struct{})
@ -447,12 +448,12 @@ func Test_SyncPieceTasks(t *testing.T) {
if _, ok := addedPieces[uint32(j)]; ok { if _, ok := addedPieces[uint32(j)]; ok {
continue continue
} }
piece := &base.PieceInfo{ piece := &commonv1.PieceInfo{
PieceNum: int32(j), PieceNum: int32(j),
RangeStart: uint64(j) * uint64(pieceSize), RangeStart: uint64(j) * uint64(pieceSize),
RangeSize: pieceSize, RangeSize: pieceSize,
PieceOffset: uint64(j) * uint64(pieceSize), PieceOffset: uint64(j) * uint64(pieceSize),
PieceStyle: base.PieceStyle_PLAIN, PieceStyle: commonv1.PieceStyle_PLAIN,
} }
totalPieces = append(totalPieces, piece) totalPieces = append(totalPieces, piece)
addedPieces[uint32(j)] = piece addedPieces[uint32(j)] = piece
@ -483,7 +484,7 @@ func Test_SyncPieceTasks(t *testing.T) {
s := &server{ s := &server{
KeepAlive: util.NewKeepAlive("test"), KeepAlive: util.NewKeepAlive("test"),
peerHost: &scheduler.PeerHost{}, peerHost: &schedulerv1.PeerHost{},
storageManager: mockStorageManger, storageManager: mockStorageManger,
peerTaskManager: mockTaskManager, peerTaskManager: mockTaskManager,
} }
@ -496,7 +497,7 @@ func Test_SyncPieceTasks(t *testing.T) {
Type: dfnet.TCP, Type: dfnet.TCP,
Addr: fmt.Sprintf("127.0.0.1:%d", port), Addr: fmt.Sprintf("127.0.0.1:%d", port),
}, },
&base.PieceTaskRequest{ &commonv1.PieceTaskRequest{
TaskId: tc.name, TaskId: tc.name,
SrcPid: idgen.PeerID(ip.IPv4), SrcPid: idgen.PeerID(ip.IPv4),
DstPid: idgen.PeerID(ip.IPv4), DstPid: idgen.PeerID(ip.IPv4),
@ -515,7 +516,7 @@ func Test_SyncPieceTasks(t *testing.T) {
} else { } else {
go func() { go func() {
for _, n := range tc.requestPieces { for _, n := range tc.requestPieces {
request := &base.PieceTaskRequest{ request := &commonv1.PieceTaskRequest{
TaskId: tc.name, TaskId: tc.name,
SrcPid: idgen.PeerID(ip.IPv4), SrcPid: idgen.PeerID(ip.IPv4),
DstPid: idgen.PeerID(ip.IPv4), DstPid: idgen.PeerID(ip.IPv4),
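The mocked GetPieces above pages through pieces from StartNum up to StartNum+Limit, capped by the task's total piece count, which is what the varying responsePieceSize expectations in the test table exercise. A self-contained sketch of that paging arithmetic:

package main

import "fmt"

// pageCount mirrors the loop bound in the mock above:
// i := startNum; i < startNum+limit && i < maxPieceNum.
func pageCount(startNum, limit, maxPieceNum uint32) int {
	count := 0
	for i := startNum; i < startNum+limit && i < maxPieceNum; i++ {
		count++
	}
	return count
}

func main() {
	fmt.Println(pageCount(0, 4, 10))  // 4: limited by Limit
	fmt.Println(pageCount(8, 16, 10)) // 2: limited by total pieces
}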

View File

@ -25,6 +25,10 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1"
commonv1 "d7y.io/api/pkg/apis/common/v1"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/metrics" "d7y.io/dragonfly/v2/client/daemon/metrics"
"d7y.io/dragonfly/v2/client/daemon/peer" "d7y.io/dragonfly/v2/client/daemon/peer"
@ -32,36 +36,33 @@ import (
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/idgen"
"d7y.io/dragonfly/v2/pkg/net/http" "d7y.io/dragonfly/v2/pkg/net/http"
"d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/pkg/rpc/common"
"d7y.io/dragonfly/v2/pkg/rpc/base/common"
"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
) )
type seeder struct { type seeder struct {
server *server server *server
} }
func (s *seeder) GetPieceTasks(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error) { func (s *seeder) GetPieceTasks(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
return s.server.GetPieceTasks(ctx, request) return s.server.GetPieceTasks(ctx, request)
} }
func (s *seeder) SyncPieceTasks(tasksServer cdnsystem.Seeder_SyncPieceTasksServer) error { func (s *seeder) SyncPieceTasks(tasksServer cdnsystemv1.Seeder_SyncPieceTasksServer) error {
return s.server.SyncPieceTasks(tasksServer) return s.server.SyncPieceTasks(tasksServer)
} }
func (s *seeder) ObtainSeeds(seedRequest *cdnsystem.SeedRequest, seedsServer cdnsystem.Seeder_ObtainSeedsServer) error { func (s *seeder) ObtainSeeds(seedRequest *cdnsystemv1.SeedRequest, seedsServer cdnsystemv1.Seeder_ObtainSeedsServer) error {
metrics.SeedPeerConcurrentDownloadGauge.Inc() metrics.SeedPeerConcurrentDownloadGauge.Inc()
defer metrics.SeedPeerConcurrentDownloadGauge.Dec() defer metrics.SeedPeerConcurrentDownloadGauge.Dec()
metrics.SeedPeerDownloadCount.Add(1) metrics.SeedPeerDownloadCount.Add(1)
s.server.Keep() s.server.Keep()
if seedRequest.UrlMeta == nil { if seedRequest.UrlMeta == nil {
seedRequest.UrlMeta = &base.UrlMeta{} seedRequest.UrlMeta = &commonv1.UrlMeta{}
} }
req := peer.SeedTaskRequest{ req := peer.SeedTaskRequest{
PeerTaskRequest: scheduler.PeerTaskRequest{ PeerTaskRequest: schedulerv1.PeerTaskRequest{
Url: seedRequest.Url, Url: seedRequest.Url,
UrlMeta: seedRequest.UrlMeta, UrlMeta: seedRequest.UrlMeta,
PeerId: idgen.SeedPeerID(s.server.peerHost.Ip), // when reusing a peer task, the peer id will be replaced. PeerId: idgen.SeedPeerID(s.server.peerHost.Ip), // when reusing a peer task, the peer id will be replaced.
@ -114,10 +115,10 @@ func (s *seeder) ObtainSeeds(seedRequest *cdnsystem.SeedRequest, seedsServer cdn
log.Infof("start seed task") log.Infof("start seed task")
err = seedsServer.Send( err = seedsServer.Send(
&cdnsystem.PieceSeed{ &cdnsystemv1.PieceSeed{
PeerId: resp.PeerID, PeerId: resp.PeerID,
HostId: req.PeerHost.Id, HostId: req.PeerHost.Id,
PieceInfo: &base.PieceInfo{ PieceInfo: &commonv1.PieceInfo{
PieceNum: common.BeginOfPiece, PieceNum: common.BeginOfPiece,
}, },
Done: false, Done: false,
@ -149,7 +150,7 @@ func (s *seeder) ObtainSeeds(seedRequest *cdnsystem.SeedRequest, seedsServer cdn
type seedSynchronizer struct { type seedSynchronizer struct {
*peer.SeedTaskResponse *peer.SeedTaskResponse
*logger.SugaredLoggerOnWith *logger.SugaredLoggerOnWith
seedsServer cdnsystem.Seeder_ObtainSeedsServer seedsServer cdnsystemv1.Seeder_ObtainSeedsServer
seedTaskRequest *peer.SeedTaskRequest seedTaskRequest *peer.SeedTaskRequest
startNanoSecond int64 startNanoSecond int64
attributeSent bool attributeSent bool
@ -210,7 +211,7 @@ func (s *seedSynchronizer) sendPieceSeeds(reuse bool) (err error) {
func (s *seedSynchronizer) sendRemindingPieceSeeds(desired int32, reuse bool) error { func (s *seedSynchronizer) sendRemindingPieceSeeds(desired int32, reuse bool) error {
for { for {
pp, err := s.Storage.GetPieces(s.Context, pp, err := s.Storage.GetPieces(s.Context,
&base.PieceTaskRequest{ &commonv1.PieceTaskRequest{
TaskId: s.TaskID, TaskId: s.TaskID,
StartNum: uint32(desired), StartNum: uint32(desired),
Limit: 16, Limit: 16,
@ -274,7 +275,7 @@ func (s *seedSynchronizer) sendOrderedPieceSeeds(desired, orderedNum int32, fini
var contentLength int64 = -1 var contentLength int64 = -1
for ; cur <= orderedNum; cur++ { for ; cur <= orderedNum; cur++ {
pp, err := s.Storage.GetPieces(s.Context, pp, err := s.Storage.GetPieces(s.Context,
&base.PieceTaskRequest{ &commonv1.PieceTaskRequest{
TaskId: s.TaskID, TaskId: s.TaskID,
StartNum: uint32(cur), StartNum: uint32(cur),
Limit: 1, Limit: 1,
@ -314,8 +315,8 @@ func (s *seedSynchronizer) sendOrderedPieceSeeds(desired, orderedNum int32, fini
return contentLength, cur, nil return contentLength, cur, nil
} }
func (s *seedSynchronizer) compositePieceSeed(pp *base.PiecePacket, piece *base.PieceInfo) cdnsystem.PieceSeed { func (s *seedSynchronizer) compositePieceSeed(pp *commonv1.PiecePacket, piece *commonv1.PieceInfo) cdnsystemv1.PieceSeed {
return cdnsystem.PieceSeed{ return cdnsystemv1.PieceSeed{
PeerId: s.seedTaskRequest.PeerId, PeerId: s.seedTaskRequest.PeerId,
HostId: s.seedTaskRequest.PeerHost.Id, HostId: s.seedTaskRequest.PeerHost.Id,
PieceInfo: piece, PieceInfo: piece,
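ObtainSeeds opens the stream with a sentinel frame carrying common.BeginOfPiece before any real piece data, as the hunk above shows. A sketch of that first message, using only fields visible in the diff (the ids are placeholders; BeginOfPiece comes from the pkg/rpc/common package in the imports above):

package main

import (
	"fmt"

	cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1"
	commonv1 "d7y.io/api/pkg/apis/common/v1"

	"d7y.io/dragonfly/v2/pkg/rpc/common"
)

func main() {
	// The first frame signals the beginning of the seed stream.
	first := &cdnsystemv1.PieceSeed{
		PeerId: "seed-peer-id",
		HostId: "host-id",
		PieceInfo: &commonv1.PieceInfo{
			PieceNum: common.BeginOfPiece,
		},
		Done: false,
	}
	fmt.Println(first.GetPieceInfo().GetPieceNum())
}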

View File

@ -30,18 +30,19 @@ import (
"go.opentelemetry.io/otel" "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1"
commonv1 "d7y.io/api/pkg/apis/common/v1"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/peer" "d7y.io/dragonfly/v2/client/daemon/peer"
"d7y.io/dragonfly/v2/client/daemon/storage" "d7y.io/dragonfly/v2/client/daemon/storage"
"d7y.io/dragonfly/v2/client/daemon/storage/mocks" "d7y.io/dragonfly/v2/client/daemon/storage/mocks"
"d7y.io/dragonfly/v2/client/util" "d7y.io/dragonfly/v2/client/util"
"d7y.io/dragonfly/v2/pkg/dfnet" "d7y.io/dragonfly/v2/pkg/dfnet"
"d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client"
"d7y.io/dragonfly/v2/pkg/rpc/base/common" "d7y.io/dragonfly/v2/pkg/rpc/common"
"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
cdnclient "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client"
dfdaemonserver "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server" dfdaemonserver "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
) )
func Test_ObtainSeeds(t *testing.T) { func Test_ObtainSeeds(t *testing.T) {
@ -196,11 +197,11 @@ func Test_ObtainSeeds(t *testing.T) {
} }
var ( var (
totalPieces []*base.PieceInfo totalPieces []*commonv1.PieceInfo
lock sync.Mutex lock sync.Mutex
) )
var addedPieces = make(map[uint32]*base.PieceInfo) var addedPieces = make(map[uint32]*commonv1.PieceInfo)
for _, p := range tc.existPieces { for _, p := range tc.existPieces {
if p.end == 0 { if p.end == 0 {
p.end = p.start p.end = p.start
@ -209,12 +210,12 @@ func Test_ObtainSeeds(t *testing.T) {
if _, ok := addedPieces[uint32(i)]; ok { if _, ok := addedPieces[uint32(i)]; ok {
continue continue
} }
piece := &base.PieceInfo{ piece := &commonv1.PieceInfo{
PieceNum: int32(i), PieceNum: int32(i),
RangeStart: uint64(i) * uint64(pieceSize), RangeStart: uint64(i) * uint64(pieceSize),
RangeSize: pieceSize, RangeSize: pieceSize,
PieceOffset: uint64(i) * uint64(pieceSize), PieceOffset: uint64(i) * uint64(pieceSize),
PieceStyle: base.PieceStyle_PLAIN, PieceStyle: commonv1.PieceStyle_PLAIN,
} }
totalPieces = append(totalPieces, piece) totalPieces = append(totalPieces, piece)
addedPieces[uint32(i)] = piece addedPieces[uint32(i)] = piece
@ -223,8 +224,8 @@ func Test_ObtainSeeds(t *testing.T) {
mockStorageManger.EXPECT().GetPieces(gomock.Any(), mockStorageManger.EXPECT().GetPieces(gomock.Any(),
gomock.Any()).AnyTimes().DoAndReturn( gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { func(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
var pieces []*base.PieceInfo var pieces []*commonv1.PieceInfo
lock.Lock() lock.Lock()
for i := req.StartNum; i < tc.totalPieces; i++ { for i := req.StartNum; i < tc.totalPieces; i++ {
if piece, ok := addedPieces[i]; ok { if piece, ok := addedPieces[i]; ok {
@ -234,7 +235,7 @@ func Test_ObtainSeeds(t *testing.T) {
} }
} }
lock.Unlock() lock.Unlock()
return &base.PiecePacket{ return &commonv1.PiecePacket{
TaskId: req.TaskId, TaskId: req.TaskId,
DstPid: req.DstPid, DstPid: req.DstPid,
DstAddr: "", DstAddr: "",
@ -246,8 +247,8 @@ func Test_ObtainSeeds(t *testing.T) {
}) })
mockStorageManger.EXPECT().GetExtendAttribute(gomock.Any(), mockStorageManger.EXPECT().GetExtendAttribute(gomock.Any(),
gomock.Any()).AnyTimes().DoAndReturn( gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { func(ctx context.Context, req *storage.PeerTaskMetadata) (*commonv1.ExtendAttribute, error) {
return &base.ExtendAttribute{ return &commonv1.ExtendAttribute{
Header: map[string]string{ Header: map[string]string{
"Test": "test", "Test": "test",
}, },
@ -270,12 +271,12 @@ func Test_ObtainSeeds(t *testing.T) {
if _, ok := addedPieces[uint32(j)]; ok { if _, ok := addedPieces[uint32(j)]; ok {
continue continue
} }
piece := &base.PieceInfo{ piece := &commonv1.PieceInfo{
PieceNum: int32(j), PieceNum: int32(j),
RangeStart: uint64(j) * uint64(pieceSize), RangeStart: uint64(j) * uint64(pieceSize),
RangeSize: pieceSize, RangeSize: pieceSize,
PieceOffset: uint64(j) * uint64(pieceSize), PieceOffset: uint64(j) * uint64(pieceSize),
PieceStyle: base.PieceStyle_PLAIN, PieceStyle: commonv1.PieceStyle_PLAIN,
} }
totalPieces = append(totalPieces, piece) totalPieces = append(totalPieces, piece)
addedPieces[uint32(j)] = piece addedPieces[uint32(j)] = piece
@ -313,7 +314,7 @@ func Test_ObtainSeeds(t *testing.T) {
s := &server{ s := &server{
KeepAlive: util.NewKeepAlive("test"), KeepAlive: util.NewKeepAlive("test"),
peerHost: &scheduler.PeerHost{}, peerHost: &schedulerv1.PeerHost{},
storageManager: mockStorageManger, storageManager: mockStorageManger,
peerTaskManager: mockTaskManager, peerTaskManager: mockTaskManager,
} }
@ -323,7 +324,7 @@ func Test_ObtainSeeds(t *testing.T) {
pps, err := client.ObtainSeeds( pps, err := client.ObtainSeeds(
context.Background(), context.Background(),
&cdnsystem.SeedRequest{ &cdnsystemv1.SeedRequest{
TaskId: "fake-task-id", TaskId: "fake-task-id",
Url: "http://localhost/path/to/file", Url: "http://localhost/path/to/file",
UrlMeta: nil, UrlMeta: nil,
@ -361,9 +362,9 @@ func Test_ObtainSeeds(t *testing.T) {
} }
} }
func setupSeederServerAndClient(t *testing.T, srv *server, sd *seeder, assert *testifyassert.Assertions, serveFunc func(listener net.Listener) error) (int, cdnclient.CdnClient) { func setupSeederServerAndClient(t *testing.T, srv *server, sd *seeder, assert *testifyassert.Assertions, serveFunc func(listener net.Listener) error) (int, client.CdnClient) {
srv.peerServer = dfdaemonserver.New(srv) srv.peerServer = dfdaemonserver.New(srv)
cdnsystem.RegisterSeederServer(srv.peerServer, sd) cdnsystemv1.RegisterSeederServer(srv.peerServer, sd)
port, err := freeport.GetFreePort() port, err := freeport.GetFreePort()
if err != nil { if err != nil {
@ -378,7 +379,7 @@ func setupSeederServerAndClient(t *testing.T, srv *server, sd *seeder, assert *t
} }
}() }()
client := cdnclient.GetClientByAddr([]dfnet.NetAddr{ client := client.GetClientByAddr([]dfnet.NetAddr{
{ {
Type: dfnet.TCP, Type: dfnet.TCP,
Addr: fmt.Sprintf(":%d", port), Addr: fmt.Sprintf(":%d", port),

View File

@ -25,19 +25,20 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
commonv1 "d7y.io/api/pkg/apis/common/v1"
dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"
"d7y.io/dragonfly/v2/client/daemon/peer" "d7y.io/dragonfly/v2/client/daemon/peer"
"d7y.io/dragonfly/v2/internal/dferrors" "d7y.io/dragonfly/v2/internal/dferrors"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
) )
type subscriber struct { type subscriber struct {
sync.Mutex // lock for sent map and grpc Send sync.Mutex // lock for sent map and grpc Send
*logger.SugaredLoggerOnWith *logger.SugaredLoggerOnWith
*peer.SubscribeResponse *peer.SubscribeResponse
sync dfdaemon.Daemon_SyncPieceTasksServer sync dfdaemonv1.Daemon_SyncPieceTasksServer
request *base.PieceTaskRequest request *commonv1.PieceTaskRequest
skipPieceCount uint32 skipPieceCount uint32
totalPieces int32 totalPieces int32
sentMap map[int32]struct{} sentMap map[int32]struct{}
@ -46,7 +47,7 @@ type subscriber struct {
attributeSent *atomic.Bool attributeSent *atomic.Bool
} }
func (s *subscriber) getPieces(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error) { func (s *subscriber) getPieces(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
p, err := s.Storage.GetPieces(ctx, request) p, err := s.Storage.GetPieces(ctx, request)
if err != nil { if err != nil {
return nil, err return nil, err
@ -67,15 +68,15 @@ func (s *subscriber) getPieces(ctx context.Context, request *base.PieceTaskReque
func sendExistPieces( func sendExistPieces(
ctx context.Context, ctx context.Context,
log *logger.SugaredLoggerOnWith, log *logger.SugaredLoggerOnWith,
get func(ctx context.Context, request *base.PieceTaskRequest) (*base.PiecePacket, error), get func(ctx context.Context, request *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error),
request *base.PieceTaskRequest, request *commonv1.PieceTaskRequest,
sync dfdaemon.Daemon_SyncPieceTasksServer, sync dfdaemonv1.Daemon_SyncPieceTasksServer,
sentMap map[int32]struct{}, sentMap map[int32]struct{},
skipSendZeroPiece bool) (total int32, err error) { skipSendZeroPiece bool) (total int32, err error) {
if request.Limit <= 0 { if request.Limit <= 0 {
request.Limit = 16 request.Limit = 16
} }
var pp *base.PiecePacket var pp *commonv1.PiecePacket
for { for {
pp, err = get(ctx, request) pp, err = get(ctx, request)
if err != nil { if err != nil {
@ -225,7 +226,7 @@ loop:
s.Unlock() s.Unlock()
msg := "peer task success, but can not send all pieces" msg := "peer task success, but can not send all pieces"
s.Errorf(msg) s.Errorf(msg)
return dferrors.Newf(base.Code_ClientError, msg) return dferrors.Newf(commonv1.Code_ClientError, msg)
} }
s.Unlock() s.Unlock()
break loop break loop
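The subscriber tracks which piece numbers have already gone out on the stream via sentMap, guarded by the embedded mutex. A stripped-down sketch of that bookkeeping, showing just the map without the gRPC stream or lock:

package main

import "fmt"

// markSent mirrors the sentMap bookkeeping above: it reports whether
// the piece number was newly recorded, so callers skip re-sending.
func markSent(sent map[int32]struct{}, num int32) bool {
	if _, ok := sent[num]; ok {
		return false
	}
	sent[num] = struct{}{}
	return true
}

func main() {
	sent := map[int32]struct{}{}
	fmt.Println(markSent(sent, 3)) // true: first send
	fmt.Println(markSent(sent, 3)) // false: already sent
}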

View File

@ -30,11 +30,12 @@ import (
"go.uber.org/atomic" "go.uber.org/atomic"
commonv1 "d7y.io/api/pkg/apis/common/v1"
clientutil "d7y.io/dragonfly/v2/client/util" clientutil "d7y.io/dragonfly/v2/client/util"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/internal/util" "d7y.io/dragonfly/v2/internal/util"
"d7y.io/dragonfly/v2/pkg/digest" "d7y.io/dragonfly/v2/pkg/digest"
"d7y.io/dragonfly/v2/pkg/rpc/base"
) )
type localTaskStore struct { type localTaskStore struct {
@ -396,7 +397,7 @@ func (t *localTaskStore) Store(ctx context.Context, req *StoreRequest) error {
return err return err
} }
func (t *localTaskStore) GetPieces(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { func (t *localTaskStore) GetPieces(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
if req == nil { if req == nil {
return nil, ErrBadRequest return nil, ErrBadRequest
} }
@ -408,7 +409,7 @@ func (t *localTaskStore) GetPieces(ctx context.Context, req *base.PieceTaskReque
t.RLock() t.RLock()
defer t.RUnlock() defer t.RUnlock()
t.touch() t.touch()
piecePacket := &base.PiecePacket{ piecePacket := &commonv1.PiecePacket{
TaskId: req.TaskId, TaskId: req.TaskId,
DstPid: t.PeerID, DstPid: t.PeerID,
TotalPiece: t.TotalPieces, TotalPiece: t.TotalPieces,
@ -425,7 +426,7 @@ func (t *localTaskStore) GetPieces(ctx context.Context, req *base.PieceTaskReque
} }
if piece, ok := t.Pieces[num]; ok { if piece, ok := t.Pieces[num]; ok {
piecePacket.PieceInfos = append(piecePacket.PieceInfos, piecePacket.PieceInfos = append(piecePacket.PieceInfos,
&base.PieceInfo{ &commonv1.PieceInfo{
PieceNum: piece.Num, PieceNum: piece.Num,
RangeStart: uint64(piece.Range.Start), RangeStart: uint64(piece.Range.Start),
RangeSize: uint32(piece.Range.Length), RangeSize: uint32(piece.Range.Length),
@ -449,7 +450,7 @@ func (t *localTaskStore) GetTotalPieces(ctx context.Context, req *PeerTaskMetada
return t.TotalPieces, nil return t.TotalPieces, nil
} }
func (t *localTaskStore) GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*base.ExtendAttribute, error) { func (t *localTaskStore) GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*commonv1.ExtendAttribute, error) {
if t.invalid.Load() { if t.invalid.Load() {
t.Errorf("invalid digest, refuse to get total pieces") t.Errorf("invalid digest, refuse to get total pieces")
return nil, ErrInvalidDigest return nil, ErrInvalidDigest
@ -463,7 +464,7 @@ func (t *localTaskStore) GetExtendAttribute(ctx context.Context, req *PeerTaskMe
hdr[k] = t.Header.Get(k) hdr[k] = t.Header.Get(k)
} }
} }
return &base.ExtendAttribute{Header: hdr}, nil return &commonv1.ExtendAttribute{Header: hdr}, nil
} }
func (t *localTaskStore) CanReclaim() bool { func (t *localTaskStore) CanReclaim() bool {
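A sketch of assembling a PiecePacket the way GetPieces does above, using only fields visible in the hunks; the IDs and range sizes are hypothetical.

```go
package main

import (
	"fmt"

	commonv1 "d7y.io/api/pkg/apis/common/v1"
)

func main() {
	packet := &commonv1.PiecePacket{
		TaskId:     "task-1",
		DstPid:     "peer-1",
		TotalPiece: 2,
	}

	// Append piece descriptors the way the loop over t.Pieces does above.
	for num := int32(0); num < 2; num++ {
		packet.PieceInfos = append(packet.PieceInfos, &commonv1.PieceInfo{
			PieceNum:   num,
			RangeStart: uint64(num) * 4096,
			RangeSize:  4096,
		})
	}
	fmt.Printf("%d/%d pieces described\n", len(packet.PieceInfos), packet.TotalPiece)
}
```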

View File

@ -24,10 +24,11 @@ import (
"go.uber.org/atomic" "go.uber.org/atomic"
commonv1 "d7y.io/api/pkg/apis/common/v1"
"d7y.io/dragonfly/v2/client/util" "d7y.io/dragonfly/v2/client/util"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/digest" "d7y.io/dragonfly/v2/pkg/digest"
"d7y.io/dragonfly/v2/pkg/rpc/base"
) )
// TODO: needs refactoring with localTaskStore; currently, localSubTaskStore copies its code from localTaskStore // TODO: needs refactoring with localTaskStore; currently, localSubTaskStore copies its code from localTaskStore
@ -204,7 +205,7 @@ func (t *localSubTaskStore) ReadAllPieces(ctx context.Context, req *ReadAllPiece
}, nil }, nil
} }
func (t *localSubTaskStore) GetPieces(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { func (t *localSubTaskStore) GetPieces(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
if t.invalid.Load() { if t.invalid.Load() {
t.Errorf("invalid digest, refuse to get pieces") t.Errorf("invalid digest, refuse to get pieces")
return nil, ErrInvalidDigest return nil, ErrInvalidDigest
@ -213,7 +214,7 @@ func (t *localSubTaskStore) GetPieces(ctx context.Context, req *base.PieceTaskRe
t.RLock() t.RLock()
defer t.RUnlock() defer t.RUnlock()
t.parent.touch() t.parent.touch()
piecePacket := &base.PiecePacket{ piecePacket := &commonv1.PiecePacket{
TaskId: req.TaskId, TaskId: req.TaskId,
DstPid: t.PeerID, DstPid: t.PeerID,
TotalPiece: t.TotalPieces, TotalPiece: t.TotalPieces,
@ -227,7 +228,7 @@ func (t *localSubTaskStore) GetPieces(ctx context.Context, req *base.PieceTaskRe
for i := int32(0); i < int32(req.Limit); i++ { for i := int32(0); i < int32(req.Limit); i++ {
if piece, ok := t.Pieces[int32(req.StartNum)+i]; ok { if piece, ok := t.Pieces[int32(req.StartNum)+i]; ok {
piecePacket.PieceInfos = append(piecePacket.PieceInfos, &base.PieceInfo{ piecePacket.PieceInfos = append(piecePacket.PieceInfos, &commonv1.PieceInfo{
PieceNum: piece.Num, PieceNum: piece.Num,
RangeStart: uint64(piece.Range.Start), RangeStart: uint64(piece.Range.Start),
RangeSize: uint32(piece.Range.Length), RangeSize: uint32(piece.Range.Length),
@ -396,7 +397,7 @@ func (t *localSubTaskStore) Reclaim() error {
return nil return nil
} }
func (t *localSubTaskStore) GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*base.ExtendAttribute, error) { func (t *localSubTaskStore) GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*commonv1.ExtendAttribute, error) {
if t.invalid.Load() { if t.invalid.Load() {
t.Errorf("invalid digest, refuse to get total pieces") t.Errorf("invalid digest, refuse to get total pieces")
return nil, ErrInvalidDigest return nil, ErrInvalidDigest
@ -410,5 +411,5 @@ func (t *localSubTaskStore) GetExtendAttribute(ctx context.Context, req *PeerTas
hdr[k] = t.Header.Get(k) hdr[k] = t.Header.Get(k)
} }
} }
return &base.ExtendAttribute{Header: hdr}, nil return &commonv1.ExtendAttribute{Header: hdr}, nil
} }
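The ExtendAttribute flattening above keeps the first value per header key; a self-contained sketch of the same reduction, with a hypothetical header added for illustration:

```go
package main

import (
	"fmt"
	"net/http"

	commonv1 "d7y.io/api/pkg/apis/common/v1"
)

func main() {
	h := http.Header{}
	h.Set("Content-Type", "application/octet-stream")
	h.Set("X-Example", "demo") // hypothetical header

	// Keep the first value per key, as GetExtendAttribute does above.
	hdr := map[string]string{}
	for k := range h {
		hdr[k] = h.Get(k)
	}

	attr := &commonv1.ExtendAttribute{Header: hdr}
	fmt.Println(attr.Header["Content-Type"])
}
```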

View File

@ -31,13 +31,14 @@ import (
testifyassert "github.com/stretchr/testify/assert" testifyassert "github.com/stretchr/testify/assert"
commonv1 "d7y.io/api/pkg/apis/common/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/test" "d7y.io/dragonfly/v2/client/daemon/test"
clientutil "d7y.io/dragonfly/v2/client/util" clientutil "d7y.io/dragonfly/v2/client/util"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/internal/util" "d7y.io/dragonfly/v2/internal/util"
"d7y.io/dragonfly/v2/pkg/digest" "d7y.io/dragonfly/v2/pkg/digest"
"d7y.io/dragonfly/v2/pkg/rpc/base"
_ "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server" _ "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server"
) )
@ -195,7 +196,7 @@ func TestLocalTaskStore_PutAndGetPiece(t *testing.T) {
Start: int64(p.start), Start: int64(p.start),
Length: int64(p.end - p.start), Length: int64(p.end - p.start),
}, },
Style: base.PieceStyle_PLAIN, Style: commonv1.PieceStyle_PLAIN,
}, },
Reader: bytes.NewBuffer(testBytes[p.start:p.end]), Reader: bytes.NewBuffer(testBytes[p.start:p.end]),
}) })
@ -225,7 +226,7 @@ func TestLocalTaskStore_PutAndGetPiece(t *testing.T) {
Start: int64(p.start), Start: int64(p.start),
Length: int64(p.end - p.start), Length: int64(p.end - p.start),
}, },
Style: base.PieceStyle_PLAIN, Style: commonv1.PieceStyle_PLAIN,
}, },
}) })
assert.Nil(err, "get piece reader should be ok") assert.Nil(err, "get piece reader should be ok")

View File

@ -19,8 +19,9 @@ package storage
import ( import (
"io" "io"
commonv1 "d7y.io/api/pkg/apis/common/v1"
"d7y.io/dragonfly/v2/client/util" "d7y.io/dragonfly/v2/client/util"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source"
) )
@ -48,7 +49,7 @@ type PieceMetadata struct {
Md5 string `json:"md5,omitempty"` Md5 string `json:"md5,omitempty"`
Offset uint64 `json:"offset,omitempty"` Offset uint64 `json:"offset,omitempty"`
Range util.Range `json:"range,omitempty"` Range util.Range `json:"range,omitempty"`
Style base.PieceStyle `json:"style,omitempty"` Style commonv1.PieceStyle `json:"style,omitempty"`
// time consumed, in nanoseconds // time consumed, in nanoseconds
Cost uint64 `json:"cost,omitempty"` Cost uint64 `json:"cost,omitempty"`
} }
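A sketch of populating the migrated Style field, assuming PieceMetadata and util.Range are exported exactly as shown in the hunks; the digest and size values are illustrative.

```go
package main

import (
	"fmt"

	commonv1 "d7y.io/api/pkg/apis/common/v1"

	"d7y.io/dragonfly/v2/client/daemon/storage"
	"d7y.io/dragonfly/v2/client/util"
)

func main() {
	md := storage.PieceMetadata{
		Md5:    "d41d8cd98f00b204e9800998ecf8427e", // MD5 of the empty string
		Offset: 0,
		Range:  util.Range{Start: 0, Length: 4096},
		Style:  commonv1.PieceStyle_PLAIN, // the enum now comes from d7y.io/api
	}
	fmt.Printf("style=%s bytes=%d\n", md.Style, md.Range.Length)
}
```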

View File

@ -10,9 +10,9 @@ import (
reflect "reflect" reflect "reflect"
time "time" time "time"
v1 "d7y.io/api/pkg/apis/common/v1"
storage "d7y.io/dragonfly/v2/client/daemon/storage" storage "d7y.io/dragonfly/v2/client/daemon/storage"
util "d7y.io/dragonfly/v2/client/util" util "d7y.io/dragonfly/v2/client/util"
base "d7y.io/dragonfly/v2/pkg/rpc/base"
gomock "github.com/golang/mock/gomock" gomock "github.com/golang/mock/gomock"
) )
@ -40,10 +40,10 @@ func (m *MockTaskStorageDriver) EXPECT() *MockTaskStorageDriverMockRecorder {
} }
// GetExtendAttribute mocks base method. // GetExtendAttribute mocks base method.
func (m *MockTaskStorageDriver) GetExtendAttribute(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { func (m *MockTaskStorageDriver) GetExtendAttribute(ctx context.Context, req *storage.PeerTaskMetadata) (*v1.ExtendAttribute, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetExtendAttribute", ctx, req) ret := m.ctrl.Call(m, "GetExtendAttribute", ctx, req)
ret0, _ := ret[0].(*base.ExtendAttribute) ret0, _ := ret[0].(*v1.ExtendAttribute)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
@ -55,10 +55,10 @@ func (mr *MockTaskStorageDriverMockRecorder) GetExtendAttribute(ctx, req interfa
} }
// GetPieces mocks base method. // GetPieces mocks base method.
func (m *MockTaskStorageDriver) GetPieces(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { func (m *MockTaskStorageDriver) GetPieces(ctx context.Context, req *v1.PieceTaskRequest) (*v1.PiecePacket, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPieces", ctx, req) ret := m.ctrl.Call(m, "GetPieces", ctx, req)
ret0, _ := ret[0].(*base.PiecePacket) ret0, _ := ret[0].(*v1.PiecePacket)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
@ -342,10 +342,10 @@ func (mr *MockManagerMockRecorder) FindPartialCompletedTask(taskID, rg interface
} }
// GetExtendAttribute mocks base method. // GetExtendAttribute mocks base method.
func (m *MockManager) GetExtendAttribute(ctx context.Context, req *storage.PeerTaskMetadata) (*base.ExtendAttribute, error) { func (m *MockManager) GetExtendAttribute(ctx context.Context, req *storage.PeerTaskMetadata) (*v1.ExtendAttribute, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetExtendAttribute", ctx, req) ret := m.ctrl.Call(m, "GetExtendAttribute", ctx, req)
ret0, _ := ret[0].(*base.ExtendAttribute) ret0, _ := ret[0].(*v1.ExtendAttribute)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
@ -357,10 +357,10 @@ func (mr *MockManagerMockRecorder) GetExtendAttribute(ctx, req interface{}) *gom
} }
// GetPieces mocks base method. // GetPieces mocks base method.
func (m *MockManager) GetPieces(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { func (m *MockManager) GetPieces(ctx context.Context, req *v1.PieceTaskRequest) (*v1.PiecePacket, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPieces", ctx, req) ret := m.ctrl.Call(m, "GetPieces", ctx, req)
ret0, _ := ret[0].(*base.PiecePacket) ret0, _ := ret[0].(*v1.PiecePacket)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
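A usage sketch for the regenerated mock; the constructor NewMockTaskStorageDriver is the conventional mockgen output and is assumed here, as is placing the test inside the mocks package. Only the v1 types are confirmed by the hunks above.

```go
package mocks

import (
	"context"
	"testing"

	v1 "d7y.io/api/pkg/apis/common/v1"
	gomock "github.com/golang/mock/gomock"
)

func TestGetPiecesMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// NewMockTaskStorageDriver is the usual mockgen constructor; assumed here.
	m := NewMockTaskStorageDriver(ctrl)
	m.EXPECT().
		GetPieces(gomock.Any(), gomock.Any()).
		Return(&v1.PiecePacket{TaskId: "task-1"}, nil)

	pp, err := m.GetPieces(context.Background(), &v1.PieceTaskRequest{TaskId: "task-1"})
	if err != nil || pp.TaskId != "task-1" {
		t.Fatalf("unexpected result: %v, %v", pp, err)
	}
}
```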

View File

@ -38,11 +38,12 @@ import (
"go.opentelemetry.io/otel" "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
commonv1 "d7y.io/api/pkg/apis/common/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/gc" "d7y.io/dragonfly/v2/client/daemon/gc"
"d7y.io/dragonfly/v2/client/util" "d7y.io/dragonfly/v2/client/util"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc/base"
) )
type TaskStorageDriver interface { type TaskStorageDriver interface {
@ -56,11 +57,11 @@ type TaskStorageDriver interface {
ReadAllPieces(ctx context.Context, req *ReadAllPiecesRequest) (io.ReadCloser, error) ReadAllPieces(ctx context.Context, req *ReadAllPiecesRequest) (io.ReadCloser, error)
GetPieces(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) GetPieces(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error)
GetTotalPieces(ctx context.Context, req *PeerTaskMetadata) (int32, error) GetTotalPieces(ctx context.Context, req *PeerTaskMetadata) (int32, error)
GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*base.ExtendAttribute, error) GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*commonv1.ExtendAttribute, error)
UpdateTask(ctx context.Context, req *UpdateTaskRequest) error UpdateTask(ctx context.Context, req *UpdateTaskRequest) error
@ -316,7 +317,7 @@ func (s *storageManager) Store(ctx context.Context, req *StoreRequest) error {
return t.Store(ctx, req) return t.Store(ctx, req)
} }
func (s *storageManager) GetPieces(ctx context.Context, req *base.PieceTaskRequest) (*base.PiecePacket, error) { func (s *storageManager) GetPieces(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
t, ok := s.LoadTask( t, ok := s.LoadTask(
PeerTaskMetadata{ PeerTaskMetadata{
TaskID: req.TaskId, TaskID: req.TaskId,
@ -340,7 +341,7 @@ func (s *storageManager) GetTotalPieces(ctx context.Context, req *PeerTaskMetada
return t.(TaskStorageDriver).GetTotalPieces(ctx, req) return t.(TaskStorageDriver).GetTotalPieces(ctx, req)
} }
func (s *storageManager) GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*base.ExtendAttribute, error) { func (s *storageManager) GetExtendAttribute(ctx context.Context, req *PeerTaskMetadata) (*commonv1.ExtendAttribute, error) {
t, ok := s.LoadTask( t, ok := s.LoadTask(
PeerTaskMetadata{ PeerTaskMetadata{
TaskID: req.TaskID, TaskID: req.TaskID,
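To illustrate the migrated method signature without pulling in the whole storage package, a trimmed-down sketch; `pieceGetter` and `fakeDriver` are illustrative names, not part of the codebase.

```go
package main

import (
	"context"
	"fmt"

	commonv1 "d7y.io/api/pkg/apis/common/v1"
)

// pieceGetter trims TaskStorageDriver down to the one migrated method shown above.
type pieceGetter interface {
	GetPieces(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error)
}

type fakeDriver struct{}

func (fakeDriver) GetPieces(_ context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
	return &commonv1.PiecePacket{TaskId: req.TaskId}, nil
}

func main() {
	var d pieceGetter = fakeDriver{}
	pp, _ := d.GetPieces(context.Background(), &commonv1.PieceTaskRequest{TaskId: "task-1"})
	fmt.Println(pp.TaskId)
}
```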

View File

@ -35,14 +35,15 @@ import (
"go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/propagation"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
commonv1 "d7y.io/api/pkg/apis/common/v1"
errordetailsv1 "d7y.io/api/pkg/apis/errordetails/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/client/daemon/metrics" "d7y.io/dragonfly/v2/client/daemon/metrics"
"d7y.io/dragonfly/v2/client/daemon/peer" "d7y.io/dragonfly/v2/client/daemon/peer"
"d7y.io/dragonfly/v2/client/util" "d7y.io/dragonfly/v2/client/util"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
nethttp "d7y.io/dragonfly/v2/pkg/net/http" nethttp "d7y.io/dragonfly/v2/pkg/net/http"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/errordetails"
) )
var _ *logger.SugaredLoggerOnWith // pin this package for no log code generation var _ *logger.SugaredLoggerOnWith // pin this package for no log code generation
@ -70,7 +71,7 @@ type transport struct {
defaultFilter string defaultFilter string
// defaultPattern is used for registering stream task // defaultPattern is used for registering stream task
defaultPattern base.Pattern defaultPattern commonv1.Pattern
// defaultTag is used when http request without X-Dragonfly-Tag Header // defaultTag is used when http request without X-Dragonfly-Tag Header
defaultTag string defaultTag string
@ -125,7 +126,7 @@ func WithDefaultFilter(f string) Option {
} }
// WithDefaultPattern sets default pattern // WithDefaultPattern sets default pattern
func WithDefaultPattern(pattern base.Pattern) Option { func WithDefaultPattern(pattern commonv1.Pattern) Option {
return func(rt *transport) *transport { return func(rt *transport) *transport {
rt.defaultPattern = pattern rt.defaultPattern = pattern
return rt return rt
@ -215,7 +216,7 @@ func (rt *transport) download(ctx context.Context, req *http.Request) (*http.Res
log.Infof("start download with url: %s", url) log.Infof("start download with url: %s", url)
// Init meta value // Init meta value
meta := &base.UrlMeta{Header: map[string]string{}} meta := &commonv1.UrlMeta{Header: map[string]string{}}
var rg *util.Range var rg *util.Range
// Set meta range's value // Set meta range's value
@ -261,7 +262,7 @@ func (rt *transport) download(ctx context.Context, req *http.Request) (*http.Res
if st, ok := status.FromError(err); ok { if st, ok := status.FromError(err); ok {
for _, detail := range st.Details() { for _, detail := range st.Details() {
switch d := detail.(type) { switch d := detail.(type) {
case *errordetails.SourceError: case *errordetailsv1.SourceError:
hdr := nethttp.MapToHeader(attr) hdr := nethttp.MapToHeader(attr)
for k, v := range d.Metadata.Header { for k, v := range d.Metadata.Header {
hdr.Set(k, v) hdr.Set(k, v)

View File

@ -24,12 +24,13 @@ import (
"os" "os"
"time" "time"
commonv1 "d7y.io/api/pkg/apis/common/v1"
dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
"d7y.io/dragonfly/v2/internal/dferrors" "d7y.io/dragonfly/v2/internal/dferrors"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/basic" "d7y.io/dragonfly/v2/pkg/basic"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
daemonclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client" daemonclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client"
) )
@ -89,7 +90,7 @@ func statTask(ctx context.Context, client daemonclient.DaemonClient, cfg *config
} }
// Task not found, return os.ErrNotExist // Task not found, return os.ErrNotExist
if dferrors.CheckError(statError, base.Code_PeerTaskNotFound) { if dferrors.CheckError(statError, commonv1.Code_PeerTaskNotFound) {
return os.ErrNotExist return os.ErrNotExist
} }
@ -98,10 +99,10 @@ func statTask(ctx context.Context, client daemonclient.DaemonClient, cfg *config
return statError return statError
} }
func newStatRequest(cfg *config.DfcacheConfig) *dfdaemon.StatTaskRequest { func newStatRequest(cfg *config.DfcacheConfig) *dfdaemonv1.StatTaskRequest {
return &dfdaemon.StatTaskRequest{ return &dfdaemonv1.StatTaskRequest{
Url: newCid(cfg.Cid), Url: newCid(cfg.Cid),
UrlMeta: &base.UrlMeta{ UrlMeta: &commonv1.UrlMeta{
Tag: cfg.Tag, Tag: cfg.Tag,
}, },
LocalOnly: cfg.LocalOnly, LocalOnly: cfg.LocalOnly,
@ -158,12 +159,12 @@ func importTask(ctx context.Context, client daemonclient.DaemonClient, cfg *conf
return nil return nil
} }
func newImportRequest(cfg *config.DfcacheConfig) *dfdaemon.ImportTaskRequest { func newImportRequest(cfg *config.DfcacheConfig) *dfdaemonv1.ImportTaskRequest {
return &dfdaemon.ImportTaskRequest{ return &dfdaemonv1.ImportTaskRequest{
Type: base.TaskType_DfCache, Type: commonv1.TaskType_DfCache,
Url: newCid(cfg.Cid), Url: newCid(cfg.Cid),
Path: cfg.Path, Path: cfg.Path,
UrlMeta: &base.UrlMeta{ UrlMeta: &commonv1.UrlMeta{
Tag: cfg.Tag, Tag: cfg.Tag,
}, },
} }
@ -217,7 +218,7 @@ func exportTask(ctx context.Context, client daemonclient.DaemonClient, cfg *conf
} }
// Task not found, return os.ErrNotExist // Task not found, return os.ErrNotExist
if dferrors.CheckError(exportError, base.Code_PeerTaskNotFound) { if dferrors.CheckError(exportError, commonv1.Code_PeerTaskNotFound) {
return os.ErrNotExist return os.ErrNotExist
} }
@ -226,13 +227,13 @@ func exportTask(ctx context.Context, client daemonclient.DaemonClient, cfg *conf
return exportError return exportError
} }
func newExportRequest(cfg *config.DfcacheConfig) *dfdaemon.ExportTaskRequest { func newExportRequest(cfg *config.DfcacheConfig) *dfdaemonv1.ExportTaskRequest {
return &dfdaemon.ExportTaskRequest{ return &dfdaemonv1.ExportTaskRequest{
Url: newCid(cfg.Cid), Url: newCid(cfg.Cid),
Output: cfg.Output, Output: cfg.Output,
Timeout: uint64(cfg.Timeout), Timeout: uint64(cfg.Timeout),
Limit: float64(cfg.RateLimit), Limit: float64(cfg.RateLimit),
UrlMeta: &base.UrlMeta{ UrlMeta: &commonv1.UrlMeta{
Tag: cfg.Tag, Tag: cfg.Tag,
}, },
Uid: int64(basic.UserID), Uid: int64(basic.UserID),
@ -290,10 +291,10 @@ func deleteTask(ctx context.Context, client daemonclient.DaemonClient, cfg *conf
return nil return nil
} }
func newDeleteRequest(cfg *config.DfcacheConfig) *dfdaemon.DeleteTaskRequest { func newDeleteRequest(cfg *config.DfcacheConfig) *dfdaemonv1.DeleteTaskRequest {
return &dfdaemon.DeleteTaskRequest{ return &dfdaemonv1.DeleteTaskRequest{
Url: newCid(cfg.Cid), Url: newCid(cfg.Cid),
UrlMeta: &base.UrlMeta{ UrlMeta: &commonv1.UrlMeta{
Tag: cfg.Tag, Tag: cfg.Tag,
}, },
} }
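A sketch combining the request construction and the not-found mapping shown above; the cid URL and tag are hypothetical, and the error is fabricated to exercise CheckError.

```go
package main

import (
	"fmt"
	"os"

	commonv1 "d7y.io/api/pkg/apis/common/v1"
	dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"

	"d7y.io/dragonfly/v2/internal/dferrors"
)

func main() {
	// Build a stat request the way newStatRequest does above.
	req := &dfdaemonv1.StatTaskRequest{
		Url:       "d7y:///example-cid", // hypothetical cid URL
		UrlMeta:   &commonv1.UrlMeta{Tag: "example-tag"},
		LocalOnly: true,
	}
	fmt.Println("stat:", req.Url)

	// Map the relocated not-found code to os.ErrNotExist, as statTask does.
	statError := dferrors.New(commonv1.Code_PeerTaskNotFound, "peer task not found")
	if dferrors.CheckError(statError, commonv1.Code_PeerTaskNotFound) {
		fmt.Println(os.ErrNotExist)
	}
}
```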

View File

@ -33,12 +33,13 @@ import (
"github.com/go-http-utils/headers" "github.com/go-http-utils/headers"
"github.com/schollz/progressbar/v3" "github.com/schollz/progressbar/v3"
commonv1 "d7y.io/api/pkg/apis/common/v1"
dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/basic" "d7y.io/dragonfly/v2/pkg/basic"
"d7y.io/dragonfly/v2/pkg/digest" "d7y.io/dragonfly/v2/pkg/digest"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
daemonclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client" daemonclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client"
"d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source"
pkgstrings "d7y.io/dragonfly/v2/pkg/strings" pkgstrings "d7y.io/dragonfly/v2/pkg/strings"
@ -91,7 +92,7 @@ func singleDownload(ctx context.Context, client daemonclient.DaemonClient, cfg *
var ( var (
start = time.Now() start = time.Now()
stream *daemonclient.DownResultStream stream *daemonclient.DownResultStream
result *dfdaemon.DownResult result *dfdaemonv1.DownResult
pb *progressbar.ProgressBar pb *progressbar.ProgressBar
request = newDownRequest(cfg, hdr) request = newDownRequest(cfg, hdr)
downError error downError error
@ -220,20 +221,20 @@ func parseHeader(s []string) map[string]string {
return hdr return hdr
} }
func newDownRequest(cfg *config.DfgetConfig, hdr map[string]string) *dfdaemon.DownRequest { func newDownRequest(cfg *config.DfgetConfig, hdr map[string]string) *dfdaemonv1.DownRequest {
var rg string var rg string
if r, ok := hdr[headers.Range]; ok { if r, ok := hdr[headers.Range]; ok {
rg = strings.TrimLeft(r, "bytes=") rg = strings.TrimLeft(r, "bytes=")
} else { } else {
rg = cfg.Range rg = cfg.Range
} }
return &dfdaemon.DownRequest{ return &dfdaemonv1.DownRequest{
Url: cfg.URL, Url: cfg.URL,
Output: cfg.Output, Output: cfg.Output,
Timeout: uint64(cfg.Timeout), Timeout: uint64(cfg.Timeout),
Limit: float64(cfg.RateLimit.Limit), Limit: float64(cfg.RateLimit.Limit),
DisableBackSource: cfg.DisableBackSource, DisableBackSource: cfg.DisableBackSource,
UrlMeta: &base.UrlMeta{ UrlMeta: &commonv1.UrlMeta{
Digest: cfg.Digest, Digest: cfg.Digest,
Tag: cfg.Tag, Tag: cfg.Tag,
Range: rg, Range: rg,
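The range selection in newDownRequest prefers the request header over the config value; a runnable sketch with hypothetical values. Note that strings.TrimLeft takes a cutset, not a prefix, which happens to work for "bytes=" here.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/go-http-utils/headers"
)

func main() {
	hdr := map[string]string{headers.Range: "bytes=0-1023"} // hypothetical request header
	cfgRange := "1024-2047"                                 // hypothetical config fallback

	var rg string
	if r, ok := hdr[headers.Range]; ok {
		// strings.TrimLeft strips any leading character in the cutset;
		// it stops at '0' because digits are not in "bytes=".
		rg = strings.TrimLeft(r, "bytes=")
	} else {
		rg = cfgRange
	}
	fmt.Println(rg) // "0-1023"
}
```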

View File

@ -50,7 +50,7 @@ file that has been imported or added into P2P network by other peer, it's the us
responsibility to go back to source and add file into P2P network. responsibility to go back to source and add file into P2P network.
` `
// rootCmd represents the base command when called without any subcommands // rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{ var rootCmd = &cobra.Command{
Use: "dfcache <command> [flags]", Use: "dfcache <command> [flags]",
Short: "the P2P cache client of dragonfly", Short: "the P2P cache client of dragonfly",

View File

@ -57,7 +57,7 @@ peers to download pieces from it if it owns them. In addition, dfget has the
ability to provide more advanced functionality, such as network bandwidth ability to provide more advanced functionality, such as network bandwidth
limit, transmission encryption and so on.` limit, transmission encryption and so on.`
// rootCmd represents the base command when called without any subcommands // rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{ var rootCmd = &cobra.Command{
Use: "dfget url -O path", Use: "dfget url -O path",
Short: "the P2P client of dragonfly", Short: "the P2P client of dragonfly",

View File

@ -43,7 +43,7 @@ Rely on S3 or OSS as the backend to ensure storage reliability.
During object storage operations, the P2P cache is used for fast reads and writes. During object storage operations, the P2P cache is used for fast reads and writes.
` `
// rootCmd represents the base command when called without any subcommands // rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{ var rootCmd = &cobra.Command{
Use: "dfstore <command> [flags]", Use: "dfstore <command> [flags]",
Short: "object storage client of dragonfly.", Short: "object storage client of dragonfly.",

View File

@ -36,7 +36,7 @@ var (
cfg *config.Config cfg *config.Config
) )
// rootCmd represents the base command when called without any subcommands // rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{ var rootCmd = &cobra.Command{
Use: "manager", Use: "manager",
Short: "The central manager of dragonfly.", Short: "The central manager of dragonfly.",

View File

@ -37,7 +37,7 @@ var (
cfg *config.Config cfg *config.Config
) )
// rootCmd represents the base command when called without any subcommands // rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{ var rootCmd = &cobra.Command{
Use: "scheduler", Use: "scheduler",
Short: "the scheduler of dragonfly", Short: "the scheduler of dragonfly",

go.mod
View File

@ -3,6 +3,7 @@ module d7y.io/dragonfly/v2
go 1.18 go 1.18
require ( require (
d7y.io/api v1.0.1
github.com/RichardKnop/machinery v1.10.6 github.com/RichardKnop/machinery v1.10.6
github.com/Showmax/go-fqdn v1.0.0 github.com/Showmax/go-fqdn v1.0.0
github.com/VividCortex/mysqlerr v1.0.0 github.com/VividCortex/mysqlerr v1.0.0
@ -16,7 +17,6 @@ require (
github.com/distribution/distribution/v3 v3.0.0-20220620080156-3e4f8a0ab147 github.com/distribution/distribution/v3 v3.0.0-20220620080156-3e4f8a0ab147
github.com/docker/go-connections v0.4.0 github.com/docker/go-connections v0.4.0
github.com/docker/go-units v0.4.0 github.com/docker/go-units v0.4.0
github.com/envoyproxy/protoc-gen-validate v0.6.7
github.com/gammazero/deque v0.2.0 github.com/gammazero/deque v0.2.0
github.com/gin-contrib/cors v1.3.1 github.com/gin-contrib/cors v1.3.1
github.com/gin-contrib/static v0.0.1 github.com/gin-contrib/static v0.0.1
@ -70,11 +70,11 @@ require (
golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d
golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26 golang.org/x/oauth2 v0.0.0-20220628200809-02e64fa58f26
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 golang.org/x/time v0.0.0-20220609170525-579cf78fd858
google.golang.org/api v0.90.0 google.golang.org/api v0.90.0
google.golang.org/grpc v1.48.0 google.golang.org/grpc v1.48.0
google.golang.org/protobuf v1.28.0 google.golang.org/protobuf v1.28.1
gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
gorm.io/driver/mysql v1.3.4 gorm.io/driver/mysql v1.3.4
@ -102,6 +102,7 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/denisenkom/go-mssqldb v0.12.2 // indirect github.com/denisenkom/go-mssqldb v0.12.2 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/envoyproxy/protoc-gen-validate v0.6.7 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-echarts/go-echarts/v2 v2.2.4 // indirect github.com/go-echarts/go-echarts/v2 v2.2.4 // indirect
@ -195,12 +196,12 @@ require (
go.mongodb.org/mongo-driver v1.9.1 // indirect go.mongodb.org/mongo-driver v1.9.1 // indirect
go.opencensus.io v0.23.0 // indirect go.opencensus.io v0.23.0 // indirect
go.uber.org/multierr v1.8.0 // indirect go.uber.org/multierr v1.8.0 // indirect
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect golang.org/x/net v0.0.0-20220728211354-c7608f3a8462 // indirect
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
golang.org/x/text v0.3.7 // indirect golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.11 // indirect golang.org/x/tools v0.1.11 // indirect
google.golang.org/appengine v1.6.7 // indirect google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03 // indirect google.golang.org/genproto v0.0.0-20220728213248-dd149ef739b9 // indirect
gopkg.in/ini.v1 v1.66.6 // indirect gopkg.in/ini.v1 v1.66.6 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
gorm.io/driver/sqlserver v1.3.2 // indirect gorm.io/driver/sqlserver v1.3.2 // indirect

go.sum
View File

@ -69,6 +69,8 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
d7y.io/api v1.0.1 h1:FCtyOacd7hBk3H6TFyVBLW9cAlFaS8YyQ7LPcYdrBBY=
d7y.io/api v1.0.1/go.mod h1:GFnWPZFe4DUW70aOQikRZF0pvXpbUwAsGSCAZFFitPo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw=
@ -1262,8 +1264,9 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220728211354-c7608f3a8462 h1:UreQrH7DbFXSi9ZFox6FNT3WBooWmdANpU+IfkT1T4I=
golang.org/x/net v0.0.0-20220728211354-c7608f3a8462/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1403,8 +1406,8 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b h1:2n253B2r0pYSmEV+UNCQoPfU/FiaizQEK5Gu4Bq4JE8= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg=
golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -1656,8 +1659,8 @@ google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljW
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03 h1:W70HjnmXFJm+8RNjOpIDYW2nKsSi/af0VvIZUtYkwuU= google.golang.org/genproto v0.0.0-20220728213248-dd149ef739b9 h1:d3fKQZK+1rWQMg3xLKQbPMirUCo29I/NRdI2WarSzTg=
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220728213248-dd149ef739b9/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -1710,8 +1713,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@ -1,22 +0,0 @@
#!/bin/bash
PROTOC_ALL_IMAGE=${PROTOC_ALL_IMAGE:-"namely/protoc-all:1.47_0"}
PROTO_PATH=pkg/rpc
LANGUAGE=go
proto_modules="base cdnsystem dfdaemon manager scheduler errordetails"
echo "generate protos..."
for module in ${proto_modules}; do
if docker run --rm -v $PWD:/defs ${PROTOC_ALL_IMAGE} \
-d ${PROTO_PATH}/$module -i . \
-l ${LANGUAGE} -o . \
--go-source-relative \
--with-validator \
--validator-source-relative; then
echo "generate protos ${module} successfully"
else
echo "generate protos ${module} failed"
fi
done

View File

@ -20,7 +20,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"d7y.io/dragonfly/v2/pkg/rpc/base" commonv1 "d7y.io/api/pkg/apis/common/v1"
) )
// common and framework errors // common and framework errors
@ -39,7 +39,7 @@ func IsEndOfStream(err error) bool {
} }
type DfError struct { type DfError struct {
Code base.Code Code commonv1.Code
Message string Message string
} }
@ -47,21 +47,21 @@ func (s *DfError) Error() string {
return fmt.Sprintf("[%d]%s", s.Code, s.Message) return fmt.Sprintf("[%d]%s", s.Code, s.Message)
} }
func New(code base.Code, msg string) *DfError { func New(code commonv1.Code, msg string) *DfError {
return &DfError{ return &DfError{
Code: code, Code: code,
Message: msg, Message: msg,
} }
} }
func Newf(code base.Code, format string, a ...any) *DfError { func Newf(code commonv1.Code, format string, a ...any) *DfError {
return &DfError{ return &DfError{
Code: code, Code: code,
Message: fmt.Sprintf(format, a...), Message: fmt.Sprintf(format, a...),
} }
} }
func CheckError(err error, code base.Code) bool { func CheckError(err error, code commonv1.Code) bool {
if err == nil { if err == nil {
return false return false
} }
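A sketch of the DfError helpers with the relocated code enum; the output format follows DfError.Error above, and the message is fabricated.

```go
package main

import (
	"fmt"

	commonv1 "d7y.io/api/pkg/apis/common/v1"

	"d7y.io/dragonfly/v2/internal/dferrors"
)

func main() {
	err := dferrors.Newf(commonv1.Code_ClientError, "piece %d failed", 3)
	fmt.Println(err) // "[<code>]piece 3 failed", per DfError.Error above

	fmt.Println(dferrors.CheckError(err, commonv1.Code_ClientError))      // true
	fmt.Println(dferrors.CheckError(err, commonv1.Code_PeerTaskNotFound)) // false
}
```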

View File

@ -27,8 +27,9 @@ import (
"golang.org/x/crypto/bcrypt" "golang.org/x/crypto/bcrypt"
"gorm.io/gorm" "gorm.io/gorm"
commonv1 "d7y.io/api/pkg/apis/common/v1"
"d7y.io/dragonfly/v2/internal/dferrors" "d7y.io/dragonfly/v2/internal/dferrors"
"d7y.io/dragonfly/v2/pkg/rpc/base"
) )
type ErrorResponse struct { type ErrorResponse struct {
@ -58,7 +59,7 @@ func Error() gin.HandlerFunc {
var dferr *dferrors.DfError var dferr *dferrors.DfError
if errors.As(err.Err, &dferr) { if errors.As(err.Err, &dferr) {
switch dferr.Code { switch dferr.Code {
case base.Code_InvalidResourceType: case commonv1.Code_InvalidResourceType:
c.JSON(http.StatusBadRequest, ErrorResponse{ c.JSON(http.StatusBadRequest, ErrorResponse{
Message: http.StatusText(http.StatusBadRequest), Message: http.StatusText(http.StatusBadRequest),
}) })
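A sketch of the unwrapping step the middleware performs before choosing an HTTP status; the error value is fabricated for illustration.

```go
package main

import (
	"errors"
	"fmt"
	"net/http"

	commonv1 "d7y.io/api/pkg/apis/common/v1"

	"d7y.io/dragonfly/v2/internal/dferrors"
)

func main() {
	var err error = dferrors.New(commonv1.Code_InvalidResourceType, "bad resource type")

	// Unwrap the way the middleware does before choosing a status code.
	var dferr *dferrors.DfError
	if errors.As(err, &dferr) && dferr.Code == commonv1.Code_InvalidResourceType {
		fmt.Println(http.StatusText(http.StatusBadRequest)) // "Bad Request"
	}
}
```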

View File

@ -35,6 +35,8 @@ import (
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
"gorm.io/gorm" "gorm.io/gorm"
managerv1 "d7y.io/api/pkg/apis/manager/v1"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/manager/cache" "d7y.io/dragonfly/v2/manager/cache"
"d7y.io/dragonfly/v2/manager/config" "d7y.io/dragonfly/v2/manager/config"
@ -44,7 +46,6 @@ import (
"d7y.io/dragonfly/v2/manager/searcher" "d7y.io/dragonfly/v2/manager/searcher"
"d7y.io/dragonfly/v2/manager/types" "d7y.io/dragonfly/v2/manager/types"
"d7y.io/dragonfly/v2/pkg/objectstorage" "d7y.io/dragonfly/v2/pkg/objectstorage"
"d7y.io/dragonfly/v2/pkg/rpc/manager"
) )
// Default middlewares for stream. // Default middlewares for stream.
@ -80,7 +81,7 @@ type Server struct {
// Searcher interface. // Searcher interface.
searcher searcher.Searcher searcher searcher.Searcher
// Manager grpc interface. // Manager grpc interface.
manager.UnimplementedManagerServer managerv1.UnimplementedManagerServer
// Object storage interface. // Object storage interface.
objectStorage objectstorage.ObjectStorage objectStorage objectstorage.ObjectStorage
// Object storage configuration. // Object storage configuration.
@ -108,14 +109,14 @@ func New(
}, opts...)...) }, opts...)...)
// Register servers on grpc server. // Register servers on grpc server.
manager.RegisterManagerServer(grpcServer, server) managerv1.RegisterManagerServer(grpcServer, server)
healthpb.RegisterHealthServer(grpcServer, health.NewServer()) healthpb.RegisterHealthServer(grpcServer, health.NewServer())
return grpcServer return grpcServer
} }
// Get SeedPeer and SeedPeer cluster configuration. // Get SeedPeer and SeedPeer cluster configuration.
func (s *Server) GetSeedPeer(ctx context.Context, req *manager.GetSeedPeerRequest) (*manager.SeedPeer, error) { func (s *Server) GetSeedPeer(ctx context.Context, req *managerv1.GetSeedPeerRequest) (*managerv1.SeedPeer, error) {
var pbSeedPeer manager.SeedPeer var pbSeedPeer managerv1.SeedPeer
cacheKey := cache.MakeSeedPeerCacheKey(uint(req.SeedPeerClusterId), req.HostName, req.Ip) cacheKey := cache.MakeSeedPeerCacheKey(uint(req.SeedPeerClusterId), req.HostName, req.Ip)
// Cache hit. // Cache hit.
@ -143,10 +144,10 @@ func (s *Server) GetSeedPeer(ctx context.Context, req *manager.GetSeedPeerReques
} }
// Construct schedulers. // Construct schedulers.
var pbSchedulers []*manager.Scheduler var pbSchedulers []*managerv1.Scheduler
for _, schedulerCluster := range seedPeer.SeedPeerCluster.SchedulerClusters { for _, schedulerCluster := range seedPeer.SeedPeerCluster.SchedulerClusters {
for _, scheduler := range schedulerCluster.Schedulers { for _, scheduler := range schedulerCluster.Schedulers {
pbSchedulers = append(pbSchedulers, &manager.Scheduler{ pbSchedulers = append(pbSchedulers, &managerv1.Scheduler{
Id: uint64(scheduler.ID), Id: uint64(scheduler.ID),
HostName: scheduler.HostName, HostName: scheduler.HostName,
Idc: scheduler.IDC, Idc: scheduler.IDC,
@ -160,7 +161,7 @@ func (s *Server) GetSeedPeer(ctx context.Context, req *manager.GetSeedPeerReques
} }
// Construct seed peer. // Construct seed peer.
pbSeedPeer = manager.SeedPeer{ pbSeedPeer = managerv1.SeedPeer{
Id: uint64(seedPeer.ID), Id: uint64(seedPeer.ID),
Type: seedPeer.Type, Type: seedPeer.Type,
HostName: seedPeer.HostName, HostName: seedPeer.HostName,
@ -173,7 +174,7 @@ func (s *Server) GetSeedPeer(ctx context.Context, req *manager.GetSeedPeerReques
ObjectStoragePort: seedPeer.ObjectStoragePort, ObjectStoragePort: seedPeer.ObjectStoragePort,
State: seedPeer.State, State: seedPeer.State,
SeedPeerClusterId: uint64(seedPeer.SeedPeerClusterID), SeedPeerClusterId: uint64(seedPeer.SeedPeerClusterID),
SeedPeerCluster: &manager.SeedPeerCluster{ SeedPeerCluster: &managerv1.SeedPeerCluster{
Id: uint64(seedPeer.SeedPeerCluster.ID), Id: uint64(seedPeer.SeedPeerCluster.ID),
Name: seedPeer.SeedPeerCluster.Name, Name: seedPeer.SeedPeerCluster.Name,
Bio: seedPeer.SeedPeerCluster.BIO, Bio: seedPeer.SeedPeerCluster.BIO,
@ -196,7 +197,7 @@ func (s *Server) GetSeedPeer(ctx context.Context, req *manager.GetSeedPeerReques
} }
// Update SeedPeer configuration. // Update SeedPeer configuration.
func (s *Server) UpdateSeedPeer(ctx context.Context, req *manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error) { func (s *Server) UpdateSeedPeer(ctx context.Context, req *managerv1.UpdateSeedPeerRequest) (*managerv1.SeedPeer, error) {
seedPeer := model.SeedPeer{} seedPeer := model.SeedPeer{}
if err := s.db.WithContext(ctx).First(&seedPeer, model.SeedPeer{ if err := s.db.WithContext(ctx).First(&seedPeer, model.SeedPeer{
HostName: req.HostName, HostName: req.HostName,
@ -229,7 +230,7 @@ func (s *Server) UpdateSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer
logger.Warnf("%s refresh keepalive status failed in seed peer cluster %d", seedPeer.HostName, seedPeer.SeedPeerClusterID) logger.Warnf("%s refresh keepalive status failed in seed peer cluster %d", seedPeer.HostName, seedPeer.SeedPeerClusterID)
} }
return &manager.SeedPeer{ return &managerv1.SeedPeer{
Id: uint64(seedPeer.ID), Id: uint64(seedPeer.ID),
HostName: seedPeer.HostName, HostName: seedPeer.HostName,
Type: seedPeer.Type, Type: seedPeer.Type,
@ -246,7 +247,7 @@ func (s *Server) UpdateSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer
} }
// Create SeedPeer and associate cluster. // Create SeedPeer and associate cluster.
func (s *Server) createSeedPeer(ctx context.Context, req *manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error) { func (s *Server) createSeedPeer(ctx context.Context, req *managerv1.UpdateSeedPeerRequest) (*managerv1.SeedPeer, error) {
seedPeer := model.SeedPeer{ seedPeer := model.SeedPeer{
HostName: req.HostName, HostName: req.HostName,
Type: req.Type, Type: req.Type,
@ -264,7 +265,7 @@ func (s *Server) createSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer
return nil, status.Error(codes.Unknown, err.Error()) return nil, status.Error(codes.Unknown, err.Error())
} }
return &manager.SeedPeer{ return &managerv1.SeedPeer{
Id: uint64(seedPeer.ID), Id: uint64(seedPeer.ID),
HostName: seedPeer.HostName, HostName: seedPeer.HostName,
Type: seedPeer.Type, Type: seedPeer.Type,
@ -281,8 +282,8 @@ func (s *Server) createSeedPeer(ctx context.Context, req *manager.UpdateSeedPeer
} }
// Get Scheduler and Scheduler cluster configuration. // Get Scheduler and Scheduler cluster configuration.
func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequest) (*manager.Scheduler, error) { func (s *Server) GetScheduler(ctx context.Context, req *managerv1.GetSchedulerRequest) (*managerv1.Scheduler, error) {
var pbScheduler manager.Scheduler var pbScheduler managerv1.Scheduler
cacheKey := cache.MakeSchedulerCacheKey(uint(req.SchedulerClusterId), req.HostName, req.Ip) cacheKey := cache.MakeSchedulerCacheKey(uint(req.SchedulerClusterId), req.HostName, req.Ip)
// Cache hit. // Cache hit.
@ -316,7 +317,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ
} }
// Construct seed peers. // Construct seed peers.
var pbSeedPeers []*manager.SeedPeer var pbSeedPeers []*managerv1.SeedPeer
for _, seedPeerCluster := range scheduler.SchedulerCluster.SeedPeerClusters { for _, seedPeerCluster := range scheduler.SchedulerCluster.SeedPeerClusters {
seedPeerClusterConfig, err := seedPeerCluster.Config.MarshalJSON() seedPeerClusterConfig, err := seedPeerCluster.Config.MarshalJSON()
if err != nil { if err != nil {
@ -324,7 +325,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ
} }
for _, seedPeer := range seedPeerCluster.SeedPeers { for _, seedPeer := range seedPeerCluster.SeedPeers {
pbSeedPeers = append(pbSeedPeers, &manager.SeedPeer{ pbSeedPeers = append(pbSeedPeers, &managerv1.SeedPeer{
Id: uint64(seedPeer.ID), Id: uint64(seedPeer.ID),
HostName: seedPeer.HostName, HostName: seedPeer.HostName,
Type: seedPeer.Type, Type: seedPeer.Type,
@ -337,7 +338,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ
ObjectStoragePort: seedPeer.ObjectStoragePort, ObjectStoragePort: seedPeer.ObjectStoragePort,
State: seedPeer.State, State: seedPeer.State,
SeedPeerClusterId: uint64(seedPeer.SeedPeerClusterID), SeedPeerClusterId: uint64(seedPeer.SeedPeerClusterID),
SeedPeerCluster: &manager.SeedPeerCluster{ SeedPeerCluster: &managerv1.SeedPeerCluster{
Id: uint64(seedPeerCluster.ID), Id: uint64(seedPeerCluster.ID),
Name: seedPeerCluster.Name, Name: seedPeerCluster.Name,
Bio: seedPeerCluster.BIO, Bio: seedPeerCluster.BIO,
@ -348,7 +349,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ
} }
// Construct scheduler. // Construct scheduler.
pbScheduler = manager.Scheduler{ pbScheduler = managerv1.Scheduler{
Id: uint64(scheduler.ID), Id: uint64(scheduler.ID),
HostName: scheduler.HostName, HostName: scheduler.HostName,
Idc: scheduler.IDC, Idc: scheduler.IDC,
@ -358,7 +359,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ
Port: scheduler.Port, Port: scheduler.Port,
State: scheduler.State, State: scheduler.State,
SchedulerClusterId: uint64(scheduler.SchedulerClusterID), SchedulerClusterId: uint64(scheduler.SchedulerClusterID),
SchedulerCluster: &manager.SchedulerCluster{ SchedulerCluster: &managerv1.SchedulerCluster{
Id: uint64(scheduler.SchedulerCluster.ID), Id: uint64(scheduler.SchedulerCluster.ID),
Name: scheduler.SchedulerCluster.Name, Name: scheduler.SchedulerCluster.Name,
Bio: scheduler.SchedulerCluster.BIO, Bio: scheduler.SchedulerCluster.BIO,
@ -382,7 +383,7 @@ func (s *Server) GetScheduler(ctx context.Context, req *manager.GetSchedulerRequ
} }
// Update scheduler configuration. // Update scheduler configuration.
func (s *Server) UpdateScheduler(ctx context.Context, req *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) { func (s *Server) UpdateScheduler(ctx context.Context, req *managerv1.UpdateSchedulerRequest) (*managerv1.Scheduler, error) {
scheduler := model.Scheduler{} scheduler := model.Scheduler{}
if err := s.db.WithContext(ctx).First(&scheduler, model.Scheduler{ if err := s.db.WithContext(ctx).First(&scheduler, model.Scheduler{
HostName: req.HostName, HostName: req.HostName,
@ -412,7 +413,7 @@ func (s *Server) UpdateScheduler(ctx context.Context, req *manager.UpdateSchedul
logger.Warnf("%s refresh keepalive status failed in scheduler cluster %d", scheduler.HostName, scheduler.SchedulerClusterID) logger.Warnf("%s refresh keepalive status failed in scheduler cluster %d", scheduler.HostName, scheduler.SchedulerClusterID)
} }
return &manager.Scheduler{ return &managerv1.Scheduler{
Id: uint64(scheduler.ID), Id: uint64(scheduler.ID),
HostName: scheduler.HostName, HostName: scheduler.HostName,
Idc: scheduler.IDC, Idc: scheduler.IDC,
@ -426,7 +427,7 @@ func (s *Server) UpdateScheduler(ctx context.Context, req *manager.UpdateSchedul
} }
// Create scheduler and associate cluster. // Create scheduler and associate cluster.
func (s *Server) createScheduler(ctx context.Context, req *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) { func (s *Server) createScheduler(ctx context.Context, req *managerv1.UpdateSchedulerRequest) (*managerv1.Scheduler, error) {
scheduler := model.Scheduler{ scheduler := model.Scheduler{
HostName: req.HostName, HostName: req.HostName,
IDC: req.Idc, IDC: req.Idc,
@ -441,7 +442,7 @@ func (s *Server) createScheduler(ctx context.Context, req *manager.UpdateSchedul
return nil, status.Error(codes.Unknown, err.Error()) return nil, status.Error(codes.Unknown, err.Error())
} }
return &manager.Scheduler{ return &managerv1.Scheduler{
Id: uint64(scheduler.ID), Id: uint64(scheduler.ID),
HostName: scheduler.HostName, HostName: scheduler.HostName,
Idc: scheduler.IDC, Idc: scheduler.IDC,
@ -455,11 +456,11 @@ func (s *Server) createScheduler(ctx context.Context, req *manager.UpdateSchedul
} }
// List active schedulers configuration. // List active schedulers configuration.
func (s *Server) ListSchedulers(ctx context.Context, req *manager.ListSchedulersRequest) (*manager.ListSchedulersResponse, error) { func (s *Server) ListSchedulers(ctx context.Context, req *managerv1.ListSchedulersRequest) (*managerv1.ListSchedulersResponse, error) {
log := logger.WithHostnameAndIP(req.HostName, req.Ip) log := logger.WithHostnameAndIP(req.HostName, req.Ip)
// Count the number of the active peer. // Count the number of the active peer.
if s.config.Metrics.EnablePeerGauge && req.SourceType == manager.SourceType_PEER_SOURCE { if s.config.Metrics.EnablePeerGauge && req.SourceType == managerv1.SourceType_PEER_SOURCE {
count, err := s.getPeerCount(ctx, req) count, err := s.getPeerCount(ctx, req)
if err != nil { if err != nil {
log.Warnf("get peer count failed: %s", err.Error()) log.Warnf("get peer count failed: %s", err.Error())
@ -468,7 +469,7 @@ func (s *Server) ListSchedulers(ctx context.Context, req *manager.ListSchedulers
} }
} }
var pbListSchedulersResponse manager.ListSchedulersResponse var pbListSchedulersResponse managerv1.ListSchedulersResponse
cacheKey := cache.MakeSchedulersCacheKeyForPeer(req.HostName, req.Ip) cacheKey := cache.MakeSchedulersCacheKeyForPeer(req.HostName, req.Ip)
// Cache hit. // Cache hit.
@ -503,10 +504,10 @@ func (s *Server) ListSchedulers(ctx context.Context, req *manager.ListSchedulers
// Construct schedulers. // Construct schedulers.
for _, scheduler := range schedulers { for _, scheduler := range schedulers {
seedPeers := []*manager.SeedPeer{} seedPeers := []*managerv1.SeedPeer{}
for _, seedPeerCluster := range scheduler.SchedulerCluster.SeedPeerClusters { for _, seedPeerCluster := range scheduler.SchedulerCluster.SeedPeerClusters {
for _, seedPeer := range seedPeerCluster.SeedPeers { for _, seedPeer := range seedPeerCluster.SeedPeers {
seedPeers = append(seedPeers, &manager.SeedPeer{ seedPeers = append(seedPeers, &managerv1.SeedPeer{
Id: uint64(seedPeer.ID), Id: uint64(seedPeer.ID),
HostName: seedPeer.HostName, HostName: seedPeer.HostName,
Type: seedPeer.Type, Type: seedPeer.Type,
@ -523,7 +524,7 @@ func (s *Server) ListSchedulers(ctx context.Context, req *manager.ListSchedulers
} }
} }
pbListSchedulersResponse.Schedulers = append(pbListSchedulersResponse.Schedulers, &manager.Scheduler{ pbListSchedulersResponse.Schedulers = append(pbListSchedulersResponse.Schedulers, &managerv1.Scheduler{
Id: uint64(scheduler.ID), Id: uint64(scheduler.ID),
HostName: scheduler.HostName, HostName: scheduler.HostName,
Idc: scheduler.IDC, Idc: scheduler.IDC,
@ -551,7 +552,7 @@ func (s *Server) ListSchedulers(ctx context.Context, req *manager.ListSchedulers
} }
// Get the number of active peers. // Get the number of active peers.
func (s *Server) getPeerCount(ctx context.Context, req *manager.ListSchedulersRequest) (int, error) { func (s *Server) getPeerCount(ctx context.Context, req *managerv1.ListSchedulersRequest) (int, error) {
cacheKey := cache.MakePeerCacheKey(req.HostName, req.Ip) cacheKey := cache.MakePeerCacheKey(req.HostName, req.Ip)
if err := s.rdb.Set(ctx, cacheKey, types.Peer{ if err := s.rdb.Set(ctx, cacheKey, types.Peer{
ID: cacheKey, ID: cacheKey,
@ -570,7 +571,7 @@ func (s *Server) getPeerCount(ctx context.Context, req *manager.ListSchedulersRe
} }
// Get object storage configuration. // Get object storage configuration.
func (s *Server) GetObjectStorage(ctx context.Context, req *manager.GetObjectStorageRequest) (*manager.ObjectStorage, error) { func (s *Server) GetObjectStorage(ctx context.Context, req *managerv1.GetObjectStorageRequest) (*managerv1.ObjectStorage, error) {
log := logger.WithHostnameAndIP(req.HostName, req.Ip) log := logger.WithHostnameAndIP(req.HostName, req.Ip)
if !s.objectStorageConfig.Enable { if !s.objectStorageConfig.Enable {
@ -579,7 +580,7 @@ func (s *Server) GetObjectStorage(ctx context.Context, req *manager.GetObjectSto
return nil, status.Error(codes.NotFound, msg) return nil, status.Error(codes.NotFound, msg)
} }
return &manager.ObjectStorage{ return &managerv1.ObjectStorage{
Name: s.objectStorageConfig.Name, Name: s.objectStorageConfig.Name,
Region: s.objectStorageConfig.Region, Region: s.objectStorageConfig.Region,
Endpoint: s.objectStorageConfig.Endpoint, Endpoint: s.objectStorageConfig.Endpoint,
@ -589,7 +590,7 @@ func (s *Server) GetObjectStorage(ctx context.Context, req *manager.GetObjectSto
} }
// List buckets configuration. // List buckets configuration.
func (s *Server) ListBuckets(ctx context.Context, req *manager.ListBucketsRequest) (*manager.ListBucketsResponse, error) { func (s *Server) ListBuckets(ctx context.Context, req *managerv1.ListBucketsRequest) (*managerv1.ListBucketsResponse, error) {
log := logger.WithHostnameAndIP(req.HostName, req.Ip) log := logger.WithHostnameAndIP(req.HostName, req.Ip)
if !s.objectStorageConfig.Enable { if !s.objectStorageConfig.Enable {
@ -598,7 +599,7 @@ func (s *Server) ListBuckets(ctx context.Context, req *manager.ListBucketsReques
return nil, status.Error(codes.NotFound, msg) return nil, status.Error(codes.NotFound, msg)
} }
var pbListBucketsResponse manager.ListBucketsResponse var pbListBucketsResponse managerv1.ListBucketsResponse
cacheKey := cache.MakeBucketsCacheKey(s.objectStorageConfig.Name) cacheKey := cache.MakeBucketsCacheKey(s.objectStorageConfig.Name)
// Cache hit. // Cache hit.
@ -617,7 +618,7 @@ func (s *Server) ListBuckets(ctx context.Context, req *manager.ListBucketsReques
// Construct buckets. // Construct buckets.
for _, bucket := range buckets { for _, bucket := range buckets {
pbListBucketsResponse.Buckets = append(pbListBucketsResponse.Buckets, &manager.Bucket{ pbListBucketsResponse.Buckets = append(pbListBucketsResponse.Buckets, &managerv1.Bucket{
Name: bucket.Name, Name: bucket.Name,
}) })
} }
@ -636,7 +637,7 @@ func (s *Server) ListBuckets(ctx context.Context, req *manager.ListBucketsReques
} }
// KeepAlive with manager. // KeepAlive with manager.
func (s *Server) KeepAlive(stream manager.Manager_KeepAliveServer) error { func (s *Server) KeepAlive(stream managerv1.Manager_KeepAliveServer) error {
req, err := stream.Recv() req, err := stream.Recv()
if err != nil { if err != nil {
logger.Errorf("keepalive failed for the first time: %v", err) logger.Errorf("keepalive failed for the first time: %v", err)
@ -649,7 +650,7 @@ func (s *Server) KeepAlive(stream manager.Manager_KeepAliveServer) error {
logger.Infof("%s keepalive successfully for the first time in cluster %d", hostName, clusterID) logger.Infof("%s keepalive successfully for the first time in cluster %d", hostName, clusterID)
// Initialize active scheduler. // Initialize active scheduler.
if sourceType == manager.SourceType_SCHEDULER_SOURCE { if sourceType == managerv1.SourceType_SCHEDULER_SOURCE {
scheduler := model.Scheduler{} scheduler := model.Scheduler{}
if err := s.db.First(&scheduler, model.Scheduler{ if err := s.db.First(&scheduler, model.Scheduler{
HostName: hostName, HostName: hostName,
@ -669,7 +670,7 @@ func (s *Server) KeepAlive(stream manager.Manager_KeepAliveServer) error {
} }
// Initialize active seed peer. // Initialize active seed peer.
if sourceType == manager.SourceType_SEED_PEER_SOURCE { if sourceType == managerv1.SourceType_SEED_PEER_SOURCE {
seedPeer := model.SeedPeer{} seedPeer := model.SeedPeer{}
if err := s.db.First(&seedPeer, model.SeedPeer{ if err := s.db.First(&seedPeer, model.SeedPeer{
HostName: hostName, HostName: hostName,
@ -692,7 +693,7 @@ func (s *Server) KeepAlive(stream manager.Manager_KeepAliveServer) error {
_, err := stream.Recv() _, err := stream.Recv()
if err != nil { if err != nil {
// Inactive scheduler. // Inactive scheduler.
if sourceType == manager.SourceType_SCHEDULER_SOURCE { if sourceType == managerv1.SourceType_SCHEDULER_SOURCE {
scheduler := model.Scheduler{} scheduler := model.Scheduler{}
if err := s.db.First(&scheduler, model.Scheduler{ if err := s.db.First(&scheduler, model.Scheduler{
HostName: hostName, HostName: hostName,
@ -712,7 +713,7 @@ func (s *Server) KeepAlive(stream manager.Manager_KeepAliveServer) error {
} }
// Inactive seed peer. // Inactive seed peer.
if sourceType == manager.SourceType_SEED_PEER_SOURCE { if sourceType == managerv1.SourceType_SEED_PEER_SOURCE {
seedPeer := model.SeedPeer{} seedPeer := model.SeedPeer{}
if err := s.db.First(&seedPeer, model.SeedPeer{ if err := s.db.First(&seedPeer, model.SeedPeer{
HostName: hostName, HostName: hostName,
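On the other side of this stream, a scheduler or seed peer opens KeepAlive once, identifies itself in the first message, and heartbeats until shutdown; the handler above flips it to inactive when the stream breaks. A client-side sketch, assuming the managerv1 KeepAliveRequest fields (SourceType, HostName, ClusterId) and an illustrative interval:

package main

import (
	"context"
	"time"

	managerv1 "d7y.io/api/pkg/apis/manager/v1"
	"google.golang.org/grpc"
)

// keepAlive heartbeats to the manager until ctx is canceled. The server
// marks this instance active on the first message and inactive once the
// stream errors out, mirroring the handler above.
func keepAlive(ctx context.Context, conn *grpc.ClientConn, hostName string, clusterID uint64) error {
	stream, err := managerv1.NewManagerClient(conn).KeepAlive(ctx)
	if err != nil {
		return err
	}

	ticker := time.NewTicker(5 * time.Second) // illustrative interval
	defer ticker.Stop()
	for {
		if err := stream.Send(&managerv1.KeepAliveRequest{
			SourceType: managerv1.SourceType_SCHEDULER_SOURCE,
			HostName:   hostName,
			ClusterId:  clusterID,
		}); err != nil {
			return err // broken stream: the manager marks us inactive
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}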

View File

@ -8,8 +8,8 @@ import (
context "context" context "context"
reflect "reflect" reflect "reflect"
v1 "d7y.io/api/pkg/apis/manager/v1"
model "d7y.io/dragonfly/v2/manager/model" model "d7y.io/dragonfly/v2/manager/model"
manager "d7y.io/dragonfly/v2/pkg/rpc/manager"
gomock "github.com/golang/mock/gomock" gomock "github.com/golang/mock/gomock"
) )
@ -37,7 +37,7 @@ func (m *MockSearcher) EXPECT() *MockSearcherMockRecorder {
} }
// FindSchedulerClusters mocks base method. // FindSchedulerClusters mocks base method.
func (m *MockSearcher) FindSchedulerClusters(arg0 context.Context, arg1 []model.SchedulerCluster, arg2 *manager.ListSchedulersRequest) ([]model.SchedulerCluster, error) { func (m *MockSearcher) FindSchedulerClusters(arg0 context.Context, arg1 []model.SchedulerCluster, arg2 *v1.ListSchedulersRequest) ([]model.SchedulerCluster, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "FindSchedulerClusters", arg0, arg1, arg2) ret := m.ctrl.Call(m, "FindSchedulerClusters", arg0, arg1, arg2)
ret0, _ := ret[0].([]model.SchedulerCluster) ret0, _ := ret[0].([]model.SchedulerCluster)

View File

@ -27,10 +27,11 @@ import (
"github.com/mitchellh/mapstructure" "github.com/mitchellh/mapstructure"
managerv1 "d7y.io/api/pkg/apis/manager/v1"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/manager/model" "d7y.io/dragonfly/v2/manager/model"
"d7y.io/dragonfly/v2/pkg/math" "d7y.io/dragonfly/v2/pkg/math"
"d7y.io/dragonfly/v2/pkg/rpc/manager"
) )
const ( const (
@ -83,7 +84,7 @@ type Scopes struct {
type Searcher interface { type Searcher interface {
// FindSchedulerClusters finds scheduler clusters that best match the evaluation // FindSchedulerClusters finds scheduler clusters that best match the evaluation
FindSchedulerClusters(context.Context, []model.SchedulerCluster, *manager.ListSchedulersRequest) ([]model.SchedulerCluster, error) FindSchedulerClusters(context.Context, []model.SchedulerCluster, *managerv1.ListSchedulersRequest) ([]model.SchedulerCluster, error)
} }
type searcher struct{} type searcher struct{}
@ -100,7 +101,7 @@ func New(pluginDir string) Searcher {
} }
// FindSchedulerClusters finds scheduler clusters that best match the evaluation // FindSchedulerClusters finds scheduler clusters that best match the evaluation
func (s *searcher) FindSchedulerClusters(ctx context.Context, schedulerClusters []model.SchedulerCluster, client *manager.ListSchedulersRequest) ([]model.SchedulerCluster, error) { func (s *searcher) FindSchedulerClusters(ctx context.Context, schedulerClusters []model.SchedulerCluster, client *managerv1.ListSchedulersRequest) ([]model.SchedulerCluster, error) {
conditions := client.HostInfo conditions := client.HostInfo
if len(conditions) <= 0 { if len(conditions) <= 0 {
return nil, errors.New("empty conditions") return nil, errors.New("empty conditions")
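A usage sketch for this interface, mirroring the searcher test in the next file; the HostInfo keys are illustrative assumptions about scope matching:

package main

import (
	"context"
	"fmt"

	managerv1 "d7y.io/api/pkg/apis/manager/v1"

	"d7y.io/dragonfly/v2/manager/model"
	"d7y.io/dragonfly/v2/manager/searcher"
)

func main() {
	s := searcher.New("manager/plugins") // plugin dir; assumed to fall back to the default searcher
	clusters, err := s.FindSchedulerClusters(context.Background(),
		[]model.SchedulerCluster{{Name: "cluster-1"}},
		&managerv1.ListSchedulersRequest{
			HostName: "foo",
			Ip:       "127.0.0.1",
			// Free-form conditions matched against cluster scopes;
			// "location" and "idc" are illustrative keys.
			HostInfo: map[string]string{"location": "us-west", "idc": "idc-1"},
		})
	if err != nil {
		fmt.Println("no matching scheduler cluster:", err)
		return
	}
	fmt.Println(clusters)
}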

View File

@ -22,8 +22,9 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
managerv1 "d7y.io/api/pkg/apis/manager/v1"
"d7y.io/dragonfly/v2/manager/model" "d7y.io/dragonfly/v2/manager/model"
"d7y.io/dragonfly/v2/pkg/rpc/manager"
) )
func TestSchedulerCluster(t *testing.T) { func TestSchedulerCluster(t *testing.T) {
@ -730,7 +731,7 @@ func TestSchedulerCluster(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
searcher := New(pluginDir) searcher := New(pluginDir)
clusters, ok := searcher.FindSchedulerClusters(context.Background(), tc.schedulerClusters, &manager.ListSchedulersRequest{ clusters, ok := searcher.FindSchedulerClusters(context.Background(), tc.schedulerClusters, &managerv1.ListSchedulersRequest{
HostName: "foo", HostName: "foo",
Ip: "127.0.0.1", Ip: "127.0.0.1",
HostInfo: tc.conditions, HostInfo: tc.conditions,

View File

@ -21,9 +21,10 @@ import (
"fmt" "fmt"
"os" "os"
managerv1 "d7y.io/api/pkg/apis/manager/v1"
"d7y.io/dragonfly/v2/manager/model" "d7y.io/dragonfly/v2/manager/model"
"d7y.io/dragonfly/v2/manager/searcher" "d7y.io/dragonfly/v2/manager/searcher"
"d7y.io/dragonfly/v2/pkg/rpc/manager"
) )
func main() { func main() {
@ -33,7 +34,7 @@ func main() {
os.Exit(1) os.Exit(1)
} }
clusters, err := s.FindSchedulerClusters(context.Background(), []model.SchedulerCluster{}, &manager.ListSchedulersRequest{}) clusters, err := s.FindSchedulerClusters(context.Background(), []model.SchedulerCluster{}, &managerv1.ListSchedulersRequest{})
if err != nil { if err != nil {
fmt.Println("scheduler cluster not found") fmt.Println("scheduler cluster not found")
os.Exit(1) os.Exit(1)

View File

@ -19,13 +19,14 @@ package main
import ( import (
"context" "context"
managerv1 "d7y.io/api/pkg/apis/manager/v1"
"d7y.io/dragonfly/v2/manager/model" "d7y.io/dragonfly/v2/manager/model"
"d7y.io/dragonfly/v2/pkg/rpc/manager"
) )
type searcher struct{} type searcher struct{}
func (s *searcher) FindSchedulerClusters(ctx context.Context, schedulerClusters []model.SchedulerCluster, client *manager.ListSchedulersRequest) ([]model.SchedulerCluster, error) { func (s *searcher) FindSchedulerClusters(ctx context.Context, schedulerClusters []model.SchedulerCluster, client *managerv1.ListSchedulersRequest) ([]model.SchedulerCluster, error) {
return []model.SchedulerCluster{{Name: "foo"}}, nil return []model.SchedulerCluster{{Name: "foo"}}, nil
} }

View File

@ -19,9 +19,10 @@ package idgen
import ( import (
"strings" "strings"
commonv1 "d7y.io/api/pkg/apis/common/v1"
"d7y.io/dragonfly/v2/pkg/digest" "d7y.io/dragonfly/v2/pkg/digest"
neturl "d7y.io/dragonfly/v2/pkg/net/url" neturl "d7y.io/dragonfly/v2/pkg/net/url"
"d7y.io/dragonfly/v2/pkg/rpc/base"
pkgstrings "d7y.io/dragonfly/v2/pkg/strings" pkgstrings "d7y.io/dragonfly/v2/pkg/strings"
) )
@ -31,19 +32,19 @@ const (
// TaskID generates a task id. // TaskID generates a task id.
// filter is separated by & character. // filter is separated by & character.
func TaskID(url string, meta *base.UrlMeta) string { func TaskID(url string, meta *commonv1.UrlMeta) string {
return taskID(url, meta, false) return taskID(url, meta, false)
} }
// ParentTaskID generates a task id like TaskID, but without range. // ParentTaskID generates a task id like TaskID, but without range.
// this func is used to check the parent tasks for ranged requests // this func is used to check the parent tasks for ranged requests
func ParentTaskID(url string, meta *base.UrlMeta) string { func ParentTaskID(url string, meta *commonv1.UrlMeta) string {
return taskID(url, meta, true) return taskID(url, meta, true)
} }
// taskID generates a task id. // taskID generates a task id.
// filter is separated by & character. // filter is separated by & character.
func taskID(url string, meta *base.UrlMeta, ignoreRange bool) string { func taskID(url string, meta *commonv1.UrlMeta, ignoreRange bool) string {
if meta == nil { if meta == nil {
return digest.SHA256FromStrings(url) return digest.SHA256FromStrings(url)
} }
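A short usage sketch of these helpers, assuming the usual filter semantics (query params named in Filter are dropped before hashing) and that ParentTaskID ignores Range; URLs and values are illustrative:

package main

import (
	"fmt"

	commonv1 "d7y.io/api/pkg/apis/common/v1"

	"d7y.io/dragonfly/v2/pkg/idgen"
)

func main() {
	meta := &commonv1.UrlMeta{
		Tag:    "foo",
		Range:  "0-1023",
		Filter: "token&ts", // drop these query params when hashing
	}
	// Expected to print the same id twice: only filtered params differ.
	fmt.Println(idgen.TaskID("https://example.com/file?token=a&ts=1", meta))
	fmt.Println(idgen.TaskID("https://example.com/file?token=b&ts=2", meta))
	// ParentTaskID ignores Range, so ranged requests share a parent task.
	fmt.Println(idgen.ParentTaskID("https://example.com/file?token=a&ts=1", meta))
}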

View File

@ -21,14 +21,14 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"d7y.io/dragonfly/v2/pkg/rpc/base" commonv1 "d7y.io/api/pkg/apis/common/v1"
) )
func TestTaskID(t *testing.T) { func TestTaskID(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
url string url string
meta *base.UrlMeta meta *commonv1.UrlMeta
ignoreRange bool ignoreRange bool
expect func(t *testing.T, d any) expect func(t *testing.T, d any)
}{ }{
@ -44,7 +44,7 @@ func TestTaskID(t *testing.T) {
{ {
name: "generate taskID with meta", name: "generate taskID with meta",
url: "https://example.com", url: "https://example.com",
meta: &base.UrlMeta{ meta: &commonv1.UrlMeta{
Range: "foo", Range: "foo",
Digest: "bar", Digest: "bar",
Tag: "", Tag: "",
@ -57,7 +57,7 @@ func TestTaskID(t *testing.T) {
{ {
name: "generate taskID with meta", name: "generate taskID with meta",
url: "https://example.com", url: "https://example.com",
meta: &base.UrlMeta{ meta: &commonv1.UrlMeta{
Range: "foo", Range: "foo",
Digest: "bar", Digest: "bar",
Tag: "", Tag: "",
@ -71,7 +71,7 @@ func TestTaskID(t *testing.T) {
{ {
name: "generate taskID with filter", name: "generate taskID with filter",
url: "https://example.com?foo=foo&bar=bar", url: "https://example.com?foo=foo&bar=bar",
meta: &base.UrlMeta{ meta: &commonv1.UrlMeta{
Tag: "foo", Tag: "foo",
Filter: "foo&bar", Filter: "foo&bar",
}, },
@ -83,7 +83,7 @@ func TestTaskID(t *testing.T) {
{ {
name: "generate taskID with tag", name: "generate taskID with tag",
url: "https://example.com", url: "https://example.com",
meta: &base.UrlMeta{ meta: &commonv1.UrlMeta{
Tag: "foo", Tag: "foo",
}, },
expect: func(t *testing.T, d any) { expect: func(t *testing.T, d any) {

File diff suppressed because it is too large

View File

@ -1,681 +0,0 @@
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: pkg/rpc/base/base.proto
package base
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"strings"
"time"
"unicode/utf8"
"google.golang.org/protobuf/types/known/anypb"
)
// ensure the imports are used
var (
_ = bytes.MinRead
_ = errors.New("")
_ = fmt.Print
_ = utf8.UTFMax
_ = (*regexp.Regexp)(nil)
_ = (*strings.Reader)(nil)
_ = net.IPv4len
_ = time.Duration(0)
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
)
// Validate checks the field values on GrpcDfError with the rules defined in
// the proto definition for this message. If any rules are violated, an error
// is returned.
func (m *GrpcDfError) Validate() error {
if m == nil {
return nil
}
// no validation rules for Code
// no validation rules for Message
return nil
}
// GrpcDfErrorValidationError is the validation error returned by
// GrpcDfError.Validate if the designated constraints aren't met.
type GrpcDfErrorValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e GrpcDfErrorValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e GrpcDfErrorValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e GrpcDfErrorValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e GrpcDfErrorValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e GrpcDfErrorValidationError) ErrorName() string { return "GrpcDfErrorValidationError" }
// Error satisfies the builtin error interface
func (e GrpcDfErrorValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sGrpcDfError.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = GrpcDfErrorValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = GrpcDfErrorValidationError{}
// Validate checks the field values on UrlMeta with the rules defined in the
// proto definition for this message. If any rules are violated, an error is returned.
func (m *UrlMeta) Validate() error {
if m == nil {
return nil
}
if m.GetDigest() != "" {
if !_UrlMeta_Digest_Pattern.MatchString(m.GetDigest()) {
return UrlMetaValidationError{
field: "Digest",
reason: "value does not match regex pattern \"^(md5)|(sha256):[A-Fa-f0-9]+$\"",
}
}
}
// no validation rules for Tag
if m.GetRange() != "" {
if !_UrlMeta_Range_Pattern.MatchString(m.GetRange()) {
return UrlMetaValidationError{
field: "Range",
reason: "value does not match regex pattern \"^[0-9]+-[0-9]*$\"",
}
}
}
// no validation rules for Filter
// no validation rules for Header
return nil
}
// UrlMetaValidationError is the validation error returned by UrlMeta.Validate
// if the designated constraints aren't met.
type UrlMetaValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e UrlMetaValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e UrlMetaValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e UrlMetaValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e UrlMetaValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e UrlMetaValidationError) ErrorName() string { return "UrlMetaValidationError" }
// Error satisfies the builtin error interface
func (e UrlMetaValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sUrlMeta.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = UrlMetaValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = UrlMetaValidationError{}
var _UrlMeta_Digest_Pattern = regexp.MustCompile("^(md5)|(sha256):[A-Fa-f0-9]+$")
var _UrlMeta_Range_Pattern = regexp.MustCompile("^[0-9]+-[0-9]*$")
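A caller-side sketch of the generated validation above: Validate returns the first violated rule as a typed error whose Field and Reason accessors are defined above. This targets the package as it existed before this removal; the commonv1 replacement follows the same pattern:

package main

import (
	"errors"
	"log"

	"d7y.io/dragonfly/v2/pkg/rpc/base"
)

func main() {
	meta := &base.UrlMeta{Digest: "sha1:abc"} // violates the digest pattern
	if err := meta.Validate(); err != nil {
		var verr base.UrlMetaValidationError
		if errors.As(err, &verr) {
			log.Printf("invalid %s: %s", verr.Field(), verr.Reason())
		}
	}
}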
// Validate checks the field values on HostLoad with the rules defined in the
// proto definition for this message. If any rules are violated, an error is returned.
func (m *HostLoad) Validate() error {
if m == nil {
return nil
}
if val := m.GetCpuRatio(); val < 0 || val > 1 {
return HostLoadValidationError{
field: "CpuRatio",
reason: "value must be inside range [0, 1]",
}
}
if val := m.GetMemRatio(); val < 0 || val > 1 {
return HostLoadValidationError{
field: "MemRatio",
reason: "value must be inside range [0, 1]",
}
}
if val := m.GetDiskRatio(); val < 0 || val > 1 {
return HostLoadValidationError{
field: "DiskRatio",
reason: "value must be inside range [0, 1]",
}
}
return nil
}
// HostLoadValidationError is the validation error returned by
// HostLoad.Validate if the designated constraints aren't met.
type HostLoadValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e HostLoadValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e HostLoadValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e HostLoadValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e HostLoadValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e HostLoadValidationError) ErrorName() string { return "HostLoadValidationError" }
// Error satisfies the builtin error interface
func (e HostLoadValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sHostLoad.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = HostLoadValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = HostLoadValidationError{}
// Validate checks the field values on PieceTaskRequest with the rules defined
// in the proto definition for this message. If any rules are violated, an
// error is returned.
func (m *PieceTaskRequest) Validate() error {
if m == nil {
return nil
}
if utf8.RuneCountInString(m.GetTaskId()) < 1 {
return PieceTaskRequestValidationError{
field: "TaskId",
reason: "value length must be at least 1 runes",
}
}
if utf8.RuneCountInString(m.GetSrcPid()) < 1 {
return PieceTaskRequestValidationError{
field: "SrcPid",
reason: "value length must be at least 1 runes",
}
}
if utf8.RuneCountInString(m.GetDstPid()) < 1 {
return PieceTaskRequestValidationError{
field: "DstPid",
reason: "value length must be at least 1 runes",
}
}
if m.GetStartNum() < 0 {
return PieceTaskRequestValidationError{
field: "StartNum",
reason: "value must be greater than or equal to 0",
}
}
if m.GetLimit() < 0 {
return PieceTaskRequestValidationError{
field: "Limit",
reason: "value must be greater than or equal to 0",
}
}
return nil
}
// PieceTaskRequestValidationError is the validation error returned by
// PieceTaskRequest.Validate if the designated constraints aren't met.
type PieceTaskRequestValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e PieceTaskRequestValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e PieceTaskRequestValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e PieceTaskRequestValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e PieceTaskRequestValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e PieceTaskRequestValidationError) ErrorName() string { return "PieceTaskRequestValidationError" }
// Error satisfies the builtin error interface
func (e PieceTaskRequestValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sPieceTaskRequest.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = PieceTaskRequestValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = PieceTaskRequestValidationError{}
// Validate checks the field values on PieceInfo with the rules defined in the
// proto definition for this message. If any rules are violated, an error is returned.
func (m *PieceInfo) Validate() error {
if m == nil {
return nil
}
// no validation rules for PieceNum
if m.GetRangeStart() < 0 {
return PieceInfoValidationError{
field: "RangeStart",
reason: "value must be greater than or equal to 0",
}
}
if m.GetRangeSize() < 0 {
return PieceInfoValidationError{
field: "RangeSize",
reason: "value must be greater than or equal to 0",
}
}
if m.GetPieceMd5() != "" {
if !_PieceInfo_PieceMd5_Pattern.MatchString(m.GetPieceMd5()) {
return PieceInfoValidationError{
field: "PieceMd5",
reason: "value does not match regex pattern \"([a-f\\\\d]{32}|[A-F\\\\d]{32}|[a-f\\\\d]{16}|[A-F\\\\d]{16})\"",
}
}
}
if m.GetPieceOffset() < 0 {
return PieceInfoValidationError{
field: "PieceOffset",
reason: "value must be greater than or equal to 0",
}
}
// no validation rules for PieceStyle
if m.GetDownloadCost() < 0 {
return PieceInfoValidationError{
field: "DownloadCost",
reason: "value must be greater than or equal to 0",
}
}
return nil
}
// PieceInfoValidationError is the validation error returned by
// PieceInfo.Validate if the designated constraints aren't met.
type PieceInfoValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e PieceInfoValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e PieceInfoValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e PieceInfoValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e PieceInfoValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e PieceInfoValidationError) ErrorName() string { return "PieceInfoValidationError" }
// Error satisfies the builtin error interface
func (e PieceInfoValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sPieceInfo.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = PieceInfoValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = PieceInfoValidationError{}
var _PieceInfo_PieceMd5_Pattern = regexp.MustCompile("([a-f\\d]{32}|[A-F\\d]{32}|[a-f\\d]{16}|[A-F\\d]{16})")
// Validate checks the field values on ExtendAttribute with the rules defined
// in the proto definition for this message. If any rules are violated, an
// error is returned.
func (m *ExtendAttribute) Validate() error {
if m == nil {
return nil
}
// no validation rules for Header
// no validation rules for StatusCode
// no validation rules for Status
return nil
}
// ExtendAttributeValidationError is the validation error returned by
// ExtendAttribute.Validate if the designated constraints aren't met.
type ExtendAttributeValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e ExtendAttributeValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e ExtendAttributeValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e ExtendAttributeValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e ExtendAttributeValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e ExtendAttributeValidationError) ErrorName() string { return "ExtendAttributeValidationError" }
// Error satisfies the builtin error interface
func (e ExtendAttributeValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sExtendAttribute.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = ExtendAttributeValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = ExtendAttributeValidationError{}
// Validate checks the field values on PiecePacket with the rules defined in
// the proto definition for this message. If any rules are violated, an error
// is returned.
func (m *PiecePacket) Validate() error {
if m == nil {
return nil
}
if utf8.RuneCountInString(m.GetTaskId()) < 1 {
return PiecePacketValidationError{
field: "TaskId",
reason: "value length must be at least 1 runes",
}
}
if utf8.RuneCountInString(m.GetDstPid()) < 1 {
return PiecePacketValidationError{
field: "DstPid",
reason: "value length must be at least 1 runes",
}
}
if utf8.RuneCountInString(m.GetDstAddr()) < 1 {
return PiecePacketValidationError{
field: "DstAddr",
reason: "value length must be at least 1 runes",
}
}
for idx, item := range m.GetPieceInfos() {
_, _ = idx, item
if v, ok := interface{}(item).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return PiecePacketValidationError{
field: fmt.Sprintf("PieceInfos[%v]", idx),
reason: "embedded message failed validation",
cause: err,
}
}
}
}
// no validation rules for TotalPiece
// no validation rules for ContentLength
// no validation rules for PieceMd5Sign
if v, ok := interface{}(m.GetExtendAttribute()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return PiecePacketValidationError{
field: "ExtendAttribute",
reason: "embedded message failed validation",
cause: err,
}
}
}
return nil
}
// PiecePacketValidationError is the validation error returned by
// PiecePacket.Validate if the designated constraints aren't met.
type PiecePacketValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e PiecePacketValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e PiecePacketValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e PiecePacketValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e PiecePacketValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e PiecePacketValidationError) ErrorName() string { return "PiecePacketValidationError" }
// Error satisfies the builtin error interface
func (e PiecePacketValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sPiecePacket.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = PiecePacketValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = PiecePacketValidationError{}

View File

@ -1,185 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package base;
import "validate/validate.proto";
option go_package = "d7y.io/dragonfly/v2/pkg/rpc/base";
enum Code{
X_UNSPECIFIED = 0;
// success code 200-299
Success = 200;
// framework cannot find server node
ServerUnavailable = 500;
// common response error 1000-1999
// client can be migrated to another scheduler/CDN
ResourceLacked = 1000;
BackToSourceAborted = 1001;
BadRequest = 1400;
PeerTaskNotFound = 1404;
UnknownError = 1500;
RequestTimeOut = 1504;
// client response error 4000-4999
ClientError = 4000;
ClientPieceRequestFail = 4001; // get piece task from other peer error
ClientScheduleTimeout = 4002; // wait scheduler response timeout
ClientContextCanceled = 4003;
ClientWaitPieceReady = 4004; // when the target peer downloads from the source slowly, the client should wait
ClientPieceDownloadFail = 4005;
ClientRequestLimitFail = 4006;
ClientConnectionError = 4007;
ClientBackSourceError = 4008;
ClientPieceNotFound = 4404;
// scheduler response error 5000-5999
SchedError = 5000;
SchedNeedBackSource = 5001; // client should try to download from source
SchedPeerGone = 5002; // client should disconnect from scheduler
SchedPeerNotFound = 5004; // peer not found in scheduler
SchedPeerPieceResultReportFail = 5005; // report piece
SchedTaskStatusError = 5006; // task status is fail
// cdnsystem response error 6000-6999
CDNTaskRegistryFail = 6001;
CDNTaskNotFound = 6404;
// manager response error 7000-7999
InvalidResourceType = 7001;
}
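Since the numbering partitions codes by component, a small classifier illustrates the convention; a Go sketch with boundaries copied from the comments above, not part of any API (out-of-range codes such as ServerUnavailable, 500, map to "unknown"):

package main

import "fmt"

// component reports which side produced a response code, following the
// ranges documented above.
func component(code int32) string {
	switch {
	case code >= 200 && code <= 299:
		return "success"
	case code >= 1000 && code <= 1999:
		return "common"
	case code >= 4000 && code <= 4999:
		return "client"
	case code >= 5000 && code <= 5999:
		return "scheduler"
	case code >= 6000 && code <= 6999:
		return "cdnsystem"
	case code >= 7000 && code <= 7999:
		return "manager"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(component(4004)) // client
}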
enum PieceStyle{
PLAIN = 0;
}
enum SizeScope{
// size > one piece size
NORMAL = 0;
// 128 bytes < size <= one piece size and is plain type
SMALL = 1;
// size <= 128 bytes and is plain type
TINY = 2;
}
// Pattern represents pattern of task.
enum Pattern{
// Default pattern, scheduler will use all p2p nodes,
// including dfdaemon and seed peers.
P2P = 0;
// Seed peer pattern, scheduler will use only seed peers.
SEED_PEER = 1;
// Source pattern, scheduler will direct the client back to the source
// when there is no available peer in p2p.
SOURCE = 2;
}
// TaskType represents type of task.
enum TaskType{
// Normal is normal type of task,
// normal task is a normal p2p task.
Normal = 0;
// DfCache is dfcache type of task,
// dfcache task is a cache task, and the task url is a fake url.
// It can only be used for caching and cannot be downloaded back to source.
DfCache = 1;
// DfStore is dfstore type of task,
// dfstore task is a persistent task in backend.
DfStore = 2;
}
message GrpcDfError {
Code code = 1;
string message = 2;
}
// UrlMeta describes url meta info.
message UrlMeta{
// digest checks integrity of url content, for example md5:xxx or sha256:yyy
string digest = 1 [(validate.rules).string = {pattern: "^(md5)|(sha256):[A-Fa-f0-9]+$", ignore_empty:true}];
// url tag identifies different tasks for the same url, conflicts with digest
string tag = 2;
// content range for url
string range = 3 [(validate.rules).string = {pattern: "^[0-9]+-[0-9]*$", ignore_empty:true}];
// filter url used to generate task id
string filter = 4;
// other url header infos
map<string, string> header = 5;
}
message HostLoad{
// cpu usage
float cpu_ratio = 1 [(validate.rules).float = {gte: 0, lte: 1}];
// memory usage
float mem_ratio = 2 [(validate.rules).float = {gte: 0, lte: 1}];
// disk space usage
float disk_ratio = 3 [(validate.rules).float = {gte: 0, lte: 1}];
}
message PieceTaskRequest{
string task_id = 1 [(validate.rules).string.min_len = 1];
string src_pid = 2 [(validate.rules).string.min_len = 1];
string dst_pid = 3 [(validate.rules).string.min_len = 1];
// piece number
uint32 start_num = 4 [(validate.rules).uint32.gte = 0];
// expected piece count, limit = 0 means request as many pieces as possible
uint32 limit = 5 [(validate.rules).uint32.gte = 0];
}
message PieceInfo{
// piece_num < 0 represents the flag that starts piece reporting
int32 piece_num = 1;
uint64 range_start = 2 [(validate.rules).uint64.gte = 0];
uint32 range_size = 3 [(validate.rules).uint32.gte = 0];
string piece_md5 = 4 [(validate.rules).string = {pattern:"([a-f\\d]{32}|[A-F\\d]{32}|[a-f\\d]{16}|[A-F\\d]{16})", ignore_empty:true}];
uint64 piece_offset = 5 [(validate.rules).uint64.gte = 0];
base.PieceStyle piece_style = 6;
// total time (milliseconds) consumed
uint64 download_cost = 7 [(validate.rules).uint64.gte = 0];
}
message ExtendAttribute{
// task response header, eg: HTTP Response Header
map<string, string> header = 1;
// task response code, eg: HTTP Status Code
int32 status_code = 2;
// task response status, eg: HTTP Status
string status = 3;
}
message PiecePacket{
string task_id = 2 [(validate.rules).string.min_len = 1];
string dst_pid = 3 [(validate.rules).string.min_len = 1];
// ip:port
string dst_addr = 4 [(validate.rules).string.min_len = 1];
repeated PieceInfo piece_infos = 5;
// total piece count for url, total_piece < 0 represents the total piece count is unknown
int32 total_piece = 6;
// content_length < 0 represents content length is unknown
int64 content_length = 7;
// sha256 code of all piece md5
string piece_md5_sign = 8;
// task extend attribute
ExtendAttribute extend_attribute = 9;
}
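Tying these messages together: a downloader requests piece metadata with PieceTaskRequest (limit = 0 meaning as many as available) and walks the resulting PiecePacket. A sketch against the Seeder client defined elsewhere in this package tree (this commit moves these types to d7y.io/api, but the call shape is unchanged); IDs are supplied by the caller:

package main

import (
	"context"
	"fmt"

	"d7y.io/dragonfly/v2/pkg/rpc/base"
	"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
)

// fetchPieces asks a seeder for every known piece of a task and prints
// their ranges. Error handling is deliberately minimal.
func fetchPieces(ctx context.Context, client cdnsystem.SeederClient, taskID, srcPid, dstPid string) error {
	packet, err := client.GetPieceTasks(ctx, &base.PieceTaskRequest{
		TaskId:   taskID,
		SrcPid:   srcPid,
		DstPid:   dstPid,
		StartNum: 0,
		Limit:    0, // 0: request as many pieces as possible
	})
	if err != nil {
		return err
	}
	for _, p := range packet.PieceInfos {
		// RangeStart/RangeSize locate the piece in the task content;
		// PieceMd5 guards each piece, PieceMd5Sign the whole set.
		fmt.Printf("piece %d: offset=%d size=%d\n", p.PieceNum, p.RangeStart, p.RangeSize)
	}
	return nil
}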

View File

@ -1,5 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: base/base.pb.go
// Package mocks is a generated GoMock package.
package mocks

View File

@ -1,591 +0,0 @@
//
// Copyright 2020 The Dragonfly Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
// protoc v3.19.4
// source: pkg/rpc/cdnsystem/cdnsystem.proto
package cdnsystem
import (
context "context"
base "d7y.io/dragonfly/v2/pkg/rpc/base"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type SeedRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
TaskId string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
UrlMeta *base.UrlMeta `protobuf:"bytes,3,opt,name=url_meta,json=urlMeta,proto3" json:"url_meta,omitempty"`
}
func (x *SeedRequest) Reset() {
*x = SeedRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SeedRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SeedRequest) ProtoMessage() {}
func (x *SeedRequest) ProtoReflect() protoreflect.Message {
mi := &file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SeedRequest.ProtoReflect.Descriptor instead.
func (*SeedRequest) Descriptor() ([]byte, []int) {
return file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescGZIP(), []int{0}
}
func (x *SeedRequest) GetTaskId() string {
if x != nil {
return x.TaskId
}
return ""
}
func (x *SeedRequest) GetUrl() string {
if x != nil {
return x.Url
}
return ""
}
func (x *SeedRequest) GetUrlMeta() *base.UrlMeta {
if x != nil {
return x.UrlMeta
}
return nil
}
// keep piece meta and data separately
// check piece md5, md5s sign and total content length
type PieceSeed struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// peer id for cdn node, needs the _CDN suffix
PeerId string `protobuf:"bytes,2,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
// cdn host id
HostId string `protobuf:"bytes,3,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"`
PieceInfo *base.PieceInfo `protobuf:"bytes,4,opt,name=piece_info,json=pieceInfo,proto3" json:"piece_info,omitempty"`
// whether or not all seeds are downloaded
Done bool `protobuf:"varint,5,opt,name=done,proto3" json:"done,omitempty"`
// content total length for the url, content_length < 0 represents content length is unknown
ContentLength int64 `protobuf:"varint,6,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"`
// total piece count, -1 represents task is downloading or failed
TotalPieceCount int32 `protobuf:"varint,7,opt,name=total_piece_count,json=totalPieceCount,proto3" json:"total_piece_count,omitempty"`
// begin time for the piece downloading
BeginTime uint64 `protobuf:"varint,8,opt,name=begin_time,json=beginTime,proto3" json:"begin_time,omitempty"`
// end time for the piece downloading
EndTime uint64 `protobuf:"varint,9,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
// task extend attribute
ExtendAttribute *base.ExtendAttribute `protobuf:"bytes,10,opt,name=extend_attribute,json=extendAttribute,proto3" json:"extend_attribute,omitempty"`
}
func (x *PieceSeed) Reset() {
*x = PieceSeed{}
if protoimpl.UnsafeEnabled {
mi := &file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PieceSeed) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PieceSeed) ProtoMessage() {}
func (x *PieceSeed) ProtoReflect() protoreflect.Message {
mi := &file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PieceSeed.ProtoReflect.Descriptor instead.
func (*PieceSeed) Descriptor() ([]byte, []int) {
return file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescGZIP(), []int{1}
}
func (x *PieceSeed) GetPeerId() string {
if x != nil {
return x.PeerId
}
return ""
}
func (x *PieceSeed) GetHostId() string {
if x != nil {
return x.HostId
}
return ""
}
func (x *PieceSeed) GetPieceInfo() *base.PieceInfo {
if x != nil {
return x.PieceInfo
}
return nil
}
func (x *PieceSeed) GetDone() bool {
if x != nil {
return x.Done
}
return false
}
func (x *PieceSeed) GetContentLength() int64 {
if x != nil {
return x.ContentLength
}
return 0
}
func (x *PieceSeed) GetTotalPieceCount() int32 {
if x != nil {
return x.TotalPieceCount
}
return 0
}
func (x *PieceSeed) GetBeginTime() uint64 {
if x != nil {
return x.BeginTime
}
return 0
}
func (x *PieceSeed) GetEndTime() uint64 {
if x != nil {
return x.EndTime
}
return 0
}
func (x *PieceSeed) GetExtendAttribute() *base.ExtendAttribute {
if x != nil {
return x.ExtendAttribute
}
return nil
}
var File_pkg_rpc_cdnsystem_cdnsystem_proto protoreflect.FileDescriptor
var file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDesc = []byte{
0x0a, 0x21, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73,
0x74, 0x65, 0x6d, 0x2f, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x12, 0x09, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x1a, 0x17,
0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x62, 0x61, 0x73,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x22, 0x75, 0x0a, 0x0b, 0x53, 0x65, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x20, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49,
0x64, 0x12, 0x1a, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08,
0xfa, 0x42, 0x05, 0x72, 0x03, 0x88, 0x01, 0x01, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x28, 0x0a,
0x08, 0x75, 0x72, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x0d, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x55, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x07,
0x75, 0x72, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x22, 0xe2, 0x02, 0x0a, 0x09, 0x50, 0x69, 0x65, 0x63,
0x65, 0x53, 0x65, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52,
0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x20, 0x0a, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x5f,
0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10,
0x01, 0x52, 0x06, 0x68, 0x6f, 0x73, 0x74, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x0a, 0x70, 0x69, 0x65,
0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09,
0x70, 0x69, 0x65, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e,
0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x25, 0x0a,
0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18,
0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x65,
0x6e, 0x67, 0x74, 0x68, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x69,
0x65, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52,
0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x69, 0x65, 0x63, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74,
0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08,
0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12,
0x19, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
0x04, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x10, 0x65, 0x78,
0x74, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x0a,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65,
0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0f, 0x65, 0x78, 0x74,
0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x32, 0xc4, 0x01, 0x0a,
0x06, 0x53, 0x65, 0x65, 0x64, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0b, 0x4f, 0x62, 0x74, 0x61, 0x69,
0x6e, 0x53, 0x65, 0x65, 0x64, 0x73, 0x12, 0x16, 0x2e, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74,
0x65, 0x6d, 0x2e, 0x53, 0x65, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14,
0x2e, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65,
0x53, 0x65, 0x65, 0x64, 0x30, 0x01, 0x12, 0x3a, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x69, 0x65,
0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50,
0x69, 0x65, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x11, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b,
0x65, 0x74, 0x12, 0x3f, 0x0a, 0x0e, 0x53, 0x79, 0x6e, 0x63, 0x50, 0x69, 0x65, 0x63, 0x65, 0x54,
0x61, 0x73, 0x6b, 0x73, 0x12, 0x16, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63,
0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x62,
0x61, 0x73, 0x65, 0x2e, 0x50, 0x69, 0x65, 0x63, 0x65, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x28,
0x01, 0x30, 0x01, 0x42, 0x27, 0x5a, 0x25, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x64, 0x72,
0x61, 0x67, 0x6f, 0x6e, 0x66, 0x6c, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x72,
0x70, 0x63, 0x2f, 0x63, 0x64, 0x6e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
}
var (
file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescOnce sync.Once
file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescData = file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDesc
)
func file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescGZIP() []byte {
file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescOnce.Do(func() {
file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescData)
})
return file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDescData
}
var file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_pkg_rpc_cdnsystem_cdnsystem_proto_goTypes = []interface{}{
(*SeedRequest)(nil), // 0: cdnsystem.SeedRequest
(*PieceSeed)(nil), // 1: cdnsystem.PieceSeed
(*base.UrlMeta)(nil), // 2: base.UrlMeta
(*base.PieceInfo)(nil), // 3: base.PieceInfo
(*base.ExtendAttribute)(nil), // 4: base.ExtendAttribute
(*base.PieceTaskRequest)(nil), // 5: base.PieceTaskRequest
(*base.PiecePacket)(nil), // 6: base.PiecePacket
}
var file_pkg_rpc_cdnsystem_cdnsystem_proto_depIdxs = []int32{
2, // 0: cdnsystem.SeedRequest.url_meta:type_name -> base.UrlMeta
3, // 1: cdnsystem.PieceSeed.piece_info:type_name -> base.PieceInfo
4, // 2: cdnsystem.PieceSeed.extend_attribute:type_name -> base.ExtendAttribute
0, // 3: cdnsystem.Seeder.ObtainSeeds:input_type -> cdnsystem.SeedRequest
5, // 4: cdnsystem.Seeder.GetPieceTasks:input_type -> base.PieceTaskRequest
5, // 5: cdnsystem.Seeder.SyncPieceTasks:input_type -> base.PieceTaskRequest
1, // 6: cdnsystem.Seeder.ObtainSeeds:output_type -> cdnsystem.PieceSeed
6, // 7: cdnsystem.Seeder.GetPieceTasks:output_type -> base.PiecePacket
6, // 8: cdnsystem.Seeder.SyncPieceTasks:output_type -> base.PiecePacket
6, // [6:9] is the sub-list for method output_type
3, // [3:6] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_pkg_rpc_cdnsystem_cdnsystem_proto_init() }
func file_pkg_rpc_cdnsystem_cdnsystem_proto_init() {
if File_pkg_rpc_cdnsystem_cdnsystem_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SeedRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PieceSeed); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_pkg_rpc_cdnsystem_cdnsystem_proto_goTypes,
DependencyIndexes: file_pkg_rpc_cdnsystem_cdnsystem_proto_depIdxs,
MessageInfos: file_pkg_rpc_cdnsystem_cdnsystem_proto_msgTypes,
}.Build()
File_pkg_rpc_cdnsystem_cdnsystem_proto = out.File
file_pkg_rpc_cdnsystem_cdnsystem_proto_rawDesc = nil
file_pkg_rpc_cdnsystem_cdnsystem_proto_goTypes = nil
file_pkg_rpc_cdnsystem_cdnsystem_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// SeederClient is the client API for Seeder service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type SeederClient interface {
// Generate seeds and return to scheduler
ObtainSeeds(ctx context.Context, in *SeedRequest, opts ...grpc.CallOption) (Seeder_ObtainSeedsClient, error)
// Get piece tasks from cdn
GetPieceTasks(ctx context.Context, in *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error)
// Sync piece tasks with other peers
SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (Seeder_SyncPieceTasksClient, error)
}
type seederClient struct {
cc grpc.ClientConnInterface
}
func NewSeederClient(cc grpc.ClientConnInterface) SeederClient {
return &seederClient{cc}
}
func (c *seederClient) ObtainSeeds(ctx context.Context, in *SeedRequest, opts ...grpc.CallOption) (Seeder_ObtainSeedsClient, error) {
stream, err := c.cc.NewStream(ctx, &_Seeder_serviceDesc.Streams[0], "/cdnsystem.Seeder/ObtainSeeds", opts...)
if err != nil {
return nil, err
}
x := &seederObtainSeedsClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type Seeder_ObtainSeedsClient interface {
Recv() (*PieceSeed, error)
grpc.ClientStream
}
type seederObtainSeedsClient struct {
grpc.ClientStream
}
func (x *seederObtainSeedsClient) Recv() (*PieceSeed, error) {
m := new(PieceSeed)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *seederClient) GetPieceTasks(ctx context.Context, in *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) {
out := new(base.PiecePacket)
err := c.cc.Invoke(ctx, "/cdnsystem.Seeder/GetPieceTasks", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *seederClient) SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (Seeder_SyncPieceTasksClient, error) {
stream, err := c.cc.NewStream(ctx, &_Seeder_serviceDesc.Streams[1], "/cdnsystem.Seeder/SyncPieceTasks", opts...)
if err != nil {
return nil, err
}
x := &seederSyncPieceTasksClient{stream}
return x, nil
}
type Seeder_SyncPieceTasksClient interface {
Send(*base.PieceTaskRequest) error
Recv() (*base.PiecePacket, error)
grpc.ClientStream
}
type seederSyncPieceTasksClient struct {
grpc.ClientStream
}
func (x *seederSyncPieceTasksClient) Send(m *base.PieceTaskRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *seederSyncPieceTasksClient) Recv() (*base.PiecePacket, error) {
m := new(base.PiecePacket)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// SeederServer is the server API for Seeder service.
type SeederServer interface {
// Generate seeds and return to scheduler
ObtainSeeds(*SeedRequest, Seeder_ObtainSeedsServer) error
// Get piece tasks from cdn
GetPieceTasks(context.Context, *base.PieceTaskRequest) (*base.PiecePacket, error)
// Sync piece tasks with other peers
SyncPieceTasks(Seeder_SyncPieceTasksServer) error
}
// UnimplementedSeederServer can be embedded to have forward compatible implementations.
type UnimplementedSeederServer struct {
}
func (*UnimplementedSeederServer) ObtainSeeds(*SeedRequest, Seeder_ObtainSeedsServer) error {
return status.Errorf(codes.Unimplemented, "method ObtainSeeds not implemented")
}
func (*UnimplementedSeederServer) GetPieceTasks(context.Context, *base.PieceTaskRequest) (*base.PiecePacket, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetPieceTasks not implemented")
}
func (*UnimplementedSeederServer) SyncPieceTasks(Seeder_SyncPieceTasksServer) error {
return status.Errorf(codes.Unimplemented, "method SyncPieceTasks not implemented")
}
func RegisterSeederServer(s *grpc.Server, srv SeederServer) {
s.RegisterService(&_Seeder_serviceDesc, srv)
}
func _Seeder_ObtainSeeds_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(SeedRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(SeederServer).ObtainSeeds(m, &seederObtainSeedsServer{stream})
}
type Seeder_ObtainSeedsServer interface {
Send(*PieceSeed) error
grpc.ServerStream
}
type seederObtainSeedsServer struct {
grpc.ServerStream
}
func (x *seederObtainSeedsServer) Send(m *PieceSeed) error {
return x.ServerStream.SendMsg(m)
}
func _Seeder_GetPieceTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(base.PieceTaskRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SeederServer).GetPieceTasks(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/cdnsystem.Seeder/GetPieceTasks",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SeederServer).GetPieceTasks(ctx, req.(*base.PieceTaskRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Seeder_SyncPieceTasks_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SeederServer).SyncPieceTasks(&seederSyncPieceTasksServer{stream})
}
type Seeder_SyncPieceTasksServer interface {
Send(*base.PiecePacket) error
Recv() (*base.PieceTaskRequest, error)
grpc.ServerStream
}
type seederSyncPieceTasksServer struct {
grpc.ServerStream
}
func (x *seederSyncPieceTasksServer) Send(m *base.PiecePacket) error {
return x.ServerStream.SendMsg(m)
}
func (x *seederSyncPieceTasksServer) Recv() (*base.PieceTaskRequest, error) {
m := new(base.PieceTaskRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _Seeder_serviceDesc = grpc.ServiceDesc{
ServiceName: "cdnsystem.Seeder",
HandlerType: (*SeederServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetPieceTasks",
Handler: _Seeder_GetPieceTasks_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "ObtainSeeds",
Handler: _Seeder_ObtainSeeds_Handler,
ServerStreams: true,
},
{
StreamName: "SyncPieceTasks",
Handler: _Seeder_SyncPieceTasks_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "pkg/rpc/cdnsystem/cdnsystem.proto",
}
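A hedged sketch of driving the bidirectional SyncPieceTasks stream generated above: send a request, read packets, and advance a cursor. The windowing below is deliberately simplistic:

package main

import (
	"context"
	"errors"
	"io"

	"d7y.io/dragonfly/v2/pkg/rpc/base"
	"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
)

// syncPieces keeps requesting the next window of pieces until the server
// closes the stream.
func syncPieces(ctx context.Context, client cdnsystem.SeederClient, req *base.PieceTaskRequest) error {
	stream, err := client.SyncPieceTasks(ctx)
	if err != nil {
		return err
	}
	if err := stream.Send(req); err != nil {
		return err
	}
	next := req.StartNum
	for {
		packet, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil // server closed the stream normally
		}
		if err != nil {
			return err
		}
		next += uint32(len(packet.PieceInfos))
		if err := stream.Send(&base.PieceTaskRequest{
			TaskId:   req.TaskId,
			SrcPid:   req.SrcPid,
			DstPid:   req.DstPid,
			StartNum: next,
			Limit:    16, // illustrative window size
		}); err != nil {
			return err
		}
	}
}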

View File

@ -1,237 +0,0 @@
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: pkg/rpc/cdnsystem/cdnsystem.proto
package cdnsystem
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"strings"
"time"
"unicode/utf8"
"google.golang.org/protobuf/types/known/anypb"
)
// ensure the imports are used
var (
_ = bytes.MinRead
_ = errors.New("")
_ = fmt.Print
_ = utf8.UTFMax
_ = (*regexp.Regexp)(nil)
_ = (*strings.Reader)(nil)
_ = net.IPv4len
_ = time.Duration(0)
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
)
// Validate checks the field values on SeedRequest with the rules defined in
// the proto definition for this message. If any rules are violated, an error
// is returned.
func (m *SeedRequest) Validate() error {
if m == nil {
return nil
}
if utf8.RuneCountInString(m.GetTaskId()) < 1 {
return SeedRequestValidationError{
field: "TaskId",
reason: "value length must be at least 1 runes",
}
}
if uri, err := url.Parse(m.GetUrl()); err != nil {
return SeedRequestValidationError{
field: "Url",
reason: "value must be a valid URI",
cause: err,
}
} else if !uri.IsAbs() {
return SeedRequestValidationError{
field: "Url",
reason: "value must be absolute",
}
}
if v, ok := interface{}(m.GetUrlMeta()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return SeedRequestValidationError{
field: "UrlMeta",
reason: "embedded message failed validation",
cause: err,
}
}
}
return nil
}
// SeedRequestValidationError is the validation error returned by
// SeedRequest.Validate if the designated constraints aren't met.
type SeedRequestValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e SeedRequestValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e SeedRequestValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e SeedRequestValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e SeedRequestValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e SeedRequestValidationError) ErrorName() string { return "SeedRequestValidationError" }
// Error satisfies the builtin error interface
func (e SeedRequestValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sSeedRequest.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = SeedRequestValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = SeedRequestValidationError{}
// Validate checks the field values on PieceSeed with the rules defined in the
// proto definition for this message. If any rules are violated, an error is returned.
func (m *PieceSeed) Validate() error {
if m == nil {
return nil
}
if utf8.RuneCountInString(m.GetPeerId()) < 1 {
return PieceSeedValidationError{
field: "PeerId",
reason: "value length must be at least 1 runes",
}
}
if utf8.RuneCountInString(m.GetHostId()) < 1 {
return PieceSeedValidationError{
field: "HostId",
reason: "value length must be at least 1 runes",
}
}
if v, ok := interface{}(m.GetPieceInfo()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return PieceSeedValidationError{
field: "PieceInfo",
reason: "embedded message failed validation",
cause: err,
}
}
}
// no validation rules for Done
// no validation rules for ContentLength
// no validation rules for TotalPieceCount
// no validation rules for BeginTime
// no validation rules for EndTime
if v, ok := interface{}(m.GetExtendAttribute()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return PieceSeedValidationError{
field: "ExtendAttribute",
reason: "embedded message failed validation",
cause: err,
}
}
}
return nil
}
// PieceSeedValidationError is the validation error returned by
// PieceSeed.Validate if the designated constraints aren't met.
type PieceSeedValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e PieceSeedValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e PieceSeedValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e PieceSeedValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e PieceSeedValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e PieceSeedValidationError) ErrorName() string { return "PieceSeedValidationError" }
// Error satisfies the builtin error interface
func (e PieceSeedValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sPieceSeed.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = PieceSeedValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = PieceSeedValidationError{}
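
For reference, a minimal sketch of how a caller might drive these generated Validate methods before issuing an RPC. It assumes the message types are imported from the new d7y.io/api module introduced by this commit; the task ID and URL values are illustrative.

package main

import (
	"fmt"

	cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1"
)

func main() {
	req := &cdnsystemv1.SeedRequest{
		TaskId: "task-1234",                    // must be at least 1 rune
		Url:    "https://example.com/blob.tar", // must parse as an absolute URI
	}
	// Validate returns a SeedRequestValidationError describing the first
	// violated constraint, or nil when the request is well formed.
	if err := req.Validate(); err != nil {
		fmt.Printf("invalid seed request: %v\n", err)
		return
	}
	fmt.Println("seed request passed validation")
}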

View File

@ -1,63 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package cdnsystem;
import "pkg/rpc/base/base.proto";
import "validate/validate.proto";
option go_package = "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem";
message SeedRequest{
string task_id = 1 [(validate.rules).string.min_len = 1];
string url = 2 [(validate.rules).string.uri = true];
base.UrlMeta url_meta = 3;
}
// keep piece metadata and data separate
// check each piece's md5, the signature over the md5s, and the total content length
message PieceSeed{
// peer id for the cdn node; it must be suffixed with _CDN
string peer_id = 2 [(validate.rules).string.min_len = 1];
// cdn host id
string host_id = 3 [(validate.rules).string.min_len = 1];
base.PieceInfo piece_info = 4;
// whether all seeds have been downloaded
bool done = 5;
// total content length for the url; content_length < 0 means the length is unknown
int64 content_length = 6;
// total piece count; -1 means the task is still downloading or has failed
int32 total_piece_count = 7;
// begin time of the piece download
uint64 begin_time = 8;
// end time of the piece download
uint64 end_time = 9;
// task extend attribute
base.ExtendAttribute extend_attribute = 10;
}
// CDN System RPC Service
service Seeder{
// Generate seeds and return to scheduler
rpc ObtainSeeds(SeedRequest)returns(stream PieceSeed);
// Get piece tasks from cdn
rpc GetPieceTasks(base.PieceTaskRequest)returns(base.PiecePacket);
// Sync piece tasks with other peers
rpc SyncPieceTasks(stream base.PieceTaskRequest)returns(stream base.PiecePacket);
}
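
As context for the client-side changes that follow, here is a sketch of consuming the Seeder service above through the plain generated gRPC stubs. The address is a placeholder, and the stub package is assumed to be the d7y.io/api location this commit migrates to.

package main

import (
	"context"
	"io"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1"
)

func main() {
	// Placeholder address; a real deployment would take this from configuration.
	conn, err := grpc.Dial("cdn.example.com:8003", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	stream, err := cdnsystemv1.NewSeederClient(conn).ObtainSeeds(context.Background(), &cdnsystemv1.SeedRequest{
		TaskId: "task-1234",
		Url:    "https://example.com/blob.tar",
	})
	if err != nil {
		log.Fatalf("ObtainSeeds: %v", err)
	}

	// PieceSeeds arrive as a server stream; Done marks the last seed.
	for {
		seed, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatalf("recv: %v", err)
		}
		log.Printf("seed from peer %s, done=%v", seed.PeerId, seed.Done)
	}
}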

View File

@ -26,11 +26,12 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1"
commonv1 "d7y.io/api/pkg/apis/common/v1"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/dfnet" "d7y.io/dragonfly/v2/pkg/dfnet"
"d7y.io/dragonfly/v2/pkg/rpc" "d7y.io/dragonfly/v2/pkg/rpc"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
) )
func GetClientByAddr(addrs []dfnet.NetAddr, opts ...grpc.DialOption) CdnClient { func GetClientByAddr(addrs []dfnet.NetAddr, opts ...grpc.DialOption) CdnClient {
@ -61,13 +62,13 @@ func GetElasticClientByAddrs(addrs []dfnet.NetAddr, opts ...grpc.DialOption) (Cd
return elasticCdnClient, nil return elasticCdnClient, nil
} }
// CdnClient see cdnsystem.CdnClient // CdnClient see cdnsystemv1.CdnClient
type CdnClient interface { type CdnClient interface {
ObtainSeeds(ctx context.Context, sr *cdnsystem.SeedRequest, opts ...grpc.CallOption) (*PieceSeedStream, error) ObtainSeeds(ctx context.Context, sr *cdnsystemv1.SeedRequest, opts ...grpc.CallOption) (*PieceSeedStream, error)
GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (*commonv1.PiecePacket, error)
SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (cdnsystem.Seeder_SyncPieceTasksClient, error) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (cdnsystemv1.Seeder_SyncPieceTasksClient, error)
UpdateState(addrs []dfnet.NetAddr) UpdateState(addrs []dfnet.NetAddr)
@ -80,27 +81,27 @@ type cdnClient struct {
var _ CdnClient = (*cdnClient)(nil) var _ CdnClient = (*cdnClient)(nil)
func (cc *cdnClient) getCdnClient(key string, stick bool) (cdnsystem.SeederClient, string, error) { func (cc *cdnClient) getCdnClient(key string, stick bool) (cdnsystemv1.SeederClient, string, error) {
clientConn, err := cc.Connection.GetClientConn(key, stick) clientConn, err := cc.Connection.GetClientConn(key, stick)
if err != nil { if err != nil {
return nil, "", fmt.Errorf("get ClientConn for hashKey %s: %w", key, err) return nil, "", fmt.Errorf("get ClientConn for hashKey %s: %w", key, err)
} }
return cdnsystem.NewSeederClient(clientConn), clientConn.Target(), nil return cdnsystemv1.NewSeederClient(clientConn), clientConn.Target(), nil
} }
func (cc *cdnClient) getSeederClientWithTarget(target string) (cdnsystem.SeederClient, error) { func (cc *cdnClient) getSeederClientWithTarget(target string) (cdnsystemv1.SeederClient, error) {
conn, err := cc.Connection.GetClientConnByTarget(target) conn, err := cc.Connection.GetClientConnByTarget(target)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return cdnsystem.NewSeederClient(conn), nil return cdnsystemv1.NewSeederClient(conn), nil
} }
func (cc *cdnClient) ObtainSeeds(ctx context.Context, sr *cdnsystem.SeedRequest, opts ...grpc.CallOption) (*PieceSeedStream, error) { func (cc *cdnClient) ObtainSeeds(ctx context.Context, sr *cdnsystemv1.SeedRequest, opts ...grpc.CallOption) (*PieceSeedStream, error) {
return newPieceSeedStream(ctx, cc, sr.TaskId, sr, opts) return newPieceSeedStream(ctx, cc, sr.TaskId, sr, opts)
} }
func (cc *cdnClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { func (cc *cdnClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (*commonv1.PiecePacket, error) {
client, err := cc.getSeederClientWithTarget(addr.GetEndpoint()) client, err := cc.getSeederClientWithTarget(addr.GetEndpoint())
if err != nil { if err != nil {
return nil, err return nil, err
@ -108,7 +109,7 @@ func (cc *cdnClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req
return client.GetPieceTasks(ctx, req, opts...) return client.GetPieceTasks(ctx, req, opts...)
} }
func (cc *cdnClient) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *base.PieceTaskRequest, opts ...grpc.CallOption) (cdnsystem.Seeder_SyncPieceTasksClient, error) { func (cc *cdnClient) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (cdnsystemv1.Seeder_SyncPieceTasksClient, error) {
client, err := cc.getSeederClientWithTarget(addr.GetEndpoint()) client, err := cc.getSeederClientWithTarget(addr.GetEndpoint())
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -8,9 +8,9 @@ import (
context "context" context "context"
reflect "reflect" reflect "reflect"
v1 "d7y.io/api/pkg/apis/cdnsystem/v1"
v10 "d7y.io/api/pkg/apis/common/v1"
dfnet "d7y.io/dragonfly/v2/pkg/dfnet" dfnet "d7y.io/dragonfly/v2/pkg/dfnet"
base "d7y.io/dragonfly/v2/pkg/rpc/base"
cdnsystem "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
client "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client" client "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/client"
gomock "github.com/golang/mock/gomock" gomock "github.com/golang/mock/gomock"
grpc "google.golang.org/grpc" grpc "google.golang.org/grpc"
@ -54,14 +54,14 @@ func (mr *MockCdnClientMockRecorder) Close() *gomock.Call {
} }
// GetPieceTasks mocks base method. // GetPieceTasks mocks base method.
func (m *MockCdnClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { func (m *MockCdnClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, req *v10.PieceTaskRequest, opts ...grpc.CallOption) (*v10.PiecePacket, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
varargs := []interface{}{ctx, addr, req} varargs := []interface{}{ctx, addr, req}
for _, a := range opts { for _, a := range opts {
varargs = append(varargs, a) varargs = append(varargs, a)
} }
ret := m.ctrl.Call(m, "GetPieceTasks", varargs...) ret := m.ctrl.Call(m, "GetPieceTasks", varargs...)
ret0, _ := ret[0].(*base.PiecePacket) ret0, _ := ret[0].(*v10.PiecePacket)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
@ -74,7 +74,7 @@ func (mr *MockCdnClientMockRecorder) GetPieceTasks(ctx, addr, req interface{}, o
} }
// ObtainSeeds mocks base method. // ObtainSeeds mocks base method.
func (m *MockCdnClient) ObtainSeeds(ctx context.Context, sr *cdnsystem.SeedRequest, opts ...grpc.CallOption) (*client.PieceSeedStream, error) { func (m *MockCdnClient) ObtainSeeds(ctx context.Context, sr *v1.SeedRequest, opts ...grpc.CallOption) (*client.PieceSeedStream, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
varargs := []interface{}{ctx, sr} varargs := []interface{}{ctx, sr}
for _, a := range opts { for _, a := range opts {
@ -94,14 +94,14 @@ func (mr *MockCdnClientMockRecorder) ObtainSeeds(ctx, sr interface{}, opts ...in
} }
// SyncPieceTasks mocks base method. // SyncPieceTasks mocks base method.
func (m *MockCdnClient) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (cdnsystem.Seeder_SyncPieceTasksClient, error) { func (m *MockCdnClient) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *v10.PieceTaskRequest, opts ...grpc.CallOption) (v1.Seeder_SyncPieceTasksClient, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
varargs := []interface{}{ctx, addr, ptr} varargs := []interface{}{ctx, addr, ptr}
for _, a := range opts { for _, a := range opts {
varargs = append(varargs, a) varargs = append(varargs, a)
} }
ret := m.ctrl.Call(m, "SyncPieceTasks", varargs...) ret := m.ctrl.Call(m, "SyncPieceTasks", varargs...)
ret0, _ := ret[0].(cdnsystem.Seeder_SyncPieceTasksClient) ret0, _ := ret[0].(v1.Seeder_SyncPieceTasksClient)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }

View File

@ -25,10 +25,11 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1"
"d7y.io/dragonfly/v2/internal/dferrors" "d7y.io/dragonfly/v2/internal/dferrors"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc" "d7y.io/dragonfly/v2/pkg/rpc"
"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
) )
type PieceSeedStream struct { type PieceSeedStream struct {
@ -36,16 +37,16 @@ type PieceSeedStream struct {
sc *cdnClient sc *cdnClient
ctx context.Context ctx context.Context
hashKey string hashKey string
sr *cdnsystem.SeedRequest sr *cdnsystemv1.SeedRequest
opts []grpc.CallOption opts []grpc.CallOption
// stream for one client // stream for one client
stream cdnsystem.Seeder_ObtainSeedsClient stream cdnsystemv1.Seeder_ObtainSeedsClient
// server list which cannot serve // server list which cannot serve
failedServers []string failedServers []string
rpc.RetryMeta rpc.RetryMeta
} }
func newPieceSeedStream(ctx context.Context, sc *cdnClient, hashKey string, sr *cdnsystem.SeedRequest, opts []grpc.CallOption) (*PieceSeedStream, error) { func newPieceSeedStream(ctx context.Context, sc *cdnClient, hashKey string, sr *cdnsystemv1.SeedRequest, opts []grpc.CallOption) (*PieceSeedStream, error) {
pss := &PieceSeedStream{ pss := &PieceSeedStream{
sc: sc, sc: sc,
ctx: ctx, ctx: ctx,
@ -68,7 +69,7 @@ func newPieceSeedStream(ctx context.Context, sc *cdnClient, hashKey string, sr *
func (pss *PieceSeedStream) initStream() error { func (pss *PieceSeedStream) initStream() error {
var target string var target string
stream, err := rpc.ExecuteWithRetry(func() (any, error) { stream, err := rpc.ExecuteWithRetry(func() (any, error) {
var client cdnsystem.SeederClient var client cdnsystemv1.SeederClient
var err error var err error
client, target, err = pss.sc.getCdnClient(pss.hashKey, false) client, target, err = pss.sc.getCdnClient(pss.hashKey, false)
if err != nil { if err != nil {
@ -83,17 +84,17 @@ func (pss *PieceSeedStream) initStream() error {
logger.WithTaskID(pss.hashKey).Errorf("initStream: invoke cdn node %s ObtainSeeds failed: %v", target, err) logger.WithTaskID(pss.hashKey).Errorf("initStream: invoke cdn node %s ObtainSeeds failed: %v", target, err)
return pss.replaceClient(pss.hashKey, err) return pss.replaceClient(pss.hashKey, err)
} }
pss.stream = stream.(cdnsystem.Seeder_ObtainSeedsClient) pss.stream = stream.(cdnsystemv1.Seeder_ObtainSeedsClient)
pss.StreamTimes = 1 pss.StreamTimes = 1
return nil return nil
} }
func (pss *PieceSeedStream) Recv() (ps *cdnsystem.PieceSeed, err error) { func (pss *PieceSeedStream) Recv() (ps *cdnsystemv1.PieceSeed, err error) {
pss.sc.UpdateAccessNodeMapByHashKey(pss.hashKey) pss.sc.UpdateAccessNodeMapByHashKey(pss.hashKey)
return pss.stream.Recv() return pss.stream.Recv()
} }
func (pss *PieceSeedStream) retryRecv(cause error) (*cdnsystem.PieceSeed, error) { func (pss *PieceSeedStream) retryRecv(cause error) (*cdnsystemv1.PieceSeed, error) {
if status.Code(cause) == codes.DeadlineExceeded || status.Code(cause) == codes.Canceled { if status.Code(cause) == codes.DeadlineExceeded || status.Code(cause) == codes.Canceled {
return nil, cause return nil, cause
} }
@ -111,7 +112,7 @@ func (pss *PieceSeedStream) replaceStream(cause error) error {
} }
var target string var target string
stream, err := rpc.ExecuteWithRetry(func() (any, error) { stream, err := rpc.ExecuteWithRetry(func() (any, error) {
var client cdnsystem.SeederClient var client cdnsystemv1.SeederClient
var err error var err error
client, target, err = pss.sc.getCdnClient(pss.hashKey, true) client, target, err = pss.sc.getCdnClient(pss.hashKey, true)
if err != nil { if err != nil {
@ -123,7 +124,7 @@ func (pss *PieceSeedStream) replaceStream(cause error) error {
logger.WithTaskID(pss.hashKey).Infof("replaceStream: invoke cdn node %s ObtainSeeds failed: %v", target, err) logger.WithTaskID(pss.hashKey).Infof("replaceStream: invoke cdn node %s ObtainSeeds failed: %v", target, err)
return pss.replaceStream(cause) return pss.replaceStream(cause)
} }
pss.stream = stream.(cdnsystem.Seeder_ObtainSeedsClient) pss.stream = stream.(cdnsystemv1.Seeder_ObtainSeedsClient)
pss.StreamTimes++ pss.StreamTimes++
return nil return nil
} }
@ -137,7 +138,7 @@ func (pss *PieceSeedStream) replaceClient(key string, cause error) error {
pss.failedServers = append(pss.failedServers, preNode) pss.failedServers = append(pss.failedServers, preNode)
var target string var target string
stream, err := rpc.ExecuteWithRetry(func() (any, error) { stream, err := rpc.ExecuteWithRetry(func() (any, error) {
var client cdnsystem.SeederClient var client cdnsystemv1.SeederClient
var err error var err error
client, target, err = pss.sc.getCdnClient(key, true) client, target, err = pss.sc.getCdnClient(key, true)
if err != nil { if err != nil {
@ -149,7 +150,7 @@ func (pss *PieceSeedStream) replaceClient(key string, cause error) error {
logger.WithTaskID(pss.hashKey).Infof("replaceClient: invoke cdn node %s ObtainSeeds failed: %v", target, err) logger.WithTaskID(pss.hashKey).Infof("replaceClient: invoke cdn node %s ObtainSeeds failed: %v", target, err)
return pss.replaceClient(key, cause) return pss.replaceClient(key, cause)
} }
pss.stream = stream.(cdnsystem.Seeder_ObtainSeedsClient) pss.stream = stream.(cdnsystemv1.Seeder_ObtainSeedsClient)
pss.StreamTimes = 1 pss.StreamTimes = 1
return nil return nil
} }
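
PieceSeedStream follows a replace-on-failure pattern: initStream dials the preferred node, retryRecv rebuilds the stream on transient errors, and replaceClient blacklists a failed node before hashing to the next one. The helper below is a self-contained sketch of the retry core only; it is not Dragonfly's rpc.ExecuteWithRetry, whose full signature is elided in this diff.

package retry

import "time"

// retryWithBackoff calls fn up to maxAttempts times, sleeping a fixed
// backoff between attempts and returning the last error on exhaustion.
// It mirrors the shape of the retries used by PieceSeedStream above.
func retryWithBackoff(fn func() (any, error), maxAttempts int, backoff time.Duration) (any, error) {
	var lastErr error
	for attempt := 0; attempt < maxAttempts; attempt++ {
		v, err := fn()
		if err == nil {
			return v, nil
		}
		lastErr = err
		time.Sleep(backoff)
	}
	return nil, lastErr
}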

View File

@ -1,678 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: cdnsystem/cdnsystem.pb.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
base "d7y.io/dragonfly/v2/pkg/rpc/base"
cdnsystem "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
gomock "github.com/golang/mock/gomock"
grpc "google.golang.org/grpc"
metadata "google.golang.org/grpc/metadata"
)
// MockSeederClient is a mock of SeederClient interface.
type MockSeederClient struct {
ctrl *gomock.Controller
recorder *MockSeederClientMockRecorder
}
// MockSeederClientMockRecorder is the mock recorder for MockSeederClient.
type MockSeederClientMockRecorder struct {
mock *MockSeederClient
}
// NewMockSeederClient creates a new mock instance.
func NewMockSeederClient(ctrl *gomock.Controller) *MockSeederClient {
mock := &MockSeederClient{ctrl: ctrl}
mock.recorder = &MockSeederClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSeederClient) EXPECT() *MockSeederClientMockRecorder {
return m.recorder
}
// GetPieceTasks mocks base method.
func (m *MockSeederClient) GetPieceTasks(ctx context.Context, in *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetPieceTasks", varargs...)
ret0, _ := ret[0].(*base.PiecePacket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPieceTasks indicates an expected call of GetPieceTasks.
func (mr *MockSeederClientMockRecorder) GetPieceTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockSeederClient)(nil).GetPieceTasks), varargs...)
}
// ObtainSeeds mocks base method.
func (m *MockSeederClient) ObtainSeeds(ctx context.Context, in *cdnsystem.SeedRequest, opts ...grpc.CallOption) (cdnsystem.Seeder_ObtainSeedsClient, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ObtainSeeds", varargs...)
ret0, _ := ret[0].(cdnsystem.Seeder_ObtainSeedsClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ObtainSeeds indicates an expected call of ObtainSeeds.
func (mr *MockSeederClientMockRecorder) ObtainSeeds(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ObtainSeeds", reflect.TypeOf((*MockSeederClient)(nil).ObtainSeeds), varargs...)
}
// SyncPieceTasks mocks base method.
func (m *MockSeederClient) SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (cdnsystem.Seeder_SyncPieceTasksClient, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "SyncPieceTasks", varargs...)
ret0, _ := ret[0].(cdnsystem.Seeder_SyncPieceTasksClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SyncPieceTasks indicates an expected call of SyncPieceTasks.
func (mr *MockSeederClientMockRecorder) SyncPieceTasks(ctx interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPieceTasks", reflect.TypeOf((*MockSeederClient)(nil).SyncPieceTasks), varargs...)
}
// MockSeeder_ObtainSeedsClient is a mock of Seeder_ObtainSeedsClient interface.
type MockSeeder_ObtainSeedsClient struct {
ctrl *gomock.Controller
recorder *MockSeeder_ObtainSeedsClientMockRecorder
}
// MockSeeder_ObtainSeedsClientMockRecorder is the mock recorder for MockSeeder_ObtainSeedsClient.
type MockSeeder_ObtainSeedsClientMockRecorder struct {
mock *MockSeeder_ObtainSeedsClient
}
// NewMockSeeder_ObtainSeedsClient creates a new mock instance.
func NewMockSeeder_ObtainSeedsClient(ctrl *gomock.Controller) *MockSeeder_ObtainSeedsClient {
mock := &MockSeeder_ObtainSeedsClient{ctrl: ctrl}
mock.recorder = &MockSeeder_ObtainSeedsClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSeeder_ObtainSeedsClient) EXPECT() *MockSeeder_ObtainSeedsClientMockRecorder {
return m.recorder
}
// CloseSend mocks base method.
func (m *MockSeeder_ObtainSeedsClient) CloseSend() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CloseSend")
ret0, _ := ret[0].(error)
return ret0
}
// CloseSend indicates an expected call of CloseSend.
func (mr *MockSeeder_ObtainSeedsClientMockRecorder) CloseSend() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).CloseSend))
}
// Context mocks base method.
func (m *MockSeeder_ObtainSeedsClient) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockSeeder_ObtainSeedsClientMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).Context))
}
// Header mocks base method.
func (m *MockSeeder_ObtainSeedsClient) Header() (metadata.MD, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Header")
ret0, _ := ret[0].(metadata.MD)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Header indicates an expected call of Header.
func (mr *MockSeeder_ObtainSeedsClientMockRecorder) Header() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).Header))
}
// Recv mocks base method.
func (m *MockSeeder_ObtainSeedsClient) Recv() (*cdnsystem.PieceSeed, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*cdnsystem.PieceSeed)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Recv indicates an expected call of Recv.
func (mr *MockSeeder_ObtainSeedsClientMockRecorder) Recv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).Recv))
}
// RecvMsg mocks base method.
func (m_2 *MockSeeder_ObtainSeedsClient) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockSeeder_ObtainSeedsClientMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).RecvMsg), m)
}
// SendMsg mocks base method.
func (m_2 *MockSeeder_ObtainSeedsClient) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockSeeder_ObtainSeedsClientMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).SendMsg), m)
}
// Trailer mocks base method.
func (m *MockSeeder_ObtainSeedsClient) Trailer() metadata.MD {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Trailer")
ret0, _ := ret[0].(metadata.MD)
return ret0
}
// Trailer indicates an expected call of Trailer.
func (mr *MockSeeder_ObtainSeedsClientMockRecorder) Trailer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockSeeder_ObtainSeedsClient)(nil).Trailer))
}
// MockSeeder_SyncPieceTasksClient is a mock of Seeder_SyncPieceTasksClient interface.
type MockSeeder_SyncPieceTasksClient struct {
ctrl *gomock.Controller
recorder *MockSeeder_SyncPieceTasksClientMockRecorder
}
// MockSeeder_SyncPieceTasksClientMockRecorder is the mock recorder for MockSeeder_SyncPieceTasksClient.
type MockSeeder_SyncPieceTasksClientMockRecorder struct {
mock *MockSeeder_SyncPieceTasksClient
}
// NewMockSeeder_SyncPieceTasksClient creates a new mock instance.
func NewMockSeeder_SyncPieceTasksClient(ctrl *gomock.Controller) *MockSeeder_SyncPieceTasksClient {
mock := &MockSeeder_SyncPieceTasksClient{ctrl: ctrl}
mock.recorder = &MockSeeder_SyncPieceTasksClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSeeder_SyncPieceTasksClient) EXPECT() *MockSeeder_SyncPieceTasksClientMockRecorder {
return m.recorder
}
// CloseSend mocks base method.
func (m *MockSeeder_SyncPieceTasksClient) CloseSend() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CloseSend")
ret0, _ := ret[0].(error)
return ret0
}
// CloseSend indicates an expected call of CloseSend.
func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) CloseSend() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).CloseSend))
}
// Context mocks base method.
func (m *MockSeeder_SyncPieceTasksClient) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).Context))
}
// Header mocks base method.
func (m *MockSeeder_SyncPieceTasksClient) Header() (metadata.MD, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Header")
ret0, _ := ret[0].(metadata.MD)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Header indicates an expected call of Header.
func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) Header() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).Header))
}
// Recv mocks base method.
func (m *MockSeeder_SyncPieceTasksClient) Recv() (*base.PiecePacket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*base.PiecePacket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Recv indicates an expected call of Recv.
func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) Recv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).Recv))
}
// RecvMsg mocks base method.
func (m_2 *MockSeeder_SyncPieceTasksClient) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockSeeder_SyncPieceTasksClient) Send(arg0 *base.PieceTaskRequest) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Send indicates an expected call of Send.
func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).Send), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockSeeder_SyncPieceTasksClient) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).SendMsg), m)
}
// Trailer mocks base method.
func (m *MockSeeder_SyncPieceTasksClient) Trailer() metadata.MD {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Trailer")
ret0, _ := ret[0].(metadata.MD)
return ret0
}
// Trailer indicates an expected call of Trailer.
func (mr *MockSeeder_SyncPieceTasksClientMockRecorder) Trailer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockSeeder_SyncPieceTasksClient)(nil).Trailer))
}
// MockSeederServer is a mock of SeederServer interface.
type MockSeederServer struct {
ctrl *gomock.Controller
recorder *MockSeederServerMockRecorder
}
// MockSeederServerMockRecorder is the mock recorder for MockSeederServer.
type MockSeederServerMockRecorder struct {
mock *MockSeederServer
}
// NewMockSeederServer creates a new mock instance.
func NewMockSeederServer(ctrl *gomock.Controller) *MockSeederServer {
mock := &MockSeederServer{ctrl: ctrl}
mock.recorder = &MockSeederServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSeederServer) EXPECT() *MockSeederServerMockRecorder {
return m.recorder
}
// GetPieceTasks mocks base method.
func (m *MockSeederServer) GetPieceTasks(arg0 context.Context, arg1 *base.PieceTaskRequest) (*base.PiecePacket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPieceTasks", arg0, arg1)
ret0, _ := ret[0].(*base.PiecePacket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPieceTasks indicates an expected call of GetPieceTasks.
func (mr *MockSeederServerMockRecorder) GetPieceTasks(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockSeederServer)(nil).GetPieceTasks), arg0, arg1)
}
// ObtainSeeds mocks base method.
func (m *MockSeederServer) ObtainSeeds(arg0 *cdnsystem.SeedRequest, arg1 cdnsystem.Seeder_ObtainSeedsServer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ObtainSeeds", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// ObtainSeeds indicates an expected call of ObtainSeeds.
func (mr *MockSeederServerMockRecorder) ObtainSeeds(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ObtainSeeds", reflect.TypeOf((*MockSeederServer)(nil).ObtainSeeds), arg0, arg1)
}
// SyncPieceTasks mocks base method.
func (m *MockSeederServer) SyncPieceTasks(arg0 cdnsystem.Seeder_SyncPieceTasksServer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SyncPieceTasks", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SyncPieceTasks indicates an expected call of SyncPieceTasks.
func (mr *MockSeederServerMockRecorder) SyncPieceTasks(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPieceTasks", reflect.TypeOf((*MockSeederServer)(nil).SyncPieceTasks), arg0)
}
// MockSeeder_ObtainSeedsServer is a mock of Seeder_ObtainSeedsServer interface.
type MockSeeder_ObtainSeedsServer struct {
ctrl *gomock.Controller
recorder *MockSeeder_ObtainSeedsServerMockRecorder
}
// MockSeeder_ObtainSeedsServerMockRecorder is the mock recorder for MockSeeder_ObtainSeedsServer.
type MockSeeder_ObtainSeedsServerMockRecorder struct {
mock *MockSeeder_ObtainSeedsServer
}
// NewMockSeeder_ObtainSeedsServer creates a new mock instance.
func NewMockSeeder_ObtainSeedsServer(ctrl *gomock.Controller) *MockSeeder_ObtainSeedsServer {
mock := &MockSeeder_ObtainSeedsServer{ctrl: ctrl}
mock.recorder = &MockSeeder_ObtainSeedsServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSeeder_ObtainSeedsServer) EXPECT() *MockSeeder_ObtainSeedsServerMockRecorder {
return m.recorder
}
// Context mocks base method.
func (m *MockSeeder_ObtainSeedsServer) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).Context))
}
// RecvMsg mocks base method.
func (m_2 *MockSeeder_ObtainSeedsServer) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockSeeder_ObtainSeedsServer) Send(arg0 *cdnsystem.PieceSeed) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Send indicates an expected call of Send.
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).Send), arg0)
}
// SendHeader mocks base method.
func (m *MockSeeder_ObtainSeedsServer) SendHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SendHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockSeeder_ObtainSeedsServer) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SendMsg), m)
}
// SetHeader mocks base method.
func (m *MockSeeder_ObtainSeedsServer) SetHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SetHeader), arg0)
}
// SetTrailer mocks base method.
func (m *MockSeeder_ObtainSeedsServer) SetTrailer(arg0 metadata.MD) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "SetTrailer", arg0)
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockSeeder_ObtainSeedsServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockSeeder_ObtainSeedsServer)(nil).SetTrailer), arg0)
}
// MockSeeder_SyncPieceTasksServer is a mock of Seeder_SyncPieceTasksServer interface.
type MockSeeder_SyncPieceTasksServer struct {
ctrl *gomock.Controller
recorder *MockSeeder_SyncPieceTasksServerMockRecorder
}
// MockSeeder_SyncPieceTasksServerMockRecorder is the mock recorder for MockSeeder_SyncPieceTasksServer.
type MockSeeder_SyncPieceTasksServerMockRecorder struct {
mock *MockSeeder_SyncPieceTasksServer
}
// NewMockSeeder_SyncPieceTasksServer creates a new mock instance.
func NewMockSeeder_SyncPieceTasksServer(ctrl *gomock.Controller) *MockSeeder_SyncPieceTasksServer {
mock := &MockSeeder_SyncPieceTasksServer{ctrl: ctrl}
mock.recorder = &MockSeeder_SyncPieceTasksServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSeeder_SyncPieceTasksServer) EXPECT() *MockSeeder_SyncPieceTasksServerMockRecorder {
return m.recorder
}
// Context mocks base method.
func (m *MockSeeder_SyncPieceTasksServer) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).Context))
}
// Recv mocks base method.
func (m *MockSeeder_SyncPieceTasksServer) Recv() (*base.PieceTaskRequest, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*base.PieceTaskRequest)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Recv indicates an expected call of Recv.
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) Recv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).Recv))
}
// RecvMsg mocks base method.
func (m_2 *MockSeeder_SyncPieceTasksServer) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockSeeder_SyncPieceTasksServer) Send(arg0 *base.PiecePacket) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Send indicates an expected call of Send.
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).Send), arg0)
}
// SendHeader mocks base method.
func (m *MockSeeder_SyncPieceTasksServer) SendHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SendHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockSeeder_SyncPieceTasksServer) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).SendMsg), m)
}
// SetHeader mocks base method.
func (m *MockSeeder_SyncPieceTasksServer) SetHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).SetHeader), arg0)
}
// SetTrailer mocks base method.
func (m *MockSeeder_SyncPieceTasksServer) SetTrailer(arg0 metadata.MD) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "SetTrailer", arg0)
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockSeeder_SyncPieceTasksServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockSeeder_SyncPieceTasksServer)(nil).SetTrailer), arg0)
}
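
These mocks are deleted because equivalents now ship with the d7y.io/api module. For reference, a typical gomock test against the removed MockSeederClient looked roughly like this; the mocks import path is assumed from the old file layout, and the task ID is illustrative.

package mocks_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	"d7y.io/dragonfly/v2/pkg/rpc/base"
	"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/mocks" // old path removed by this commit (assumed)
)

func TestGetPieceTasks(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Expect one GetPieceTasks call and stub a canned packet.
	seeder := mocks.NewMockSeederClient(ctrl)
	seeder.EXPECT().
		GetPieceTasks(gomock.Any(), gomock.Any()).
		Return(&base.PiecePacket{TaskId: "task-1234"}, nil)

	pkt, err := seeder.GetPieceTasks(context.Background(), &base.PieceTaskRequest{TaskId: "task-1234"})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if pkt.TaskId != "task-1234" {
		t.Fatalf("unexpected task id: %s", pkt.TaskId)
	}
}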

View File

@ -30,10 +30,11 @@ import (
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
commonv1 "d7y.io/api/pkg/apis/common/v1"
"d7y.io/dragonfly/v2/internal/dferrors" "d7y.io/dragonfly/v2/internal/dferrors"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/dfnet" "d7y.io/dragonfly/v2/pkg/dfnet"
"d7y.io/dragonfly/v2/pkg/rpc/base"
) )
const ( const (
@ -371,7 +372,7 @@ func (conn *Connection) TryMigrate(key string, cause error, exclusiveNodes []str
} }
// TODO recover findCandidateClientConn error // TODO recover findCandidateClientConn error
if e, ok := cause.(*dferrors.DfError); ok { if e, ok := cause.(*dferrors.DfError); ok {
if e.Code != base.Code_ResourceLacked { if e.Code != commonv1.Code_ResourceLacked {
return "", cause return "", cause
} }
} }
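
The TryMigrate hunk above branches on the Dragonfly error code carried inside a DfError. A sketch of that check in isolation, using only the types visible in the hunk:

package example

import (
	commonv1 "d7y.io/api/pkg/apis/common/v1"

	"d7y.io/dragonfly/v2/internal/dferrors"
)

// shouldMigrate reports whether a failed call justifies migrating to another
// node: per TryMigrate above, only ResourceLacked errors are migratable.
func shouldMigrate(cause error) bool {
	if e, ok := cause.(*dferrors.DfError); ok {
		return e.Code == commonv1.Code_ResourceLacked
	}
	return false
}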

View File

@ -28,10 +28,11 @@ import (
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
commonv1 "d7y.io/api/pkg/apis/common/v1"
"d7y.io/dragonfly/v2/internal/dferrors" "d7y.io/dragonfly/v2/internal/dferrors"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/math" "d7y.io/dragonfly/v2/pkg/math"
"d7y.io/dragonfly/v2/pkg/rpc/base"
) )
const ( const (
@ -195,7 +196,7 @@ func convertClientError(err error) error {
s := status.Convert(err) s := status.Convert(err)
for _, d := range s.Details() { for _, d := range s.Details() {
switch internal := d.(type) { switch internal := d.(type) {
case *base.GrpcDfError: case *commonv1.GrpcDfError:
return &dferrors.DfError{ return &dferrors.DfError{
Code: internal.Code, Code: internal.Code,
Message: internal.Message, Message: internal.Message,

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2020 The Dragonfly Authors * Copyright 2022 The Dragonfly Authors
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -22,14 +22,14 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
"d7y.io/dragonfly/v2/pkg/rpc/base" commonv1 "d7y.io/api/pkg/apis/common/v1"
) )
var EndOfPiece = int32(1) << 30 var EndOfPiece = int32(1) << 30
var BeginOfPiece = int32(-1) var BeginOfPiece = int32(-1)
func NewGrpcDfError(code base.Code, msg string) *base.GrpcDfError { func NewGrpcDfError(code commonv1.Code, msg string) *commonv1.GrpcDfError {
return &base.GrpcDfError{ return &commonv1.GrpcDfError{
Code: code, Code: code,
Message: msg, Message: msg,
} }
@ -37,7 +37,7 @@ func NewGrpcDfError(code base.Code, msg string) *base.GrpcDfError {
// NewResWithCodeAndMsg returns a response ptr with code and msg, // NewResWithCodeAndMsg returns a response ptr with code and msg,
// ptr is a expected type ptr. // ptr is a expected type ptr.
func NewResWithCodeAndMsg(ptr any, code base.Code, msg string) any { func NewResWithCodeAndMsg(ptr any, code commonv1.Code, msg string) any {
typ := reflect.TypeOf(ptr) typ := reflect.TypeOf(ptr)
v := reflect.New(typ.Elem()) v := reflect.New(typ.Elem())
@ -46,14 +46,14 @@ func NewResWithCodeAndMsg(ptr any, code base.Code, msg string) any {
func NewResWithErr(ptr any, err error) any { func NewResWithErr(ptr any, err error) any {
st := status.Convert(err) st := status.Convert(err)
var code base.Code var code commonv1.Code
switch st.Code() { switch st.Code() {
case codes.DeadlineExceeded: case codes.DeadlineExceeded:
code = base.Code_RequestTimeOut code = commonv1.Code_RequestTimeOut
case codes.OK: case codes.OK:
code = base.Code_Success code = commonv1.Code_Success
default: default:
code = base.Code_UnknownError code = commonv1.Code_UnknownError
} }
return NewResWithCodeAndMsg(ptr, code, st.Message()) return NewResWithCodeAndMsg(ptr, code, st.Message())
} }
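
convertClientError and NewResWithErr fold a gRPC status into a Dragonfly code. The mapping in NewResWithErr, extracted as a standalone sketch:

package example

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	commonv1 "d7y.io/api/pkg/apis/common/v1"
)

// codeFromGRPC mirrors the switch in NewResWithErr above: deadline
// expirations become RequestTimeOut, success stays Success, and anything
// else collapses to UnknownError.
func codeFromGRPC(err error) commonv1.Code {
	switch status.Convert(err).Code() {
	case codes.DeadlineExceeded:
		return commonv1.Code_RequestTimeOut
	case codes.OK:
		return commonv1.Code_Success
	default:
		return commonv1.Code_UnknownError
	}
}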

View File

@ -29,12 +29,13 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb" "google.golang.org/protobuf/types/known/emptypb"
commonv1 "d7y.io/api/pkg/apis/common/v1"
dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/dfnet" "d7y.io/dragonfly/v2/pkg/dfnet"
"d7y.io/dragonfly/v2/pkg/idgen" "d7y.io/dragonfly/v2/pkg/idgen"
"d7y.io/dragonfly/v2/pkg/rpc" "d7y.io/dragonfly/v2/pkg/rpc"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
) )
var _ DaemonClient = (*daemonClient)(nil) var _ DaemonClient = (*daemonClient)(nil)
@ -76,23 +77,23 @@ func GetElasticClientByAddrs(addrs []dfnet.NetAddr, opts ...grpc.DialOption) (Da
return elasticDaemonClient, nil return elasticDaemonClient, nil
} }
// DaemonClient see dfdaemon.DaemonClient // DaemonClient see dfdaemonv1.DaemonClient
type DaemonClient interface { type DaemonClient interface {
Download(ctx context.Context, req *dfdaemon.DownRequest, opts ...grpc.CallOption) (*DownResultStream, error) Download(ctx context.Context, req *dfdaemonv1.DownRequest, opts ...grpc.CallOption) (*DownResultStream, error)
GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (*commonv1.PiecePacket, error)
SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (dfdaemon.Daemon_SyncPieceTasksClient, error) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (dfdaemonv1.Daemon_SyncPieceTasksClient, error)
CheckHealth(ctx context.Context, target dfnet.NetAddr, opts ...grpc.CallOption) error CheckHealth(ctx context.Context, target dfnet.NetAddr, opts ...grpc.CallOption) error
StatTask(ctx context.Context, req *dfdaemon.StatTaskRequest, opts ...grpc.CallOption) error StatTask(ctx context.Context, req *dfdaemonv1.StatTaskRequest, opts ...grpc.CallOption) error
ImportTask(ctx context.Context, req *dfdaemon.ImportTaskRequest, opts ...grpc.CallOption) error ImportTask(ctx context.Context, req *dfdaemonv1.ImportTaskRequest, opts ...grpc.CallOption) error
ExportTask(ctx context.Context, req *dfdaemon.ExportTaskRequest, opts ...grpc.CallOption) error ExportTask(ctx context.Context, req *dfdaemonv1.ExportTaskRequest, opts ...grpc.CallOption) error
DeleteTask(ctx context.Context, req *dfdaemon.DeleteTaskRequest, opts ...grpc.CallOption) error DeleteTask(ctx context.Context, req *dfdaemonv1.DeleteTaskRequest, opts ...grpc.CallOption) error
Close() error Close() error
} }
@ -101,30 +102,30 @@ type daemonClient struct {
*rpc.Connection *rpc.Connection
} }
func (dc *daemonClient) getDaemonClient(key string, stick bool) (dfdaemon.DaemonClient, string, error) { func (dc *daemonClient) getDaemonClient(key string, stick bool) (dfdaemonv1.DaemonClient, string, error) {
clientConn, err := dc.Connection.GetClientConn(key, stick) clientConn, err := dc.Connection.GetClientConn(key, stick)
if err != nil { if err != nil {
return nil, "", err return nil, "", err
} }
return dfdaemon.NewDaemonClient(clientConn), clientConn.Target(), nil return dfdaemonv1.NewDaemonClient(clientConn), clientConn.Target(), nil
} }
func (dc *daemonClient) getDaemonClientWithTarget(target string) (dfdaemon.DaemonClient, error) { func (dc *daemonClient) getDaemonClientWithTarget(target string) (dfdaemonv1.DaemonClient, error) {
conn, err := dc.Connection.GetClientConnByTarget(target) conn, err := dc.Connection.GetClientConnByTarget(target)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return dfdaemon.NewDaemonClient(conn), nil return dfdaemonv1.NewDaemonClient(conn), nil
} }
func (dc *daemonClient) Download(ctx context.Context, req *dfdaemon.DownRequest, opts ...grpc.CallOption) (*DownResultStream, error) { func (dc *daemonClient) Download(ctx context.Context, req *dfdaemonv1.DownRequest, opts ...grpc.CallOption) (*DownResultStream, error) {
req.Uuid = uuid.New().String() req.Uuid = uuid.New().String()
// generate taskID // generate taskID
taskID := idgen.TaskID(req.Url, req.UrlMeta) taskID := idgen.TaskID(req.Url, req.UrlMeta)
return newDownResultStream(ctx, dc, taskID, req, opts) return newDownResultStream(ctx, dc, taskID, req, opts)
} }
func (dc *daemonClient) GetPieceTasks(ctx context.Context, target dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, func (dc *daemonClient) GetPieceTasks(ctx context.Context, target dfnet.NetAddr, ptr *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (*commonv1.PiecePacket,
error) { error) {
client, err := dc.getDaemonClientWithTarget(target.GetEndpoint()) client, err := dc.getDaemonClientWithTarget(target.GetEndpoint())
if err != nil { if err != nil {
@ -133,7 +134,7 @@ func (dc *daemonClient) GetPieceTasks(ctx context.Context, target dfnet.NetAddr,
return client.GetPieceTasks(ctx, ptr, opts...) return client.GetPieceTasks(ctx, ptr, opts...)
} }
func (dc *daemonClient) SyncPieceTasks(ctx context.Context, target dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (dfdaemon.Daemon_SyncPieceTasksClient, error) { func (dc *daemonClient) SyncPieceTasks(ctx context.Context, target dfnet.NetAddr, ptr *commonv1.PieceTaskRequest, opts ...grpc.CallOption) (dfdaemonv1.Daemon_SyncPieceTasksClient, error) {
client, err := dc.getDaemonClientWithTarget(target.GetEndpoint()) client, err := dc.getDaemonClientWithTarget(target.GetEndpoint())
if err != nil { if err != nil {
return nil, err return nil, err
@ -162,7 +163,7 @@ func (dc *daemonClient) CheckHealth(ctx context.Context, target dfnet.NetAddr, o
return return
} }
func (dc *daemonClient) StatTask(ctx context.Context, req *dfdaemon.StatTaskRequest, opts ...grpc.CallOption) error { func (dc *daemonClient) StatTask(ctx context.Context, req *dfdaemonv1.StatTaskRequest, opts ...grpc.CallOption) error {
// StatTask is a latency sensitive operation, so we don't retry & wait for daemon to start, // StatTask is a latency sensitive operation, so we don't retry & wait for daemon to start,
// we assume daemon is already running. // we assume daemon is already running.
taskID := idgen.TaskID(req.Url, req.UrlMeta) taskID := idgen.TaskID(req.Url, req.UrlMeta)
@ -175,7 +176,7 @@ func (dc *daemonClient) StatTask(ctx context.Context, req *dfdaemon.StatTaskRequ
return err return err
} }
func (dc *daemonClient) ImportTask(ctx context.Context, req *dfdaemon.ImportTaskRequest, opts ...grpc.CallOption) error { func (dc *daemonClient) ImportTask(ctx context.Context, req *dfdaemonv1.ImportTaskRequest, opts ...grpc.CallOption) error {
taskID := idgen.TaskID(req.Url, req.UrlMeta) taskID := idgen.TaskID(req.Url, req.UrlMeta)
client, _, err := dc.getDaemonClient(taskID, false) client, _, err := dc.getDaemonClient(taskID, false)
if err != nil { if err != nil {
@ -185,7 +186,7 @@ func (dc *daemonClient) ImportTask(ctx context.Context, req *dfdaemon.ImportTask
return err return err
} }
func (dc *daemonClient) ExportTask(ctx context.Context, req *dfdaemon.ExportTaskRequest, opts ...grpc.CallOption) error { func (dc *daemonClient) ExportTask(ctx context.Context, req *dfdaemonv1.ExportTaskRequest, opts ...grpc.CallOption) error {
taskID := idgen.TaskID(req.Url, req.UrlMeta) taskID := idgen.TaskID(req.Url, req.UrlMeta)
client, _, err := dc.getDaemonClient(taskID, false) client, _, err := dc.getDaemonClient(taskID, false)
if err != nil { if err != nil {
@ -195,7 +196,7 @@ func (dc *daemonClient) ExportTask(ctx context.Context, req *dfdaemon.ExportTask
return err return err
} }
func (dc *daemonClient) DeleteTask(ctx context.Context, req *dfdaemon.DeleteTaskRequest, opts ...grpc.CallOption) error { func (dc *daemonClient) DeleteTask(ctx context.Context, req *dfdaemonv1.DeleteTaskRequest, opts ...grpc.CallOption) error {
taskID := idgen.TaskID(req.Url, req.UrlMeta) taskID := idgen.TaskID(req.Url, req.UrlMeta)
client, _, err := dc.getDaemonClient(taskID, false) client, _, err := dc.getDaemonClient(taskID, false)
if err != nil { if err != nil {

View File

@ -25,25 +25,26 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"
"d7y.io/dragonfly/v2/internal/dferrors" "d7y.io/dragonfly/v2/internal/dferrors"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc" "d7y.io/dragonfly/v2/pkg/rpc"
"d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
) )
type DownResultStream struct { type DownResultStream struct {
dc *daemonClient dc *daemonClient
ctx context.Context ctx context.Context
hashKey string hashKey string
req *dfdaemon.DownRequest req *dfdaemonv1.DownRequest
opts []grpc.CallOption opts []grpc.CallOption
// stream for one client // stream for one client
stream dfdaemon.Daemon_DownloadClient stream dfdaemonv1.Daemon_DownloadClient
failedServers []string failedServers []string
rpc.RetryMeta rpc.RetryMeta
} }
func newDownResultStream(ctx context.Context, dc *daemonClient, hashKey string, req *dfdaemon.DownRequest, opts []grpc.CallOption) (*DownResultStream, error) { func newDownResultStream(ctx context.Context, dc *daemonClient, hashKey string, req *dfdaemonv1.DownRequest, opts []grpc.CallOption) (*DownResultStream, error) {
drs := &DownResultStream{ drs := &DownResultStream{
dc: dc, dc: dc,
ctx: ctx, ctx: ctx,
@ -67,7 +68,7 @@ func newDownResultStream(ctx context.Context, dc *daemonClient, hashKey string,
func (drs *DownResultStream) initStream() error { func (drs *DownResultStream) initStream() error {
var target string var target string
stream, err := rpc.ExecuteWithRetry(func() (any, error) { stream, err := rpc.ExecuteWithRetry(func() (any, error) {
var client dfdaemon.DaemonClient var client dfdaemonv1.DaemonClient
var err error var err error
client, target, err = drs.dc.getDaemonClient(drs.hashKey, false) client, target, err = drs.dc.getDaemonClient(drs.hashKey, false)
if err != nil { if err != nil {
@ -82,12 +83,12 @@ func (drs *DownResultStream) initStream() error {
logger.WithTaskID(drs.hashKey).Infof("initStream: invoke daemon node %s Download failed: %v", target, err) logger.WithTaskID(drs.hashKey).Infof("initStream: invoke daemon node %s Download failed: %v", target, err)
return drs.replaceClient(err) return drs.replaceClient(err)
} }
drs.stream = stream.(dfdaemon.Daemon_DownloadClient) drs.stream = stream.(dfdaemonv1.Daemon_DownloadClient)
drs.StreamTimes = 1 drs.StreamTimes = 1
return nil return nil
} }
func (drs *DownResultStream) Recv() (dr *dfdaemon.DownResult, err error) { func (drs *DownResultStream) Recv() (dr *dfdaemonv1.DownResult, err error) {
defer func() { defer func() {
if dr != nil { if dr != nil {
if dr.TaskId != drs.hashKey { if dr.TaskId != drs.hashKey {
@ -101,7 +102,7 @@ func (drs *DownResultStream) Recv() (dr *dfdaemon.DownResult, err error) {
return drs.stream.Recv() return drs.stream.Recv()
} }
func (drs *DownResultStream) retryRecv(cause error) (*dfdaemon.DownResult, error) { func (drs *DownResultStream) retryRecv(cause error) (*dfdaemonv1.DownResult, error) {
if status.Code(cause) == codes.DeadlineExceeded || status.Code(cause) == codes.Canceled { if status.Code(cause) == codes.DeadlineExceeded || status.Code(cause) == codes.Canceled {
return nil, cause return nil, cause
} }
@ -120,7 +121,7 @@ func (drs *DownResultStream) replaceStream(cause error) error {
} }
var target string var target string
stream, err := rpc.ExecuteWithRetry(func() (any, error) { stream, err := rpc.ExecuteWithRetry(func() (any, error) {
var client dfdaemon.DaemonClient var client dfdaemonv1.DaemonClient
var err error var err error
client, target, err = drs.dc.getDaemonClient(drs.hashKey, true) client, target, err = drs.dc.getDaemonClient(drs.hashKey, true)
if err != nil { if err != nil {
@ -132,7 +133,7 @@ func (drs *DownResultStream) replaceStream(cause error) error {
logger.WithTaskID(drs.hashKey).Infof("replaceStream: invoke daemon node %s Download failed: %v", target, err) logger.WithTaskID(drs.hashKey).Infof("replaceStream: invoke daemon node %s Download failed: %v", target, err)
return drs.replaceClient(cause) return drs.replaceClient(cause)
} }
drs.stream = stream.(dfdaemon.Daemon_DownloadClient) drs.stream = stream.(dfdaemonv1.Daemon_DownloadClient)
drs.StreamTimes++ drs.StreamTimes++
return nil return nil
} }
@ -147,7 +148,7 @@ func (drs *DownResultStream) replaceClient(cause error) error {
var target string var target string
stream, err := rpc.ExecuteWithRetry(func() (any, error) { stream, err := rpc.ExecuteWithRetry(func() (any, error) {
var client dfdaemon.DaemonClient var client dfdaemonv1.DaemonClient
var err error var err error
client, target, err = drs.dc.getDaemonClient(drs.hashKey, true) client, target, err = drs.dc.getDaemonClient(drs.hashKey, true)
if err != nil { if err != nil {
@ -159,7 +160,7 @@ func (drs *DownResultStream) replaceClient(cause error) error {
logger.WithTaskID(drs.hashKey).Infof("replaceClient: invoke daemon node %s Download failed: %v", target, err) logger.WithTaskID(drs.hashKey).Infof("replaceClient: invoke daemon node %s Download failed: %v", target, err)
return drs.replaceClient(cause) return drs.replaceClient(cause)
} }
drs.stream = stream.(dfdaemon.Daemon_DownloadClient) drs.stream = stream.(dfdaemonv1.Daemon_DownloadClient)
drs.StreamTimes = 1 drs.StreamTimes = 1
return nil return nil
} }
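For context, a minimal sketch of how a caller inside this package might drain the retry-aware stream above (the constructor is unexported, so this assumes package-internal code; "io" is the only extra import, and drainDownload is a hypothetical helper name):

    // drainDownload consumes DownResults until the daemon reports completion.
    // Recv transparently re-establishes the stream and, if needed, replaces
    // the daemon client before giving up.
    func drainDownload(ctx context.Context, dc *daemonClient, hashKey string, req *dfdaemonv1.DownRequest) error {
        stream, err := newDownResultStream(ctx, dc, hashKey, req, nil)
        if err != nil {
            return err
        }
        for {
            result, err := stream.Recv()
            if err == io.EOF {
                return nil // server closed the stream
            }
            if err != nil {
                return err // stream retries and client replacement already exhausted
            }
            logger.WithTaskID(hashKey).Infof("completed %d bytes", result.CompletedLength)
            if result.Done {
                return nil
            }
        }
    }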

View File

@ -8,9 +8,9 @@ import (
context "context" context "context"
reflect "reflect" reflect "reflect"
v1 "d7y.io/api/pkg/apis/common/v1"
v10 "d7y.io/api/pkg/apis/dfdaemon/v1"
dfnet "d7y.io/dragonfly/v2/pkg/dfnet" dfnet "d7y.io/dragonfly/v2/pkg/dfnet"
base "d7y.io/dragonfly/v2/pkg/rpc/base"
dfdaemon "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
client "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client" client "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client"
gomock "github.com/golang/mock/gomock" gomock "github.com/golang/mock/gomock"
grpc "google.golang.org/grpc" grpc "google.golang.org/grpc"
@ -73,7 +73,7 @@ func (mr *MockDaemonClientMockRecorder) Close() *gomock.Call {
} }
// DeleteTask mocks base method. // DeleteTask mocks base method.
func (m *MockDaemonClient) DeleteTask(ctx context.Context, req *dfdaemon.DeleteTaskRequest, opts ...grpc.CallOption) error { func (m *MockDaemonClient) DeleteTask(ctx context.Context, req *v10.DeleteTaskRequest, opts ...grpc.CallOption) error {
m.ctrl.T.Helper() m.ctrl.T.Helper()
varargs := []interface{}{ctx, req} varargs := []interface{}{ctx, req}
for _, a := range opts { for _, a := range opts {
@ -92,7 +92,7 @@ func (mr *MockDaemonClientMockRecorder) DeleteTask(ctx, req interface{}, opts ..
} }
// Download mocks base method. // Download mocks base method.
func (m *MockDaemonClient) Download(ctx context.Context, req *dfdaemon.DownRequest, opts ...grpc.CallOption) (*client.DownResultStream, error) { func (m *MockDaemonClient) Download(ctx context.Context, req *v10.DownRequest, opts ...grpc.CallOption) (*client.DownResultStream, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
varargs := []interface{}{ctx, req} varargs := []interface{}{ctx, req}
for _, a := range opts { for _, a := range opts {
@ -112,7 +112,7 @@ func (mr *MockDaemonClientMockRecorder) Download(ctx, req interface{}, opts ...i
} }
// ExportTask mocks base method. // ExportTask mocks base method.
func (m *MockDaemonClient) ExportTask(ctx context.Context, req *dfdaemon.ExportTaskRequest, opts ...grpc.CallOption) error { func (m *MockDaemonClient) ExportTask(ctx context.Context, req *v10.ExportTaskRequest, opts ...grpc.CallOption) error {
m.ctrl.T.Helper() m.ctrl.T.Helper()
varargs := []interface{}{ctx, req} varargs := []interface{}{ctx, req}
for _, a := range opts { for _, a := range opts {
@ -131,14 +131,14 @@ func (mr *MockDaemonClientMockRecorder) ExportTask(ctx, req interface{}, opts ..
} }
// GetPieceTasks mocks base method. // GetPieceTasks mocks base method.
func (m *MockDaemonClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) { func (m *MockDaemonClient) GetPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *v1.PieceTaskRequest, opts ...grpc.CallOption) (*v1.PiecePacket, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
varargs := []interface{}{ctx, addr, ptr} varargs := []interface{}{ctx, addr, ptr}
for _, a := range opts { for _, a := range opts {
varargs = append(varargs, a) varargs = append(varargs, a)
} }
ret := m.ctrl.Call(m, "GetPieceTasks", varargs...) ret := m.ctrl.Call(m, "GetPieceTasks", varargs...)
ret0, _ := ret[0].(*base.PiecePacket) ret0, _ := ret[0].(*v1.PiecePacket)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
@ -151,7 +151,7 @@ func (mr *MockDaemonClientMockRecorder) GetPieceTasks(ctx, addr, ptr interface{}
} }
// ImportTask mocks base method. // ImportTask mocks base method.
func (m *MockDaemonClient) ImportTask(ctx context.Context, req *dfdaemon.ImportTaskRequest, opts ...grpc.CallOption) error { func (m *MockDaemonClient) ImportTask(ctx context.Context, req *v10.ImportTaskRequest, opts ...grpc.CallOption) error {
m.ctrl.T.Helper() m.ctrl.T.Helper()
varargs := []interface{}{ctx, req} varargs := []interface{}{ctx, req}
for _, a := range opts { for _, a := range opts {
@ -170,7 +170,7 @@ func (mr *MockDaemonClientMockRecorder) ImportTask(ctx, req interface{}, opts ..
} }
// StatTask mocks base method. // StatTask mocks base method.
func (m *MockDaemonClient) StatTask(ctx context.Context, req *dfdaemon.StatTaskRequest, opts ...grpc.CallOption) error { func (m *MockDaemonClient) StatTask(ctx context.Context, req *v10.StatTaskRequest, opts ...grpc.CallOption) error {
m.ctrl.T.Helper() m.ctrl.T.Helper()
varargs := []interface{}{ctx, req} varargs := []interface{}{ctx, req}
for _, a := range opts { for _, a := range opts {
@ -189,14 +189,14 @@ func (mr *MockDaemonClientMockRecorder) StatTask(ctx, req interface{}, opts ...i
} }
// SyncPieceTasks mocks base method. // SyncPieceTasks mocks base method.
func (m *MockDaemonClient) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *base.PieceTaskRequest, opts ...grpc.CallOption) (dfdaemon.Daemon_SyncPieceTasksClient, error) { func (m *MockDaemonClient) SyncPieceTasks(ctx context.Context, addr dfnet.NetAddr, ptr *v1.PieceTaskRequest, opts ...grpc.CallOption) (v10.Daemon_SyncPieceTasksClient, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
varargs := []interface{}{ctx, addr, ptr} varargs := []interface{}{ctx, addr, ptr}
for _, a := range opts { for _, a := range opts {
varargs = append(varargs, a) varargs = append(varargs, a)
} }
ret := m.ctrl.Call(m, "SyncPieceTasks", varargs...) ret := m.ctrl.Call(m, "SyncPieceTasks", varargs...)
ret0, _ := ret[0].(dfdaemon.Daemon_SyncPieceTasksClient) ret0, _ := ret[0].(v10.Daemon_SyncPieceTasksClient)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
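A short sketch of driving the regenerated mock in a test, assuming the same v1 (common) and v10 (dfdaemon) import aliases used above:

    func TestGetPieceTasks(t *testing.T) {
        ctrl := gomock.NewController(t)
        defer ctrl.Finish()

        m := mocks.NewMockDaemonClient(ctrl)
        // Expect one GetPieceTasks call and return a canned response built
        // from the new common/v1 types.
        m.EXPECT().
            GetPieceTasks(gomock.Any(), gomock.Any(), gomock.Any()).
            Return(&v1.PiecePacket{TaskId: "task-1"}, nil)

        pp, err := m.GetPieceTasks(context.Background(), dfnet.NetAddr{}, &v1.PieceTaskRequest{TaskId: "task-1"})
        if err != nil || pp.TaskId != "task-1" {
            t.Fatalf("unexpected result: %v, %v", pp, err)
        }
    }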

View File

@ -22,16 +22,17 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
commonv1 "d7y.io/api/pkg/apis/common/v1"
dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/pkg/dfnet" "d7y.io/dragonfly/v2/pkg/dfnet"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
) )
func GetPieceTasks(ctx context.Context, func GetPieceTasks(ctx context.Context,
dstPeer *scheduler.PeerPacket_DestPeer, dstPeer *schedulerv1.PeerPacket_DestPeer,
ptr *base.PieceTaskRequest, ptr *commonv1.PieceTaskRequest,
opts ...grpc.CallOption) (*base.PiecePacket, error) { opts ...grpc.CallOption) (*commonv1.PiecePacket, error) {
netAddr := dfnet.NetAddr{ netAddr := dfnet.NetAddr{
Type: dfnet.TCP, Type: dfnet.TCP,
Addr: fmt.Sprintf("%s:%d", dstPeer.Ip, dstPeer.RpcPort), Addr: fmt.Sprintf("%s:%d", dstPeer.Ip, dstPeer.RpcPort),
@ -46,9 +47,9 @@ func GetPieceTasks(ctx context.Context,
} }
func SyncPieceTasks(ctx context.Context, func SyncPieceTasks(ctx context.Context,
destPeer *scheduler.PeerPacket_DestPeer, destPeer *schedulerv1.PeerPacket_DestPeer,
ptr *base.PieceTaskRequest, ptr *commonv1.PieceTaskRequest,
opts ...grpc.CallOption) (dfdaemon.Daemon_SyncPieceTasksClient, error) { opts ...grpc.CallOption) (dfdaemonv1.Daemon_SyncPieceTasksClient, error) {
netAddr := dfnet.NetAddr{ netAddr := dfnet.NetAddr{
Type: dfnet.TCP, Type: dfnet.TCP,
Addr: fmt.Sprintf("%s:%d", destPeer.Ip, destPeer.RpcPort), Addr: fmt.Sprintf("%s:%d", destPeer.Ip, destPeer.RpcPort),
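A hedged sketch of calling the updated helper with the d7y.io/api types (field names follow the common/v1 and scheduler/v1 protos; fetchPieces and the limit of 16 are illustrative):

    func fetchPieces(ctx context.Context, dst *schedulerv1.PeerPacket_DestPeer, taskID, srcPeerID string) (*commonv1.PiecePacket, error) {
        // Ask the destination peer for its first batch of piece tasks.
        req := &commonv1.PieceTaskRequest{
            TaskId:   taskID,
            SrcPid:   srcPeerID,
            DstPid:   dst.PeerId,
            StartNum: 0,
            Limit:    16,
        }
        return GetPieceTasks(ctx, dst, req)
    }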

File diff suppressed because it is too large

View File

@ -1,654 +0,0 @@
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: pkg/rpc/dfdaemon/dfdaemon.proto
package dfdaemon
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"strings"
"time"
"unicode/utf8"
"google.golang.org/protobuf/types/known/anypb"
base "d7y.io/dragonfly/v2/pkg/rpc/base"
)
// ensure the imports are used
var (
_ = bytes.MinRead
_ = errors.New("")
_ = fmt.Print
_ = utf8.UTFMax
_ = (*regexp.Regexp)(nil)
_ = (*strings.Reader)(nil)
_ = net.IPv4len
_ = time.Duration(0)
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
_ = base.TaskType(0)
)
// define the regex for a UUID once up-front
var _dfdaemon_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
// Validate checks the field values on DownRequest with the rules defined in
// the proto definition for this message. If any rules are violated, an error
// is returned.
func (m *DownRequest) Validate() error {
if m == nil {
return nil
}
if err := m._validateUuid(m.GetUuid()); err != nil {
return DownRequestValidationError{
field: "Uuid",
reason: "value must be a valid UUID",
cause: err,
}
}
if uri, err := url.Parse(m.GetUrl()); err != nil {
return DownRequestValidationError{
field: "Url",
reason: "value must be a valid URI",
cause: err,
}
} else if !uri.IsAbs() {
return DownRequestValidationError{
field: "Url",
reason: "value must be absolute",
}
}
if utf8.RuneCountInString(m.GetOutput()) < 1 {
return DownRequestValidationError{
field: "Output",
reason: "value length must be at least 1 runes",
}
}
if m.GetTimeout() < 0 {
return DownRequestValidationError{
field: "Timeout",
reason: "value must be greater than or equal to 0",
}
}
if m.GetLimit() < 0 {
return DownRequestValidationError{
field: "Limit",
reason: "value must be greater than or equal to 0",
}
}
// no validation rules for DisableBackSource
if v, ok := interface{}(m.GetUrlMeta()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return DownRequestValidationError{
field: "UrlMeta",
reason: "embedded message failed validation",
cause: err,
}
}
}
if m.GetPattern() != "" {
if _, ok := _DownRequest_Pattern_InLookup[m.GetPattern()]; !ok {
return DownRequestValidationError{
field: "Pattern",
reason: "value must be in list [p2p seed-peer source]",
}
}
}
// no validation rules for Callsystem
// no validation rules for Uid
// no validation rules for Gid
// no validation rules for KeepOriginalOffset
return nil
}
func (m *DownRequest) _validateUuid(uuid string) error {
if matched := _dfdaemon_uuidPattern.MatchString(uuid); !matched {
return errors.New("invalid uuid format")
}
return nil
}
// DownRequestValidationError is the validation error returned by
// DownRequest.Validate if the designated constraints aren't met.
type DownRequestValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e DownRequestValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e DownRequestValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e DownRequestValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e DownRequestValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e DownRequestValidationError) ErrorName() string { return "DownRequestValidationError" }
// Error satisfies the builtin error interface
func (e DownRequestValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sDownRequest.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = DownRequestValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = DownRequestValidationError{}
var _DownRequest_Pattern_InLookup = map[string]struct{}{
"p2p": {},
"seed-peer": {},
"source": {},
}
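The generated Validate methods were plain synchronous checks; a minimal sketch of the call pattern before this file's removal (field values are illustrative):

    req := &DownRequest{
        Uuid:   "123e4567-e89b-12d3-a456-426614174000",
        Url:    "https://example.com/blobs/file.tar.gz",
        Output: "/tmp/file.tar.gz",
    }
    if err := req.Validate(); err != nil {
        // err is a DownRequestValidationError carrying Field, Reason and Cause.
        log.Fatalf("invalid download request: %v", err)
    }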
// Validate checks the field values on DownResult with the rules defined in the
// proto definition for this message. If any rules are violated, an error is returned.
func (m *DownResult) Validate() error {
if m == nil {
return nil
}
if utf8.RuneCountInString(m.GetTaskId()) < 1 {
return DownResultValidationError{
field: "TaskId",
reason: "value length must be at least 1 runes",
}
}
if utf8.RuneCountInString(m.GetPeerId()) < 1 {
return DownResultValidationError{
field: "PeerId",
reason: "value length must be at least 1 runes",
}
}
if m.GetCompletedLength() < 0 {
return DownResultValidationError{
field: "CompletedLength",
reason: "value must be greater than or equal to 0",
}
}
// no validation rules for Done
return nil
}
// DownResultValidationError is the validation error returned by
// DownResult.Validate if the designated constraints aren't met.
type DownResultValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e DownResultValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e DownResultValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e DownResultValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e DownResultValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e DownResultValidationError) ErrorName() string { return "DownResultValidationError" }
// Error satisfies the builtin error interface
func (e DownResultValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sDownResult.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = DownResultValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = DownResultValidationError{}
// Validate checks the field values on StatTaskRequest with the rules defined
// in the proto definition for this message. If any rules are violated, an
// error is returned.
func (m *StatTaskRequest) Validate() error {
if m == nil {
return nil
}
if utf8.RuneCountInString(m.GetUrl()) < 1 {
return StatTaskRequestValidationError{
field: "Url",
reason: "value length must be at least 1 runes",
}
}
if v, ok := interface{}(m.GetUrlMeta()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return StatTaskRequestValidationError{
field: "UrlMeta",
reason: "embedded message failed validation",
cause: err,
}
}
}
// no validation rules for LocalOnly
return nil
}
// StatTaskRequestValidationError is the validation error returned by
// StatTaskRequest.Validate if the designated constraints aren't met.
type StatTaskRequestValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e StatTaskRequestValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e StatTaskRequestValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e StatTaskRequestValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e StatTaskRequestValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e StatTaskRequestValidationError) ErrorName() string { return "StatTaskRequestValidationError" }
// Error satisfies the builtin error interface
func (e StatTaskRequestValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sStatTaskRequest.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = StatTaskRequestValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = StatTaskRequestValidationError{}
// Validate checks the field values on ImportTaskRequest with the rules defined
// in the proto definition for this message. If any rules are violated, an
// error is returned.
func (m *ImportTaskRequest) Validate() error {
if m == nil {
return nil
}
if utf8.RuneCountInString(m.GetUrl()) < 1 {
return ImportTaskRequestValidationError{
field: "Url",
reason: "value length must be at least 1 runes",
}
}
if v, ok := interface{}(m.GetUrlMeta()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return ImportTaskRequestValidationError{
field: "UrlMeta",
reason: "embedded message failed validation",
cause: err,
}
}
}
if utf8.RuneCountInString(m.GetPath()) < 1 {
return ImportTaskRequestValidationError{
field: "Path",
reason: "value length must be at least 1 runes",
}
}
// no validation rules for Type
return nil
}
// ImportTaskRequestValidationError is the validation error returned by
// ImportTaskRequest.Validate if the designated constraints aren't met.
type ImportTaskRequestValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e ImportTaskRequestValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e ImportTaskRequestValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e ImportTaskRequestValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e ImportTaskRequestValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e ImportTaskRequestValidationError) ErrorName() string {
return "ImportTaskRequestValidationError"
}
// Error satisfies the builtin error interface
func (e ImportTaskRequestValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sImportTaskRequest.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = ImportTaskRequestValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = ImportTaskRequestValidationError{}
// Validate checks the field values on ExportTaskRequest with the rules defined
// in the proto definition for this message. If any rules are violated, an
// error is returned.
func (m *ExportTaskRequest) Validate() error {
if m == nil {
return nil
}
if utf8.RuneCountInString(m.GetUrl()) < 1 {
return ExportTaskRequestValidationError{
field: "Url",
reason: "value length must be at least 1 runes",
}
}
if utf8.RuneCountInString(m.GetOutput()) < 1 {
return ExportTaskRequestValidationError{
field: "Output",
reason: "value length must be at least 1 runes",
}
}
if m.GetTimeout() < 0 {
return ExportTaskRequestValidationError{
field: "Timeout",
reason: "value must be greater than or equal to 0",
}
}
if m.GetLimit() < 0 {
return ExportTaskRequestValidationError{
field: "Limit",
reason: "value must be greater than or equal to 0",
}
}
if v, ok := interface{}(m.GetUrlMeta()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return ExportTaskRequestValidationError{
field: "UrlMeta",
reason: "embedded message failed validation",
cause: err,
}
}
}
// no validation rules for Callsystem
// no validation rules for Uid
// no validation rules for Gid
// no validation rules for LocalOnly
return nil
}
// ExportTaskRequestValidationError is the validation error returned by
// ExportTaskRequest.Validate if the designated constraints aren't met.
type ExportTaskRequestValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e ExportTaskRequestValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e ExportTaskRequestValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e ExportTaskRequestValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e ExportTaskRequestValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e ExportTaskRequestValidationError) ErrorName() string {
return "ExportTaskRequestValidationError"
}
// Error satisfies the builtin error interface
func (e ExportTaskRequestValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sExportTaskRequest.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = ExportTaskRequestValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = ExportTaskRequestValidationError{}
// Validate checks the field values on DeleteTaskRequest with the rules defined
// in the proto definition for this message. If any rules are violated, an
// error is returned.
func (m *DeleteTaskRequest) Validate() error {
if m == nil {
return nil
}
if utf8.RuneCountInString(m.GetUrl()) < 1 {
return DeleteTaskRequestValidationError{
field: "Url",
reason: "value length must be at least 1 runes",
}
}
if v, ok := interface{}(m.GetUrlMeta()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return DeleteTaskRequestValidationError{
field: "UrlMeta",
reason: "embedded message failed validation",
cause: err,
}
}
}
return nil
}
// DeleteTaskRequestValidationError is the validation error returned by
// DeleteTaskRequest.Validate if the designated constraints aren't met.
type DeleteTaskRequestValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e DeleteTaskRequestValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e DeleteTaskRequestValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e DeleteTaskRequestValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e DeleteTaskRequestValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e DeleteTaskRequestValidationError) ErrorName() string {
return "DeleteTaskRequestValidationError"
}
// Error satisfies the builtin error interface
func (e DeleteTaskRequestValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sDeleteTaskRequest.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = DeleteTaskRequestValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = DeleteTaskRequestValidationError{}

View File

@ -1,132 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package dfdaemon;
import "pkg/rpc/base/base.proto";
import "google/protobuf/empty.proto";
import "validate/validate.proto";
option go_package = "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon";
message DownRequest{
// Identifies one download; the framework fills it in automatically.
string uuid = 1 [(validate.rules).string.uuid = true];
// Download the file from this url; the scheme is not limited to http.
string url = 2 [(validate.rules).string.uri = true];
// Pieces are written directly to the output path;
// the dfdaemon workspace also keeps a soft link to the output.
string output = 3 [(validate.rules).string.min_len = 1];
// Timeout duration.
uint64 timeout = 4 [(validate.rules).uint64.gte = 0];
// Rate limit in bytes per second.
double limit = 5 [(validate.rules).double.gte = 0];
// Disable back-to-source.
bool disable_back_source = 6;
// URL meta info.
base.UrlMeta url_meta = 7;
// Pattern is one of p2p, seed-peer or source; the default is p2p.
string pattern = 8 [(validate.rules).string = {in:["p2p", "seed-peer", "source"], ignore_empty:true}];
// Call system.
string callsystem = 9;
// User id.
int64 uid = 10;
// Group id.
int64 gid = 11;
// Keep the original offset, used for ranged requests; only available with hard links, otherwise the download will fail.
bool keep_original_offset = 12;
}
message DownResult{
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
// Completed length of the task.
uint64 completed_length = 4 [(validate.rules).uint64.gte = 0];
// Task has been completed.
bool done = 5;
}
message StatTaskRequest{
// Download url.
string url = 1 [(validate.rules).string.min_len = 1];
// URL meta info.
base.UrlMeta url_meta = 2;
// Check local cache only.
bool local_only = 3;
}
message ImportTaskRequest{
// Download url.
string url = 1 [(validate.rules).string.min_len = 1];
// URL meta info.
base.UrlMeta url_meta = 2;
// File to be imported.
string path = 3 [(validate.rules).string.min_len = 1];
// Task type.
base.TaskType type = 4;
}
message ExportTaskRequest{
// Download url.
string url = 1 [(validate.rules).string.min_len = 1];
// Output path of downloaded file.
string output = 2 [(validate.rules).string.min_len = 1];
// Timeout duration.
uint64 timeout = 3 [(validate.rules).uint64.gte = 0];
// Rate limit in bytes per second.
double limit = 4 [(validate.rules).double.gte = 0];
// URL meta info.
base.UrlMeta url_meta = 5;
// Call system.
string callsystem = 6;
// User id.
int64 uid = 7;
// Group id.
int64 gid = 8;
// Only export from local storage.
bool local_only = 9;
}
message DeleteTaskRequest{
// Download url.
string url = 1 [(validate.rules).string.min_len = 1];
// URL meta info.
base.UrlMeta url_meta = 2;
}
// Daemon Client RPC Service
service Daemon{
// Trigger client to download file
rpc Download(DownRequest) returns(stream DownResult);
// Get piece tasks from other peers
rpc GetPieceTasks(base.PieceTaskRequest)returns(base.PiecePacket);
// Check daemon health
rpc CheckHealth(google.protobuf.Empty)returns(google.protobuf.Empty);
// Sync piece tasks with other peers
rpc SyncPieceTasks(stream base.PieceTaskRequest)returns(stream base.PiecePacket);
// Check if given task exists in P2P cache system
rpc StatTask(StatTaskRequest) returns(google.protobuf.Empty);
// Import the given file into P2P cache system
rpc ImportTask(ImportTaskRequest) returns(google.protobuf.Empty);
// Export or download file from P2P cache system
rpc ExportTask(ExportTaskRequest) returns(google.protobuf.Empty);
// Delete file from P2P cache system
rpc DeleteTask(DeleteTaskRequest) returns(google.protobuf.Empty);
}
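For reference, a minimal Go sketch of driving this service before the proto moved to d7y.io/api (constructor and method names follow standard protoc-gen-go-grpc output; the address is illustrative):

    conn, err := grpc.Dial("127.0.0.1:65000",
        grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        log.Fatalf("dial daemon: %v", err)
    }
    defer conn.Close()

    client := dfdaemon.NewDaemonClient(conn)
    // Health probe: request and response are both google.protobuf.Empty.
    if _, err := client.CheckHealth(context.Background(), &emptypb.Empty{}); err != nil {
        log.Fatalf("daemon unhealthy: %v", err)
    }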

View File

@ -1,854 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: dfdaemon/dfdaemon.pb.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
base "d7y.io/dragonfly/v2/pkg/rpc/base"
dfdaemon "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
gomock "github.com/golang/mock/gomock"
grpc "google.golang.org/grpc"
metadata "google.golang.org/grpc/metadata"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// MockDaemonClient is a mock of DaemonClient interface.
type MockDaemonClient struct {
ctrl *gomock.Controller
recorder *MockDaemonClientMockRecorder
}
// MockDaemonClientMockRecorder is the mock recorder for MockDaemonClient.
type MockDaemonClientMockRecorder struct {
mock *MockDaemonClient
}
// NewMockDaemonClient creates a new mock instance.
func NewMockDaemonClient(ctrl *gomock.Controller) *MockDaemonClient {
mock := &MockDaemonClient{ctrl: ctrl}
mock.recorder = &MockDaemonClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDaemonClient) EXPECT() *MockDaemonClientMockRecorder {
return m.recorder
}
// CheckHealth mocks base method.
func (m *MockDaemonClient) CheckHealth(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "CheckHealth", varargs...)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CheckHealth indicates an expected call of CheckHealth.
func (mr *MockDaemonClientMockRecorder) CheckHealth(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckHealth", reflect.TypeOf((*MockDaemonClient)(nil).CheckHealth), varargs...)
}
// DeleteTask mocks base method.
func (m *MockDaemonClient) DeleteTask(ctx context.Context, in *dfdaemon.DeleteTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "DeleteTask", varargs...)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DeleteTask indicates an expected call of DeleteTask.
func (mr *MockDaemonClientMockRecorder) DeleteTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTask", reflect.TypeOf((*MockDaemonClient)(nil).DeleteTask), varargs...)
}
// Download mocks base method.
func (m *MockDaemonClient) Download(ctx context.Context, in *dfdaemon.DownRequest, opts ...grpc.CallOption) (dfdaemon.Daemon_DownloadClient, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "Download", varargs...)
ret0, _ := ret[0].(dfdaemon.Daemon_DownloadClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Download indicates an expected call of Download.
func (mr *MockDaemonClientMockRecorder) Download(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockDaemonClient)(nil).Download), varargs...)
}
// ExportTask mocks base method.
func (m *MockDaemonClient) ExportTask(ctx context.Context, in *dfdaemon.ExportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ExportTask", varargs...)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ExportTask indicates an expected call of ExportTask.
func (mr *MockDaemonClientMockRecorder) ExportTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportTask", reflect.TypeOf((*MockDaemonClient)(nil).ExportTask), varargs...)
}
// GetPieceTasks mocks base method.
func (m *MockDaemonClient) GetPieceTasks(ctx context.Context, in *base.PieceTaskRequest, opts ...grpc.CallOption) (*base.PiecePacket, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetPieceTasks", varargs...)
ret0, _ := ret[0].(*base.PiecePacket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPieceTasks indicates an expected call of GetPieceTasks.
func (mr *MockDaemonClientMockRecorder) GetPieceTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockDaemonClient)(nil).GetPieceTasks), varargs...)
}
// ImportTask mocks base method.
func (m *MockDaemonClient) ImportTask(ctx context.Context, in *dfdaemon.ImportTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ImportTask", varargs...)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ImportTask indicates an expected call of ImportTask.
func (mr *MockDaemonClientMockRecorder) ImportTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportTask", reflect.TypeOf((*MockDaemonClient)(nil).ImportTask), varargs...)
}
// StatTask mocks base method.
func (m *MockDaemonClient) StatTask(ctx context.Context, in *dfdaemon.StatTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "StatTask", varargs...)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StatTask indicates an expected call of StatTask.
func (mr *MockDaemonClientMockRecorder) StatTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatTask", reflect.TypeOf((*MockDaemonClient)(nil).StatTask), varargs...)
}
// SyncPieceTasks mocks base method.
func (m *MockDaemonClient) SyncPieceTasks(ctx context.Context, opts ...grpc.CallOption) (dfdaemon.Daemon_SyncPieceTasksClient, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "SyncPieceTasks", varargs...)
ret0, _ := ret[0].(dfdaemon.Daemon_SyncPieceTasksClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SyncPieceTasks indicates an expected call of SyncPieceTasks.
func (mr *MockDaemonClientMockRecorder) SyncPieceTasks(ctx interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPieceTasks", reflect.TypeOf((*MockDaemonClient)(nil).SyncPieceTasks), varargs...)
}
// MockDaemon_DownloadClient is a mock of Daemon_DownloadClient interface.
type MockDaemon_DownloadClient struct {
ctrl *gomock.Controller
recorder *MockDaemon_DownloadClientMockRecorder
}
// MockDaemon_DownloadClientMockRecorder is the mock recorder for MockDaemon_DownloadClient.
type MockDaemon_DownloadClientMockRecorder struct {
mock *MockDaemon_DownloadClient
}
// NewMockDaemon_DownloadClient creates a new mock instance.
func NewMockDaemon_DownloadClient(ctrl *gomock.Controller) *MockDaemon_DownloadClient {
mock := &MockDaemon_DownloadClient{ctrl: ctrl}
mock.recorder = &MockDaemon_DownloadClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDaemon_DownloadClient) EXPECT() *MockDaemon_DownloadClientMockRecorder {
return m.recorder
}
// CloseSend mocks base method.
func (m *MockDaemon_DownloadClient) CloseSend() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CloseSend")
ret0, _ := ret[0].(error)
return ret0
}
// CloseSend indicates an expected call of CloseSend.
func (mr *MockDaemon_DownloadClientMockRecorder) CloseSend() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).CloseSend))
}
// Context mocks base method.
func (m *MockDaemon_DownloadClient) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockDaemon_DownloadClientMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).Context))
}
// Header mocks base method.
func (m *MockDaemon_DownloadClient) Header() (metadata.MD, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Header")
ret0, _ := ret[0].(metadata.MD)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Header indicates an expected call of Header.
func (mr *MockDaemon_DownloadClientMockRecorder) Header() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).Header))
}
// Recv mocks base method.
func (m *MockDaemon_DownloadClient) Recv() (*dfdaemon.DownResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*dfdaemon.DownResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Recv indicates an expected call of Recv.
func (mr *MockDaemon_DownloadClientMockRecorder) Recv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).Recv))
}
// RecvMsg mocks base method.
func (m_2 *MockDaemon_DownloadClient) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockDaemon_DownloadClientMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).RecvMsg), m)
}
// SendMsg mocks base method.
func (m_2 *MockDaemon_DownloadClient) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockDaemon_DownloadClientMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).SendMsg), m)
}
// Trailer mocks base method.
func (m *MockDaemon_DownloadClient) Trailer() metadata.MD {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Trailer")
ret0, _ := ret[0].(metadata.MD)
return ret0
}
// Trailer indicates an expected call of Trailer.
func (mr *MockDaemon_DownloadClientMockRecorder) Trailer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockDaemon_DownloadClient)(nil).Trailer))
}
// MockDaemon_SyncPieceTasksClient is a mock of Daemon_SyncPieceTasksClient interface.
type MockDaemon_SyncPieceTasksClient struct {
ctrl *gomock.Controller
recorder *MockDaemon_SyncPieceTasksClientMockRecorder
}
// MockDaemon_SyncPieceTasksClientMockRecorder is the mock recorder for MockDaemon_SyncPieceTasksClient.
type MockDaemon_SyncPieceTasksClientMockRecorder struct {
mock *MockDaemon_SyncPieceTasksClient
}
// NewMockDaemon_SyncPieceTasksClient creates a new mock instance.
func NewMockDaemon_SyncPieceTasksClient(ctrl *gomock.Controller) *MockDaemon_SyncPieceTasksClient {
mock := &MockDaemon_SyncPieceTasksClient{ctrl: ctrl}
mock.recorder = &MockDaemon_SyncPieceTasksClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDaemon_SyncPieceTasksClient) EXPECT() *MockDaemon_SyncPieceTasksClientMockRecorder {
return m.recorder
}
// CloseSend mocks base method.
func (m *MockDaemon_SyncPieceTasksClient) CloseSend() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CloseSend")
ret0, _ := ret[0].(error)
return ret0
}
// CloseSend indicates an expected call of CloseSend.
func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) CloseSend() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).CloseSend))
}
// Context mocks base method.
func (m *MockDaemon_SyncPieceTasksClient) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).Context))
}
// Header mocks base method.
func (m *MockDaemon_SyncPieceTasksClient) Header() (metadata.MD, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Header")
ret0, _ := ret[0].(metadata.MD)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Header indicates an expected call of Header.
func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) Header() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).Header))
}
// Recv mocks base method.
func (m *MockDaemon_SyncPieceTasksClient) Recv() (*base.PiecePacket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*base.PiecePacket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Recv indicates an expected call of Recv.
func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) Recv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).Recv))
}
// RecvMsg mocks base method.
func (m_2 *MockDaemon_SyncPieceTasksClient) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockDaemon_SyncPieceTasksClient) Send(arg0 *base.PieceTaskRequest) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Send indicates an expected call of Send.
func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).Send), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockDaemon_SyncPieceTasksClient) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).SendMsg), m)
}
// Trailer mocks base method.
func (m *MockDaemon_SyncPieceTasksClient) Trailer() metadata.MD {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Trailer")
ret0, _ := ret[0].(metadata.MD)
return ret0
}
// Trailer indicates an expected call of Trailer.
func (mr *MockDaemon_SyncPieceTasksClientMockRecorder) Trailer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockDaemon_SyncPieceTasksClient)(nil).Trailer))
}
// MockDaemonServer is a mock of DaemonServer interface.
type MockDaemonServer struct {
ctrl *gomock.Controller
recorder *MockDaemonServerMockRecorder
}
// MockDaemonServerMockRecorder is the mock recorder for MockDaemonServer.
type MockDaemonServerMockRecorder struct {
mock *MockDaemonServer
}
// NewMockDaemonServer creates a new mock instance.
func NewMockDaemonServer(ctrl *gomock.Controller) *MockDaemonServer {
mock := &MockDaemonServer{ctrl: ctrl}
mock.recorder = &MockDaemonServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDaemonServer) EXPECT() *MockDaemonServerMockRecorder {
return m.recorder
}
// CheckHealth mocks base method.
func (m *MockDaemonServer) CheckHealth(arg0 context.Context, arg1 *emptypb.Empty) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CheckHealth", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CheckHealth indicates an expected call of CheckHealth.
func (mr *MockDaemonServerMockRecorder) CheckHealth(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckHealth", reflect.TypeOf((*MockDaemonServer)(nil).CheckHealth), arg0, arg1)
}
// DeleteTask mocks base method.
func (m *MockDaemonServer) DeleteTask(arg0 context.Context, arg1 *dfdaemon.DeleteTaskRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteTask", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DeleteTask indicates an expected call of DeleteTask.
func (mr *MockDaemonServerMockRecorder) DeleteTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTask", reflect.TypeOf((*MockDaemonServer)(nil).DeleteTask), arg0, arg1)
}
// Download mocks base method.
func (m *MockDaemonServer) Download(arg0 *dfdaemon.DownRequest, arg1 dfdaemon.Daemon_DownloadServer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Download", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Download indicates an expected call of Download.
func (mr *MockDaemonServerMockRecorder) Download(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Download", reflect.TypeOf((*MockDaemonServer)(nil).Download), arg0, arg1)
}
// ExportTask mocks base method.
func (m *MockDaemonServer) ExportTask(arg0 context.Context, arg1 *dfdaemon.ExportTaskRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ExportTask", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ExportTask indicates an expected call of ExportTask.
func (mr *MockDaemonServerMockRecorder) ExportTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportTask", reflect.TypeOf((*MockDaemonServer)(nil).ExportTask), arg0, arg1)
}
// GetPieceTasks mocks base method.
func (m *MockDaemonServer) GetPieceTasks(arg0 context.Context, arg1 *base.PieceTaskRequest) (*base.PiecePacket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPieceTasks", arg0, arg1)
ret0, _ := ret[0].(*base.PiecePacket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPieceTasks indicates an expected call of GetPieceTasks.
func (mr *MockDaemonServerMockRecorder) GetPieceTasks(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieceTasks", reflect.TypeOf((*MockDaemonServer)(nil).GetPieceTasks), arg0, arg1)
}
// ImportTask mocks base method.
func (m *MockDaemonServer) ImportTask(arg0 context.Context, arg1 *dfdaemon.ImportTaskRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ImportTask", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ImportTask indicates an expected call of ImportTask.
func (mr *MockDaemonServerMockRecorder) ImportTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportTask", reflect.TypeOf((*MockDaemonServer)(nil).ImportTask), arg0, arg1)
}
// StatTask mocks base method.
func (m *MockDaemonServer) StatTask(arg0 context.Context, arg1 *dfdaemon.StatTaskRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StatTask", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StatTask indicates an expected call of StatTask.
func (mr *MockDaemonServerMockRecorder) StatTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatTask", reflect.TypeOf((*MockDaemonServer)(nil).StatTask), arg0, arg1)
}
// SyncPieceTasks mocks base method.
func (m *MockDaemonServer) SyncPieceTasks(arg0 dfdaemon.Daemon_SyncPieceTasksServer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SyncPieceTasks", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SyncPieceTasks indicates an expected call of SyncPieceTasks.
func (mr *MockDaemonServerMockRecorder) SyncPieceTasks(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPieceTasks", reflect.TypeOf((*MockDaemonServer)(nil).SyncPieceTasks), arg0)
}
// MockDaemon_DownloadServer is a mock of Daemon_DownloadServer interface.
type MockDaemon_DownloadServer struct {
ctrl *gomock.Controller
recorder *MockDaemon_DownloadServerMockRecorder
}
// MockDaemon_DownloadServerMockRecorder is the mock recorder for MockDaemon_DownloadServer.
type MockDaemon_DownloadServerMockRecorder struct {
mock *MockDaemon_DownloadServer
}
// NewMockDaemon_DownloadServer creates a new mock instance.
func NewMockDaemon_DownloadServer(ctrl *gomock.Controller) *MockDaemon_DownloadServer {
mock := &MockDaemon_DownloadServer{ctrl: ctrl}
mock.recorder = &MockDaemon_DownloadServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDaemon_DownloadServer) EXPECT() *MockDaemon_DownloadServerMockRecorder {
return m.recorder
}
// Context mocks base method.
func (m *MockDaemon_DownloadServer) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockDaemon_DownloadServerMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).Context))
}
// RecvMsg mocks base method.
func (m_2 *MockDaemon_DownloadServer) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockDaemon_DownloadServerMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockDaemon_DownloadServer) Send(arg0 *dfdaemon.DownResult) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Send indicates an expected call of Send.
func (mr *MockDaemon_DownloadServerMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).Send), arg0)
}
// SendHeader mocks base method.
func (m *MockDaemon_DownloadServer) SendHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SendHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockDaemon_DownloadServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockDaemon_DownloadServer) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockDaemon_DownloadServerMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SendMsg), m)
}
// SetHeader mocks base method.
func (m *MockDaemon_DownloadServer) SetHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockDaemon_DownloadServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SetHeader), arg0)
}
// SetTrailer mocks base method.
func (m *MockDaemon_DownloadServer) SetTrailer(arg0 metadata.MD) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "SetTrailer", arg0)
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockDaemon_DownloadServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockDaemon_DownloadServer)(nil).SetTrailer), arg0)
}
// MockDaemon_SyncPieceTasksServer is a mock of Daemon_SyncPieceTasksServer interface.
type MockDaemon_SyncPieceTasksServer struct {
ctrl *gomock.Controller
recorder *MockDaemon_SyncPieceTasksServerMockRecorder
}
// MockDaemon_SyncPieceTasksServerMockRecorder is the mock recorder for MockDaemon_SyncPieceTasksServer.
type MockDaemon_SyncPieceTasksServerMockRecorder struct {
mock *MockDaemon_SyncPieceTasksServer
}
// NewMockDaemon_SyncPieceTasksServer creates a new mock instance.
func NewMockDaemon_SyncPieceTasksServer(ctrl *gomock.Controller) *MockDaemon_SyncPieceTasksServer {
mock := &MockDaemon_SyncPieceTasksServer{ctrl: ctrl}
mock.recorder = &MockDaemon_SyncPieceTasksServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDaemon_SyncPieceTasksServer) EXPECT() *MockDaemon_SyncPieceTasksServerMockRecorder {
return m.recorder
}
// Context mocks base method.
func (m *MockDaemon_SyncPieceTasksServer) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).Context))
}
// Recv mocks base method.
func (m *MockDaemon_SyncPieceTasksServer) Recv() (*base.PieceTaskRequest, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*base.PieceTaskRequest)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Recv indicates an expected call of Recv.
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) Recv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).Recv))
}
// RecvMsg mocks base method.
func (m_2 *MockDaemon_SyncPieceTasksServer) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockDaemon_SyncPieceTasksServer) Send(arg0 *base.PiecePacket) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Send indicates an expected call of Send.
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).Send), arg0)
}
// SendHeader mocks base method.
func (m *MockDaemon_SyncPieceTasksServer) SendHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SendHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockDaemon_SyncPieceTasksServer) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).SendMsg), m)
}
// SetHeader mocks base method.
func (m *MockDaemon_SyncPieceTasksServer) SetHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).SetHeader), arg0)
}
// SetTrailer mocks base method.
func (m *MockDaemon_SyncPieceTasksServer) SetTrailer(arg0 metadata.MD) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "SetTrailer", arg0)
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockDaemon_SyncPieceTasksServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockDaemon_SyncPieceTasksServer)(nil).SetTrailer), arg0)
}
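
The generated stream mocks above are driven the usual gomock way: build a controller, record expectations on the recorder, then hand the mock to code that expects a Daemon_SyncPieceTasksServer. A minimal test sketch (the test name, the mocks import path, and the single expectation are illustrative, not part of this commit):

package server_test

import (
	"io"
	"testing"

	"github.com/golang/mock/gomock"

	// Hypothetical import path for the generated mocks shown above.
	"d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server/mocks"
)

func TestSyncPieceTasks_EOF(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Stand-in for a peer's Daemon_SyncPieceTasksServer stream.
	stream := mocks.NewMockDaemon_SyncPieceTasksServer(ctrl)

	// Expect exactly one Recv that ends the sync loop with io.EOF.
	stream.EXPECT().Recv().Return(nil, io.EOF)

	if _, err := stream.Recv(); err != io.EOF {
		t.Fatalf("expected io.EOF, got %v", err)
	}
}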

View File

@@ -8,8 +8,8 @@ import (
 context "context"
 reflect "reflect"
-base "d7y.io/dragonfly/v2/pkg/rpc/base"
-dfdaemon "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
+v1 "d7y.io/api/pkg/apis/common/v1"
+v10 "d7y.io/api/pkg/apis/dfdaemon/v1"
 gomock "github.com/golang/mock/gomock"
 )
@@ -51,7 +51,7 @@ func (mr *MockDaemonServerMockRecorder) CheckHealth(arg0 interface{}) *gomock.Ca
 }
 // DeleteTask mocks base method.
-func (m *MockDaemonServer) DeleteTask(arg0 context.Context, arg1 *dfdaemon.DeleteTaskRequest) error {
+func (m *MockDaemonServer) DeleteTask(arg0 context.Context, arg1 *v10.DeleteTaskRequest) error {
 m.ctrl.T.Helper()
 ret := m.ctrl.Call(m, "DeleteTask", arg0, arg1)
 ret0, _ := ret[0].(error)
@@ -65,7 +65,7 @@ func (mr *MockDaemonServerMockRecorder) DeleteTask(arg0, arg1 interface{}) *gomo
 }
 // Download mocks base method.
-func (m *MockDaemonServer) Download(arg0 context.Context, arg1 *dfdaemon.DownRequest, arg2 chan<- *dfdaemon.DownResult) error {
+func (m *MockDaemonServer) Download(arg0 context.Context, arg1 *v10.DownRequest, arg2 chan<- *v10.DownResult) error {
 m.ctrl.T.Helper()
 ret := m.ctrl.Call(m, "Download", arg0, arg1, arg2)
 ret0, _ := ret[0].(error)
@@ -79,7 +79,7 @@ func (mr *MockDaemonServerMockRecorder) Download(arg0, arg1, arg2 interface{}) *
 }
 // ExportTask mocks base method.
-func (m *MockDaemonServer) ExportTask(arg0 context.Context, arg1 *dfdaemon.ExportTaskRequest) error {
+func (m *MockDaemonServer) ExportTask(arg0 context.Context, arg1 *v10.ExportTaskRequest) error {
 m.ctrl.T.Helper()
 ret := m.ctrl.Call(m, "ExportTask", arg0, arg1)
 ret0, _ := ret[0].(error)
@@ -93,10 +93,10 @@ func (mr *MockDaemonServerMockRecorder) ExportTask(arg0, arg1 interface{}) *gomo
 }
 // GetPieceTasks mocks base method.
-func (m *MockDaemonServer) GetPieceTasks(arg0 context.Context, arg1 *base.PieceTaskRequest) (*base.PiecePacket, error) {
+func (m *MockDaemonServer) GetPieceTasks(arg0 context.Context, arg1 *v1.PieceTaskRequest) (*v1.PiecePacket, error) {
 m.ctrl.T.Helper()
 ret := m.ctrl.Call(m, "GetPieceTasks", arg0, arg1)
-ret0, _ := ret[0].(*base.PiecePacket)
+ret0, _ := ret[0].(*v1.PiecePacket)
 ret1, _ := ret[1].(error)
 return ret0, ret1
 }
@@ -108,7 +108,7 @@ func (mr *MockDaemonServerMockRecorder) GetPieceTasks(arg0, arg1 interface{}) *g
 }
 // ImportTask mocks base method.
-func (m *MockDaemonServer) ImportTask(arg0 context.Context, arg1 *dfdaemon.ImportTaskRequest) error {
+func (m *MockDaemonServer) ImportTask(arg0 context.Context, arg1 *v10.ImportTaskRequest) error {
 m.ctrl.T.Helper()
 ret := m.ctrl.Call(m, "ImportTask", arg0, arg1)
 ret0, _ := ret[0].(error)
@@ -122,7 +122,7 @@ func (mr *MockDaemonServerMockRecorder) ImportTask(arg0, arg1 interface{}) *gomo
 }
 // StatTask mocks base method.
-func (m *MockDaemonServer) StatTask(arg0 context.Context, arg1 *dfdaemon.StatTaskRequest) error {
+func (m *MockDaemonServer) StatTask(arg0 context.Context, arg1 *v10.StatTaskRequest) error {
 m.ctrl.T.Helper()
 ret := m.ctrl.Call(m, "StatTask", arg0, arg1)
 ret0, _ := ret[0].(error)
@@ -136,7 +136,7 @@ func (mr *MockDaemonServerMockRecorder) StatTask(arg0, arg1 interface{}) *gomock
 }
 // SyncPieceTasks mocks base method.
-func (m *MockDaemonServer) SyncPieceTasks(arg0 dfdaemon.Daemon_SyncPieceTasksServer) error {
+func (m *MockDaemonServer) SyncPieceTasks(arg0 v10.Daemon_SyncPieceTasksServer) error {
 m.ctrl.T.Helper()
 ret := m.ctrl.Call(m, "SyncPieceTasks", arg0)
 ret0, _ := ret[0].(error)

View File

@@ -26,46 +26,47 @@ import (
 "google.golang.org/grpc/peer"
 "google.golang.org/protobuf/types/known/emptypb"
+commonv1 "d7y.io/api/pkg/apis/common/v1"
+dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"
 "d7y.io/dragonfly/v2/internal/dferrors"
 logger "d7y.io/dragonfly/v2/internal/dflog"
 "d7y.io/dragonfly/v2/pkg/rpc"
-"d7y.io/dragonfly/v2/pkg/rpc/base"
-"d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
 "d7y.io/dragonfly/v2/pkg/safe"
 )
-// DaemonServer refer to dfdaemon.DaemonServer
+// DaemonServer refer to dfdaemonv1.DaemonServer
 type DaemonServer interface {
 // Download triggers client to download file
-Download(context.Context, *dfdaemon.DownRequest, chan<- *dfdaemon.DownResult) error
+Download(context.Context, *dfdaemonv1.DownRequest, chan<- *dfdaemonv1.DownResult) error
 // GetPieceTasks get piece tasks from other peers
-GetPieceTasks(context.Context, *base.PieceTaskRequest) (*base.PiecePacket, error)
+GetPieceTasks(context.Context, *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error)
 // SyncPieceTasks sync piece tasks info with other peers
-SyncPieceTasks(dfdaemon.Daemon_SyncPieceTasksServer) error
+SyncPieceTasks(dfdaemonv1.Daemon_SyncPieceTasksServer) error
 // CheckHealth check daemon health
 CheckHealth(context.Context) error
 // Check if the given task exists in P2P cache system
-StatTask(context.Context, *dfdaemon.StatTaskRequest) error
+StatTask(context.Context, *dfdaemonv1.StatTaskRequest) error
 // Import the given file into P2P cache system
-ImportTask(context.Context, *dfdaemon.ImportTaskRequest) error
+ImportTask(context.Context, *dfdaemonv1.ImportTaskRequest) error
 // Export or download file from P2P cache system
-ExportTask(context.Context, *dfdaemon.ExportTaskRequest) error
+ExportTask(context.Context, *dfdaemonv1.ExportTaskRequest) error
 // Delete file from P2P cache system
-DeleteTask(context.Context, *dfdaemon.DeleteTaskRequest) error
+DeleteTask(context.Context, *dfdaemonv1.DeleteTaskRequest) error
 }
 type proxy struct {
 server DaemonServer
-dfdaemon.UnimplementedDaemonServer
+dfdaemonv1.UnimplementedDaemonServer
 }
 func New(daemonServer DaemonServer, opts ...grpc.ServerOption) *grpc.Server {
 grpcServer := grpc.NewServer(append(rpc.DefaultServerOptions(), opts...)...)
-dfdaemon.RegisterDaemonServer(grpcServer, &proxy{server: daemonServer})
+dfdaemonv1.RegisterDaemonServer(grpcServer, &proxy{server: daemonServer})
 return grpcServer
 }
-func (p *proxy) Download(req *dfdaemon.DownRequest, stream dfdaemon.Daemon_DownloadServer) (err error) {
+func (p *proxy) Download(req *dfdaemonv1.DownRequest, stream dfdaemonv1.Daemon_DownloadServer) (err error) {
 ctx, cancel := context.WithCancel(stream.Context())
 defer cancel()
@@ -76,7 +77,7 @@ func (p *proxy) Download(req *dfdaemon.DownRequest, stream dfdaemon.Daemon_Downl
 logger.Infof("trigger download for url: %s, from: %s, uuid: %s", req.Url, peerAddr, req.Uuid)
 errChan := make(chan error, 10)
-drc := make(chan *dfdaemon.DownResult, 4)
+drc := make(chan *dfdaemonv1.DownResult, 4)
 once := new(sync.Once)
 closeDrc := func() {
@@ -97,11 +98,11 @@ func (p *proxy) Download(req *dfdaemon.DownRequest, stream dfdaemon.Daemon_Downl
 return
 }
-func (p *proxy) GetPieceTasks(ctx context.Context, ptr *base.PieceTaskRequest) (*base.PiecePacket, error) {
+func (p *proxy) GetPieceTasks(ctx context.Context, ptr *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
 return p.server.GetPieceTasks(ctx, ptr)
 }
-func (p *proxy) SyncPieceTasks(sync dfdaemon.Daemon_SyncPieceTasksServer) error {
+func (p *proxy) SyncPieceTasks(sync dfdaemonv1.Daemon_SyncPieceTasksServer) error {
 return p.server.SyncPieceTasks(sync)
 }
@@ -109,23 +110,23 @@ func (p *proxy) CheckHealth(ctx context.Context, req *emptypb.Empty) (*emptypb.E
 return new(emptypb.Empty), p.server.CheckHealth(ctx)
 }
-func (p *proxy) StatTask(ctx context.Context, req *dfdaemon.StatTaskRequest) (*emptypb.Empty, error) {
+func (p *proxy) StatTask(ctx context.Context, req *dfdaemonv1.StatTaskRequest) (*emptypb.Empty, error) {
 return new(emptypb.Empty), p.server.StatTask(ctx, req)
 }
-func (p *proxy) ImportTask(ctx context.Context, req *dfdaemon.ImportTaskRequest) (*emptypb.Empty, error) {
+func (p *proxy) ImportTask(ctx context.Context, req *dfdaemonv1.ImportTaskRequest) (*emptypb.Empty, error) {
 return new(emptypb.Empty), p.server.ImportTask(ctx, req)
 }
-func (p *proxy) ExportTask(ctx context.Context, req *dfdaemon.ExportTaskRequest) (*emptypb.Empty, error) {
+func (p *proxy) ExportTask(ctx context.Context, req *dfdaemonv1.ExportTaskRequest) (*emptypb.Empty, error) {
 return new(emptypb.Empty), p.server.ExportTask(ctx, req)
 }
-func (p *proxy) DeleteTask(ctx context.Context, req *dfdaemon.DeleteTaskRequest) (*emptypb.Empty, error) {
+func (p *proxy) DeleteTask(ctx context.Context, req *dfdaemonv1.DeleteTaskRequest) (*emptypb.Empty, error) {
 return new(emptypb.Empty), p.server.DeleteTask(ctx, req)
 }
-func send(drc chan *dfdaemon.DownResult, closeDrc func(), stream dfdaemon.Daemon_DownloadServer, errChan chan error) {
+func send(drc chan *dfdaemonv1.DownResult, closeDrc func(), stream dfdaemonv1.Daemon_DownloadServer, errChan chan error) {
 err := safe.Call(func() {
 defer closeDrc()
@@ -148,7 +149,7 @@ func send(drc chan *dfdaemon.DownResult, closeDrc func(), stream dfdaemon.Daemon
 }
 }
-func call(ctx context.Context, drc chan *dfdaemon.DownResult, p *proxy, req *dfdaemon.DownRequest, errChan chan error) {
+func call(ctx context.Context, drc chan *dfdaemonv1.DownResult, p *proxy, req *dfdaemonv1.DownRequest, errChan chan error) {
 err := safe.Call(func() {
 if err := p.server.Download(ctx, req, drc); err != nil {
 errChan <- err
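
For reference, the proxy above is the only surface callers touch: any plain-Go implementation of DaemonServer can be wrapped with New and served. A rough wiring sketch under assumed names (stubDaemon, the server import path, and the listen address are invented for illustration):

package main

import (
	"context"
	"log"
	"net"

	commonv1 "d7y.io/api/pkg/apis/common/v1"
	dfdaemonv1 "d7y.io/api/pkg/apis/dfdaemon/v1"

	server "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/server" // assumed import path for the package above
)

// stubDaemon is a hypothetical DaemonServer implementation; every method is a no-op.
type stubDaemon struct{}

func (s *stubDaemon) Download(ctx context.Context, req *dfdaemonv1.DownRequest, drc chan<- *dfdaemonv1.DownResult) error {
	return nil
}

func (s *stubDaemon) GetPieceTasks(ctx context.Context, req *commonv1.PieceTaskRequest) (*commonv1.PiecePacket, error) {
	return &commonv1.PiecePacket{}, nil
}

func (s *stubDaemon) SyncPieceTasks(stream dfdaemonv1.Daemon_SyncPieceTasksServer) error  { return nil }
func (s *stubDaemon) CheckHealth(ctx context.Context) error                               { return nil }
func (s *stubDaemon) StatTask(ctx context.Context, req *dfdaemonv1.StatTaskRequest) error { return nil }
func (s *stubDaemon) ImportTask(ctx context.Context, req *dfdaemonv1.ImportTaskRequest) error {
	return nil
}
func (s *stubDaemon) ExportTask(ctx context.Context, req *dfdaemonv1.ExportTaskRequest) error {
	return nil
}
func (s *stubDaemon) DeleteTask(ctx context.Context, req *dfdaemonv1.DeleteTaskRequest) error {
	return nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:65000") // address is illustrative
	if err != nil {
		log.Fatal(err)
	}
	// New wraps the implementation in the proxy and registers it on a grpc.Server.
	if err := server.New(&stubDaemon{}).Serve(lis); err != nil {
		log.Fatal(err)
	}
}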

View File

@@ -1,178 +0,0 @@
//
// Copyright 2022 The Dragonfly Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
// protoc v3.19.4
// source: pkg/rpc/errordetails/error_details.proto
package errordetails
import (
base "d7y.io/dragonfly/v2/pkg/rpc/base"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type SourceError struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Temporary bool `protobuf:"varint,1,opt,name=temporary,proto3" json:"temporary,omitempty"`
// source response metadata, eg: HTTP Status Code, HTTP Status, HTTP Header
Metadata *base.ExtendAttribute `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"`
}
func (x *SourceError) Reset() {
*x = SourceError{}
if protoimpl.UnsafeEnabled {
mi := &file_pkg_rpc_errordetails_error_details_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *SourceError) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SourceError) ProtoMessage() {}
func (x *SourceError) ProtoReflect() protoreflect.Message {
mi := &file_pkg_rpc_errordetails_error_details_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SourceError.ProtoReflect.Descriptor instead.
func (*SourceError) Descriptor() ([]byte, []int) {
return file_pkg_rpc_errordetails_error_details_proto_rawDescGZIP(), []int{0}
}
func (x *SourceError) GetTemporary() bool {
if x != nil {
return x.Temporary
}
return false
}
func (x *SourceError) GetMetadata() *base.ExtendAttribute {
if x != nil {
return x.Metadata
}
return nil
}
var File_pkg_rpc_errordetails_error_details_proto protoreflect.FileDescriptor
var file_pkg_rpc_errordetails_error_details_proto_rawDesc = []byte{
0x0a, 0x28, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x64,
0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74,
0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x65, 0x72, 0x72, 0x6f,
0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x17, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70,
0x63, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x22, 0x5e, 0x0a, 0x0b, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72,
0x12, 0x1c, 0x0a, 0x09, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20,
0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x12, 0x31,
0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x15, 0x2e, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74,
0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
0x61, 0x42, 0x2a, 0x5a, 0x28, 0x64, 0x37, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x64, 0x72, 0x61, 0x67,
0x6f, 0x6e, 0x66, 0x6c, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63,
0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_pkg_rpc_errordetails_error_details_proto_rawDescOnce sync.Once
file_pkg_rpc_errordetails_error_details_proto_rawDescData = file_pkg_rpc_errordetails_error_details_proto_rawDesc
)
func file_pkg_rpc_errordetails_error_details_proto_rawDescGZIP() []byte {
file_pkg_rpc_errordetails_error_details_proto_rawDescOnce.Do(func() {
file_pkg_rpc_errordetails_error_details_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_rpc_errordetails_error_details_proto_rawDescData)
})
return file_pkg_rpc_errordetails_error_details_proto_rawDescData
}
var file_pkg_rpc_errordetails_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_pkg_rpc_errordetails_error_details_proto_goTypes = []interface{}{
(*SourceError)(nil), // 0: errordetails.SourceError
(*base.ExtendAttribute)(nil), // 1: base.ExtendAttribute
}
var file_pkg_rpc_errordetails_error_details_proto_depIdxs = []int32{
1, // 0: errordetails.SourceError.metadata:type_name -> base.ExtendAttribute
1, // [1:1] is the sub-list for method output_type
1, // [1:1] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_pkg_rpc_errordetails_error_details_proto_init() }
func file_pkg_rpc_errordetails_error_details_proto_init() {
if File_pkg_rpc_errordetails_error_details_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_pkg_rpc_errordetails_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SourceError); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_pkg_rpc_errordetails_error_details_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_pkg_rpc_errordetails_error_details_proto_goTypes,
DependencyIndexes: file_pkg_rpc_errordetails_error_details_proto_depIdxs,
MessageInfos: file_pkg_rpc_errordetails_error_details_proto_msgTypes,
}.Build()
File_pkg_rpc_errordetails_error_details_proto = out.File
file_pkg_rpc_errordetails_error_details_proto_rawDesc = nil
file_pkg_rpc_errordetails_error_details_proto_goTypes = nil
file_pkg_rpc_errordetails_error_details_proto_depIdxs = nil
}

View File

@@ -1,111 +0,0 @@
// Code generated by protoc-gen-validate. DO NOT EDIT.
// source: pkg/rpc/errordetails/error_details.proto
package errordetails
import (
"bytes"
"errors"
"fmt"
"net"
"net/mail"
"net/url"
"regexp"
"strings"
"time"
"unicode/utf8"
"google.golang.org/protobuf/types/known/anypb"
)
// ensure the imports are used
var (
_ = bytes.MinRead
_ = errors.New("")
_ = fmt.Print
_ = utf8.UTFMax
_ = (*regexp.Regexp)(nil)
_ = (*strings.Reader)(nil)
_ = net.IPv4len
_ = time.Duration(0)
_ = (*url.URL)(nil)
_ = (*mail.Address)(nil)
_ = anypb.Any{}
)
// Validate checks the field values on SourceError with the rules defined in
// the proto definition for this message. If any rules are violated, an error
// is returned.
func (m *SourceError) Validate() error {
if m == nil {
return nil
}
// no validation rules for Temporary
if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok {
if err := v.Validate(); err != nil {
return SourceErrorValidationError{
field: "Metadata",
reason: "embedded message failed validation",
cause: err,
}
}
}
return nil
}
// SourceErrorValidationError is the validation error returned by
// SourceError.Validate if the designated constraints aren't met.
type SourceErrorValidationError struct {
field string
reason string
cause error
key bool
}
// Field function returns field value.
func (e SourceErrorValidationError) Field() string { return e.field }
// Reason function returns reason value.
func (e SourceErrorValidationError) Reason() string { return e.reason }
// Cause function returns cause value.
func (e SourceErrorValidationError) Cause() error { return e.cause }
// Key function returns key value.
func (e SourceErrorValidationError) Key() bool { return e.key }
// ErrorName returns error name.
func (e SourceErrorValidationError) ErrorName() string { return "SourceErrorValidationError" }
// Error satisfies the builtin error interface
func (e SourceErrorValidationError) Error() string {
cause := ""
if e.cause != nil {
cause = fmt.Sprintf(" | caused by: %v", e.cause)
}
key := ""
if e.key {
key = "key for "
}
return fmt.Sprintf(
"invalid %sSourceError.%s: %s%s",
key,
e.field,
e.reason,
cause)
}
var _ error = SourceErrorValidationError{}
var _ interface {
Field() string
Reason() string
Key() bool
Cause() error
ErrorName() string
} = SourceErrorValidationError{}

View File

@@ -1,30 +0,0 @@
/*
* Copyright 2022 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package errordetails;
import "pkg/rpc/base/base.proto";
option go_package = "d7y.io/dragonfly/v2/pkg/rpc/errordetails";
message SourceError {
bool temporary = 1;
// source response metadata, eg: HTTP Status Code, HTTP Status, HTTP Header
base.ExtendAttribute metadata = 2;
}
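
SourceError is the kind of message that normally rides along as a gRPC status detail, so callers can inspect the source response metadata and decide whether to retry. A hedged sketch of that pattern (the errordetails/v1 and common/v1 import paths under d7y.io/api are our assumption; this diff only shows the old local copy being deleted):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	commonv1 "d7y.io/api/pkg/apis/common/v1"             // assumed new home of ExtendAttribute
	errordetailsv1 "d7y.io/api/pkg/apis/errordetails/v1" // assumed new home of SourceError
)

func main() {
	// Wrap a back-to-source failure together with its response metadata.
	st := status.New(codes.Internal, "back-to-source failed")
	st, err := st.WithDetails(&errordetailsv1.SourceError{
		Temporary: true, // safe for the caller to retry
		Metadata:  &commonv1.ExtendAttribute{
			// e.g. HTTP status code, status, and header from the source response
		},
	})
	if err != nil {
		panic(err)
	}

	// Receivers unpack the detail and branch on Temporary.
	for _, d := range st.Details() {
		if se, ok := d.(*errordetailsv1.SourceError); ok {
			fmt.Println("temporary:", se.GetTemporary())
		}
	}
}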

View File

@@ -32,10 +32,11 @@ import (
 "google.golang.org/grpc/credentials/insecure"
 "google.golang.org/grpc/status"
+managerv1 "d7y.io/api/pkg/apis/manager/v1"
 logger "d7y.io/dragonfly/v2/internal/dflog"
 "d7y.io/dragonfly/v2/pkg/dfnet"
 "d7y.io/dragonfly/v2/pkg/reachable"
-"d7y.io/dragonfly/v2/pkg/rpc/manager"
 )
 const (
@@ -49,25 +50,25 @@ const (
 // Client is the interface for grpc client.
 type Client interface {
 // Update Seed peer configuration.
-UpdateSeedPeer(*manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error)
+UpdateSeedPeer(*managerv1.UpdateSeedPeerRequest) (*managerv1.SeedPeer, error)
 // Get Scheduler and Scheduler cluster configuration.
-GetScheduler(*manager.GetSchedulerRequest) (*manager.Scheduler, error)
+GetScheduler(*managerv1.GetSchedulerRequest) (*managerv1.Scheduler, error)
 // Update scheduler configuration.
-UpdateScheduler(*manager.UpdateSchedulerRequest) (*manager.Scheduler, error)
+UpdateScheduler(*managerv1.UpdateSchedulerRequest) (*managerv1.Scheduler, error)
 // List acitve schedulers configuration.
-ListSchedulers(*manager.ListSchedulersRequest) (*manager.ListSchedulersResponse, error)
+ListSchedulers(*managerv1.ListSchedulersRequest) (*managerv1.ListSchedulersResponse, error)
 // Get object storage configuration.
-GetObjectStorage(*manager.GetObjectStorageRequest) (*manager.ObjectStorage, error)
+GetObjectStorage(*managerv1.GetObjectStorageRequest) (*managerv1.ObjectStorage, error)
 // List buckets configuration.
-ListBuckets(*manager.ListBucketsRequest) (*manager.ListBucketsResponse, error)
+ListBuckets(*managerv1.ListBucketsRequest) (*managerv1.ListBucketsResponse, error)
 // KeepAlive with manager.
-KeepAlive(time.Duration, *manager.KeepAliveRequest)
+KeepAlive(time.Duration, *managerv1.KeepAliveRequest)
 // Close client connect.
 Close() error
@@ -75,7 +76,7 @@ type Client interface {
 // client provides manager grpc function.
 type client struct {
-manager.ManagerClient
+managerv1.ManagerClient
 conn *grpc.ClientConn
 }
@@ -103,7 +104,7 @@ func New(target string) (Client, error) {
 }
 return &client{
-ManagerClient: manager.NewManagerClient(conn),
+ManagerClient: managerv1.NewManagerClient(conn),
 conn: conn,
 }, nil
 }
@@ -123,7 +124,7 @@ func NewWithAddrs(netAddrs []dfnet.NetAddr) (Client, error) {
 }
 // Update SeedPeer configuration.
-func (c *client) UpdateSeedPeer(req *manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error) {
+func (c *client) UpdateSeedPeer(req *managerv1.UpdateSeedPeerRequest) (*managerv1.SeedPeer, error) {
 ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
 defer cancel()
@@ -131,7 +132,7 @@ func (c *client) UpdateSeedPeer(req *manager.UpdateSeedPeerRequest) (*manager.Se
 }
 // Get Scheduler and Scheduler cluster configuration.
-func (c *client) GetScheduler(req *manager.GetSchedulerRequest) (*manager.Scheduler, error) {
+func (c *client) GetScheduler(req *managerv1.GetSchedulerRequest) (*managerv1.Scheduler, error) {
 ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
 defer cancel()
@@ -139,7 +140,7 @@ func (c *client) GetScheduler(req *manager.GetSchedulerRequest) (*manager.Schedu
 }
 // Update scheduler configuration.
-func (c *client) UpdateScheduler(req *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) {
+func (c *client) UpdateScheduler(req *managerv1.UpdateSchedulerRequest) (*managerv1.Scheduler, error) {
 ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
 defer cancel()
@@ -147,7 +148,7 @@ func (c *client) UpdateScheduler(req *manager.UpdateSchedulerRequest) (*manager.
 }
 // List acitve schedulers configuration.
-func (c *client) ListSchedulers(req *manager.ListSchedulersRequest) (*manager.ListSchedulersResponse, error) {
+func (c *client) ListSchedulers(req *managerv1.ListSchedulersRequest) (*managerv1.ListSchedulersResponse, error) {
 ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
 defer cancel()
@@ -155,7 +156,7 @@ func (c *client) ListSchedulers(req *manager.ListSchedulersRequest) (*manager.Li
 }
 // Get object storage configuration.
-func (c *client) GetObjectStorage(req *manager.GetObjectStorageRequest) (*manager.ObjectStorage, error) {
+func (c *client) GetObjectStorage(req *managerv1.GetObjectStorageRequest) (*managerv1.ObjectStorage, error) {
 ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
 defer cancel()
@@ -163,7 +164,7 @@ func (c *client) GetObjectStorage(req *manager.GetObjectStorageRequest) (*manage
 }
 // List buckets configuration.
-func (c *client) ListBuckets(req *manager.ListBucketsRequest) (*manager.ListBucketsResponse, error) {
+func (c *client) ListBuckets(req *managerv1.ListBucketsRequest) (*managerv1.ListBucketsResponse, error) {
 ctx, cancel := context.WithTimeout(context.Background(), contextTimeout)
 defer cancel()
@@ -171,7 +172,7 @@ func (c *client) ListBuckets(req *manager.ListBucketsRequest) (*manager.ListBuck
 }
 // List acitve schedulers configuration.
-func (c *client) KeepAlive(interval time.Duration, keepalive *manager.KeepAliveRequest) {
+func (c *client) KeepAlive(interval time.Duration, keepalive *managerv1.KeepAliveRequest) {
 retry:
 ctx, cancel := context.WithCancel(context.Background())
 stream, err := c.ManagerClient.KeepAlive(ctx)
@@ -191,7 +192,7 @@ retry:
 for {
 select {
 case <-tick.C:
-if err := stream.Send(&manager.KeepAliveRequest{
+if err := stream.Send(&managerv1.KeepAliveRequest{
 SourceType: keepalive.SourceType,
 HostName: keepalive.HostName,
 Ip: keepalive.Ip,
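
From a consumer's point of view only the import changes: the wrapper is still constructed with New, and keep-alive still runs as a background loop that reconnects on failure (note the retry: label above). A small usage sketch (the target address, hostnames, and cluster IDs are placeholders):

package main

import (
	"log"
	"time"

	managerv1 "d7y.io/api/pkg/apis/manager/v1"

	managerclient "d7y.io/dragonfly/v2/pkg/rpc/manager/client"
)

func main() {
	client, err := managerclient.New("dragonfly-manager:65003") // placeholder target
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Fetch this scheduler's configuration from the manager.
	scheduler, err := client.GetScheduler(&managerv1.GetSchedulerRequest{
		SourceType:         managerv1.SourceType_SCHEDULER_SOURCE,
		HostName:           "scheduler-0", // placeholder
		SchedulerClusterId: 1,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("scheduler state: %s", scheduler.State)

	// KeepAlive blocks and retries internally, so run it in a goroutine.
	go client.KeepAlive(30*time.Second, &managerv1.KeepAliveRequest{
		SourceType: managerv1.SourceType_SCHEDULER_SOURCE,
		HostName:   "scheduler-0",
		ClusterId:  1,
	})

	select {}
}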

View File

@@ -8,7 +8,7 @@ import (
 reflect "reflect"
 time "time"
-manager "d7y.io/dragonfly/v2/pkg/rpc/manager"
+v1 "d7y.io/api/pkg/apis/manager/v1"
 gomock "github.com/golang/mock/gomock"
 )
@@ -50,10 +50,10 @@ func (mr *MockClientMockRecorder) Close() *gomock.Call {
 }
 // GetObjectStorage mocks base method.
-func (m *MockClient) GetObjectStorage(arg0 *manager.GetObjectStorageRequest) (*manager.ObjectStorage, error) {
+func (m *MockClient) GetObjectStorage(arg0 *v1.GetObjectStorageRequest) (*v1.ObjectStorage, error) {
 m.ctrl.T.Helper()
 ret := m.ctrl.Call(m, "GetObjectStorage", arg0)
-ret0, _ := ret[0].(*manager.ObjectStorage)
+ret0, _ := ret[0].(*v1.ObjectStorage)
 ret1, _ := ret[1].(error)
 return ret0, ret1
 }
@@ -65,10 +65,10 @@ func (mr *MockClientMockRecorder) GetObjectStorage(arg0 interface{}) *gomock.Cal
 }
 // GetScheduler mocks base method.
-func (m *MockClient) GetScheduler(arg0 *manager.GetSchedulerRequest) (*manager.Scheduler, error) {
+func (m *MockClient) GetScheduler(arg0 *v1.GetSchedulerRequest) (*v1.Scheduler, error) {
 m.ctrl.T.Helper()
 ret := m.ctrl.Call(m, "GetScheduler", arg0)
-ret0, _ := ret[0].(*manager.Scheduler)
+ret0, _ := ret[0].(*v1.Scheduler)
 ret1, _ := ret[1].(error)
 return ret0, ret1
 }
@@ -80,7 +80,7 @@ func (mr *MockClientMockRecorder) GetScheduler(arg0 interface{}) *gomock.Call {
 }
 // KeepAlive mocks base method.
-func (m *MockClient) KeepAlive(arg0 time.Duration, arg1 *manager.KeepAliveRequest) {
+func (m *MockClient) KeepAlive(arg0 time.Duration, arg1 *v1.KeepAliveRequest) {
 m.ctrl.T.Helper()
 m.ctrl.Call(m, "KeepAlive", arg0, arg1)
 }
@@ -92,10 +92,10 @@ func (mr *MockClientMockRecorder) KeepAlive(arg0, arg1 interface{}) *gomock.Call
 }
 // ListBuckets mocks base method.
-func (m *MockClient) ListBuckets(arg0 *manager.ListBucketsRequest) (*manager.ListBucketsResponse, error) {
+func (m *MockClient) ListBuckets(arg0 *v1.ListBucketsRequest) (*v1.ListBucketsResponse, error) {
 m.ctrl.T.Helper()
 ret := m.ctrl.Call(m, "ListBuckets", arg0)
-ret0, _ := ret[0].(*manager.ListBucketsResponse)
+ret0, _ := ret[0].(*v1.ListBucketsResponse)
 ret1, _ := ret[1].(error)
 return ret0, ret1
 }
@@ -107,10 +107,10 @@ func (mr *MockClientMockRecorder) ListBuckets(arg0 interface{}) *gomock.Call {
 }
 // ListSchedulers mocks base method.
-func (m *MockClient) ListSchedulers(arg0 *manager.ListSchedulersRequest) (*manager.ListSchedulersResponse, error) {
+func (m *MockClient) ListSchedulers(arg0 *v1.ListSchedulersRequest) (*v1.ListSchedulersResponse, error) {
 m.ctrl.T.Helper()
 ret := m.ctrl.Call(m, "ListSchedulers", arg0)
-ret0, _ := ret[0].(*manager.ListSchedulersResponse)
+ret0, _ := ret[0].(*v1.ListSchedulersResponse)
 ret1, _ := ret[1].(error)
 return ret0, ret1
 }
@@ -122,10 +122,10 @@ func (mr *MockClientMockRecorder) ListSchedulers(arg0 interface{}) *gomock.Call
 }
 // UpdateScheduler mocks base method.
-func (m *MockClient) UpdateScheduler(arg0 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) {
+func (m *MockClient) UpdateScheduler(arg0 *v1.UpdateSchedulerRequest) (*v1.Scheduler, error) {
 m.ctrl.T.Helper()
 ret := m.ctrl.Call(m, "UpdateScheduler", arg0)
-ret0, _ := ret[0].(*manager.Scheduler)
+ret0, _ := ret[0].(*v1.Scheduler)
 ret1, _ := ret[1].(error)
 return ret0, ret1
 }
@@ -137,10 +137,10 @@ func (mr *MockClientMockRecorder) UpdateScheduler(arg0 interface{}) *gomock.Call
 }
 // UpdateSeedPeer mocks base method.
-func (m *MockClient) UpdateSeedPeer(arg0 *manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error) {
+func (m *MockClient) UpdateSeedPeer(arg0 *v1.UpdateSeedPeerRequest) (*v1.SeedPeer, error) {
 m.ctrl.T.Helper()
 ret := m.ctrl.Call(m, "UpdateSeedPeer", arg0)
-ret0, _ := ret[0].(*manager.SeedPeer)
+ret0, _ := ret[0].(*v1.SeedPeer)
 ret1, _ := ret[1].(error)
 return ret0, ret1
 }

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,321 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package manager;
import "google/protobuf/empty.proto";
import "validate/validate.proto";
option go_package = "d7y.io/dragonfly/v2/pkg/rpc/manager";
// Request source type.
enum SourceType {
// Scheduler service.
SCHEDULER_SOURCE = 0;
// Peer service.
PEER_SOURCE = 1;
// SeedPeer service.
SEED_PEER_SOURCE = 2;
}
// SecurityGroup represents security group of cluster.
message SecurityGroup {
// Group id.
uint64 id = 1;
// Group name.
string name = 2;
// Group biography.
string bio = 3;
// Group domain.
string domain = 4;
// Group proxy domain.
string proxy_domain = 5;
}
// SeedPeerCluster represents cluster of seed peer.
message SeedPeerCluster {
// Cluster id.
uint64 id = 1;
// Cluster name.
string name = 2;
// Cluster biography.
string bio = 3;
// Cluster configuration.
bytes config = 4;
// Cluster scopes.
bytes scopes = 5;
// Security group to which the seed peer cluster belongs.
SecurityGroup security_group = 6;
}
// SeedPeer represents seed peer for network.
message SeedPeer {
// Seed peer id.
uint64 id = 1;
// Seed peer hostname.
string host_name = 2;
// Seed peer type.
string type = 3;
// Seed peer idc.
string idc = 5;
// Seed peer network topology.
string net_topology = 6;
// Seed peer location.
string location = 7;
// Seed peer ip.
string ip = 8;
// Seed peer grpc port.
int32 port = 9;
// Seed peer download port.
int32 download_port = 10;
// Seed peer state.
string state = 11;
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 12;
// Cluster to which the seed peer belongs.
SeedPeerCluster seed_peer_cluster = 13;
// Schedulers included in seed peer.
repeated Scheduler schedulers = 14;
// Seed peer object storage port.
int32 object_storage_port = 15;
}
// GetSeedPeerRequest represents request of GetSeedPeer.
message GetSeedPeerRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Seed peer hostname.
string host_name = 2 [(validate.rules).string.hostname = true];
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 3 [(validate.rules).uint64 = {gte: 1}];
// Seed peer ip.
string ip = 4 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// UpdateSeedPeerRequest represents request of UpdateSeedPeer.
message UpdateSeedPeerRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Seed peer hostname.
string host_name = 2 [(validate.rules).string.hostname = true];
// Seed peer type.
string type = 3 [(validate.rules).string = {in: ["super", "strong", "weak"]}];
// Seed peer idc.
string idc = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Seed peer network topology.
string net_topology = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Seed peer location.
string location = 7 [(validate.rules).string = {max_len: 1024, ignore_empty: true}];
// Seed peer ip.
string ip = 8 [(validate.rules).string = {ip: true}];
// Seed peer port.
int32 port = 9 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Seed peer download port.
int32 download_port = 10 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// ID of the cluster to which the seed peer belongs.
uint64 seed_peer_cluster_id = 11 [(validate.rules).uint64 = {gte: 1}];
// Seed peer object storage port.
int32 object_storage_port = 12 [(validate.rules).int32 = {gte: 1024, lt: 65535, ignore_empty: true}];
}
// SeedPeerCluster represents cluster of scheduler.
message SchedulerCluster {
// Cluster id.
uint64 id = 1;
// Cluster name.
string name = 2;
// Cluster biography.
string bio = 3;
// Cluster config.
bytes config = 4;
// Cluster client config.
bytes client_config = 5;
// Cluster scopes.
bytes scopes = 6;
// Security group to which the scheduler cluster belongs.
SecurityGroup security_group = 7;
}
// SeedPeerCluster represents scheduler for network.
message Scheduler {
// Scheduler id.
uint64 id = 1;
// Scheduler hostname.
string host_name = 2;
// Deprecated: Do not use.
string vips = 3;
// Scheduler idc.
string idc = 4;
// Scheduler location.
string location = 5;
// Deprecated: Use net_topology instead.
bytes net_config = 6;
// Scheduler ip.
string ip = 7;
// Scheduler grpc port.
int32 port = 8;
// Scheduler state.
string state = 9;
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 10;
// Cluster to which the scheduler belongs.
SchedulerCluster scheduler_cluster = 11;
// Seed peers to which the scheduler belongs.
repeated SeedPeer seed_peers = 13;
// Scheduler network topology.
string net_topology = 14;
}
// GetSchedulerRequest represents request of GetScheduler.
message GetSchedulerRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Scheduler hostname.
string host_name = 2 [(validate.rules).string.hostname = true];
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 3 [(validate.rules).uint64 = {gte: 1}];
// Scheduler ip.
string ip = 4 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// UpdateSchedulerRequest represents request of UpdateScheduler.
message UpdateSchedulerRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Scheduler hostname.
string host_name = 2 [(validate.rules).string.hostname = true];
// ID of the cluster to which the scheduler belongs.
uint64 scheduler_cluster_id = 3 [(validate.rules).uint64 = {gte: 1}];
// Deprecated: Do not use.
string vips = 4 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Scheduler idc.
string idc = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Scheduler location.
string location = 6 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Deprecated: Use net_topology instead.
bytes net_config = 7 [(validate.rules).bytes = {min_len: 1, ignore_empty: true}];
// Scheduler ip.
string ip = 8 [(validate.rules).string = {ip: true}];
// Scheduler port.
int32 port = 9 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Scheduler network topology.
string net_topology = 10 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
}
// ListSchedulersRequest represents request of ListSchedulers.
message ListSchedulersRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Source service hostname.
string host_name = 2 [(validate.rules).string.hostname = true];
// Source service ip.
string ip = 3 [(validate.rules).string.ip = true];
// Source service host information.
map<string, string> host_info = 5 [(validate.rules).map.ignore_empty = true];
}
// ListSchedulersResponse represents response of ListSchedulers.
message ListSchedulersResponse {
// Schedulers to which the source service belongs.
repeated Scheduler schedulers = 1;
}
// ObjectStorage represents config of object storage.
message ObjectStorage {
// Object storage name of type.
string name = 1 [(validate.rules).string = {min_len: 1, max_len: 1024}];
// Storage region.
string region = 2 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Datacenter endpoint.
string endpoint = 3 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Access key id.
string access_key = 4 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
// Access key secret.
string secret_key = 5 [(validate.rules).string = {min_len: 1, max_len: 1024, ignore_empty: true}];
}
// GetObjectStorageRequest represents request of GetObjectStorage.
message GetObjectStorageRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Source service hostname.
string host_name = 2 [(validate.rules).string.hostname = true];
// Source service ip.
string ip = 3 [(validate.rules).string.ip = true];
}
// Bucket represents config of bucket.
message Bucket {
// Bucket name.
string name = 1 [(validate.rules).string = {min_len: 1, max_len: 1024}];
}
// ListSchedulersRequest represents request of ListBuckets.
message ListBucketsRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Source service hostname.
string host_name = 2 [(validate.rules).string.hostname = true];
// Source service ip.
string ip = 3 [(validate.rules).string.ip = true];
}
// ListBucketsResponse represents response of ListBuckets.
message ListBucketsResponse {
// Bucket configs.
repeated Bucket buckets = 1;
}
// KeepAliveRequest represents request of KeepAlive.
message KeepAliveRequest {
// Request source type.
SourceType source_type = 1 [(validate.rules).enum.defined_only = true];
// Source service hostname.
string host_name = 2 [(validate.rules).string.hostname = true];
// ID of the cluster to which the source service belongs.
uint64 cluster_id = 3 [(validate.rules).uint64 = {gte: 1}];
// Source service ip.
string ip = 4 [(validate.rules).string = {ip: true, ignore_empty: true}];
}
// Manager RPC Service.
service Manager {
// Get SeedPeer and SeedPeer cluster configuration.
rpc GetSeedPeer(GetSeedPeerRequest) returns(SeedPeer);
// Update SeedPeer configuration.
rpc UpdateSeedPeer(UpdateSeedPeerRequest) returns(SeedPeer);
// Get Scheduler and Scheduler cluster configuration.
rpc GetScheduler(GetSchedulerRequest)returns(Scheduler);
// Update scheduler configuration.
rpc UpdateScheduler(UpdateSchedulerRequest) returns(Scheduler);
// List acitve schedulers configuration.
rpc ListSchedulers(ListSchedulersRequest)returns(ListSchedulersResponse);
// Get ObjectStorage configuration.
rpc GetObjectStorage(GetObjectStorageRequest) returns(ObjectStorage);
// List buckets configuration.
rpc ListBuckets(ListBucketsRequest)returns(ListBucketsResponse);
// KeepAlive with manager.
rpc KeepAlive(stream KeepAliveRequest)returns(google.protobuf.Empty);
}

View File

@@ -1,612 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: manager/manager.pb.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
manager "d7y.io/dragonfly/v2/pkg/rpc/manager"
gomock "github.com/golang/mock/gomock"
grpc "google.golang.org/grpc"
metadata "google.golang.org/grpc/metadata"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// MockManagerClient is a mock of ManagerClient interface.
type MockManagerClient struct {
ctrl *gomock.Controller
recorder *MockManagerClientMockRecorder
}
// MockManagerClientMockRecorder is the mock recorder for MockManagerClient.
type MockManagerClientMockRecorder struct {
mock *MockManagerClient
}
// NewMockManagerClient creates a new mock instance.
func NewMockManagerClient(ctrl *gomock.Controller) *MockManagerClient {
mock := &MockManagerClient{ctrl: ctrl}
mock.recorder = &MockManagerClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManagerClient) EXPECT() *MockManagerClientMockRecorder {
return m.recorder
}
// GetObjectStorage mocks base method.
func (m *MockManagerClient) GetObjectStorage(ctx context.Context, in *manager.GetObjectStorageRequest, opts ...grpc.CallOption) (*manager.ObjectStorage, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetObjectStorage", varargs...)
ret0, _ := ret[0].(*manager.ObjectStorage)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetObjectStorage indicates an expected call of GetObjectStorage.
func (mr *MockManagerClientMockRecorder) GetObjectStorage(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectStorage", reflect.TypeOf((*MockManagerClient)(nil).GetObjectStorage), varargs...)
}
// GetScheduler mocks base method.
func (m *MockManagerClient) GetScheduler(ctx context.Context, in *manager.GetSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetScheduler", varargs...)
ret0, _ := ret[0].(*manager.Scheduler)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetScheduler indicates an expected call of GetScheduler.
func (mr *MockManagerClientMockRecorder) GetScheduler(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScheduler", reflect.TypeOf((*MockManagerClient)(nil).GetScheduler), varargs...)
}
// GetSeedPeer mocks base method.
func (m *MockManagerClient) GetSeedPeer(ctx context.Context, in *manager.GetSeedPeerRequest, opts ...grpc.CallOption) (*manager.SeedPeer, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "GetSeedPeer", varargs...)
ret0, _ := ret[0].(*manager.SeedPeer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetSeedPeer indicates an expected call of GetSeedPeer.
func (mr *MockManagerClientMockRecorder) GetSeedPeer(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSeedPeer", reflect.TypeOf((*MockManagerClient)(nil).GetSeedPeer), varargs...)
}
// KeepAlive mocks base method.
func (m *MockManagerClient) KeepAlive(ctx context.Context, opts ...grpc.CallOption) (manager.Manager_KeepAliveClient, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "KeepAlive", varargs...)
ret0, _ := ret[0].(manager.Manager_KeepAliveClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// KeepAlive indicates an expected call of KeepAlive.
func (mr *MockManagerClientMockRecorder) KeepAlive(ctx interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockManagerClient)(nil).KeepAlive), varargs...)
}
// ListBuckets mocks base method.
func (m *MockManagerClient) ListBuckets(ctx context.Context, in *manager.ListBucketsRequest, opts ...grpc.CallOption) (*manager.ListBucketsResponse, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListBuckets", varargs...)
ret0, _ := ret[0].(*manager.ListBucketsResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListBuckets indicates an expected call of ListBuckets.
func (mr *MockManagerClientMockRecorder) ListBuckets(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBuckets", reflect.TypeOf((*MockManagerClient)(nil).ListBuckets), varargs...)
}
// ListSchedulers mocks base method.
func (m *MockManagerClient) ListSchedulers(ctx context.Context, in *manager.ListSchedulersRequest, opts ...grpc.CallOption) (*manager.ListSchedulersResponse, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ListSchedulers", varargs...)
ret0, _ := ret[0].(*manager.ListSchedulersResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListSchedulers indicates an expected call of ListSchedulers.
func (mr *MockManagerClientMockRecorder) ListSchedulers(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerClient)(nil).ListSchedulers), varargs...)
}
// UpdateScheduler mocks base method.
func (m *MockManagerClient) UpdateScheduler(ctx context.Context, in *manager.UpdateSchedulerRequest, opts ...grpc.CallOption) (*manager.Scheduler, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UpdateScheduler", varargs...)
ret0, _ := ret[0].(*manager.Scheduler)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateScheduler indicates an expected call of UpdateScheduler.
func (mr *MockManagerClientMockRecorder) UpdateScheduler(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateScheduler", reflect.TypeOf((*MockManagerClient)(nil).UpdateScheduler), varargs...)
}
// UpdateSeedPeer mocks base method.
func (m *MockManagerClient) UpdateSeedPeer(ctx context.Context, in *manager.UpdateSeedPeerRequest, opts ...grpc.CallOption) (*manager.SeedPeer, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UpdateSeedPeer", varargs...)
ret0, _ := ret[0].(*manager.SeedPeer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateSeedPeer indicates an expected call of UpdateSeedPeer.
func (mr *MockManagerClientMockRecorder) UpdateSeedPeer(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSeedPeer", reflect.TypeOf((*MockManagerClient)(nil).UpdateSeedPeer), varargs...)
}
// MockManager_KeepAliveClient is a mock of Manager_KeepAliveClient interface.
type MockManager_KeepAliveClient struct {
ctrl *gomock.Controller
recorder *MockManager_KeepAliveClientMockRecorder
}
// MockManager_KeepAliveClientMockRecorder is the mock recorder for MockManager_KeepAliveClient.
type MockManager_KeepAliveClientMockRecorder struct {
mock *MockManager_KeepAliveClient
}
// NewMockManager_KeepAliveClient creates a new mock instance.
func NewMockManager_KeepAliveClient(ctrl *gomock.Controller) *MockManager_KeepAliveClient {
mock := &MockManager_KeepAliveClient{ctrl: ctrl}
mock.recorder = &MockManager_KeepAliveClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManager_KeepAliveClient) EXPECT() *MockManager_KeepAliveClientMockRecorder {
return m.recorder
}
// CloseAndRecv mocks base method.
func (m *MockManager_KeepAliveClient) CloseAndRecv() (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CloseAndRecv")
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// CloseAndRecv indicates an expected call of CloseAndRecv.
func (mr *MockManager_KeepAliveClientMockRecorder) CloseAndRecv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseAndRecv", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).CloseAndRecv))
}
// CloseSend mocks base method.
func (m *MockManager_KeepAliveClient) CloseSend() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CloseSend")
ret0, _ := ret[0].(error)
return ret0
}
// CloseSend indicates an expected call of CloseSend.
func (mr *MockManager_KeepAliveClientMockRecorder) CloseSend() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).CloseSend))
}
// Context mocks base method.
func (m *MockManager_KeepAliveClient) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockManager_KeepAliveClientMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Context))
}
// Header mocks base method.
func (m *MockManager_KeepAliveClient) Header() (metadata.MD, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Header")
ret0, _ := ret[0].(metadata.MD)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Header indicates an expected call of Header.
func (mr *MockManager_KeepAliveClientMockRecorder) Header() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Header))
}
// RecvMsg mocks base method.
func (m_2 *MockManager_KeepAliveClient) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockManager_KeepAliveClientMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockManager_KeepAliveClient) Send(arg0 *manager.KeepAliveRequest) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Send indicates an expected call of Send.
func (mr *MockManager_KeepAliveClientMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Send), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockManager_KeepAliveClient) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockManager_KeepAliveClientMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).SendMsg), m)
}
// Trailer mocks base method.
func (m *MockManager_KeepAliveClient) Trailer() metadata.MD {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Trailer")
ret0, _ := ret[0].(metadata.MD)
return ret0
}
// Trailer indicates an expected call of Trailer.
func (mr *MockManager_KeepAliveClientMockRecorder) Trailer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockManager_KeepAliveClient)(nil).Trailer))
}
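
The keep-alive client stream mock is wired the same way. A sketch, reusing the gomock controller from the test above; Send is allowed any number of times before the stream is closed, and emptypb is google.golang.org/protobuf/types/known/emptypb, the same package the mock itself returns:

stream := mocks.NewMockManager_KeepAliveClient(ctrl)
stream.EXPECT().Send(gomock.Any()).Return(nil).AnyTimes()
stream.EXPECT().CloseAndRecv().Return(&emptypb.Empty{}, nil)

// Code under test can now treat the mock as a live client stream.
if err := stream.Send(&manager.KeepAliveRequest{}); err != nil {
	t.Fatalf("Send: %v", err)
}
if _, err := stream.CloseAndRecv(); err != nil {
	t.Fatalf("CloseAndRecv: %v", err)
}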
// MockManagerServer is a mock of ManagerServer interface.
type MockManagerServer struct {
ctrl *gomock.Controller
recorder *MockManagerServerMockRecorder
}
// MockManagerServerMockRecorder is the mock recorder for MockManagerServer.
type MockManagerServerMockRecorder struct {
mock *MockManagerServer
}
// NewMockManagerServer creates a new mock instance.
func NewMockManagerServer(ctrl *gomock.Controller) *MockManagerServer {
mock := &MockManagerServer{ctrl: ctrl}
mock.recorder = &MockManagerServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManagerServer) EXPECT() *MockManagerServerMockRecorder {
return m.recorder
}
// GetObjectStorage mocks base method.
func (m *MockManagerServer) GetObjectStorage(arg0 context.Context, arg1 *manager.GetObjectStorageRequest) (*manager.ObjectStorage, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetObjectStorage", arg0, arg1)
ret0, _ := ret[0].(*manager.ObjectStorage)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetObjectStorage indicates an expected call of GetObjectStorage.
func (mr *MockManagerServerMockRecorder) GetObjectStorage(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectStorage", reflect.TypeOf((*MockManagerServer)(nil).GetObjectStorage), arg0, arg1)
}
// GetScheduler mocks base method.
func (m *MockManagerServer) GetScheduler(arg0 context.Context, arg1 *manager.GetSchedulerRequest) (*manager.Scheduler, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetScheduler", arg0, arg1)
ret0, _ := ret[0].(*manager.Scheduler)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetScheduler indicates an expected call of GetScheduler.
func (mr *MockManagerServerMockRecorder) GetScheduler(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetScheduler", reflect.TypeOf((*MockManagerServer)(nil).GetScheduler), arg0, arg1)
}
// GetSeedPeer mocks base method.
func (m *MockManagerServer) GetSeedPeer(arg0 context.Context, arg1 *manager.GetSeedPeerRequest) (*manager.SeedPeer, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetSeedPeer", arg0, arg1)
ret0, _ := ret[0].(*manager.SeedPeer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetSeedPeer indicates an expected call of GetSeedPeer.
func (mr *MockManagerServerMockRecorder) GetSeedPeer(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSeedPeer", reflect.TypeOf((*MockManagerServer)(nil).GetSeedPeer), arg0, arg1)
}
// KeepAlive mocks base method.
func (m *MockManagerServer) KeepAlive(arg0 manager.Manager_KeepAliveServer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "KeepAlive", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// KeepAlive indicates an expected call of KeepAlive.
func (mr *MockManagerServerMockRecorder) KeepAlive(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KeepAlive", reflect.TypeOf((*MockManagerServer)(nil).KeepAlive), arg0)
}
// ListBuckets mocks base method.
func (m *MockManagerServer) ListBuckets(arg0 context.Context, arg1 *manager.ListBucketsRequest) (*manager.ListBucketsResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListBuckets", arg0, arg1)
ret0, _ := ret[0].(*manager.ListBucketsResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListBuckets indicates an expected call of ListBuckets.
func (mr *MockManagerServerMockRecorder) ListBuckets(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBuckets", reflect.TypeOf((*MockManagerServer)(nil).ListBuckets), arg0, arg1)
}
// ListSchedulers mocks base method.
func (m *MockManagerServer) ListSchedulers(arg0 context.Context, arg1 *manager.ListSchedulersRequest) (*manager.ListSchedulersResponse, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListSchedulers", arg0, arg1)
ret0, _ := ret[0].(*manager.ListSchedulersResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ListSchedulers indicates an expected call of ListSchedulers.
func (mr *MockManagerServerMockRecorder) ListSchedulers(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSchedulers", reflect.TypeOf((*MockManagerServer)(nil).ListSchedulers), arg0, arg1)
}
// UpdateScheduler mocks base method.
func (m *MockManagerServer) UpdateScheduler(arg0 context.Context, arg1 *manager.UpdateSchedulerRequest) (*manager.Scheduler, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateScheduler", arg0, arg1)
ret0, _ := ret[0].(*manager.Scheduler)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateScheduler indicates an expected call of UpdateScheduler.
func (mr *MockManagerServerMockRecorder) UpdateScheduler(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateScheduler", reflect.TypeOf((*MockManagerServer)(nil).UpdateScheduler), arg0, arg1)
}
// UpdateSeedPeer mocks base method.
func (m *MockManagerServer) UpdateSeedPeer(arg0 context.Context, arg1 *manager.UpdateSeedPeerRequest) (*manager.SeedPeer, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "UpdateSeedPeer", arg0, arg1)
ret0, _ := ret[0].(*manager.SeedPeer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpdateSeedPeer indicates an expected call of UpdateSeedPeer.
func (mr *MockManagerServerMockRecorder) UpdateSeedPeer(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSeedPeer", reflect.TypeOf((*MockManagerServer)(nil).UpdateSeedPeer), arg0, arg1)
}
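
The server-side mock supports the mirror-image test: stubbing handler results instead of client calls. A sketch with placeholder response values, again assuming the gomock controller setup from the earlier test:

srv := mocks.NewMockManagerServer(ctrl)
// Stub ListSchedulers with an empty response.
srv.EXPECT().
	ListSchedulers(gomock.Any(), gomock.Any()).
	Return(&manager.ListSchedulersResponse{}, nil)

resp, err := srv.ListSchedulers(context.Background(), &manager.ListSchedulersRequest{})
if err != nil || resp == nil {
	t.Fatal("stubbed ListSchedulers should succeed")
}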
// MockManager_KeepAliveServer is a mock of Manager_KeepAliveServer interface.
type MockManager_KeepAliveServer struct {
ctrl *gomock.Controller
recorder *MockManager_KeepAliveServerMockRecorder
}
// MockManager_KeepAliveServerMockRecorder is the mock recorder for MockManager_KeepAliveServer.
type MockManager_KeepAliveServerMockRecorder struct {
mock *MockManager_KeepAliveServer
}
// NewMockManager_KeepAliveServer creates a new mock instance.
func NewMockManager_KeepAliveServer(ctrl *gomock.Controller) *MockManager_KeepAliveServer {
mock := &MockManager_KeepAliveServer{ctrl: ctrl}
mock.recorder = &MockManager_KeepAliveServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManager_KeepAliveServer) EXPECT() *MockManager_KeepAliveServerMockRecorder {
return m.recorder
}
// Context mocks base method.
func (m *MockManager_KeepAliveServer) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockManager_KeepAliveServerMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).Context))
}
// Recv mocks base method.
func (m *MockManager_KeepAliveServer) Recv() (*manager.KeepAliveRequest, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*manager.KeepAliveRequest)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Recv indicates an expected call of Recv.
func (mr *MockManager_KeepAliveServerMockRecorder) Recv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).Recv))
}
// RecvMsg mocks base method.
func (m_2 *MockManager_KeepAliveServer) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockManager_KeepAliveServerMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).RecvMsg), m)
}
// SendAndClose mocks base method.
func (m *MockManager_KeepAliveServer) SendAndClose(arg0 *emptypb.Empty) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SendAndClose", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SendAndClose indicates an expected call of SendAndClose.
func (mr *MockManager_KeepAliveServerMockRecorder) SendAndClose(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAndClose", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SendAndClose), arg0)
}
// SendHeader mocks base method.
func (m *MockManager_KeepAliveServer) SendHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SendHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockManager_KeepAliveServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockManager_KeepAliveServer) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockManager_KeepAliveServerMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SendMsg), m)
}
// SetHeader mocks base method.
func (m *MockManager_KeepAliveServer) SetHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockManager_KeepAliveServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SetHeader), arg0)
}
// SetTrailer mocks base method.
func (m *MockManager_KeepAliveServer) SetTrailer(arg0 metadata.MD) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "SetTrailer", arg0)
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockManager_KeepAliveServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockManager_KeepAliveServer)(nil).SetTrailer), arg0)
}
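
A handler test can script the whole client-streaming exchange with this mock: Recv yields one request and then io.EOF, after which the handler is expected to call SendAndClose. A sketch, where io is the standard library package and managerServer is a placeholder for the implementation under test:

stream := mocks.NewMockManager_KeepAliveServer(ctrl)
gomock.InOrder(
	stream.EXPECT().Recv().Return(&manager.KeepAliveRequest{}, nil),
	stream.EXPECT().Recv().Return(nil, io.EOF),
)
// The handler should drain the stream and close it exactly once.
stream.EXPECT().SendAndClose(gomock.Any()).Return(nil)

if err := managerServer.KeepAlive(stream); err != nil {
	t.Fatalf("KeepAlive: %v", err)
}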

View File

@ -25,32 +25,33 @@ import (
 	"google.golang.org/grpc"
+	commonv1 "d7y.io/api/pkg/apis/common/v1"
+	schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
 	logger "d7y.io/dragonfly/v2/internal/dflog"
 	"d7y.io/dragonfly/v2/pkg/dfnet"
 	"d7y.io/dragonfly/v2/pkg/rpc"
-	"d7y.io/dragonfly/v2/pkg/rpc/base"
-	"d7y.io/dragonfly/v2/pkg/rpc/base/common"
-	"d7y.io/dragonfly/v2/pkg/rpc/scheduler"
+	"d7y.io/dragonfly/v2/pkg/rpc/common"
 )
 // NewBeginOfPiece creates begin of piece.
-func NewBeginOfPiece(taskID, peerID string) *scheduler.PieceResult {
-	return &scheduler.PieceResult{
+func NewBeginOfPiece(taskID, peerID string) *schedulerv1.PieceResult {
+	return &schedulerv1.PieceResult{
 		TaskId: taskID,
 		SrcPid: peerID,
-		PieceInfo: &base.PieceInfo{
+		PieceInfo: &commonv1.PieceInfo{
 			PieceNum: common.BeginOfPiece,
 		},
 	}
 }
 // NewBeginOfPiece creates end of piece.
-func NewEndOfPiece(taskID, peerID string, finishedCount int32) *scheduler.PieceResult {
-	return &scheduler.PieceResult{
+func NewEndOfPiece(taskID, peerID string, finishedCount int32) *schedulerv1.PieceResult {
+	return &schedulerv1.PieceResult{
 		TaskId: taskID,
 		SrcPid: peerID,
 		FinishedCount: finishedCount,
-		PieceInfo: &base.PieceInfo{
+		PieceInfo: &commonv1.PieceInfo{
 			PieceNum: common.EndOfPiece,
 		},
 	}
@ -74,22 +75,22 @@ func GetClientByAddr(addrs []dfnet.NetAddr, opts ...grpc.DialOption) (Client, er
 // Client is the interface for grpc client.
 type Client interface {
 	// RegisterPeerTask registers a peer into task.
-	RegisterPeerTask(context.Context, *scheduler.PeerTaskRequest, ...grpc.CallOption) (*scheduler.RegisterResult, error)
+	RegisterPeerTask(context.Context, *schedulerv1.PeerTaskRequest, ...grpc.CallOption) (*schedulerv1.RegisterResult, error)
 	// ReportPieceResult reports piece results and receives peer packets.
-	ReportPieceResult(context.Context, *scheduler.PeerTaskRequest, ...grpc.CallOption) (scheduler.Scheduler_ReportPieceResultClient, error)
+	ReportPieceResult(context.Context, *schedulerv1.PeerTaskRequest, ...grpc.CallOption) (schedulerv1.Scheduler_ReportPieceResultClient, error)
 	// ReportPeerResult reports downloading result for the peer.
-	ReportPeerResult(context.Context, *scheduler.PeerResult, ...grpc.CallOption) error
+	ReportPeerResult(context.Context, *schedulerv1.PeerResult, ...grpc.CallOption) error
 	// LeaveTask makes the peer leaving from task.
-	LeaveTask(context.Context, *scheduler.PeerTarget, ...grpc.CallOption) error
+	LeaveTask(context.Context, *schedulerv1.PeerTarget, ...grpc.CallOption) error
 	// Checks if any peer has the given task.
-	StatTask(context.Context, *scheduler.StatTaskRequest, ...grpc.CallOption) (*scheduler.Task, error)
+	StatTask(context.Context, *schedulerv1.StatTaskRequest, ...grpc.CallOption) (*schedulerv1.Task, error)
 	// A peer announces that it has the announced task to other peers.
-	AnnounceTask(context.Context, *scheduler.AnnounceTaskRequest, ...grpc.CallOption) error
+	AnnounceTask(context.Context, *schedulerv1.AnnounceTaskRequest, ...grpc.CallOption) error
 	// Update grpc addresses.
 	UpdateState([]dfnet.NetAddr)
@ -107,17 +108,17 @@ type client struct {
 }
 // getClient gets scheduler client with hashkey.
-func (sc *client) getClient(key string, stick bool) (scheduler.SchedulerClient, string, error) {
+func (sc *client) getClient(key string, stick bool) (schedulerv1.SchedulerClient, string, error) {
 	clientConn, err := sc.Connection.GetClientConn(key, stick)
 	if err != nil {
 		return nil, "", err
 	}
-	return scheduler.NewSchedulerClient(clientConn), clientConn.Target(), nil
+	return schedulerv1.NewSchedulerClient(clientConn), clientConn.Target(), nil
 }
 // RegisterPeerTask registers a peer into task.
-func (sc *client) RegisterPeerTask(ctx context.Context, req *scheduler.PeerTaskRequest, opts ...grpc.CallOption) (*scheduler.RegisterResult, error) {
+func (sc *client) RegisterPeerTask(ctx context.Context, req *schedulerv1.PeerTaskRequest, opts ...grpc.CallOption) (*schedulerv1.RegisterResult, error) {
 	// Generate task id.
 	client, target, err := sc.getClient(req.TaskId, false)
 	if err != nil {
@ -134,7 +135,7 @@ func (sc *client) RegisterPeerTask(ctx context.Context, req *scheduler.PeerTaskR
 }
 // ReportPieceResult reports piece results and receives peer packets.
-func (sc *client) ReportPieceResult(ctx context.Context, req *scheduler.PeerTaskRequest, opts ...grpc.CallOption) (scheduler.Scheduler_ReportPieceResultClient, error) {
+func (sc *client) ReportPieceResult(ctx context.Context, req *schedulerv1.PeerTaskRequest, opts ...grpc.CallOption) (schedulerv1.Scheduler_ReportPieceResultClient, error) {
 	client, target, err := sc.getClient(req.TaskId, false)
 	if err != nil {
 		return nil, err
@ -150,7 +151,7 @@ func (sc *client) ReportPieceResult(ctx context.Context, req *scheduler.PeerTask
 }
 // ReportPeerResult reports downloading result for the peer.
-func (sc *client) ReportPeerResult(ctx context.Context, req *scheduler.PeerResult, opts ...grpc.CallOption) error {
+func (sc *client) ReportPeerResult(ctx context.Context, req *schedulerv1.PeerResult, opts ...grpc.CallOption) error {
 	client, target, err := sc.getClient(req.TaskId, false)
 	if err != nil {
 		return err
@ -165,7 +166,7 @@ func (sc *client) ReportPeerResult(ctx context.Context, req *scheduler.PeerResul
 }
 // LeaveTask makes the peer leaving from task.
-func (sc *client) LeaveTask(ctx context.Context, req *scheduler.PeerTarget, opts ...grpc.CallOption) error {
+func (sc *client) LeaveTask(ctx context.Context, req *schedulerv1.PeerTarget, opts ...grpc.CallOption) error {
 	client, target, err := sc.getClient(req.TaskId, false)
 	if err != nil {
 		return err
@ -180,7 +181,7 @@ func (sc *client) LeaveTask(ctx context.Context, req *scheduler.PeerTarget, opts
 }
 // Checks if any peer has the given task.
-func (sc *client) StatTask(ctx context.Context, req *scheduler.StatTaskRequest, opts ...grpc.CallOption) (*scheduler.Task, error) {
+func (sc *client) StatTask(ctx context.Context, req *schedulerv1.StatTaskRequest, opts ...grpc.CallOption) (*schedulerv1.Task, error) {
 	client, target, err := sc.getClient(req.TaskId, false)
 	if err != nil {
 		return nil, err
@ -196,7 +197,7 @@ func (sc *client) StatTask(ctx context.Context, req *scheduler.StatTaskRequest,
 }
 // A peer announces that it has the announced task to other peers.
-func (sc *client) AnnounceTask(ctx context.Context, req *scheduler.AnnounceTaskRequest, opts ...grpc.CallOption) error {
+func (sc *client) AnnounceTask(ctx context.Context, req *schedulerv1.AnnounceTaskRequest, opts ...grpc.CallOption) error {
 	client, target, err := sc.getClient(req.TaskId, false)
 	if err != nil {
 		return err
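
This file's hunks amount to a pure import migration: the request and response shapes are unchanged, only sourced from d7y.io/api. A sketch of a registration call after the change, assuming commonv1.UrlMeta mirrors the old base.UrlMeta and with placeholder addresses and ids:

// GetClientByAddr is the constructor visible in the hunk header above.
sc, err := GetClientByAddr([]dfnet.NetAddr{{Addr: "scheduler.example.com:8002"}})
if err != nil {
	return err
}

result, err := sc.RegisterPeerTask(context.Background(), &schedulerv1.PeerTaskRequest{
	Url:     "https://example.com/file.tar.gz",
	UrlMeta: &commonv1.UrlMeta{},
	PeerId:  "example-peer-id",
	TaskId:  "example-task-id",
})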

View File

@ -8,8 +8,8 @@ import (
 	context "context"
 	reflect "reflect"
+	v1 "d7y.io/api/pkg/apis/scheduler/v1"
 	dfnet "d7y.io/dragonfly/v2/pkg/dfnet"
-	scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler"
 	gomock "github.com/golang/mock/gomock"
 	grpc "google.golang.org/grpc"
 )
@ -38,7 +38,7 @@ func (m *MockClient) EXPECT() *MockClientMockRecorder {
 }
 // AnnounceTask mocks base method.
-func (m *MockClient) AnnounceTask(arg0 context.Context, arg1 *scheduler.AnnounceTaskRequest, arg2 ...grpc.CallOption) error {
+func (m *MockClient) AnnounceTask(arg0 context.Context, arg1 *v1.AnnounceTaskRequest, arg2 ...grpc.CallOption) error {
 	m.ctrl.T.Helper()
 	varargs := []interface{}{arg0, arg1}
 	for _, a := range arg2 {
@ -85,7 +85,7 @@ func (mr *MockClientMockRecorder) GetState() *gomock.Call {
 }
 // LeaveTask mocks base method.
-func (m *MockClient) LeaveTask(arg0 context.Context, arg1 *scheduler.PeerTarget, arg2 ...grpc.CallOption) error {
+func (m *MockClient) LeaveTask(arg0 context.Context, arg1 *v1.PeerTarget, arg2 ...grpc.CallOption) error {
 	m.ctrl.T.Helper()
 	varargs := []interface{}{arg0, arg1}
 	for _, a := range arg2 {
@ -104,14 +104,14 @@ func (mr *MockClientMockRecorder) LeaveTask(arg0, arg1 interface{}, arg2 ...inte
 }
 // RegisterPeerTask mocks base method.
-func (m *MockClient) RegisterPeerTask(arg0 context.Context, arg1 *scheduler.PeerTaskRequest, arg2 ...grpc.CallOption) (*scheduler.RegisterResult, error) {
+func (m *MockClient) RegisterPeerTask(arg0 context.Context, arg1 *v1.PeerTaskRequest, arg2 ...grpc.CallOption) (*v1.RegisterResult, error) {
 	m.ctrl.T.Helper()
 	varargs := []interface{}{arg0, arg1}
 	for _, a := range arg2 {
 		varargs = append(varargs, a)
 	}
 	ret := m.ctrl.Call(m, "RegisterPeerTask", varargs...)
-	ret0, _ := ret[0].(*scheduler.RegisterResult)
+	ret0, _ := ret[0].(*v1.RegisterResult)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
@ -124,7 +124,7 @@ func (mr *MockClientMockRecorder) RegisterPeerTask(arg0, arg1 interface{}, arg2
 }
 // ReportPeerResult mocks base method.
-func (m *MockClient) ReportPeerResult(arg0 context.Context, arg1 *scheduler.PeerResult, arg2 ...grpc.CallOption) error {
+func (m *MockClient) ReportPeerResult(arg0 context.Context, arg1 *v1.PeerResult, arg2 ...grpc.CallOption) error {
 	m.ctrl.T.Helper()
 	varargs := []interface{}{arg0, arg1}
 	for _, a := range arg2 {
@ -143,14 +143,14 @@ func (mr *MockClientMockRecorder) ReportPeerResult(arg0, arg1 interface{}, arg2
 }
 // ReportPieceResult mocks base method.
-func (m *MockClient) ReportPieceResult(arg0 context.Context, arg1 *scheduler.PeerTaskRequest, arg2 ...grpc.CallOption) (scheduler.Scheduler_ReportPieceResultClient, error) {
+func (m *MockClient) ReportPieceResult(arg0 context.Context, arg1 *v1.PeerTaskRequest, arg2 ...grpc.CallOption) (v1.Scheduler_ReportPieceResultClient, error) {
 	m.ctrl.T.Helper()
 	varargs := []interface{}{arg0, arg1}
 	for _, a := range arg2 {
 		varargs = append(varargs, a)
 	}
 	ret := m.ctrl.Call(m, "ReportPieceResult", varargs...)
-	ret0, _ := ret[0].(scheduler.Scheduler_ReportPieceResultClient)
+	ret0, _ := ret[0].(v1.Scheduler_ReportPieceResultClient)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
@ -163,14 +163,14 @@ func (mr *MockClientMockRecorder) ReportPieceResult(arg0, arg1 interface{}, arg2
 }
 // StatTask mocks base method.
-func (m *MockClient) StatTask(arg0 context.Context, arg1 *scheduler.StatTaskRequest, arg2 ...grpc.CallOption) (*scheduler.Task, error) {
+func (m *MockClient) StatTask(arg0 context.Context, arg1 *v1.StatTaskRequest, arg2 ...grpc.CallOption) (*v1.Task, error) {
 	m.ctrl.T.Helper()
 	varargs := []interface{}{arg0, arg1}
 	for _, a := range arg2 {
 		varargs = append(varargs, a)
 	}
 	ret := m.ctrl.Call(m, "StatTask", varargs...)
-	ret0, _ := ret[0].(*scheduler.Task)
+	ret0, _ := ret[0].(*v1.Task)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }
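
With the mock now typed against d7y.io/api, expectations are written with the v1 types. A sketch with placeholder ids, reusing a gomock controller as in the earlier sketches:

mc := mocks.NewMockClient(ctrl)
// Stub StatTask to report an existing task.
mc.EXPECT().
	StatTask(gomock.Any(), gomock.Any()).
	Return(&v1.Task{Id: "example-task-id"}, nil)

task, err := mc.StatTask(context.Background(), &v1.StatTaskRequest{TaskId: "example-task-id"})
if err != nil || task.Id != "example-task-id" {
	t.Fatal("stubbed StatTask should return the expected task")
}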

View File

@ -1,647 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: scheduler/scheduler.pb.go
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
scheduler "d7y.io/dragonfly/v2/pkg/rpc/scheduler"
gomock "github.com/golang/mock/gomock"
grpc "google.golang.org/grpc"
metadata "google.golang.org/grpc/metadata"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)
// MockisRegisterResult_DirectPiece is a mock of isRegisterResult_DirectPiece interface.
type MockisRegisterResult_DirectPiece struct {
ctrl *gomock.Controller
recorder *MockisRegisterResult_DirectPieceMockRecorder
}
// MockisRegisterResult_DirectPieceMockRecorder is the mock recorder for MockisRegisterResult_DirectPiece.
type MockisRegisterResult_DirectPieceMockRecorder struct {
mock *MockisRegisterResult_DirectPiece
}
// NewMockisRegisterResult_DirectPiece creates a new mock instance.
func NewMockisRegisterResult_DirectPiece(ctrl *gomock.Controller) *MockisRegisterResult_DirectPiece {
mock := &MockisRegisterResult_DirectPiece{ctrl: ctrl}
mock.recorder = &MockisRegisterResult_DirectPieceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockisRegisterResult_DirectPiece) EXPECT() *MockisRegisterResult_DirectPieceMockRecorder {
return m.recorder
}
// isRegisterResult_DirectPiece mocks base method.
func (m *MockisRegisterResult_DirectPiece) isRegisterResult_DirectPiece() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "isRegisterResult_DirectPiece")
}
// isRegisterResult_DirectPiece indicates an expected call of isRegisterResult_DirectPiece.
func (mr *MockisRegisterResult_DirectPieceMockRecorder) isRegisterResult_DirectPiece() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isRegisterResult_DirectPiece", reflect.TypeOf((*MockisRegisterResult_DirectPiece)(nil).isRegisterResult_DirectPiece))
}
// MockisPeerPacket_ErrorDetail is a mock of isPeerPacket_ErrorDetail interface.
type MockisPeerPacket_ErrorDetail struct {
ctrl *gomock.Controller
recorder *MockisPeerPacket_ErrorDetailMockRecorder
}
// MockisPeerPacket_ErrorDetailMockRecorder is the mock recorder for MockisPeerPacket_ErrorDetail.
type MockisPeerPacket_ErrorDetailMockRecorder struct {
mock *MockisPeerPacket_ErrorDetail
}
// NewMockisPeerPacket_ErrorDetail creates a new mock instance.
func NewMockisPeerPacket_ErrorDetail(ctrl *gomock.Controller) *MockisPeerPacket_ErrorDetail {
mock := &MockisPeerPacket_ErrorDetail{ctrl: ctrl}
mock.recorder = &MockisPeerPacket_ErrorDetailMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockisPeerPacket_ErrorDetail) EXPECT() *MockisPeerPacket_ErrorDetailMockRecorder {
return m.recorder
}
// isPeerPacket_ErrorDetail mocks base method.
func (m *MockisPeerPacket_ErrorDetail) isPeerPacket_ErrorDetail() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "isPeerPacket_ErrorDetail")
}
// isPeerPacket_ErrorDetail indicates an expected call of isPeerPacket_ErrorDetail.
func (mr *MockisPeerPacket_ErrorDetailMockRecorder) isPeerPacket_ErrorDetail() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isPeerPacket_ErrorDetail", reflect.TypeOf((*MockisPeerPacket_ErrorDetail)(nil).isPeerPacket_ErrorDetail))
}
// MockisPeerResult_ErrorDetail is a mock of isPeerResult_ErrorDetail interface.
type MockisPeerResult_ErrorDetail struct {
ctrl *gomock.Controller
recorder *MockisPeerResult_ErrorDetailMockRecorder
}
// MockisPeerResult_ErrorDetailMockRecorder is the mock recorder for MockisPeerResult_ErrorDetail.
type MockisPeerResult_ErrorDetailMockRecorder struct {
mock *MockisPeerResult_ErrorDetail
}
// NewMockisPeerResult_ErrorDetail creates a new mock instance.
func NewMockisPeerResult_ErrorDetail(ctrl *gomock.Controller) *MockisPeerResult_ErrorDetail {
mock := &MockisPeerResult_ErrorDetail{ctrl: ctrl}
mock.recorder = &MockisPeerResult_ErrorDetailMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockisPeerResult_ErrorDetail) EXPECT() *MockisPeerResult_ErrorDetailMockRecorder {
return m.recorder
}
// isPeerResult_ErrorDetail mocks base method.
func (m *MockisPeerResult_ErrorDetail) isPeerResult_ErrorDetail() {
m.ctrl.T.Helper()
m.ctrl.Call(m, "isPeerResult_ErrorDetail")
}
// isPeerResult_ErrorDetail indicates an expected call of isPeerResult_ErrorDetail.
func (mr *MockisPeerResult_ErrorDetailMockRecorder) isPeerResult_ErrorDetail() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "isPeerResult_ErrorDetail", reflect.TypeOf((*MockisPeerResult_ErrorDetail)(nil).isPeerResult_ErrorDetail))
}
// MockSchedulerClient is a mock of SchedulerClient interface.
type MockSchedulerClient struct {
ctrl *gomock.Controller
recorder *MockSchedulerClientMockRecorder
}
// MockSchedulerClientMockRecorder is the mock recorder for MockSchedulerClient.
type MockSchedulerClientMockRecorder struct {
mock *MockSchedulerClient
}
// NewMockSchedulerClient creates a new mock instance.
func NewMockSchedulerClient(ctrl *gomock.Controller) *MockSchedulerClient {
mock := &MockSchedulerClient{ctrl: ctrl}
mock.recorder = &MockSchedulerClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSchedulerClient) EXPECT() *MockSchedulerClientMockRecorder {
return m.recorder
}
// AnnounceTask mocks base method.
func (m *MockSchedulerClient) AnnounceTask(ctx context.Context, in *scheduler.AnnounceTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "AnnounceTask", varargs...)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// AnnounceTask indicates an expected call of AnnounceTask.
func (mr *MockSchedulerClientMockRecorder) AnnounceTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AnnounceTask", reflect.TypeOf((*MockSchedulerClient)(nil).AnnounceTask), varargs...)
}
// LeaveTask mocks base method.
func (m *MockSchedulerClient) LeaveTask(ctx context.Context, in *scheduler.PeerTarget, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "LeaveTask", varargs...)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// LeaveTask indicates an expected call of LeaveTask.
func (mr *MockSchedulerClientMockRecorder) LeaveTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveTask", reflect.TypeOf((*MockSchedulerClient)(nil).LeaveTask), varargs...)
}
// RegisterPeerTask mocks base method.
func (m *MockSchedulerClient) RegisterPeerTask(ctx context.Context, in *scheduler.PeerTaskRequest, opts ...grpc.CallOption) (*scheduler.RegisterResult, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "RegisterPeerTask", varargs...)
ret0, _ := ret[0].(*scheduler.RegisterResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RegisterPeerTask indicates an expected call of RegisterPeerTask.
func (mr *MockSchedulerClientMockRecorder) RegisterPeerTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterPeerTask", reflect.TypeOf((*MockSchedulerClient)(nil).RegisterPeerTask), varargs...)
}
// ReportPeerResult mocks base method.
func (m *MockSchedulerClient) ReportPeerResult(ctx context.Context, in *scheduler.PeerResult, opts ...grpc.CallOption) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ReportPeerResult", varargs...)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ReportPeerResult indicates an expected call of ReportPeerResult.
func (mr *MockSchedulerClientMockRecorder) ReportPeerResult(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeerResult", reflect.TypeOf((*MockSchedulerClient)(nil).ReportPeerResult), varargs...)
}
// ReportPieceResult mocks base method.
func (m *MockSchedulerClient) ReportPieceResult(ctx context.Context, opts ...grpc.CallOption) (scheduler.Scheduler_ReportPieceResultClient, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ReportPieceResult", varargs...)
ret0, _ := ret[0].(scheduler.Scheduler_ReportPieceResultClient)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ReportPieceResult indicates an expected call of ReportPieceResult.
func (mr *MockSchedulerClientMockRecorder) ReportPieceResult(ctx interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPieceResult", reflect.TypeOf((*MockSchedulerClient)(nil).ReportPieceResult), varargs...)
}
// StatTask mocks base method.
func (m *MockSchedulerClient) StatTask(ctx context.Context, in *scheduler.StatTaskRequest, opts ...grpc.CallOption) (*scheduler.Task, error) {
m.ctrl.T.Helper()
varargs := []interface{}{ctx, in}
for _, a := range opts {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "StatTask", varargs...)
ret0, _ := ret[0].(*scheduler.Task)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StatTask indicates an expected call of StatTask.
func (mr *MockSchedulerClientMockRecorder) StatTask(ctx, in interface{}, opts ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{ctx, in}, opts...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatTask", reflect.TypeOf((*MockSchedulerClient)(nil).StatTask), varargs...)
}
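
Before its removal, this client mock was typically paired with the companion stream mock defined just below, so code that reads peer packets could be tested end to end. A sketch with placeholder ids:

sc := mocks.NewMockSchedulerClient(ctrl)
rps := mocks.NewMockScheduler_ReportPieceResultClient(ctrl)
// Opening the stream hands back the stream mock.
sc.EXPECT().ReportPieceResult(gomock.Any()).Return(rps, nil)
rps.EXPECT().Send(gomock.Any()).Return(nil).AnyTimes()
rps.EXPECT().Recv().Return(&scheduler.PeerPacket{TaskId: "example-task-id"}, nil)

stream, _ := sc.ReportPieceResult(context.Background())
packet, _ := stream.Recv()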
// MockScheduler_ReportPieceResultClient is a mock of Scheduler_ReportPieceResultClient interface.
type MockScheduler_ReportPieceResultClient struct {
ctrl *gomock.Controller
recorder *MockScheduler_ReportPieceResultClientMockRecorder
}
// MockScheduler_ReportPieceResultClientMockRecorder is the mock recorder for MockScheduler_ReportPieceResultClient.
type MockScheduler_ReportPieceResultClientMockRecorder struct {
mock *MockScheduler_ReportPieceResultClient
}
// NewMockScheduler_ReportPieceResultClient creates a new mock instance.
func NewMockScheduler_ReportPieceResultClient(ctrl *gomock.Controller) *MockScheduler_ReportPieceResultClient {
mock := &MockScheduler_ReportPieceResultClient{ctrl: ctrl}
mock.recorder = &MockScheduler_ReportPieceResultClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockScheduler_ReportPieceResultClient) EXPECT() *MockScheduler_ReportPieceResultClientMockRecorder {
return m.recorder
}
// CloseSend mocks base method.
func (m *MockScheduler_ReportPieceResultClient) CloseSend() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CloseSend")
ret0, _ := ret[0].(error)
return ret0
}
// CloseSend indicates an expected call of CloseSend.
func (mr *MockScheduler_ReportPieceResultClientMockRecorder) CloseSend() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseSend", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).CloseSend))
}
// Context mocks base method.
func (m *MockScheduler_ReportPieceResultClient) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Context))
}
// Header mocks base method.
func (m *MockScheduler_ReportPieceResultClient) Header() (metadata.MD, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Header")
ret0, _ := ret[0].(metadata.MD)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Header indicates an expected call of Header.
func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Header() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Header", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Header))
}
// Recv mocks base method.
func (m *MockScheduler_ReportPieceResultClient) Recv() (*scheduler.PeerPacket, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*scheduler.PeerPacket)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Recv indicates an expected call of Recv.
func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Recv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Recv))
}
// RecvMsg mocks base method.
func (m_2 *MockScheduler_ReportPieceResultClient) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockScheduler_ReportPieceResultClientMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockScheduler_ReportPieceResultClient) Send(arg0 *scheduler.PieceResult) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Send indicates an expected call of Send.
func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Send), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockScheduler_ReportPieceResultClient) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockScheduler_ReportPieceResultClientMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).SendMsg), m)
}
// Trailer mocks base method.
func (m *MockScheduler_ReportPieceResultClient) Trailer() metadata.MD {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Trailer")
ret0, _ := ret[0].(metadata.MD)
return ret0
}
// Trailer indicates an expected call of Trailer.
func (mr *MockScheduler_ReportPieceResultClientMockRecorder) Trailer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trailer", reflect.TypeOf((*MockScheduler_ReportPieceResultClient)(nil).Trailer))
}
// MockSchedulerServer is a mock of SchedulerServer interface.
type MockSchedulerServer struct {
ctrl *gomock.Controller
recorder *MockSchedulerServerMockRecorder
}
// MockSchedulerServerMockRecorder is the mock recorder for MockSchedulerServer.
type MockSchedulerServerMockRecorder struct {
mock *MockSchedulerServer
}
// NewMockSchedulerServer creates a new mock instance.
func NewMockSchedulerServer(ctrl *gomock.Controller) *MockSchedulerServer {
mock := &MockSchedulerServer{ctrl: ctrl}
mock.recorder = &MockSchedulerServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSchedulerServer) EXPECT() *MockSchedulerServerMockRecorder {
return m.recorder
}
// AnnounceTask mocks base method.
func (m *MockSchedulerServer) AnnounceTask(arg0 context.Context, arg1 *scheduler.AnnounceTaskRequest) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AnnounceTask", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// AnnounceTask indicates an expected call of AnnounceTask.
func (mr *MockSchedulerServerMockRecorder) AnnounceTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AnnounceTask", reflect.TypeOf((*MockSchedulerServer)(nil).AnnounceTask), arg0, arg1)
}
// LeaveTask mocks base method.
func (m *MockSchedulerServer) LeaveTask(arg0 context.Context, arg1 *scheduler.PeerTarget) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LeaveTask", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// LeaveTask indicates an expected call of LeaveTask.
func (mr *MockSchedulerServerMockRecorder) LeaveTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveTask", reflect.TypeOf((*MockSchedulerServer)(nil).LeaveTask), arg0, arg1)
}
// RegisterPeerTask mocks base method.
func (m *MockSchedulerServer) RegisterPeerTask(arg0 context.Context, arg1 *scheduler.PeerTaskRequest) (*scheduler.RegisterResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RegisterPeerTask", arg0, arg1)
ret0, _ := ret[0].(*scheduler.RegisterResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RegisterPeerTask indicates an expected call of RegisterPeerTask.
func (mr *MockSchedulerServerMockRecorder) RegisterPeerTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterPeerTask", reflect.TypeOf((*MockSchedulerServer)(nil).RegisterPeerTask), arg0, arg1)
}
// ReportPeerResult mocks base method.
func (m *MockSchedulerServer) ReportPeerResult(arg0 context.Context, arg1 *scheduler.PeerResult) (*emptypb.Empty, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReportPeerResult", arg0, arg1)
ret0, _ := ret[0].(*emptypb.Empty)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ReportPeerResult indicates an expected call of ReportPeerResult.
func (mr *MockSchedulerServerMockRecorder) ReportPeerResult(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPeerResult", reflect.TypeOf((*MockSchedulerServer)(nil).ReportPeerResult), arg0, arg1)
}
// ReportPieceResult mocks base method.
func (m *MockSchedulerServer) ReportPieceResult(arg0 scheduler.Scheduler_ReportPieceResultServer) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReportPieceResult", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// ReportPieceResult indicates an expected call of ReportPieceResult.
func (mr *MockSchedulerServerMockRecorder) ReportPieceResult(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReportPieceResult", reflect.TypeOf((*MockSchedulerServer)(nil).ReportPieceResult), arg0)
}
// StatTask mocks base method.
func (m *MockSchedulerServer) StatTask(arg0 context.Context, arg1 *scheduler.StatTaskRequest) (*scheduler.Task, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StatTask", arg0, arg1)
ret0, _ := ret[0].(*scheduler.Task)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StatTask indicates an expected call of StatTask.
func (mr *MockSchedulerServerMockRecorder) StatTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatTask", reflect.TypeOf((*MockSchedulerServer)(nil).StatTask), arg0, arg1)
}
// MockScheduler_ReportPieceResultServer is a mock of Scheduler_ReportPieceResultServer interface.
type MockScheduler_ReportPieceResultServer struct {
ctrl *gomock.Controller
recorder *MockScheduler_ReportPieceResultServerMockRecorder
}
// MockScheduler_ReportPieceResultServerMockRecorder is the mock recorder for MockScheduler_ReportPieceResultServer.
type MockScheduler_ReportPieceResultServerMockRecorder struct {
mock *MockScheduler_ReportPieceResultServer
}
// NewMockScheduler_ReportPieceResultServer creates a new mock instance.
func NewMockScheduler_ReportPieceResultServer(ctrl *gomock.Controller) *MockScheduler_ReportPieceResultServer {
mock := &MockScheduler_ReportPieceResultServer{ctrl: ctrl}
mock.recorder = &MockScheduler_ReportPieceResultServerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockScheduler_ReportPieceResultServer) EXPECT() *MockScheduler_ReportPieceResultServerMockRecorder {
return m.recorder
}
// Context mocks base method.
func (m *MockScheduler_ReportPieceResultServer) Context() context.Context {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Context")
ret0, _ := ret[0].(context.Context)
return ret0
}
// Context indicates an expected call of Context.
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) Context() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).Context))
}
// Recv mocks base method.
func (m *MockScheduler_ReportPieceResultServer) Recv() (*scheduler.PieceResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Recv")
ret0, _ := ret[0].(*scheduler.PieceResult)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Recv indicates an expected call of Recv.
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) Recv() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).Recv))
}
// RecvMsg mocks base method.
func (m_2 *MockScheduler_ReportPieceResultServer) RecvMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "RecvMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// RecvMsg indicates an expected call of RecvMsg.
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) RecvMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).RecvMsg), m)
}
// Send mocks base method.
func (m *MockScheduler_ReportPieceResultServer) Send(arg0 *scheduler.PeerPacket) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Send", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Send indicates an expected call of Send.
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) Send(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).Send), arg0)
}
// SendHeader mocks base method.
func (m *MockScheduler_ReportPieceResultServer) SendHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SendHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SendHeader indicates an expected call of SendHeader.
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SendHeader), arg0)
}
// SendMsg mocks base method.
func (m_2 *MockScheduler_ReportPieceResultServer) SendMsg(m interface{}) error {
m_2.ctrl.T.Helper()
ret := m_2.ctrl.Call(m_2, "SendMsg", m)
ret0, _ := ret[0].(error)
return ret0
}
// SendMsg indicates an expected call of SendMsg.
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SendMsg(m interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SendMsg), m)
}
// SetHeader mocks base method.
func (m *MockScheduler_ReportPieceResultServer) SetHeader(arg0 metadata.MD) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetHeader", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SetHeader indicates an expected call of SetHeader.
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SetHeader), arg0)
}
// SetTrailer mocks base method.
func (m *MockScheduler_ReportPieceResultServer) SetTrailer(arg0 metadata.MD) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "SetTrailer", arg0)
}
// SetTrailer indicates an expected call of SetTrailer.
func (mr *MockScheduler_ReportPieceResultServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockScheduler_ReportPieceResultServer)(nil).SetTrailer), arg0)
}
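
Before this commit, a scheduler handler test would script the bidirectional stream through these mocks: Recv feeds piece results until io.EOF, and Send may return any number of peer packets. A sketch, where schedulerServer is a placeholder for the implementation under test:

stream := mocks.NewMockScheduler_ReportPieceResultServer(ctrl)
gomock.InOrder(
	stream.EXPECT().Recv().Return(&scheduler.PieceResult{TaskId: "example-task-id", SrcPid: "example-peer-id"}, nil),
	stream.EXPECT().Recv().Return(nil, io.EOF),
)
stream.EXPECT().Send(gomock.Any()).Return(nil).AnyTimes()

// The handler under test consumes the stream until Recv returns io.EOF.
if err := schedulerServer.ReportPieceResult(stream); err != nil {
	t.Fatalf("ReportPieceResult: %v", err)
}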

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,257 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
syntax = "proto3";
package scheduler;
import "pkg/rpc/base/base.proto";
import "pkg/rpc/errordetails/error_details.proto";
import "validate/validate.proto";
import "google/protobuf/empty.proto";
option go_package = "d7y.io/dragonfly/v2/pkg/rpc/scheduler";
// PeerTaskRequest represents request of RegisterPeerTask.
message PeerTaskRequest{
// Download url.
string url = 1 [(validate.rules).string.uri = true];
// URL meta info.
base.UrlMeta url_meta = 2 [(validate.rules).message.required = true];
// Peer id and it must be global uniqueness.
string peer_id = 3 [(validate.rules).string.min_len = 1];
// Peer host info.
PeerHost peer_host = 4;
// Peer host load.
base.HostLoad host_load = 5;
// Whether this request is caused by migration.
bool is_migrating = 6;
// Pattern includes p2p, seed-peer and source.
base.Pattern pattern = 7;
// Task id.
string task_id = 8;
}
// RegisterResult represents response of RegisterPeerTask.
message RegisterResult{
// Task type.
base.TaskType task_type = 1;
// Task id
string task_id = 2 [(validate.rules).string.min_len = 1];
// File size scope.
base.SizeScope size_scope = 3 [(validate.rules).enum.defined_only = true];
// Download the only piece directly for small or tiny file.
oneof direct_piece{
// Return single piece info when size scope is small.
SinglePiece single_piece = 4;
// Return task content when size scope is tiny.
bytes piece_content = 5;
}
// Task extend attribute,
// only direct_piece will carry extend attribute.
base.ExtendAttribute extend_attribute = 6;
}
// SinglePiece represents information of single piece.
message SinglePiece{
// Destination peer id.
string dst_pid = 1 [(validate.rules).string.min_len = 1];
// Destination download address.
string dst_addr = 2 [(validate.rules).string.min_len = 1];
// Piece info.
base.PieceInfo piece_info = 3;
}
// PeerHost represents information of peer host.
message PeerHost{
// Peer host id.
string id = 1 [(validate.rules).string.min_len = 1];
// peer host ip
string ip = 2 [(validate.rules).string.ip = true];
// Port of grpc service.
int32 rpc_port = 3 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Port of download server.
int32 down_port = 4 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Peer hostname.
string host_name = 5 [(validate.rules).string.hostname = true];
// Security domain for network.
string security_domain = 6;
// Location path(area|country|province|city|...).
string location = 7;
// IDC where the peer host is located
string idc = 8;
// Network topology(switch|router|...).
string net_topology = 9;
}
// PieceResult represents request of ReportPieceResult.
message PieceResult{
// Task id.
string task_id = 1 [(validate.rules).string.min_len = 1];
// Source peer id.
string src_pid = 2 [(validate.rules).string.min_len = 1];
// Destination peer id.
string dst_pid = 3;
// Piece info.
base.PieceInfo piece_info = 4;
// Begin time of the piece downloading.
uint64 begin_time = 5;
// End time of the piece downloading.
uint64 end_time = 6;
// Whether the piece downloading is successfully.
bool success = 7;
// Result code.
base.Code code = 8;
// Peer host load.
base.HostLoad host_load = 9;
// Finished count.
int32 finished_count = 10;
// Task extend attribute,
// only first success back source piece will carry extend attribute.
base.ExtendAttribute extend_attribute = 11;
}
// PeerPacket represents response of ReportPieceResult.
message PeerPacket{
message DestPeer{
// Destination ip.
string ip = 1 [(validate.rules).string.ip = true];
// Port of grpc service.
int32 rpc_port = 2 [(validate.rules).int32 = {gte: 1024, lt: 65535}];
// Destination peer id.
string peer_id = 3 [(validate.rules).string.min_len = 1];
}
// Task id.
string task_id = 2 [(validate.rules).string.min_len = 1];
// Source peer id.
string src_pid = 3 [(validate.rules).string.min_len = 1];
// Concurrent downloading count from main peer.
int32 parallel_count = 4 [(validate.rules).int32.gte = 1];
// Main peer.
DestPeer main_peer = 5;
// Candidate peers.
repeated DestPeer candidate_peers = 6;
// Result code.
base.Code code = 7;
// Error detail.
oneof error_detail{
// Source error.
errordetails.SourceError source_error = 8;
}
}
// PeerResult represents response of ReportPeerResult.
message PeerResult{
// Task id.
string task_id = 1 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 2 [(validate.rules).string.min_len = 1];
// Source host ip.
string src_ip = 3 [(validate.rules).string.ip = true];
// Security domain.
string security_domain = 4;
// IDC where the peer host is located
string idc = 5;
// Download url.
string url = 6 [(validate.rules).string.uri = true];
// Total content length.
int64 content_length = 7 [(validate.rules).int64.gte = -1];
// Total network traffic.
uint64 traffic = 8;
// Total cost time.
uint32 cost = 9;
// Whether peer downloading file is successfully.
bool success = 10;
// Result code.
base.Code code = 11;
// Task total piece count.
int32 total_piece_count = 12 [(validate.rules).int32.gte = -1];
// Error detail.
oneof error_detail{
// Source error.
errordetails.SourceError source_error = 13;
}
}
// PeerTarget represents request of LeaveTask.
message PeerTarget{
// Task id.
string task_id = 1 [(validate.rules).string.min_len = 1];
// Peer id.
string peer_id = 2 [(validate.rules).string.min_len = 1];
}
// StatTaskRequest represents request of StatTask.
message StatTaskRequest{
// Task id.
string task_id = 1 [(validate.rules).string.min_len = 1];
}
// Task represents download task.
message Task{
// Task id.
string id = 1 [(validate.rules).string.min_len = 1];
// Task type.
base.TaskType type = 2;
// Task content length.
int64 content_length = 3 [(validate.rules).int64.gte = 1];
// Task total piece count.
int32 total_piece_count = 4 [(validate.rules).int32.gte = 1];
// Task state.
string state = 5 [(validate.rules).string.min_len = 1];
// Task peer count.
int32 peer_count = 6 [(validate.rules).int32.gte = 0];
// Task contains available peer.
bool hasAvailablePeer = 7;
}
// AnnounceTaskRequest represents request of AnnounceTask.
message AnnounceTaskRequest{
// Task id.
string task_id = 1 [(validate.rules).string.min_len = 1];
// Download url.
string url = 2 [(validate.rules).string = {uri: true, ignore_empty: true}];
// URL meta info.
base.UrlMeta url_meta = 3 [(validate.rules).message.required = true];
// Peer host info.
PeerHost peer_host = 4;
// Task piece info.
base.PiecePacket piece_packet = 5 [(validate.rules).message.required = true];
// Task type.
base.TaskType task_type = 6;
}
// Scheduler RPC Service.
service Scheduler{
// RegisterPeerTask registers a peer into task.
rpc RegisterPeerTask(PeerTaskRequest)returns(RegisterResult);
// ReportPieceResult reports piece results and receives peer packets.
rpc ReportPieceResult(stream PieceResult)returns(stream PeerPacket);
// ReportPeerResult reports downloading result for the peer.
rpc ReportPeerResult(PeerResult)returns(google.protobuf.Empty);
// LeaveTask makes the peer leaving from task.
rpc LeaveTask(PeerTarget)returns(google.protobuf.Empty);
// Checks if any peer has the given task.
rpc StatTask(StatTaskRequest)returns(Task);
// A peer announces that it has the announced task to other peers.
rpc AnnounceTask(AnnounceTaskRequest) returns(google.protobuf.Empty);
}
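
To make the contract concrete, a sketch of a StatTask round trip against this service, assuming the usual protoc-gen-go naming (field hasAvailablePeer becomes HasAvailablePeer) and a placeholder task id:

task, err := schedulerClient.StatTask(ctx, &scheduler.StatTaskRequest{TaskId: "example-task-id"})
if err != nil {
	return err
}
if task.HasAvailablePeer {
	// At least one peer already holds the task, so the download can be scheduled peer-to-peer.
}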

Some files were not shown because too many files have changed in this diff