feat: add AnnouncePeers to task in resource (#2051)
Signed-off-by: Gaius <gaius.qi@gmail.com>
parent 7dc3c826f2
commit e1dd1efca1
@@ -153,9 +153,9 @@ type Peer struct {
     // Cost is the cost of downloading.
     Cost *atomic.Duration

-    // ReportPieceStream is the grpc stream of Scheduler_ReportPieceResultServer,
+    // ReportPieceResultStream is the grpc stream of Scheduler_ReportPieceResultServer,
     // Used only in v1 version of the grpc.
-    ReportPieceStream *atomic.Value
+    ReportPieceResultStream *atomic.Value

     // AnnouncePeerStream is the grpc stream of Scheduler_AnnouncePeerServer,
     // Used only in v2 version of the grpc.

@@ -203,24 +203,24 @@ type Peer struct {
 // New Peer instance.
 func NewPeer(id string, task *Task, host *Host, options ...PeerOption) *Peer {
     p := &Peer{
         ID: id,
         Tag: DefaultTag,
         Application: DefaultApplication,
         Pieces: set.NewSafeSet[*Piece](),
         FinishedPieces: &bitset.BitSet{},
         pieceCosts: []int64{},
         Cost: atomic.NewDuration(0),
-        ReportPieceStream: &atomic.Value{},
+        ReportPieceResultStream: &atomic.Value{},
         AnnouncePeerStream: &atomic.Value{},
         Task: task,
         Host: host,
         BlockParents: set.NewSafeSet[string](),
         NeedBackToSource: atomic.NewBool(false),
         IsBackToSource: atomic.NewBool(false),
         PieceUpdatedAt: atomic.NewTime(time.Now()),
         CreatedAt: atomic.NewTime(time.Now()),
         UpdatedAt: atomic.NewTime(time.Now()),
         Log: logger.WithPeer(host.ID, task.ID, id),
     }

     // Initialize state machine.
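Aside: NewPeer takes variadic PeerOption arguments after the required id, task, and host. For readers unfamiliar with that idiom, here is a minimal, self-contained sketch of the functional-options pattern; peer, withTag, and newPeer are illustrative stand-ins, not the real resource package API.

package main

import "fmt"

// peer mimics the shape of the constructed value: defaults first,
// options applied afterwards.
type peer struct {
    id  string
    tag string
}

type peerOption func(*peer)

// withTag overrides the default tag, analogous to a PeerOption.
func withTag(tag string) peerOption {
    return func(p *peer) { p.tag = tag }
}

func newPeer(id string, opts ...peerOption) *peer {
    p := &peer{id: id, tag: "default"} // defaults assigned first, as NewPeer does
    for _, opt := range opts {
        opt(p)
    }
    return p
}

func main() {
    fmt.Println(newPeer("peer-1", withTag("foo")).tag) // prints "foo"
}

Because defaults are set before the options run, callers can override fields such as Tag or Application without the package needing a second constructor.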
@@ -334,10 +334,10 @@ func (p *Peer) PieceCosts() []int64 {
     return p.pieceCosts
 }

-// LoadReportPieceStream return the grpc stream of Scheduler_ReportPieceResultServer,
+// LoadReportPieceResultStream return the grpc stream of Scheduler_ReportPieceResultServer,
 // Used only in v1 version of the grpc.
-func (p *Peer) LoadReportPieceStream() (schedulerv1.Scheduler_ReportPieceResultServer, bool) {
-    rawStream := p.ReportPieceStream.Load()
+func (p *Peer) LoadReportPieceResultStream() (schedulerv1.Scheduler_ReportPieceResultServer, bool) {
+    rawStream := p.ReportPieceResultStream.Load()
     if rawStream == nil {
         return nil, false
     }
@@ -345,22 +345,22 @@ func (p *Peer) LoadReportPieceStream() (schedulerv1.Scheduler_ReportPieceResultS
     return rawStream.(schedulerv1.Scheduler_ReportPieceResultServer), true
 }

-// StoreReportPieceStream set the grpc stream of Scheduler_ReportPieceResultServer,
+// StoreReportPieceResultStream set the grpc stream of Scheduler_ReportPieceResultServer,
 // Used only in v1 version of the grpc.
-func (p *Peer) StoreReportPieceStream(stream schedulerv1.Scheduler_ReportPieceResultServer) {
-    p.ReportPieceStream.Store(stream)
+func (p *Peer) StoreReportPieceResultStream(stream schedulerv1.Scheduler_ReportPieceResultServer) {
+    p.ReportPieceResultStream.Store(stream)
 }

-// DeleteReportPieceStream deletes the grpc stream of Scheduler_ReportPieceResultServer,
+// DeleteReportPieceResultStream deletes the grpc stream of Scheduler_ReportPieceResultServer,
 // Used only in v1 version of the grpc.
-func (p *Peer) DeleteReportPieceStream() {
-    p.ReportPieceStream = &atomic.Value{}
+func (p *Peer) DeleteReportPieceResultStream() {
+    p.ReportPieceResultStream = &atomic.Value{}
 }

 // LoadAnnouncePeerStream return the grpc stream of Scheduler_AnnouncePeerServer,
 // Used only in v2 version of the grpc.
 func (p *Peer) LoadAnnouncePeerStream() (schedulerv2.Scheduler_AnnouncePeerServer, bool) {
-    rawStream := p.ReportPieceStream.Load()
+    rawStream := p.AnnouncePeerStream.Load()
     if rawStream == nil {
         return nil, false
     }

@@ -371,13 +371,13 @@ func (p *Peer) LoadAnnouncePeerStream() (schedulerv2.Scheduler_AnnouncePeerServe
 // StoreAnnouncePeerStream set the grpc stream of Scheduler_AnnouncePeerServer,
 // Used only in v2 version of the grpc.
 func (p *Peer) StoreAnnouncePeerStream(stream schedulerv2.Scheduler_AnnouncePeerServer) {
-    p.ReportPieceStream.Store(stream)
+    p.AnnouncePeerStream.Store(stream)
 }

 // DeleteAnnouncePeerStream deletes the grpc stream of Scheduler_AnnouncePeerServer,
 // Used only in v2 version of the grpc.
 func (p *Peer) DeleteAnnouncePeerStream() {
-    p.ReportPieceStream = &atomic.Value{}
+    p.AnnouncePeerStream = &atomic.Value{}
 }

 // Parents returns parents of peer.
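All of the Load/Store/Delete accessors above funnel through go.uber.org/atomic's Value. A minimal, self-contained sketch of the same pattern, with a plain interface in place of the generated gRPC server type; sender, holder, and the method names are illustrative, not the real resource API.

package main

import (
    "fmt"

    "go.uber.org/atomic"
)

// sender stands in for a generated stream interface such as
// schedulerv1.Scheduler_ReportPieceResultServer.
type sender interface {
    Send(msg string) error
}

type stdoutSender struct{}

func (stdoutSender) Send(msg string) error { fmt.Println(msg); return nil }

// holder mirrors the Peer fields: one *atomic.Value per stream.
type holder struct {
    stream *atomic.Value
}

// load mirrors LoadReportPieceResultStream: nil check, then a type assertion,
// reported through the comma-ok second return value.
func (h *holder) load() (sender, bool) {
    raw := h.stream.Load()
    if raw == nil {
        return nil, false
    }
    return raw.(sender), true
}

// store mirrors StoreReportPieceResultStream. atomic.Value panics on a nil
// store, which is why delete below swaps in a fresh Value instead of
// storing nil.
func (h *holder) store(s sender) {
    h.stream.Store(s)
}

func (h *holder) delete() {
    h.stream = &atomic.Value{}
}

func main() {
    h := &holder{stream: &atomic.Value{}}
    h.store(stdoutSender{})
    if s, ok := h.load(); ok {
        _ = s.Send("hello")
    }
    h.delete()
    if _, ok := h.load(); !ok {
        fmt.Println("stream gone")
    }
}

Note that swapping the field pointer in delete is not itself an atomic operation; presumably the gRPC handler goroutine that owns the stream serializes store and delete around the RPC's lifetime.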
@@ -34,7 +34,7 @@ import (
     commonv2 "d7y.io/api/pkg/apis/common/v2"
     managerv2 "d7y.io/api/pkg/apis/manager/v2"
     schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
-    "d7y.io/api/pkg/apis/scheduler/v1/mocks"
+    v1mocks "d7y.io/api/pkg/apis/scheduler/v1/mocks"
     schedulerv2 "d7y.io/api/pkg/apis/scheduler/v2"
     v2mocks "d7y.io/api/pkg/apis/scheduler/v2/mocks"

@@ -64,7 +64,7 @@ func TestPeer_NewPeer(t *testing.T) {
     assert.Equal(peer.Pieces.Len(), uint(0))
     assert.Empty(peer.FinishedPieces)
     assert.Equal(len(peer.PieceCosts()), 0)
-    assert.Empty(peer.ReportPieceStream)
+    assert.Empty(peer.ReportPieceResultStream)
     assert.Empty(peer.AnnouncePeerStream)
     assert.Equal(peer.FSM.Current(), PeerStatePending)
     assert.EqualValues(peer.Task, mockTask)

@@ -86,8 +86,7 @@ func TestPeer_NewPeer(t *testing.T) {
     assert.Equal(peer.Pieces.Len(), uint(0))
     assert.Empty(peer.FinishedPieces)
     assert.Equal(len(peer.PieceCosts()), 0)
-    assert.Empty(peer.ReportPieceStream)
-    assert.Empty(peer.AnnouncePeerStream)
+    assert.Empty(peer.ReportPieceResultStream)
     assert.Equal(peer.FSM.Current(), PeerStatePending)
     assert.EqualValues(peer.Task, mockTask)
     assert.EqualValues(peer.Host, mockHost)

@@ -183,7 +182,7 @@ func TestPeer_PieceCosts(t *testing.T) {
     }
 }

-func TestPeer_LoadReportPieceStream(t *testing.T) {
+func TestPeer_LoadReportPieceResultStream(t *testing.T) {
     tests := []struct {
         name string
         expect func(t *testing.T, peer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer)

@@ -192,8 +191,8 @@ func TestPeer_LoadReportPieceStream(t *testing.T) {
             name: "load stream",
             expect: func(t *testing.T, peer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) {
                 assert := assert.New(t)
-                peer.StoreReportPieceStream(stream)
-                newStream, loaded := peer.LoadReportPieceStream()
+                peer.StoreReportPieceResultStream(stream)
+                newStream, loaded := peer.LoadReportPieceResultStream()
                 assert.Equal(loaded, true)
                 assert.EqualValues(newStream, stream)
             },

@@ -202,7 +201,7 @@ func TestPeer_LoadReportPieceStream(t *testing.T) {
             name: "stream does not exist",
             expect: func(t *testing.T, peer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) {
                 assert := assert.New(t)
-                _, loaded := peer.LoadReportPieceStream()
+                _, loaded := peer.LoadReportPieceResultStream()
                 assert.Equal(loaded, false)
             },
         },

@@ -212,7 +211,7 @@ func TestPeer_LoadReportPieceStream(t *testing.T) {
         t.Run(tc.name, func(t *testing.T) {
             ctl := gomock.NewController(t)
             defer ctl.Finish()
-            stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl)
+            stream := v1mocks.NewMockScheduler_ReportPieceResultServer(ctl)

             mockHost := NewHost(
                 mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,

@@ -224,7 +223,7 @@ func TestPeer_LoadReportPieceStream(t *testing.T) {
     }
 }

-func TestPeer_StoreReportPieceStream(t *testing.T) {
+func TestPeer_StoreReportPieceResultStream(t *testing.T) {
     tests := []struct {
         name string
         expect func(t *testing.T, peer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer)

@@ -233,8 +232,8 @@ func TestPeer_StoreReportPieceStream(t *testing.T) {
             name: "store stream",
             expect: func(t *testing.T, peer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) {
                 assert := assert.New(t)
-                peer.StoreReportPieceStream(stream)
-                newStream, loaded := peer.LoadReportPieceStream()
+                peer.StoreReportPieceResultStream(stream)
+                newStream, loaded := peer.LoadReportPieceResultStream()
                 assert.Equal(loaded, true)
                 assert.EqualValues(newStream, stream)
             },

@@ -245,7 +244,7 @@ func TestPeer_StoreReportPieceStream(t *testing.T) {
         t.Run(tc.name, func(t *testing.T) {
             ctl := gomock.NewController(t)
             defer ctl.Finish()
-            stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl)
+            stream := v1mocks.NewMockScheduler_ReportPieceResultServer(ctl)

             mockHost := NewHost(
                 mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,

@@ -257,7 +256,7 @@ func TestPeer_StoreReportPieceStream(t *testing.T) {
     }
 }

-func TestPeer_DeleteReportPieceStream(t *testing.T) {
+func TestPeer_DeleteReportPieceResultStream(t *testing.T) {
     tests := []struct {
         name string
         expect func(t *testing.T, peer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer)

@@ -266,9 +265,9 @@ func TestPeer_DeleteReportPieceStream(t *testing.T) {
             name: "delete stream",
             expect: func(t *testing.T, peer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer) {
                 assert := assert.New(t)
-                peer.StoreReportPieceStream(stream)
-                peer.DeleteReportPieceStream()
-                _, loaded := peer.LoadReportPieceStream()
+                peer.StoreReportPieceResultStream(stream)
+                peer.DeleteReportPieceResultStream()
+                _, loaded := peer.LoadReportPieceResultStream()
                 assert.Equal(loaded, false)
             },
         },

@@ -278,7 +277,7 @@ func TestPeer_DeleteReportPieceStream(t *testing.T) {
         t.Run(tc.name, func(t *testing.T) {
             ctl := gomock.NewController(t)
             defer ctl.Finish()
-            stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl)
+            stream := v1mocks.NewMockScheduler_ReportPieceResultServer(ctl)

             mockHost := NewHost(
                 mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,

@@ -430,7 +429,7 @@ func TestPeer_Parents(t *testing.T) {
         t.Run(tc.name, func(t *testing.T) {
             ctl := gomock.NewController(t)
             defer ctl.Finish()
-            stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl)
+            stream := v1mocks.NewMockScheduler_ReportPieceResultServer(ctl)

             mockHost := NewHost(
                 mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,

@@ -476,7 +475,7 @@ func TestPeer_Children(t *testing.T) {
         t.Run(tc.name, func(t *testing.T) {
             ctl := gomock.NewController(t)
             defer ctl.Finish()
-            stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl)
+            stream := v1mocks.NewMockScheduler_ReportPieceResultServer(ctl)

             mockHost := NewHost(
                 mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
@@ -28,6 +28,7 @@ import (

     commonv1 "d7y.io/api/pkg/apis/common/v1"
     schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
+    schedulerv2 "d7y.io/api/pkg/apis/scheduler/v2"

     logger "d7y.io/dragonfly/v2/internal/dflog"
     "d7y.io/dragonfly/v2/pkg/container/set"

@@ -439,8 +440,9 @@ func (t *Task) CanReuseDirectPiece() bool {
     return len(t.DirectPiece) > 0 && int64(len(t.DirectPiece)) == t.ContentLength.Load()
 }

-// NotifyPeers notify all peers in the task with the state code.
-func (t *Task) NotifyPeers(peerPacket *schedulerv1.PeerPacket, event string) {
+// ReportPieceResultToPeers reports the state code to all peers in the task.
+// Used only in v1 version of the grpc.
+func (t *Task) ReportPieceResultToPeers(peerPacket *schedulerv1.PeerPacket, event string) {
     for _, vertex := range t.DAG.GetVertices() {
         peer := vertex.Value
         if peer == nil {

@@ -448,7 +450,7 @@ func (t *Task) NotifyPeers(peerPacket *schedulerv1.PeerPacket, event string) {
         }

         if peer.FSM.Is(PeerStateRunning) {
-            stream, loaded := peer.LoadReportPieceStream()
+            stream, loaded := peer.LoadReportPieceResultStream()
             if !loaded {
                 continue
             }

@@ -457,7 +459,36 @@ func (t *Task) NotifyPeers(peerPacket *schedulerv1.PeerPacket, event string) {
                 t.Log.Errorf("send packet to peer %s failed: %s", peer.ID, err.Error())
                 continue
             }
-            t.Log.Infof("task notify peer %s code %s", peer.ID, peerPacket.Code)
+            t.Log.Infof("task reports peer %s code %s", peer.ID, peerPacket.Code)

+            if err := peer.FSM.Event(context.Background(), event); err != nil {
+                peer.Log.Errorf("peer fsm event failed: %s", err.Error())
+                continue
+            }
+        }
+    }
+}
+
+// AnnouncePeers sends the announce response to all peers in the task.
+// Used only in v2 version of the grpc.
+func (t *Task) AnnouncePeers(resp *schedulerv2.AnnouncePeerResponse, event string) {
+    for _, vertex := range t.DAG.GetVertices() {
+        peer := vertex.Value
+        if peer == nil {
+            continue
+        }
+
+        if peer.FSM.Is(PeerStateRunning) {
+            stream, loaded := peer.LoadAnnouncePeerStream()
+            if !loaded {
+                continue
+            }
+
+            if err := stream.Send(resp); err != nil {
+                t.Log.Errorf("send response to peer %s failed: %s", peer.ID, err.Error())
+                continue
+            }
+            t.Log.Infof("task announces peer %s response %#v", peer.ID, resp.Response)
+
             if err := peer.FSM.Event(context.Background(), event); err != nil {
                 peer.Log.Errorf("peer fsm event failed: %s", err.Error())
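Both broadcast methods share one shape: walk the task's DAG vertices, skip peers that are not running or have no registered stream, send on the stream, then drive the peer's FSM. A self-contained sketch of that fan-out loop; peer, task, and printStream are invented stand-ins for the resource package types.

package main

import "fmt"

// stream stands in for either generated server stream type.
type stream interface{ Send(msg string) error }

type printStream struct{ id string }

func (s printStream) Send(msg string) error {
    fmt.Printf("-> %s: %s\n", s.id, msg)
    return nil
}

type peer struct {
    id      string
    running bool
    stream  stream // nil means no stream registered, like the comma-ok Load
}

type task struct{ peers []*peer }

// broadcast mirrors ReportPieceResultToPeers/AnnouncePeers: best-effort
// delivery that continues past every failure mode instead of aborting.
func (t *task) broadcast(msg string) {
    for _, p := range t.peers {
        if p == nil || !p.running || p.stream == nil {
            continue
        }
        if err := p.stream.Send(msg); err != nil {
            fmt.Printf("send to %s failed: %v\n", p.id, err)
            continue
        }
        // The real code then fires an FSM event such as
        // PeerEventDownloadFailed to move the peer to its next state.
    }
}

func main() {
    t := &task{peers: []*peer{
        {id: "a", running: true, stream: printStream{id: "a"}},
        {id: "b", running: false},
        {id: "c", running: true}, // running but never registered a stream
    }}
    t.broadcast("back-to-source aborted")
}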
@@ -26,7 +26,9 @@ import (

     commonv1 "d7y.io/api/pkg/apis/common/v1"
     schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
-    "d7y.io/api/pkg/apis/scheduler/v1/mocks"
+    v1mocks "d7y.io/api/pkg/apis/scheduler/v1/mocks"
+    schedulerv2 "d7y.io/api/pkg/apis/scheduler/v2"
+    v2mocks "d7y.io/api/pkg/apis/scheduler/v2/mocks"

     "d7y.io/dragonfly/v2/pkg/container/set"
     "d7y.io/dragonfly/v2/pkg/idgen"
@@ -1584,16 +1586,16 @@ func TestTask_CanReuseDirectPiece(t *testing.T) {
     }
 }

-func TestTask_NotifyPeers(t *testing.T) {
+func TestTask_ReportPieceResultToPeers(t *testing.T) {
     tests := []struct {
         name string
-        run func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder)
+        run func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *v1mocks.MockScheduler_ReportPieceResultServerMockRecorder)
     }{
         {
             name: "peer state is PeerStatePending",
-            run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) {
+            run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *v1mocks.MockScheduler_ReportPieceResultServerMockRecorder) {
                 mockPeer.FSM.SetState(PeerStatePending)
-                task.NotifyPeers(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}, PeerEventDownloadFailed)
+                task.ReportPieceResultToPeers(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}, PeerEventDownloadFailed)

                 assert := assert.New(t)
                 assert.True(mockPeer.FSM.Is(PeerStatePending))

@@ -1601,9 +1603,9 @@ func TestTask_NotifyPeers(t *testing.T) {
         },
         {
             name: "peer state is PeerStateRunning and stream is empty",
-            run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) {
+            run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *v1mocks.MockScheduler_ReportPieceResultServerMockRecorder) {
                 mockPeer.FSM.SetState(PeerStateRunning)
-                task.NotifyPeers(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}, PeerEventDownloadFailed)
+                task.ReportPieceResultToPeers(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}, PeerEventDownloadFailed)

                 assert := assert.New(t)
                 assert.True(mockPeer.FSM.Is(PeerStateRunning))

@@ -1611,12 +1613,12 @@ func TestTask_NotifyPeers(t *testing.T) {
         },
         {
             name: "peer state is PeerStateRunning and stream sending failed",
-            run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) {
+            run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *v1mocks.MockScheduler_ReportPieceResultServerMockRecorder) {
                 mockPeer.FSM.SetState(PeerStateRunning)
-                mockPeer.StoreReportPieceStream(stream)
+                mockPeer.StoreReportPieceResultStream(stream)
                 ms.Send(gomock.Eq(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError})).Return(errors.New("foo")).Times(1)

-                task.NotifyPeers(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}, PeerEventDownloadFailed)
+                task.ReportPieceResultToPeers(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}, PeerEventDownloadFailed)

                 assert := assert.New(t)
                 assert.True(mockPeer.FSM.Is(PeerStateRunning))

@@ -1624,25 +1626,25 @@ func TestTask_NotifyPeers(t *testing.T) {
         },
         {
             name: "peer state is PeerStateRunning and state changing failed",
-            run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) {
+            run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *v1mocks.MockScheduler_ReportPieceResultServerMockRecorder) {
                 mockPeer.FSM.SetState(PeerStateRunning)
-                mockPeer.StoreReportPieceStream(stream)
+                mockPeer.StoreReportPieceResultStream(stream)
                 ms.Send(gomock.Eq(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError})).Return(errors.New("foo")).Times(1)

-                task.NotifyPeers(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}, PeerEventDownloadFailed)
+                task.ReportPieceResultToPeers(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}, PeerEventDownloadFailed)

                 assert := assert.New(t)
                 assert.True(mockPeer.FSM.Is(PeerStateRunning))
             },
         },
         {
-            name: "peer state is PeerStateRunning and notify peer successfully",
-            run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *mocks.MockScheduler_ReportPieceResultServerMockRecorder) {
+            name: "peer state is PeerStateRunning and report peer successfully",
+            run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv1.Scheduler_ReportPieceResultServer, ms *v1mocks.MockScheduler_ReportPieceResultServerMockRecorder) {
                 mockPeer.FSM.SetState(PeerStateRunning)
-                mockPeer.StoreReportPieceStream(stream)
+                mockPeer.StoreReportPieceResultStream(stream)
                 ms.Send(gomock.Eq(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError})).Return(nil).Times(1)

-                task.NotifyPeers(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}, PeerEventDownloadFailed)
+                task.ReportPieceResultToPeers(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedTaskStatusError}, PeerEventDownloadFailed)

                 assert := assert.New(t)
                 assert.True(mockPeer.FSM.Is(PeerStateFailed))

@@ -1654,7 +1656,90 @@ func TestTask_NotifyPeers(t *testing.T) {
         t.Run(tc.name, func(t *testing.T) {
             ctl := gomock.NewController(t)
             defer ctl.Finish()
-            stream := mocks.NewMockScheduler_ReportPieceResultServer(ctl)
+            stream := v1mocks.NewMockScheduler_ReportPieceResultServer(ctl)

+            mockHost := NewHost(
+                mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
+                mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
+            task := NewTask(mockTaskID, mockTaskURL, commonv1.TaskType_Normal, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
+            mockPeer := NewPeer(mockPeerID, task, mockHost)
+            task.StorePeer(mockPeer)
+            tc.run(t, task, mockPeer, stream, stream.EXPECT())
+        })
+    }
+}
+
+func TestTask_AnnouncePeers(t *testing.T) {
+    tests := []struct {
+        name string
+        run func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv2.Scheduler_AnnouncePeerServer, ms *v2mocks.MockScheduler_AnnouncePeerServerMockRecorder)
+    }{
+        {
+            name: "peer state is PeerStatePending",
+            run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv2.Scheduler_AnnouncePeerServer, ms *v2mocks.MockScheduler_AnnouncePeerServerMockRecorder) {
+                mockPeer.FSM.SetState(PeerStatePending)
+                task.AnnouncePeers(&schedulerv2.AnnouncePeerResponse{}, PeerEventDownloadFailed)
+
+                assert := assert.New(t)
+                assert.True(mockPeer.FSM.Is(PeerStatePending))
+            },
+        },
+        {
+            name: "peer state is PeerStateRunning and stream is empty",
+            run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv2.Scheduler_AnnouncePeerServer, ms *v2mocks.MockScheduler_AnnouncePeerServerMockRecorder) {
+                mockPeer.FSM.SetState(PeerStateRunning)
+                task.AnnouncePeers(&schedulerv2.AnnouncePeerResponse{}, PeerEventDownloadFailed)
+
+                assert := assert.New(t)
+                assert.True(mockPeer.FSM.Is(PeerStateRunning))
+            },
+        },
+        {
+            name: "peer state is PeerStateRunning and stream sending failed",
+            run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv2.Scheduler_AnnouncePeerServer, ms *v2mocks.MockScheduler_AnnouncePeerServerMockRecorder) {
+                mockPeer.FSM.SetState(PeerStateRunning)
+                mockPeer.StoreAnnouncePeerStream(stream)
+                ms.Send(gomock.Eq(&schedulerv2.AnnouncePeerResponse{})).Return(errors.New("foo")).Times(1)
+
+                task.AnnouncePeers(&schedulerv2.AnnouncePeerResponse{}, PeerEventDownloadFailed)
+
+                assert := assert.New(t)
+                assert.True(mockPeer.FSM.Is(PeerStateRunning))
+            },
+        },
+        {
+            name: "peer state is PeerStateRunning and state changing failed",
+            run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv2.Scheduler_AnnouncePeerServer, ms *v2mocks.MockScheduler_AnnouncePeerServerMockRecorder) {
+                mockPeer.FSM.SetState(PeerStateRunning)
+                mockPeer.StoreAnnouncePeerStream(stream)
+                ms.Send(gomock.Eq(&schedulerv2.AnnouncePeerResponse{})).Return(errors.New("foo")).Times(1)
+
+                task.AnnouncePeers(&schedulerv2.AnnouncePeerResponse{}, PeerEventDownloadFailed)
+
+                assert := assert.New(t)
+                assert.True(mockPeer.FSM.Is(PeerStateRunning))
+            },
+        },
+        {
+            name: "peer state is PeerStateRunning and announce peer successfully",
+            run: func(t *testing.T, task *Task, mockPeer *Peer, stream schedulerv2.Scheduler_AnnouncePeerServer, ms *v2mocks.MockScheduler_AnnouncePeerServerMockRecorder) {
+                mockPeer.FSM.SetState(PeerStateRunning)
+                mockPeer.StoreAnnouncePeerStream(stream)
+                ms.Send(gomock.Eq(&schedulerv2.AnnouncePeerResponse{})).Return(nil).Times(1)
+
+                task.AnnouncePeers(&schedulerv2.AnnouncePeerResponse{}, PeerEventDownloadFailed)
+
+                assert := assert.New(t)
+                assert.True(mockPeer.FSM.Is(PeerStateFailed))
+            },
+        },
+    }
+
+    for _, tc := range tests {
+        t.Run(tc.name, func(t *testing.T) {
+            ctl := gomock.NewController(t)
+            defer ctl.Finish()
+            stream := v2mocks.NewMockScheduler_AnnouncePeerServer(ctl)
+
             mockHost := NewHost(
                 mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
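The new TestTask_AnnouncePeers follows the repository's table-driven style: each case arms a gomock expectation on the mocked AnnouncePeer stream and then invokes the method under test. A compressed, self-contained sketch of the same structure, with a hand-written fake standing in for the gomock-generated mock; fakeStream and broadcaster are invented for illustration.

package tasktest

import (
    "errors"
    "testing"
)

// fakeStream stands in for MockScheduler_AnnouncePeerServer; it records
// calls directly instead of going through a recorder/EXPECT pair.
type fakeStream struct {
    sendErr error
    sent    int
}

func (f *fakeStream) Send(msg string) error {
    f.sent++
    return f.sendErr
}

type broadcaster struct{ stream *fakeStream }

func (b *broadcaster) announce(msg string) bool {
    if b.stream == nil {
        return false
    }
    return b.stream.Send(msg) == nil
}

func TestBroadcaster_Announce(t *testing.T) {
    tests := []struct {
        name   string
        stream *fakeStream
        wantOK bool
    }{
        {name: "no stream registered", stream: nil, wantOK: false},
        {name: "send failed", stream: &fakeStream{sendErr: errors.New("foo")}, wantOK: false},
        {name: "send succeeded", stream: &fakeStream{}, wantOK: true},
    }

    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            b := &broadcaster{stream: tc.stream}
            if got := b.announce("hello"); got != tc.wantOK {
                t.Fatalf("announce() = %t, want %t", got, tc.wantOK)
            }
        })
    }
}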
@@ -80,7 +80,7 @@ func (s *scheduler) ScheduleParent(ctx context.Context, peer *resource.Peer, blo
     peer.Log.Infof("peer needs to back-to-source: %t", needBackToSource)
     if (n >= s.config.RetryBackToSourceLimit || needBackToSource) &&
         peer.Task.CanBackToSource() {
-        stream, loaded := peer.LoadReportPieceStream()
+        stream, loaded := peer.LoadReportPieceResultStream()
         if !loaded {
             peer.Log.Error("load stream failed")
             return

@@ -112,7 +112,7 @@ func (s *scheduler) ScheduleParent(ctx context.Context, peer *resource.Peer, blo

     // Handle peer schedule failed.
     if n >= s.config.RetryLimit {
-        stream, loaded := peer.LoadReportPieceStream()
+        stream, loaded := peer.LoadReportPieceResultStream()
         if !loaded {
             peer.Log.Error("load stream failed")
             return

@@ -193,7 +193,7 @@ func (s *scheduler) NotifyAndFindParent(ctx context.Context, peer *resource.Peer
     }

     // Send scheduling success message.
-    stream, loaded := peer.LoadReportPieceStream()
+    stream, loaded := peer.LoadReportPieceResultStream()
     if !loaded {
         peer.Log.Error("load peer stream failed")
         return []*resource.Peer{}, false
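The back-to-source gate in ScheduleParent is the compound condition visible in the first hunk above. Isolated as a pure function it reads as follows; this is a sketch, and the name and parameters are illustrative rather than the real scheduler API.

package main

import "fmt"

// shouldBackToSource mirrors the guard in ScheduleParent: either the retry
// budget for finding a parent is spent or the peer explicitly asked to go
// back to source, and the task must still allow back-to-source downloads.
func shouldBackToSource(retries, retryBackToSourceLimit int, needBackToSource, canBackToSource bool) bool {
    return (retries >= retryBackToSourceLimit || needBackToSource) && canBackToSource
}

func main() {
    fmt.Println(shouldBackToSource(5, 5, false, true)) // true: retry budget spent
    fmt.Println(shouldBackToSource(0, 5, true, false)) // false: task forbids back-to-source
}

Only when this gate passes does the scheduler load the piece-result stream and send Code_SchedNeedBackSource, which is why the tests below arm the mock with that exact packet.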
@@ -233,7 +233,7 @@ func TestScheduler_ScheduleParent(t *testing.T) {
                 task.StorePeer(peer)
                 peer.NeedBackToSource.Store(true)
                 peer.FSM.SetState(resource.PeerStateRunning)
-                peer.StoreReportPieceStream(stream)
+                peer.StoreReportPieceResultStream(stream)

                 mr.Send(gomock.Eq(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedNeedBackSource})).Return(errors.New("foo")).Times(1)
             },

@@ -250,7 +250,7 @@ func TestScheduler_ScheduleParent(t *testing.T) {
                 task.StorePeer(peer)
                 peer.NeedBackToSource.Store(true)
                 peer.FSM.SetState(resource.PeerStateRunning)
-                peer.StoreReportPieceStream(stream)
+                peer.StoreReportPieceResultStream(stream)

                 mr.Send(gomock.Eq(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedNeedBackSource})).Return(nil).Times(1)
             },

@@ -269,7 +269,7 @@ func TestScheduler_ScheduleParent(t *testing.T) {
                 peer.NeedBackToSource.Store(true)
                 peer.FSM.SetState(resource.PeerStateRunning)
                 task.FSM.SetState(resource.TaskStateFailed)
-                peer.StoreReportPieceStream(stream)
+                peer.StoreReportPieceResultStream(stream)

                 mr.Send(gomock.Eq(&schedulerv1.PeerPacket{Code: commonv1.Code_SchedNeedBackSource})).Return(nil).Times(1)
             },

@@ -315,7 +315,7 @@ func TestScheduler_ScheduleParent(t *testing.T) {
                 task.StorePeer(peer)
                 peer.FSM.SetState(resource.PeerStateRunning)
                 peer.Task.BackToSourceLimit.Store(-1)
-                peer.StoreReportPieceStream(stream)
+                peer.StoreReportPieceResultStream(stream)

                 gomock.InOrder(
                     md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(2),

@@ -335,7 +335,7 @@ func TestScheduler_ScheduleParent(t *testing.T) {
                 task.StorePeer(peer)
                 peer.FSM.SetState(resource.PeerStateRunning)
                 peer.Task.BackToSourceLimit.Store(-1)
-                peer.StoreReportPieceStream(stream)
+                peer.StoreReportPieceResultStream(stream)

                 gomock.InOrder(
                     md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(2),

@@ -356,7 +356,7 @@ func TestScheduler_ScheduleParent(t *testing.T) {
                 task.StorePeer(seedPeer)
                 peer.FSM.SetState(resource.PeerStateRunning)
                 seedPeer.FSM.SetState(resource.PeerStateRunning)
-                peer.StoreReportPieceStream(stream)
+                peer.StoreReportPieceResultStream(stream)
                 gomock.InOrder(
                     md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(1),
                     md.GetSchedulerClusterClientConfig().Return(types.SchedulerClusterClientConfig{

@@ -605,7 +605,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) {
                 peer.Task.StorePeer(peer)
                 peer.Task.StorePeer(mockPeer)
                 mockPeer.FinishedPieces.Set(0)
-                peer.StoreReportPieceStream(stream)
+                peer.StoreReportPieceResultStream(stream)
                 gomock.InOrder(
                     md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(1),
                     md.GetSchedulerClusterClientConfig().Return(types.SchedulerClusterClientConfig{

@@ -636,7 +636,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) {
                 mockPeer.IsBackToSource.Store(true)
                 candidatePeer.IsBackToSource.Store(true)
                 mockPeer.FinishedPieces.Set(0)
-                peer.StoreReportPieceStream(stream)
+                peer.StoreReportPieceResultStream(stream)
                 gomock.InOrder(
                     md.GetSchedulerClusterConfig().Return(types.SchedulerClusterConfig{}, errors.New("foo")).Times(1),
                     md.GetSchedulerClusterClientConfig().Return(types.SchedulerClusterClientConfig{
@@ -208,8 +208,8 @@ func (v *V1) ReportPieceResult(stream schedulerv1.Scheduler_ReportPieceResultSer
         }

         // Peer setting stream.
-        peer.StoreReportPieceStream(stream)
-        defer peer.DeleteReportPieceStream()
+        peer.StoreReportPieceResultStream(stream)
+        defer peer.DeleteReportPieceResultStream()
     }

     if piece.PieceInfo != nil {
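This hunk shows the stream's lifetime contract: ReportPieceResult publishes the stream on entry and unpublishes it with a deferred delete when the RPC returns. A self-contained sketch of that store/defer-delete pairing, with an interface standing in for the generated stream type; peer, sender, and handle are illustrative names, not the real service API.

package main

import (
    "fmt"

    "go.uber.org/atomic"
)

type sender interface{ Send(msg string) error }

type nopSender struct{}

func (nopSender) Send(string) error { return nil }

type peer struct{ stream *atomic.Value }

// handle mirrors the shape of V1.ReportPieceResult: the stream is visible
// to other goroutines only while this RPC handler is running.
func handle(p *peer, s sender) error {
    p.stream.Store(s)
    defer func() { p.stream = &atomic.Value{} }() // like DeleteReportPieceResultStream

    // ... the receive loop and scheduling work would run here ...
    return nil
}

func main() {
    p := &peer{stream: &atomic.Value{}}
    _ = handle(p, nopSender{})
    fmt.Println("stream cleared:", p.stream.Load() == nil) // true
}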
@@ -1020,7 +1020,7 @@ func (v *V1) handlePieceFailure(ctx context.Context, peer *resource.Peer, piece

     // Returns an scheduling error if the peer
     // state is not PeerStateRunning.
-    stream, loaded := peer.LoadReportPieceStream()
+    stream, loaded := peer.LoadReportPieceResultStream()
     if !loaded {
         peer.Log.Error("load stream failed")
         return

@@ -1131,7 +1131,7 @@ func (v *V1) handleTaskFailure(ctx context.Context, task *resource.Task, backToS
     // and return the source metadata to peer.
     if backToSourceErr != nil {
         if !backToSourceErr.Temporary {
-            task.NotifyPeers(&schedulerv1.PeerPacket{
+            task.ReportPieceResultToPeers(&schedulerv1.PeerPacket{
                 Code: commonv1.Code_BackToSourceAborted,
                 Errordetails: &schedulerv1.PeerPacket_SourceError{
                     SourceError: backToSourceErr,

@@ -1162,7 +1162,7 @@ func (v *V1) handleTaskFailure(ctx context.Context, task *resource.Task, backToS
                 task.URLMeta.Tag, task.URLMeta.Application, proto, "0").Inc()
         }
         if !d.Temporary {
-            task.NotifyPeers(&schedulerv1.PeerPacket{
+            task.ReportPieceResultToPeers(&schedulerv1.PeerPacket{
                 Code: commonv1.Code_BackToSourceAborted,
                 Errordetails: &schedulerv1.PeerPacket_SourceError{
                     SourceError: d,

@@ -1176,7 +1176,7 @@ func (v *V1) handleTaskFailure(ctx context.Context, task *resource.Task, backToS
     } else if task.PeerFailedCount.Load() > resource.FailedPeerCountLimit {
         // If the number of failed peers in the task is greater than FailedPeerCountLimit,
         // then scheduler notifies running peers of failure.
-        task.NotifyPeers(&schedulerv1.PeerPacket{
+        task.ReportPieceResultToPeers(&schedulerv1.PeerPacket{
             Code: commonv1.Code_SchedTaskStatusError,
         }, resource.PeerEventDownloadFailed)
         task.PeerFailedCount.Store(0)
@@ -1011,7 +1011,7 @@ func TestService_ReportPieceResult(t *testing.T) {
             expect: func(t *testing.T, peer *resource.Peer, err error) {
                 assert := assert.New(t)
                 assert.NoError(err)
-                _, loaded := peer.LoadReportPieceStream()
+                _, loaded := peer.LoadReportPieceResultStream()
                 assert.False(loaded)
             },
         },

@@ -1039,7 +1039,7 @@ func TestService_ReportPieceResult(t *testing.T) {
             expect: func(t *testing.T, peer *resource.Peer, err error) {
                 assert := assert.New(t)
                 assert.NoError(err)
-                _, loaded := peer.LoadReportPieceStream()
+                _, loaded := peer.LoadReportPieceResultStream()
                 assert.False(loaded)
             },
         },

@@ -1068,7 +1068,7 @@ func TestService_ReportPieceResult(t *testing.T) {
             expect: func(t *testing.T, peer *resource.Peer, err error) {
                 assert := assert.New(t)
                 assert.NoError(err)
-                _, loaded := peer.LoadReportPieceStream()
+                _, loaded := peer.LoadReportPieceResultStream()
                 assert.False(loaded)
             },
         },

@@ -1094,7 +1094,7 @@ func TestService_ReportPieceResult(t *testing.T) {
             expect: func(t *testing.T, peer *resource.Peer, err error) {
                 assert := assert.New(t)
                 assert.NoError(err)
-                _, loaded := peer.LoadReportPieceStream()
+                _, loaded := peer.LoadReportPieceResultStream()
                 assert.False(loaded)
             },
         },

@@ -1121,7 +1121,7 @@ func TestService_ReportPieceResult(t *testing.T) {
             expect: func(t *testing.T, peer *resource.Peer, err error) {
                 assert := assert.New(t)
                 assert.NoError(err)
-                _, loaded := peer.LoadReportPieceStream()
+                _, loaded := peer.LoadReportPieceResultStream()
                 assert.False(loaded)
             },
         },