refactor: resource task with v2 version of grpc (#2078)

Signed-off-by: Gaius <gaius.qi@gmail.com>
Gaius 2023-02-14 20:01:24 +08:00
parent 79024c87ca
commit a5685582ce
43 changed files with 1179 additions and 943 deletions

View File

@ -2,11 +2,11 @@ name: CodeQL Analysis
on:
push:
branches: [ main, release-* ]
paths-ignore: [ '**.md', '**.png', '**.jpg', '**.svg', '**/docs/**' ]
branches: [main, release-*]
paths-ignore: ['**.md', '**.png', '**.jpg', '**.svg', '**/docs/**']
pull_request:
branches: [ main, release-* ]
paths-ignore: [ '**.md', '**.png', '**.jpg', '**.svg', '**/docs/**' ]
branches: [main, release-*]
paths-ignore: ['**.md', '**.png', '**.jpg', '**.svg', '**/docs/**']
schedule:
- cron: '0 4 * * *'
@ -18,7 +18,7 @@ jobs:
strategy:
fail-fast: false
matrix:
language: [ go ]
language: [go]
steps:
- name: Checkout repository

View File

@ -2,9 +2,9 @@ name: Lint
on:
push:
branches: [ main, release-* ]
branches: [main, release-*]
pull_request:
branches: [ main, release-* ]
branches: [main, release-*]
env:
GO_VERSION: 1.19

View File

@ -132,8 +132,8 @@ metrics:
enable: false
# Metrics service address.
addr: ':8000'
# Enable peer host metrics.
enablePeerHost: false
# Enable host metrics.
enableHost: false
security:
# autoIssueCert indicates to issue client certificates for all grpc call.

go.mod (14 lines changed)
View File

@ -3,7 +3,7 @@ module d7y.io/dragonfly/v2
go 1.19
require (
d7y.io/api v1.4.9
d7y.io/api v1.5.1
github.com/RichardKnop/machinery v1.10.6
github.com/Showmax/go-fqdn v1.0.0
github.com/VividCortex/mysqlerr v1.0.0
@ -72,12 +72,12 @@ require (
go.uber.org/zap v1.24.0
golang.org/x/crypto v0.5.0
golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783
golang.org/x/oauth2 v0.4.0
golang.org/x/sync v0.1.0
golang.org/x/sys v0.4.0
golang.org/x/time v0.3.0
google.golang.org/api v0.109.0
google.golang.org/grpc v1.52.3
google.golang.org/grpc v1.53.0
google.golang.org/protobuf v1.28.1
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/yaml.v3 v3.0.1
@ -92,8 +92,8 @@ require (
)
require (
cloud.google.com/go v0.105.0 // indirect
cloud.google.com/go/compute v1.14.0 // indirect
cloud.google.com/go v0.107.0 // indirect
cloud.google.com/go/compute v1.15.1 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v0.8.0 // indirect
cloud.google.com/go/pubsub v1.27.1 // indirect
@ -103,7 +103,7 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/bradfitz/gomemcache v0.0.0-20220106215444-fb4bf637b56d // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
@ -205,7 +205,7 @@ require (
golang.org/x/text v0.6.0 // indirect
golang.org/x/tools v0.5.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gorm.io/driver/sqlite v1.4.3 // indirect

go.sum (27 lines changed)
View File

@ -20,16 +20,16 @@ cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKP
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go v0.76.0/go.mod h1:r9EvIAvLrunusnetGdQ50M/gKui1x3zdGW/VELGkdpw=
cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=
cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww=
cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0=
cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE=
cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
@ -51,8 +51,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
d7y.io/api v1.4.9 h1:xH2cU0r1q2h5vS+soo322v6Xt6uH2S2Cz0/NvrS6Kds=
d7y.io/api v1.4.9/go.mod h1:pW4EMreBbsMKIRK+aWRoFLylaaODOVB395hTEApnpxQ=
d7y.io/api v1.5.1 h1:OMSQrd/dhcbA5FszKz+aHahl0P+kvuoesIZtOTS+OGE=
d7y.io/api v1.5.1/go.mod h1:7G3t9YO5esDzQVUgdUrS+6yCDAMWS5c9ux8yX5L9Ync=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
@ -152,8 +152,9 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA
github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@ -1319,8 +1320,8 @@ golang.org/x/oauth2 v0.0.0-20210201163806-010130855d6c/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk=
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1609,8 +1610,8 @@ google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210202153253-cf70463f6119/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY=
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w=
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@ -1637,8 +1638,8 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ=
google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

View File

@ -22,6 +22,8 @@ type PreheatRequest struct {
Digest string `json:"digest" validate:"omitempty"`
Filter string `json:"filter" validate:"omitempty"`
Headers map[string]string `json:"headers" validate:"omitempty"`
Application string `json:"application" validate:"omitempty"`
Priority int32 `json:"priority" validate:"omitempty"`
}
type PreheatResponse struct {

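The two new fields let a preheat request carry an application name and priority end to end; the job handler later in this commit copies them into the v1 UrlMeta. An illustrative request follows (the URL field and all values are assumptions, not from this diff):

req := PreheatRequest{
    URL:         "https://example.com/data.bin", // assumed pre-existing field
    Filter:      "Expires&Signature",
    Application: "foo",
    Priority:    int32(commonv1.Priority_LEVEL0),
}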
View File

@ -27,8 +27,8 @@ import (
)
const (
// filterSeparator is filter separator for url.
filterSeparator = "&"
// URLFilterSeparator is filter separator for url.
URLFilterSeparator = "&"
)
// TaskIDV1 generates v1 version of task id.
@ -87,7 +87,7 @@ func parseFilters(rawFilters string) []string {
return nil
}
return strings.Split(rawFilters, filterSeparator)
return strings.Split(rawFilters, URLFilterSeparator)
}
// TaskIDV2 generates v2 version of task id.
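Exporting the separator lets callers outside idgen join filters exactly the way parseFilters splits them. A rough round-trip sketch (filter names are illustrative):

rawFilters := strings.Join([]string{"Expires", "Signature"}, idgen.URLFilterSeparator) // "Expires&Signature"
filters := strings.Split(rawFilters, idgen.URLFilterSeparator)                         // ["Expires", "Signature"]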

View File

@ -46,6 +46,11 @@ func (r *Range) String() string {
return fmt.Sprintf("%s%d%s%d", RangePrefix, r.Start, RangeSeparator, r.Start+r.Length-1)
}
// URLMetaString returns the string of url meta range, without the "bytes=" prefix.
func (r *Range) URLMetaString() string {
return fmt.Sprintf("%d%s%d", r.Start, RangeSeparator, r.Start+r.Length-1)
}
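Both renderings treat the end offset as inclusive (Start+Length-1); the only difference is the prefix. A quick sketch, assuming RangePrefix is "bytes=":

r := Range{Start: 0, Length: 10}
fmt.Println(r.String())        // "bytes=0-9"
fmt.Println(r.URLMetaString()) // "0-9"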
// ParseRange parses a Range header string as per RFC 7233.
// ErrNoOverlap is returned if none of the ranges overlap.
// Example:

View File

@ -81,6 +81,65 @@ func TestRange_String(t *testing.T) {
}
}
func TestRange_URLMetaString(t *testing.T) {
tests := []struct {
s string
rg Range
expect func(t *testing.T, s string)
}{
{
s: "0-9",
rg: Range{
Start: 0,
Length: 10,
},
expect: func(t *testing.T, s string) {
assert := assert.New(t)
assert.Equal(s, "0-9")
},
},
{
s: "1-10",
rg: Range{
Start: 1,
Length: 10,
},
expect: func(t *testing.T, s string) {
assert := assert.New(t)
assert.Equal(s, "1-10")
},
},
{
s: "1-0",
rg: Range{
Start: 1,
Length: 0,
},
expect: func(t *testing.T, s string) {
assert := assert.New(t)
assert.Equal(s, "1-0")
},
},
{
s: "1-1",
rg: Range{
Start: 1,
Length: 1,
},
expect: func(t *testing.T, s string) {
assert := assert.New(t)
assert.Equal(s, "1-1")
},
},
}
for _, tc := range tests {
t.Run(tc.s, func(t *testing.T) {
tc.expect(t, tc.rg.URLMetaString())
})
}
}
func TestParseRange(t *testing.T) {
tests := []struct {
s string

View File

@ -74,15 +74,12 @@ type V2 interface {
// DownloadTask downloads task back-to-source.
DownloadTask(context.Context, *dfdaemonv2.DownloadTaskRequest, ...grpc.CallOption) error
// UploadTask uploads task to p2p network.
UploadTask(context.Context, *dfdaemonv2.UploadTaskRequest, ...grpc.CallOption) error
// StatTask stats task information.
StatTask(context.Context, *dfdaemonv2.StatTaskRequest, ...grpc.CallOption) (*commonv2.Task, error)
// ImportTask imports task to p2p network.
ImportTask(context.Context, *dfdaemonv2.ImportTaskRequest, ...grpc.CallOption) error
// ExportTask exports task from p2p network.
ExportTask(context.Context, *dfdaemonv2.ExportTaskRequest, ...grpc.CallOption) error
// DeleteTask deletes task from p2p network.
DeleteTask(context.Context, *dfdaemonv2.DeleteTaskRequest, ...grpc.CallOption) error
@ -118,6 +115,20 @@ func (v *v2) DownloadTask(ctx context.Context, req *dfdaemonv2.DownloadTaskReque
return err
}
// UploadTask uploads task to p2p network.
func (v *v2) UploadTask(ctx context.Context, req *dfdaemonv2.UploadTaskRequest, opts ...grpc.CallOption) error {
ctx, cancel := context.WithTimeout(ctx, contextTimeout)
defer cancel()
_, err := v.DfdaemonClient.UploadTask(
ctx,
req,
opts...,
)
return err
}
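Each unary wrapper here follows the same shape: derive a child context bounded by contextTimeout, forward to the embedded DfdaemonClient, and discard the empty response. A hedged call-site sketch (client construction elided; the empty request is a placeholder):

// Illustrative only: a real request would populate dfdaemonv2.UploadTaskRequest.
if err := client.UploadTask(ctx, &dfdaemonv2.UploadTaskRequest{}, grpc.WaitForReady(true)); err != nil {
    logger.Errorf("upload task failed: %s", err.Error())
}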
// StatTask stats task information.
func (v *v2) StatTask(ctx context.Context, req *dfdaemonv2.StatTaskRequest, opts ...grpc.CallOption) (*commonv2.Task, error) {
ctx, cancel := context.WithTimeout(ctx, contextTimeout)
@ -130,34 +141,6 @@ func (v *v2) StatTask(ctx context.Context, req *dfdaemonv2.StatTaskRequest, opts
)
}
// ImportTask imports task to p2p network.
func (v *v2) ImportTask(ctx context.Context, req *dfdaemonv2.ImportTaskRequest, opts ...grpc.CallOption) error {
ctx, cancel := context.WithTimeout(ctx, contextTimeout)
defer cancel()
_, err := v.DfdaemonClient.ImportTask(
ctx,
req,
opts...,
)
return err
}
// ExportTask exports task from p2p network.
func (v *v2) ExportTask(ctx context.Context, req *dfdaemonv2.ExportTaskRequest, opts ...grpc.CallOption) error {
ctx, cancel := context.WithTimeout(ctx, contextTimeout)
defer cancel()
_, err := v.DfdaemonClient.ExportTask(
ctx,
req,
opts...,
)
return err
}
// DeleteTask deletes task from p2p network.
func (v *v2) DeleteTask(ctx context.Context, req *dfdaemonv2.DeleteTaskRequest, opts ...grpc.CallOption) error {
ctx, cancel := context.WithTimeout(ctx, contextTimeout)

View File

@ -89,44 +89,6 @@ func (mr *MockV2MockRecorder) DownloadTask(arg0, arg1 interface{}, arg2 ...inter
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DownloadTask", reflect.TypeOf((*MockV2)(nil).DownloadTask), varargs...)
}
// ExportTask mocks base method.
func (m *MockV2) ExportTask(arg0 context.Context, arg1 *dfdaemon.ExportTaskRequest, arg2 ...grpc.CallOption) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ExportTask", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// ExportTask indicates an expected call of ExportTask.
func (mr *MockV2MockRecorder) ExportTask(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportTask", reflect.TypeOf((*MockV2)(nil).ExportTask), varargs...)
}
// ImportTask mocks base method.
func (m *MockV2) ImportTask(arg0 context.Context, arg1 *dfdaemon.ImportTaskRequest, arg2 ...grpc.CallOption) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "ImportTask", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// ImportTask indicates an expected call of ImportTask.
func (mr *MockV2MockRecorder) ImportTask(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportTask", reflect.TypeOf((*MockV2)(nil).ImportTask), varargs...)
}
// StatTask mocks base method.
func (m *MockV2) StatTask(arg0 context.Context, arg1 *dfdaemon.StatTaskRequest, arg2 ...grpc.CallOption) (*common.Task, error) {
m.ctrl.T.Helper()
@ -166,3 +128,22 @@ func (mr *MockV2MockRecorder) SyncPieces(arg0 interface{}, arg1 ...interface{})
varargs := append([]interface{}{arg0}, arg1...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPieces", reflect.TypeOf((*MockV2)(nil).SyncPieces), varargs...)
}
// UploadTask mocks base method.
func (m *MockV2) UploadTask(arg0 context.Context, arg1 *dfdaemon.UploadTaskRequest, arg2 ...grpc.CallOption) error {
m.ctrl.T.Helper()
varargs := []interface{}{arg0, arg1}
for _, a := range arg2 {
varargs = append(varargs, a)
}
ret := m.ctrl.Call(m, "UploadTask", varargs...)
ret0, _ := ret[0].(error)
return ret0
}
// UploadTask indicates an expected call of UploadTask.
func (mr *MockV2MockRecorder) UploadTask(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
varargs := append([]interface{}{arg0, arg1}, arg2...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadTask", reflect.TypeOf((*MockV2)(nil).UploadTask), varargs...)
}
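The regenerated mock gives UploadTask the same variadic handling as the other methods, so test expectations read the same way. A minimal sketch:

ctrl := gomock.NewController(t)
defer ctrl.Finish()

mockV2 := NewMockV2(ctrl)
mockV2.EXPECT().UploadTask(gomock.Any(), gomock.Any()).Return(nil).Times(1)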

View File

@ -163,3 +163,47 @@ func TaskTypeV2ToV1(typ commonv2.TaskType) commonv1.TaskType {
return commonv1.TaskType_Normal
}
// PriorityV1ToV2 converts priority from v1 to v2.
func PriorityV1ToV2(priority commonv1.Priority) commonv2.Priority {
switch priority {
case commonv1.Priority_LEVEL0:
return commonv2.Priority_LEVEL0
case commonv1.Priority_LEVEL1:
return commonv2.Priority_LEVEL1
case commonv1.Priority_LEVEL2:
return commonv2.Priority_LEVEL2
case commonv1.Priority_LEVEL3:
return commonv2.Priority_LEVEL3
case commonv1.Priority_LEVEL4:
return commonv2.Priority_LEVEL4
case commonv1.Priority_LEVEL5:
return commonv2.Priority_LEVEL5
case commonv1.Priority_LEVEL6:
return commonv2.Priority_LEVEL6
}
return commonv2.Priority_LEVEL0
}
// PriorityV2ToV1 converts priority from v2 to v1.
func PriorityV2ToV1(priority commonv2.Priority) commonv1.Priority {
switch priority {
case commonv2.Priority_LEVEL0:
return commonv1.Priority_LEVEL0
case commonv2.Priority_LEVEL1:
return commonv1.Priority_LEVEL1
case commonv2.Priority_LEVEL2:
return commonv1.Priority_LEVEL2
case commonv2.Priority_LEVEL3:
return commonv1.Priority_LEVEL3
case commonv2.Priority_LEVEL4:
return commonv1.Priority_LEVEL4
case commonv2.Priority_LEVEL5:
return commonv1.Priority_LEVEL5
case commonv2.Priority_LEVEL6:
return commonv1.Priority_LEVEL6
}
return commonv1.Priority_LEVEL0
}
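Both converters fall back to LEVEL0 for any value outside LEVEL0 to LEVEL6, so they are total and a round trip preserves every known level. For example (the common package qualifier is an assumption based on the import path pkg/rpc/common):

p2 := common.PriorityV1ToV2(commonv1.Priority_LEVEL3) // commonv2.Priority_LEVEL3
p1 := common.PriorityV2ToV1(p2)                       // commonv1.Priority_LEVEL3 again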

View File

@ -258,8 +258,8 @@ type MetricsConfig struct {
// Metrics service address.
Addr string `yaml:"addr" mapstructure:"addr"`
// Enable peer host metrics.
EnablePeerHost bool `yaml:"enablePeerHost" mapstructure:"enablePeerHost"`
// Enable host metrics.
EnableHost bool `yaml:"enableHost" mapstructure:"enableHost"`
}
type SecurityConfig struct {
@ -357,7 +357,7 @@ func New() *Config {
Metrics: MetricsConfig{
Enable: false,
Addr: DefaultMetricsAddr,
EnablePeerHost: false,
EnableHost: false,
},
Security: SecurityConfig{
AutoIssueCert: false,

View File

@ -101,7 +101,7 @@ func TestConfig_Load(t *testing.T) {
Metrics: MetricsConfig{
Enable: false,
Addr: ":8000",
EnablePeerHost: false,
EnableHost: true,
},
Security: SecurityConfig{
AutoIssueCert: true,

View File

@ -65,7 +65,7 @@ storage:
metrics:
enable: false
addr: ":8000"
enablePeerHost: false
enableHost: true
security:
autoIssueCert: true

View File

@ -34,6 +34,7 @@ import (
logger "d7y.io/dragonfly/v2/internal/dflog"
internaljob "d7y.io/dragonfly/v2/internal/job"
"d7y.io/dragonfly/v2/pkg/idgen"
"d7y.io/dragonfly/v2/pkg/net/http"
"d7y.io/dragonfly/v2/scheduler/config"
"d7y.io/dragonfly/v2/scheduler/resource"
)
@ -161,15 +162,17 @@ func (j *job) preheat(ctx context.Context, req string) error {
}
urlMeta := &commonv1.UrlMeta{
Header: preheat.Headers,
Digest: preheat.Digest,
Tag: preheat.Tag,
Filter: preheat.Filter,
Digest: preheat.Digest,
Header: preheat.Headers,
Application: preheat.Application,
Priority: commonv1.Priority(preheat.Priority),
}
if preheat.Headers != nil {
if r, ok := preheat.Headers[headers.Range]; ok {
// Range in dragonfly is without "bytes=".
urlMeta.Range = strings.TrimLeft(r, "bytes=")
urlMeta.Range = strings.TrimLeft(r, http.RangePrefix)
}
}
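One caveat worth flagging: strings.TrimLeft takes a cutset, not a prefix, so TrimLeft(r, http.RangePrefix) strips any leading run of the characters 'b', 'y', 't', 'e', 's' and '='. That happens to be safe for well-formed headers such as "bytes=0-9", whose first offset character is never in the cutset, but strings.TrimPrefix would state the intent exactly:

// Sketch: remove only the literal "bytes=" prefix.
urlMeta.Range = strings.TrimPrefix(r, http.RangePrefix)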

View File

@ -31,17 +31,11 @@ import (
)
var (
// TrafficP2PType is p2p type for traffic metrics.
TrafficP2PType = "p2p"
// HostTrafficUploadType is upload traffic type for host traffic metrics.
HostTrafficUploadType = "upload"
// TrafficBackToSourceType is back-to-source type for traffic metrics.
TrafficBackToSourceType = "back_to_source"
// PeerHostTrafficUploadType is upload traffic type for peer host traffic metrics.
PeerHostTrafficUploadType = "upload"
// PeerHostTrafficDownloadType is download traffic type for peer host traffic metrics.
PeerHostTrafficDownloadType = "download"
// HostTrafficDownloadType is download traffic type for host traffic metrics.
HostTrafficDownloadType = "download"
// DownloadFailureBackToSourceType is back-to-source type for download failure count metrics.
DownloadFailureBackToSourceType = "back_to_source"
@ -52,33 +46,33 @@ var (
// Variables declared for metrics.
var (
RegisterPeerTaskCount = promauto.NewCounterVec(prometheus.CounterOpts{
RegisterTaskCount = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: types.MetricsNamespace,
Subsystem: types.SchedulerMetricsName,
Name: "register_peer_task_total",
Help: "Counter of the number of the register peer task.",
Name: "register_task_total",
Help: "Counter of the number of the register task.",
}, []string{"tag", "app"})
RegisterPeerTaskFailureCount = promauto.NewCounterVec(prometheus.CounterOpts{
RegisterTaskFailureCount = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: types.MetricsNamespace,
Subsystem: types.SchedulerMetricsName,
Name: "register_peer_task_failure_total",
Help: "Counter of the number of failed of the register peer task.",
Name: "register_task_failure_total",
Help: "Counter of the number of failed of the register task.",
}, []string{"tag", "app"})
DownloadCount = promauto.NewCounterVec(prometheus.CounterOpts{
DownloadTaskCount = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: types.MetricsNamespace,
Subsystem: types.SchedulerMetricsName,
Name: "download_total",
Help: "Counter of the number of the downloading.",
}, []string{"tag", "app"})
Name: "download_task_total",
Help: "Counter of the number of the task downloading.",
}, []string{"tag", "app", "host_type"})
DownloadFailureCount = promauto.NewCounterVec(prometheus.CounterOpts{
DownloadTaskFailureCount = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: types.MetricsNamespace,
Subsystem: types.SchedulerMetricsName,
Name: "download_failure_total",
Help: "Counter of the number of failed of the downloading.",
}, []string{"tag", "app", "type", "code"})
Name: "download_task_failure_total",
Help: "Counter of the number of failed of the task downloading.",
}, []string{"tag", "app", "type", "code", "host_type"})
StatTaskCount = promauto.NewCounter(prometheus.CounterOpts{
Namespace: types.MetricsNamespace,
@ -128,14 +122,14 @@ var (
Subsystem: types.SchedulerMetricsName,
Name: "leave_task_total",
Help: "Counter of the number of the leaving task.",
}, []string{"tag", "app"})
}, []string{"tag", "app", "host_type"})
LeaveTaskFailureCount = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: types.MetricsNamespace,
Subsystem: types.SchedulerMetricsName,
Name: "leave_task_failure_total",
Help: "Counter of the number of failed of the leaving task.",
}, []string{"tag", "app"})
}, []string{"tag", "app", "host_type"})
LeaveHostCount = promauto.NewCounter(prometheus.CounterOpts{
Namespace: types.MetricsNamespace,
@ -156,36 +150,22 @@ var (
Subsystem: types.SchedulerMetricsName,
Name: "traffic",
Help: "Counter of the number of traffic.",
}, []string{"tag", "app", "type"})
}, []string{"task_tag", "task_app", "type"})
PeerHostTraffic = promauto.NewCounterVec(prometheus.CounterOpts{
HostTraffic = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: types.MetricsNamespace,
Subsystem: types.SchedulerMetricsName,
Name: "peer_host_traffic",
Help: "Counter of the number of per peer host traffic.",
}, []string{"tag", "app", "traffic_type", "peer_host_id", "peer_host_ip"})
Name: "host_traffic",
Help: "Counter of the number of per host traffic.",
}, []string{"task_tag", "task_app", "type", "host_id", "host_ip"})
PeerTaskCounter = promauto.NewCounterVec(prometheus.CounterOpts{
DownloadTaskDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: types.MetricsNamespace,
Subsystem: types.SchedulerMetricsName,
Name: "peer_task_total",
Help: "Counter of the number of peer task.",
}, []string{"tag", "app", "type"})
PeerTaskSourceErrorCounter = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: types.MetricsNamespace,
Subsystem: types.SchedulerMetricsName,
Name: "peer_task_source_error_total",
Help: "Counter of the source error code number of peer task.",
}, []string{"tag", "app", "protocol", "code"})
PeerTaskDownloadDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: types.MetricsNamespace,
Subsystem: types.SchedulerMetricsName,
Name: "peer_task_download_duration_milliseconds",
Help: "Histogram of the time each peer task downloading.",
Name: "download_task_duration_milliseconds",
Help: "Histogram of the time each task downloading.",
Buckets: []float64{100, 200, 500, 1000, 1500, 2 * 1000, 3 * 1000, 5 * 1000, 10 * 1000, 20 * 1000, 60 * 1000, 120 * 1000, 300 * 1000},
}, []string{"tag", "app"})
}, []string{"tag", "app", "host_type"})
ConcurrentScheduleGauge = promauto.NewGauge(prometheus.GaugeOpts{
Namespace: types.MetricsNamespace,

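With host_type in the label sets, call sites can now distinguish normal peers from seed peers when recording. An illustrative increment (tag, app and hostType are placeholder strings resolved by the caller):

metrics.DownloadTaskCount.WithLabelValues(tag, app, hostType).Inc()
metrics.DownloadTaskDuration.WithLabelValues(tag, app, hostType).Observe(float64(cost.Milliseconds())) // cost is a time.Duration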
View File

@ -29,93 +29,82 @@ import (
)
// HostOption is a functional option for configuring the host.
type HostOption func(h *Host) *Host
type HostOption func(h *Host)
// WithConcurrentUploadLimit sets host's ConcurrentUploadLimit.
func WithConcurrentUploadLimit(limit int32) HostOption {
return func(h *Host) *Host {
return func(h *Host) {
h.ConcurrentUploadLimit.Store(limit)
return h
}
}
// WithOS sets host's os.
func WithOS(os string) HostOption {
return func(h *Host) *Host {
return func(h *Host) {
h.OS = os
return h
}
}
// WithPlatform sets host's platform.
func WithPlatform(platform string) HostOption {
return func(h *Host) *Host {
return func(h *Host) {
h.Platform = platform
return h
}
}
// WithPlatformFamily sets host's platform family.
func WithPlatformFamily(platformFamily string) HostOption {
return func(h *Host) *Host {
return func(h *Host) {
h.PlatformFamily = platformFamily
return h
}
}
// WithPlatformVersion sets host's platform version.
func WithPlatformVersion(platformVersion string) HostOption {
return func(h *Host) *Host {
return func(h *Host) {
h.PlatformVersion = platformVersion
return h
}
}
// WithKernelVersion sets host's kernel version.
func WithKernelVersion(kernelVersion string) HostOption {
return func(h *Host) *Host {
return func(h *Host) {
h.KernelVersion = kernelVersion
return h
}
}
// WithCPU sets host's cpu.
func WithCPU(cpu CPU) HostOption {
return func(h *Host) *Host {
return func(h *Host) {
h.CPU = cpu
return h
}
}
// WithMemory sets host's memory.
func WithMemory(memory Memory) HostOption {
return func(h *Host) *Host {
return func(h *Host) {
h.Memory = memory
return h
}
}
// WithNetwork sets host's network.
func WithNetwork(network Network) HostOption {
return func(h *Host) *Host {
return func(h *Host) {
h.Network = network
return h
}
}
// WithDisk sets host's disk.
func WithDisk(disk Disk) HostOption {
return func(h *Host) *Host {
return func(h *Host) {
h.Disk = disk
return h
}
}
// WithBuild sets host's build information.
func WithBuild(build Build) HostOption {
return func(h *Host) *Host {
return func(h *Host) {
h.Build = build
return h
}
}
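Dropping the *Host return simplifies the option type: options now mutate the host in place, and the constructor no longer threads a return value through. A minimal sketch of how a constructor applies them (not the actual NewHost body):

func NewHost(id, ip, hostname string, port, downloadPort int32, typ types.HostType, options ...HostOption) *Host {
    h := &Host{ID: id, IP: ip, Hostname: hostname, Port: port, DownloadPort: downloadPort, Type: typ}
    for _, opt := range options {
        opt(h) // mutate in place; nothing to reassign
    }
    return h
}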

View File

@ -395,7 +395,7 @@ func TestHostManager_RunGC(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, mockTask, mockHost)
hostManager, err := newHostManager(mockHostGCConfig, gc)
if err != nil {

View File

@ -30,7 +30,7 @@ import (
var (
mockRawHost = Host{
ID: idgen.HostIDV1("hostname", 8003),
ID: mockHostID,
Type: types.HostTypeNormal,
Hostname: "hostname",
IP: "127.0.0.1",
@ -49,7 +49,7 @@ var (
}
mockRawSeedHost = Host{
ID: idgen.HostIDV1("hostname_seed", 8003),
ID: mockSeedHostID,
Type: types.HostTypeSuperSeed,
Hostname: "hostname_seed",
IP: "127.0.0.1",
@ -98,9 +98,9 @@ var (
mockNetwork = Network{
TCPConnectionCount: 10,
UploadTCPConnectionCount: 1,
SecurityDomain: "security_domain",
Location: "location",
IDC: "idc",
SecurityDomain: mockHostSecurityDomain,
Location: mockHostLocation,
IDC: mockHostIDC,
}
mockDisk = Disk{
@ -120,6 +120,12 @@ var (
GoVersion: "1.18",
Platform: "darwin",
}
mockHostID = idgen.HostIDV2("127.0.0.1", "hostname", 8003)
mockSeedHostID = idgen.HostIDV2("127.0.0.1", "hostname_seed", 8003)
mockHostSecurityDomain = "security_domain"
mockHostLocation = "location"
mockHostIDC = "idc"
)
func TestHost_NewHost(t *testing.T) {
@ -490,7 +496,7 @@ func TestHost_LoadPeer(t *testing.T) {
host := NewHost(
tc.rawHost.ID, tc.rawHost.IP, tc.rawHost.Hostname,
tc.rawHost.Port, tc.rawHost.DownloadPort, tc.rawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, mockTask, host)
host.StorePeer(mockPeer)
@ -535,7 +541,7 @@ func TestHost_StorePeer(t *testing.T) {
host := NewHost(
tc.rawHost.ID, tc.rawHost.IP, tc.rawHost.Hostname,
tc.rawHost.Port, tc.rawHost.DownloadPort, tc.rawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(tc.peerID, mockTask, host)
host.StorePeer(mockPeer)
@ -581,7 +587,7 @@ func TestHost_DeletePeer(t *testing.T) {
host := NewHost(
tc.rawHost.ID, tc.rawHost.IP, tc.rawHost.Hostname,
tc.rawHost.Port, tc.rawHost.DownloadPort, tc.rawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, mockTask, host)
host.StorePeer(mockPeer)
@ -633,7 +639,7 @@ func TestHost_LeavePeers(t *testing.T) {
host := NewHost(
tc.rawHost.ID, tc.rawHost.IP, tc.rawHost.Hostname,
tc.rawHost.Port, tc.rawHost.DownloadPort, tc.rawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, mockTask, host)
tc.expect(t, host, mockPeer)
@ -685,7 +691,7 @@ func TestHost_FreeUploadCount(t *testing.T) {
host := NewHost(
tc.rawHost.ID, tc.rawHost.IP, tc.rawHost.Hostname,
tc.rawHost.Port, tc.rawHost.DownloadPort, tc.rawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, mockTask, host)
tc.expect(t, host, mockTask, mockPeer)

View File

@ -37,16 +37,11 @@ import (
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/container/set"
nethttp "d7y.io/dragonfly/v2/pkg/net/http"
"d7y.io/dragonfly/v2/scheduler/config"
)
const (
// Default value of tag.
DefaultTag = "unknow"
//DefaultApplication default value of application
DefaultApplication = "unknown"
// Download tiny file timeout.
downloadTinyFileContextTimeout = 30 * time.Second
)
@ -112,22 +107,20 @@ const (
PeerEventLeave = "Leave"
)
// PeerOption is a functional option for configuring the peer.
type PeerOption func(p *Peer) *Peer
// PeerOption is a functional option for peer.
type PeerOption func(peer *Peer)
// WithTag sets peer's Tag.
func WithTag(tag string) PeerOption {
return func(p *Peer) *Peer {
p.Tag = tag
return p
// WithPriority sets Priority for peer.
func WithPriority(priority commonv2.Priority) PeerOption {
return func(p *Peer) {
p.Priority = priority
}
}
// WithApplication sets peer's Application.
func WithApplication(application string) PeerOption {
return func(p *Peer) *Peer {
p.Application = application
return p
// WithRange sets Range for peer.
func WithRange(rg nethttp.Range) PeerOption {
return func(p *Peer) {
p.Range = &rg
}
}
@ -136,11 +129,11 @@ type Peer struct {
// ID is peer id.
ID string
// Tag is peer tag.
Tag string
// Range is url range of request.
Range *nethttp.Range
// Application is peer application.
Application string
// Priority is peer priority.
Priority commonv2.Priority
// Pieces is finished piece set.
Pieces set.SafeSet[*Piece]
@ -205,8 +198,7 @@ type Peer struct {
func NewPeer(id string, task *Task, host *Host, options ...PeerOption) *Peer {
p := &Peer{
ID: id,
Tag: DefaultTag,
Application: DefaultApplication,
Priority: commonv2.Priority_LEVEL0,
Pieces: set.NewSafeSet[*Piece](),
FinishedPieces: &bitset.BitSet{},
pieceCosts: []int64{},
@ -421,7 +413,8 @@ func (p *Peer) Children() []*Peer {
return children
}
// DownloadTinyFile downloads tiny file from peer.
// DownloadTinyFile downloads tiny file from peer without range.
// Used only in v1 version of the grpc.
func (p *Peer) DownloadTinyFile() ([]byte, error) {
ctx, cancel := context.WithTimeout(context.Background(), downloadTinyFileContextTimeout)
defer cancel()
@ -458,8 +451,55 @@ func (p *Peer) DownloadTinyFile() ([]byte, error) {
return io.ReadAll(resp.Body)
}
// DownloadFile downloads file from peer with range.
// Used only in v2 version of the grpc.
func (p *Peer) DownloadFile() ([]byte, error) {
ctx, cancel := context.WithTimeout(context.Background(), downloadTinyFileContextTimeout)
defer cancel()
// Download url: http://${host}:${port}/download/${taskIndex}/${taskID}?peerId=${peerID}
targetURL := url.URL{
Scheme: "http",
Host: fmt.Sprintf("%s:%d", p.Host.IP, p.Host.DownloadPort),
Path: fmt.Sprintf("download/%s/%s", p.Task.ID[:3], p.Task.ID),
RawQuery: fmt.Sprintf("peerId=%s", p.ID),
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL.String(), nil)
if err != nil {
return []byte{}, err
}
rg := fmt.Sprintf("bytes=%d-%d", 0, p.Task.ContentLength.Load()-1)
if p.Range != nil {
rg = p.Range.String()
}
req.Header.Set(headers.Range, rg)
p.Log.Infof("download file %s, header is: %#v", targetURL.String(), req.Header)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
// The HTTP 206 Partial Content success status response code indicates that
// the request has succeeded and the body contains the requested ranges of data, as described in the Range header of the request.
// Refer to https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/206.
if resp.StatusCode/100 != 2 {
return nil, fmt.Errorf("bad response status %s", resp.Status)
}
return io.ReadAll(resp.Body)
}
// GetPriority returns priority of peer.
func (p *Peer) GetPriority(dynconfig config.DynconfigInterface) commonv2.Priority {
if p.Priority != commonv2.Priority_LEVEL0 {
return p.Priority
}
pbApplications, err := dynconfig.GetApplications()
if err != nil {
p.Log.Info(err)
@ -469,7 +509,7 @@ func (p *Peer) GetPriority(dynconfig config.DynconfigInterface) commonv2.Priorit
// Find peer application.
var application *managerv2.Application
for _, pbApplication := range pbApplications {
if p.Application == pbApplication.Name {
if p.Task.Application == pbApplication.Name {
application = pbApplication
break
}
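Putting the pieces together: an explicit WithPriority short-circuits the lookup, otherwise GetPriority consults the applications served by dynconfig and matches on the task's application name (and, failing that, its URL). A hedged usage sketch:

peer := NewPeer(peerID, task, host, WithPriority(commonv2.Priority_LEVEL4))
_ = peer.GetPriority(dynconfig) // returns LEVEL4 without calling GetApplications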

View File

@ -136,7 +136,7 @@ func TestPeerManager_Load(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, mockTask, mockHost)
peerManager, err := newPeerManager(mockPeerGCConfig, gc)
if err != nil {
@ -193,7 +193,7 @@ func TestPeerManager_Store(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, mockTask, mockHost)
peerManager, err := newPeerManager(mockPeerGCConfig, gc)
if err != nil {
@ -248,7 +248,7 @@ func TestPeerManager_LoadOrStore(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, mockTask, mockHost)
peerManager, err := newPeerManager(mockPeerGCConfig, gc)
if err != nil {
@ -305,7 +305,7 @@ func TestPeerManager_Delete(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, mockTask, mockHost)
peerManager, err := newPeerManager(mockPeerGCConfig, gc)
if err != nil {
@ -513,7 +513,7 @@ func TestPeerManager_RunGC(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, mockTask, mockHost)
peerManager, err := newPeerManager(tc.gcConfig, gc)
if err != nil {

View File

@ -57,9 +57,12 @@ func TestPeer_NewPeer(t *testing.T) {
{
name: "new peer",
id: mockPeerID,
options: []PeerOption{},
expect: func(t *testing.T, peer *Peer, mockTask *Task, mockHost *Host) {
assert := assert.New(t)
assert.Equal(peer.ID, mockPeerID)
assert.Nil(peer.Range)
assert.Equal(peer.Priority, commonv2.Priority_LEVEL0)
assert.Equal(peer.Pieces.Len(), uint(0))
assert.Empty(peer.FinishedPieces)
assert.Equal(len(peer.PieceCosts()), 0)
@ -68,27 +71,65 @@ func TestPeer_NewPeer(t *testing.T) {
assert.Equal(peer.FSM.Current(), PeerStatePending)
assert.EqualValues(peer.Task, mockTask)
assert.EqualValues(peer.Host, mockHost)
assert.Equal(peer.BlockParents.Len(), uint(0))
assert.Equal(peer.NeedBackToSource.Load(), false)
assert.Equal(peer.IsBackToSource.Load(), false)
assert.NotEqual(peer.PieceUpdatedAt.Load(), 0)
assert.NotEqual(peer.CreatedAt.Load(), 0)
assert.NotEqual(peer.UpdatedAt.Load(), 0)
assert.NotNil(peer.Log)
},
},
{
name: "new peer with tag and application",
name: "new peer with priority",
id: mockPeerID,
options: []PeerOption{WithTag("foo"), WithApplication("bar")},
options: []PeerOption{WithPriority(commonv2.Priority_LEVEL4)},
expect: func(t *testing.T, peer *Peer, mockTask *Task, mockHost *Host) {
assert := assert.New(t)
assert.Equal(peer.ID, mockPeerID)
assert.Equal(peer.Tag, "foo")
assert.Equal(peer.Application, "bar")
assert.Nil(peer.Range)
assert.Equal(peer.Priority, commonv2.Priority_LEVEL4)
assert.Equal(peer.Pieces.Len(), uint(0))
assert.Empty(peer.FinishedPieces)
assert.Equal(len(peer.PieceCosts()), 0)
assert.Empty(peer.ReportPieceResultStream)
assert.Empty(peer.AnnouncePeerStream)
assert.Equal(peer.FSM.Current(), PeerStatePending)
assert.EqualValues(peer.Task, mockTask)
assert.EqualValues(peer.Host, mockHost)
assert.Equal(peer.BlockParents.Len(), uint(0))
assert.Equal(peer.NeedBackToSource.Load(), false)
assert.Equal(peer.IsBackToSource.Load(), false)
assert.NotEqual(peer.PieceUpdatedAt.Load(), 0)
assert.NotEqual(peer.CreatedAt.Load(), 0)
assert.NotEqual(peer.UpdatedAt.Load(), 0)
assert.NotNil(peer.Log)
},
},
{
name: "new peer with range",
id: mockPeerID,
options: []PeerOption{WithRange(nethttp.Range{
Start: 1,
Length: 10,
})},
expect: func(t *testing.T, peer *Peer, mockTask *Task, mockHost *Host) {
assert := assert.New(t)
assert.Equal(peer.ID, mockPeerID)
assert.EqualValues(peer.Range, &nethttp.Range{Start: 1, Length: 10})
assert.Equal(peer.Priority, commonv2.Priority_LEVEL0)
assert.Equal(peer.Pieces.Len(), uint(0))
assert.Empty(peer.FinishedPieces)
assert.Equal(len(peer.PieceCosts()), 0)
assert.Empty(peer.ReportPieceResultStream)
assert.Empty(peer.AnnouncePeerStream)
assert.Equal(peer.FSM.Current(), PeerStatePending)
assert.EqualValues(peer.Task, mockTask)
assert.EqualValues(peer.Host, mockHost)
assert.Equal(peer.BlockParents.Len(), uint(0))
assert.Equal(peer.NeedBackToSource.Load(), false)
assert.Equal(peer.IsBackToSource.Load(), false)
assert.NotEqual(peer.PieceUpdatedAt.Load(), 0)
assert.NotEqual(peer.CreatedAt.Load(), 0)
assert.NotEqual(peer.UpdatedAt.Load(), 0)
assert.NotNil(peer.Log)
@ -101,7 +142,7 @@ func TestPeer_NewPeer(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
tc.expect(t, NewPeer(tc.id, mockTask, mockHost, tc.options...), mockTask, mockHost)
})
}
@ -136,7 +177,7 @@ func TestPeer_AppendPieceCost(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
peer := NewPeer(mockPeerID, mockTask, mockHost)
tc.expect(t, peer)
@ -173,7 +214,7 @@ func TestPeer_PieceCosts(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
peer := NewPeer(mockPeerID, mockTask, mockHost)
tc.expect(t, peer)
@ -215,7 +256,7 @@ func TestPeer_LoadReportPieceResultStream(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
peer := NewPeer(mockPeerID, mockTask, mockHost)
tc.expect(t, peer, stream)
})
@ -248,7 +289,7 @@ func TestPeer_StoreReportPieceResultStream(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
peer := NewPeer(mockPeerID, mockTask, mockHost)
tc.expect(t, peer, stream)
})
@ -281,7 +322,7 @@ func TestPeer_DeleteReportPieceResultStream(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
peer := NewPeer(mockPeerID, mockTask, mockHost)
tc.expect(t, peer, stream)
})
@ -322,7 +363,7 @@ func TestPeer_LoadAnnouncePeerStream(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
peer := NewPeer(mockPeerID, mockTask, mockHost)
tc.expect(t, peer, stream)
})
@ -355,7 +396,7 @@ func TestPeer_StoreAnnouncePeerStream(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
peer := NewPeer(mockPeerID, mockTask, mockHost)
tc.expect(t, peer, stream)
})
@ -388,7 +429,7 @@ func TestPeer_DeleteAnnouncePeerStream(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
peer := NewPeer(mockPeerID, mockTask, mockHost)
tc.expect(t, peer, stream)
})
@ -433,7 +474,7 @@ func TestPeer_Parents(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
peer := NewPeer(mockPeerID, mockTask, mockHost)
seedPeer := NewPeer(mockSeedPeerID, mockTask, mockHost)
tc.expect(t, peer, seedPeer, stream)
@ -479,7 +520,7 @@ func TestPeer_Children(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
peer := NewPeer(mockPeerID, mockTask, mockHost)
seedPeer := NewPeer(mockSeedPeerID, mockTask, mockHost)
tc.expect(t, peer, seedPeer, stream)
@ -490,9 +531,8 @@ func TestPeer_Children(t *testing.T) {
func TestPeer_DownloadTinyFile(t *testing.T) {
testData := []byte("./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" +
"./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz")
newServer := func(t *testing.T, getPeer func() *Peer) *httptest.Server {
mockServer := func(t *testing.T, peer *Peer) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
peer := getPeer()
assert := assert.New(t)
assert.NotNil(peer)
assert.Equal(r.URL.Path, fmt.Sprintf("/download/%s/%s", peer.Task.ID[:3], peer.Task.ID))
@ -509,13 +549,15 @@ func TestPeer_DownloadTinyFile(t *testing.T) {
assert.Equal(int64(n), rg.Length)
}))
}
tests := []struct {
name string
newServer func(t *testing.T, getPeer func() *Peer) *httptest.Server
mockServer func(t *testing.T, peer *Peer) *httptest.Server
expect func(t *testing.T, peer *Peer)
}{
{
name: "download tiny file - 32",
name: "download tiny file",
mockServer: mockServer,
expect: func(t *testing.T, peer *Peer) {
assert := assert.New(t)
peer.Task.ContentLength.Store(32)
@ -525,10 +567,15 @@ func TestPeer_DownloadTinyFile(t *testing.T) {
},
},
{
name: "download tiny file - 128",
name: "download tiny file with range",
mockServer: mockServer,
expect: func(t *testing.T, peer *Peer) {
assert := assert.New(t)
peer.Task.ContentLength.Store(32)
peer.Range = &nethttp.Range{
Start: 0,
Length: 10,
}
data, err := peer.DownloadTinyFile()
assert.NoError(err)
assert.Equal(testData[:32], data)
@ -536,7 +583,7 @@ func TestPeer_DownloadTinyFile(t *testing.T) {
},
{
name: "download tiny file failed because of http status code",
newServer: func(t *testing.T, getPeer func() *Peer) *httptest.Server {
mockServer: func(t *testing.T, peer *Peer) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNotFound)
}))
@ -552,14 +599,19 @@ func TestPeer_DownloadTinyFile(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
var peer *Peer
if tc.newServer == nil {
tc.newServer = newServer
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
peer := NewPeer(mockPeerID, mockTask, mockHost)
if tc.mockServer == nil {
tc.mockServer = mockServer
}
s := tc.newServer(t, func() *Peer {
return peer
})
s := tc.mockServer(t, peer)
defer s.Close()
url, err := url.Parse(s.URL)
if err != nil {
t.Fatal(err)
@ -575,13 +627,114 @@ func TestPeer_DownloadTinyFile(t *testing.T) {
t.Fatal(err)
}
mockRawHost.IP = ip
mockRawHost.DownloadPort = int32(port)
mockHost.IP = ip
mockHost.DownloadPort = int32(port)
tc.expect(t, peer)
})
}
}
func TestPeer_DownloadFile(t *testing.T) {
testData := []byte("./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" +
"./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz")
mockServer := func(t *testing.T, peer *Peer) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
assert := assert.New(t)
assert.NotNil(peer)
assert.Equal(r.URL.Path, fmt.Sprintf("/download/%s/%s", peer.Task.ID[:3], peer.Task.ID))
assert.Equal(r.URL.RawQuery, fmt.Sprintf("peerId=%s", peer.ID))
rgs, err := nethttp.ParseRange(r.Header.Get(headers.Range), 128)
assert.Nil(err)
assert.Equal(1, len(rgs))
rg := rgs[0]
w.WriteHeader(http.StatusPartialContent)
n, err := w.Write(testData[rg.Start : rg.Start+rg.Length])
assert.Nil(err)
assert.Equal(int64(n), rg.Length)
}))
}
tests := []struct {
name string
mockServer func(t *testing.T, peer *Peer) *httptest.Server
expect func(t *testing.T, peer *Peer)
}{
{
name: "download tiny file",
mockServer: mockServer,
expect: func(t *testing.T, peer *Peer) {
assert := assert.New(t)
peer.Task.ContentLength.Store(32)
data, err := peer.DownloadFile()
assert.NoError(err)
assert.Equal(testData[:32], data)
},
},
{
name: "download tiny file with range",
mockServer: mockServer,
expect: func(t *testing.T, peer *Peer) {
assert := assert.New(t)
peer.Task.ContentLength.Store(10)
peer.Range = &nethttp.Range{
Start: 0,
Length: 10,
}
data, err := peer.DownloadFile()
assert.NoError(err)
assert.Equal(testData[:10], data)
},
},
{
name: "download tiny file failed because of http status code",
mockServer: func(t *testing.T, peer *Peer) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNotFound)
}))
},
expect: func(t *testing.T, peer *Peer) {
assert := assert.New(t)
peer.Task.ID = "foobar"
_, err := peer.DownloadFile()
assert.EqualError(err, "bad response status 404 Not Found")
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
peer = NewPeer(mockPeerID, mockTask, mockHost)
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
peer := NewPeer(mockPeerID, mockTask, mockHost)
if tc.mockServer == nil {
tc.mockServer = mockServer
}
s := tc.mockServer(t, peer)
defer s.Close()
url, err := url.Parse(s.URL)
if err != nil {
t.Fatal(err)
}
ip, rawPort, err := net.SplitHostPort(url.Host)
if err != nil {
t.Fatal(err)
}
port, err := strconv.ParseInt(rawPort, 10, 32)
if err != nil {
t.Fatal(err)
}
mockHost.IP = ip
mockHost.DownloadPort = int32(port)
tc.expect(t, peer)
})
}
@ -593,6 +746,17 @@ func TestPeer_GetPriority(t *testing.T) {
mock func(peer *Peer, md *configmocks.MockDynconfigInterfaceMockRecorder)
expect func(t *testing.T, priority commonv2.Priority)
}{
{
name: "peer has priority",
mock: func(peer *Peer, md *configmocks.MockDynconfigInterfaceMockRecorder) {
priority := commonv2.Priority_LEVEL4
peer.Priority = priority
},
expect: func(t *testing.T, priority commonv2.Priority) {
assert := assert.New(t)
assert.Equal(priority, commonv2.Priority_LEVEL4)
},
},
{
name: "get applications failed",
mock: func(peer *Peer, md *configmocks.MockDynconfigInterfaceMockRecorder) {
@ -630,7 +794,7 @@ func TestPeer_GetPriority(t *testing.T) {
{
name: "can not found priority",
mock: func(peer *Peer, md *configmocks.MockDynconfigInterfaceMockRecorder) {
peer.Application = "bae"
peer.Task.Application = "bae"
md.GetApplications().Return([]*managerv2.Application{
{
Name: "bae",
@ -645,7 +809,7 @@ func TestPeer_GetPriority(t *testing.T) {
{
name: "match the priority of application",
mock: func(peer *Peer, md *configmocks.MockDynconfigInterfaceMockRecorder) {
peer.Application = "baz"
peer.Task.Application = "baz"
md.GetApplications().Return([]*managerv2.Application{
{
Name: "baz",
@ -663,7 +827,7 @@ func TestPeer_GetPriority(t *testing.T) {
{
name: "match the priority of url",
mock: func(peer *Peer, md *configmocks.MockDynconfigInterfaceMockRecorder) {
peer.Application = "bak"
peer.Task.Application = "bak"
peer.Task.URL = "example.com"
md.GetApplications().Return([]*managerv2.Application{
{
@ -696,7 +860,7 @@ func TestPeer_GetPriority(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
peer := NewPeer(mockPeerID, mockTask, mockHost)
tc.mock(peer, dynconfig.EXPECT())
tc.expect(t, peer.GetPriority(dynconfig))


@ -21,26 +21,22 @@ package resource
import (
"context"
"fmt"
"strings"
"time"
cdnsystemv1 "d7y.io/api/pkg/apis/cdnsystem/v1"
commonv1 "d7y.io/api/pkg/apis/common/v1"
commonv2 "d7y.io/api/pkg/apis/common/v2"
schedulerv1 "d7y.io/api/pkg/apis/scheduler/v1"
"d7y.io/dragonfly/v2/pkg/digest"
"d7y.io/dragonfly/v2/pkg/idgen"
"d7y.io/dragonfly/v2/pkg/net/http"
"d7y.io/dragonfly/v2/pkg/rpc/common"
pkgtime "d7y.io/dragonfly/v2/pkg/time"
"d7y.io/dragonfly/v2/scheduler/metrics"
)
const (
// SeedTag Default value of tag label for seed peer.
SeedTag = "d7y/seed"
// SeedApplication Default value of application label for seed peer.
SeedApplication = "d7y/seed"
)
const (
// Default value of seed peer failed timeout.
SeedPeerFailedTimeout = 30 * time.Minute
@ -54,7 +50,7 @@ type SeedPeer interface {
// TriggerTask triggers the seed peer to download the task.
// Used only in v1 version of the grpc.
TriggerTask(context.Context, *Task) (*Peer, *schedulerv1.PeerResult, error)
TriggerTask(context.Context, *http.Range, *Task) (*Peer, *schedulerv1.PeerResult, error)
// Client returns grpc client of seed peer.
Client() SeedPeerClient
@ -91,14 +87,27 @@ func (s *seedPeer) DownloadTask(ctx context.Context, task *Task) error {
// TriggerTask triggers the seed peer to download the task.
// Used only in v1 version of the grpc.
func (s *seedPeer) TriggerTask(ctx context.Context, task *Task) (*Peer, *schedulerv1.PeerResult, error) {
func (s *seedPeer) TriggerTask(ctx context.Context, rg *http.Range, task *Task) (*Peer, *schedulerv1.PeerResult, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
urlMeta := &commonv1.UrlMeta{
Digest: task.Digest,
Tag: task.Tag,
Filter: strings.Join(task.Filters, idgen.URLFilterSeparator),
Header: task.Header,
Application: task.Application,
Priority: commonv1.Priority_LEVEL0,
}
if rg != nil {
urlMeta.Range = rg.URLMetaString()
}
stream, err := s.client.ObtainSeeds(ctx, &cdnsystemv1.SeedRequest{
TaskId: task.ID,
Url: task.URL,
UrlMeta: task.URLMeta,
UrlMeta: urlMeta,
})
if err != nil {
return nil, nil, err
@ -127,7 +136,7 @@ func (s *seedPeer) TriggerTask(ctx context.Context, task *Task) (*Peer, *schedul
initialized = true
// Initialize seed peer.
peer, err = s.initSeedPeer(ctx, task, piece)
peer, err = s.initSeedPeer(ctx, rg, task, piece)
if err != nil {
return nil, nil, err
}
@ -167,11 +176,11 @@ func (s *seedPeer) TriggerTask(ctx context.Context, task *Task) (*Peer, *schedul
task.StorePiece(piece.PieceInfo)
// Statistical traffic metrics.
trafficType := metrics.TrafficBackToSourceType
trafficType := commonv2.TrafficType_BACK_TO_SOURCE
if piece.Reuse {
trafficType = metrics.TrafficP2PType
trafficType = commonv2.TrafficType_REMOTE_PEER
}
metrics.Traffic.WithLabelValues(peer.Tag, peer.Application, trafficType).Add(float64(piece.PieceInfo.RangeSize))
metrics.Traffic.WithLabelValues(peer.Task.Tag, peer.Task.Application, trafficType.String()).Add(float64(piece.PieceInfo.RangeSize))
}
// Handle end of piece.
@ -186,7 +195,7 @@ func (s *seedPeer) TriggerTask(ctx context.Context, task *Task) (*Peer, *schedul
}
// Initialize seed peer.
func (s *seedPeer) initSeedPeer(ctx context.Context, task *Task, ps *cdnsystemv1.PieceSeed) (*Peer, error) {
func (s *seedPeer) initSeedPeer(ctx context.Context, rg *http.Range, task *Task, ps *cdnsystemv1.PieceSeed) (*Peer, error) {
// Load peer from manager.
peer, loaded := s.peerManager.Load(ps.PeerId)
if loaded {
@ -201,8 +210,13 @@ func (s *seedPeer) initSeedPeer(ctx context.Context, task *Task, ps *cdnsystemv1
return nil, fmt.Errorf("can not find host id: %s", ps.HostId)
}
// New and store seed peer.
peer = NewPeer(ps.PeerId, task, host, WithTag(SeedTag), WithApplication(SeedApplication))
options := []PeerOption{}
if rg != nil {
options = append(options, WithRange(*rg))
}
// New and store seed peer with the assembled options.
peer = NewPeer(ps.PeerId, task, host, options...)
s.peerManager.Store(peer)
peer.Log.Info("seed peer has been stored")


@ -9,6 +9,7 @@ import (
reflect "reflect"
scheduler "d7y.io/api/pkg/apis/scheduler/v1"
http "d7y.io/dragonfly/v2/pkg/net/http"
gomock "github.com/golang/mock/gomock"
)
@ -78,9 +79,9 @@ func (mr *MockSeedPeerMockRecorder) Stop() *gomock.Call {
}
// TriggerTask mocks base method.
func (m *MockSeedPeer) TriggerTask(arg0 context.Context, arg1 *Task) (*Peer, *scheduler.PeerResult, error) {
func (m *MockSeedPeer) TriggerTask(arg0 context.Context, arg1 *http.Range, arg2 *Task) (*Peer, *scheduler.PeerResult, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TriggerTask", arg0, arg1)
ret := m.ctrl.Call(m, "TriggerTask", arg0, arg1, arg2)
ret0, _ := ret[0].(*Peer)
ret1, _ := ret[1].(*scheduler.PeerResult)
ret2, _ := ret[2].(error)
@ -88,7 +89,7 @@ func (m *MockSeedPeer) TriggerTask(arg0 context.Context, arg1 *Task) (*Peer, *sc
}
// TriggerTask indicates an expected call of TriggerTask.
func (mr *MockSeedPeerMockRecorder) TriggerTask(arg0, arg1 interface{}) *gomock.Call {
func (mr *MockSeedPeerMockRecorder) TriggerTask(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TriggerTask", reflect.TypeOf((*MockSeedPeer)(nil).TriggerTask), arg0, arg1)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TriggerTask", reflect.TypeOf((*MockSeedPeer)(nil).TriggerTask), arg0, arg1, arg2)
}
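
Call sites that set expectations on this mock need a third matcher now. A hedged gomock sketch (the returned error is illustrative):

ctl := gomock.NewController(t)
defer ctl.Finish()

seedPeer := NewMockSeedPeer(ctl)
// Context, range and task are matched positionally, in that order.
seedPeer.EXPECT().
	TriggerTask(gomock.Any(), gomock.Any(), gomock.Any()).
	Return(nil, nil, errors.New("trigger failed")). // import "errors"
	Times(1)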


@ -84,8 +84,8 @@ func TestSeedPeer_TriggerTask(t *testing.T) {
tc.mock(client.EXPECT())
seedPeer := newSeedPeer(client, peerManager, hostManager)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
peer, result, err := seedPeer.TriggerTask(context.Background(), mockTask)
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
peer, result, err := seedPeer.TriggerTask(context.Background(), nil, mockTask)
tc.expect(t, peer, result, err)
})
}


@ -87,10 +87,10 @@ const (
// TaskOption is a functional option for task.
type TaskOption func(task *Task)
// WithBackToSourceLimit set BackToSourceLimit for task.
func WithBackToSourceLimit(limit int32) TaskOption {
return func(task *Task) {
task.BackToSourceLimit.Add(limit)
// WithPieceSize sets PieceSize for the task.
func WithPieceSize(pieceSize int32) TaskOption {
return func(t *Task) {
t.PieceSize = pieceSize
}
}
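
A short usage sketch of the new option, with fixture values borrowed from the tests below (PieceSize stays zero when the option is omitted):

task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication,
	commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit,
	WithPieceSize(2048))
fmt.Println(task.PieceSize) // 2048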
@ -99,14 +99,29 @@ type Task struct {
// ID is task id.
ID string
// URL is task download url.
URL string
// Type is task type.
Type commonv2.TaskType
// URLMeta is task download url meta.
URLMeta *commonv1.UrlMeta
// URL is task download url.
URL string
// Digest of the task content, for example md5:xxx or sha256:yyy.
Digest string
// Tag identifies different tasks for the same URL.
Tag string
// Application identifies different tasks for the same URL.
Application string
// Filters are used to filter the query params of the URL when generating the task ID.
Filters []string
// Task request headers.
Header map[string]string
// Task piece size.
PieceSize int32
// DirectPiece is tiny piece data.
DirectPiece []byte
@ -147,16 +162,21 @@ type Task struct {
}
// NewTask returns a new task instance.
func NewTask(id, url string, typ commonv2.TaskType, meta *commonv1.UrlMeta, options ...TaskOption) *Task {
func NewTask(id, url, digest, tag, application string, typ commonv2.TaskType,
filters []string, header map[string]string, backToSourceLimit int32, options ...TaskOption) *Task {
t := &Task{
ID: id,
URL: url,
Type: typ,
URLMeta: meta,
URL: url,
Digest: digest,
Tag: tag,
Application: application,
Filters: filters,
Header: header,
DirectPiece: []byte{},
ContentLength: atomic.NewInt64(-1),
TotalPieceCount: atomic.NewInt32(0),
BackToSourceLimit: atomic.NewInt32(0),
BackToSourceLimit: atomic.NewInt32(backToSourceLimit),
BackToSourcePeers: set.NewSafeSet[string](),
Pieces: &sync.Map{},
DAG: dag.NewDAG[*Peer](),

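Migration note for callers: the constructor change is mechanical. A before/after sketch with illustrative arguments (the option in the second call is optional):

// Before: metadata travelled inside *commonv1.UrlMeta and the limit was an option.
task := NewTask(id, url, commonv2.TaskType_DFDAEMON, urlMeta, WithBackToSourceLimit(200))

// After: digest, tag, application, filters, header and the back-to-source
// limit are explicit parameters; functional options remain for optional fields.
task = NewTask(id, url, digest, tag, application, commonv2.TaskType_DFDAEMON,
	filters, header, 200, WithPieceSize(4096))
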

@ -131,7 +131,7 @@ func TestTaskManager_Load(t *testing.T) {
gc := gc.NewMockGC(ctl)
tc.mock(gc.EXPECT())
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
taskManager, err := newTaskManager(mockTaskGCConfig, gc)
if err != nil {
t.Fatal(err)
@ -184,7 +184,7 @@ func TestTaskManager_Store(t *testing.T) {
gc := gc.NewMockGC(ctl)
tc.mock(gc.EXPECT())
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
taskManager, err := newTaskManager(mockTaskGCConfig, gc)
if err != nil {
t.Fatal(err)
@ -235,7 +235,7 @@ func TestTaskManager_LoadOrStore(t *testing.T) {
gc := gc.NewMockGC(ctl)
tc.mock(gc.EXPECT())
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
taskManager, err := newTaskManager(mockTaskGCConfig, gc)
if err != nil {
t.Fatal(err)
@ -288,7 +288,7 @@ func TestTaskManager_Delete(t *testing.T) {
gc := gc.NewMockGC(ctl)
tc.mock(gc.EXPECT())
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
taskManager, err := newTaskManager(mockTaskGCConfig, gc)
if err != nil {
t.Fatal(err)
@ -356,7 +356,7 @@ func TestTaskManager_RunGC(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, mockTask, mockHost)
taskManager, err := newTaskManager(mockTaskGCConfig, gc)
if err != nil {


@ -37,18 +37,6 @@ import (
)
var (
mockTaskURLMeta = &commonv1.UrlMeta{
Digest: "digest",
Tag: "tag",
Range: "range",
Filter: "filter",
Header: map[string]string{
"content-length": "100",
},
}
mockTaskBackToSourceLimit int32 = 200
mockTaskURL = "http://example.com/foo"
mockTaskID = idgen.TaskIDV1(mockTaskURL, mockTaskURLMeta)
mockPieceInfo = &commonv1.PieceInfo{
PieceNum: 1,
RangeStart: 0,
@ -56,28 +44,65 @@ var (
PieceMd5: "ad83a945518a4ef007d8b2db2ef165b3",
PieceOffset: 10,
}
mockTaskBackToSourceLimit int32 = 200
mockTaskURL = "http://example.com/foo"
mockTaskID = idgen.TaskIDV2(mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, mockTaskFilters)
mockTaskDigest = "sha256:c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4"
mockTaskTag = "d7y"
mockTaskApplication = "foo"
mockTaskFilters = []string{"bar"}
mockTaskHeader = map[string]string{"content-length": "100"}
mockTaskPieceSize int32 = 2048
)
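
Both ID generator shapes can be read off this fixture change; a hedged side-by-side using the values above:

// v1 derived the task ID from the URL plus a *commonv1.UrlMeta.
idV1 := idgen.TaskIDV1(mockTaskURL, &commonv1.UrlMeta{Tag: mockTaskTag})

// v2 takes the metadata as explicit arguments instead.
idV2 := idgen.TaskIDV2(mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, mockTaskFilters)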
func TestTask_NewTask(t *testing.T) {
tests := []struct {
name string
id string
urlMeta *commonv1.UrlMeta
url string
backToSourceLimit int32
options []TaskOption
expect func(t *testing.T, task *Task)
}{
{
name: "new task",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
options: []TaskOption{},
expect: func(t *testing.T, task *Task) {
assert := assert.New(t)
assert.Equal(task.ID, mockTaskID)
assert.Equal(task.Type, commonv2.TaskType_DFDAEMON)
assert.Equal(task.URL, mockTaskURL)
assert.EqualValues(task.URLMeta, mockTaskURLMeta)
assert.Equal(task.Digest, mockTaskDigest)
assert.Equal(task.Tag, mockTaskTag)
assert.Equal(task.Application, mockTaskApplication)
assert.EqualValues(task.Filters, mockTaskFilters)
assert.EqualValues(task.Header, mockTaskHeader)
assert.Equal(task.PieceSize, int32(0))
assert.Empty(task.DirectPiece)
assert.Equal(task.ContentLength.Load(), int64(-1))
assert.Equal(task.TotalPieceCount.Load(), int32(0))
assert.Equal(task.BackToSourceLimit.Load(), int32(200))
assert.Equal(task.BackToSourcePeers.Len(), uint(0))
assert.Equal(task.FSM.Current(), TaskStatePending)
assert.Empty(task.Pieces)
assert.Equal(task.PeerCount(), 0)
assert.NotEqual(task.CreatedAt.Load(), 0)
assert.NotEqual(task.UpdatedAt.Load(), 0)
assert.NotNil(task.Log)
},
},
{
name: "new task with piece size",
options: []TaskOption{WithPieceSize(mockTaskPieceSize)},
expect: func(t *testing.T, task *Task) {
assert := assert.New(t)
assert.Equal(task.ID, mockTaskID)
assert.Equal(task.Type, commonv2.TaskType_DFDAEMON)
assert.Equal(task.URL, mockTaskURL)
assert.Equal(task.Digest, mockTaskDigest)
assert.Equal(task.Tag, mockTaskTag)
assert.Equal(task.Application, mockTaskApplication)
assert.EqualValues(task.Filters, mockTaskFilters)
assert.EqualValues(task.Header, mockTaskHeader)
assert.Equal(task.PieceSize, mockTaskPieceSize)
assert.Empty(task.DirectPiece)
assert.Equal(task.ContentLength.Load(), int64(-1))
assert.Equal(task.TotalPieceCount.Load(), int32(0))
@ -95,7 +120,7 @@ func TestTask_NewTask(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
tc.expect(t, NewTask(tc.id, tc.url, commonv2.TaskType_DFDAEMON, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit)))
tc.expect(t, NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, tc.options...))
})
}
}
@ -103,19 +128,11 @@ func TestTask_NewTask(t *testing.T) {
func TestTask_LoadPeer(t *testing.T) {
tests := []struct {
name string
id string
urlMeta *commonv1.UrlMeta
url string
backToSourceLimit int32
peerID string
expect func(t *testing.T, peer *Peer, loaded bool)
}{
{
name: "load peer",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
peerID: mockPeerID,
expect: func(t *testing.T, peer *Peer, loaded bool) {
assert := assert.New(t)
@ -125,11 +142,7 @@ func TestTask_LoadPeer(t *testing.T) {
},
{
name: "peer does not exist",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
peerID: idgen.PeerIDV1("0.0.0.0"),
peerID: idgen.PeerIDV2(),
expect: func(t *testing.T, peer *Peer, loaded bool) {
assert := assert.New(t)
assert.Equal(loaded, false)
@ -137,10 +150,6 @@ func TestTask_LoadPeer(t *testing.T) {
},
{
name: "load key is empty",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
peerID: "",
expect: func(t *testing.T, peer *Peer, loaded bool) {
assert := assert.New(t)
@ -154,7 +163,7 @@ func TestTask_LoadPeer(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := NewTask(tc.id, tc.url, commonv2.TaskType_DFDAEMON, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit))
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, task, mockHost)
task.StorePeer(mockPeer)
@ -223,7 +232,7 @@ func TestTask_LoadRandomPeers(t *testing.T) {
host := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta)
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
tc.expect(t, task, host)
})
@ -233,19 +242,11 @@ func TestTask_LoadRandomPeers(t *testing.T) {
func TestTask_StorePeer(t *testing.T) {
tests := []struct {
name string
id string
urlMeta *commonv1.UrlMeta
url string
backToSourceLimit int32
peerID string
expect func(t *testing.T, peer *Peer, loaded bool)
}{
{
name: "store peer",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
peerID: mockPeerID,
expect: func(t *testing.T, peer *Peer, loaded bool) {
assert := assert.New(t)
@ -255,10 +256,6 @@ func TestTask_StorePeer(t *testing.T) {
},
{
name: "store key is empty",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
peerID: "",
expect: func(t *testing.T, peer *Peer, loaded bool) {
assert := assert.New(t)
@ -273,7 +270,7 @@ func TestTask_StorePeer(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := NewTask(tc.id, tc.url, commonv2.TaskType_DFDAEMON, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit))
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(tc.peerID, task, mockHost)
task.StorePeer(mockPeer)
@ -286,19 +283,11 @@ func TestTask_StorePeer(t *testing.T) {
func TestTask_DeletePeer(t *testing.T) {
tests := []struct {
name string
id string
urlMeta *commonv1.UrlMeta
url string
backToSourceLimit int32
peerID string
expect func(t *testing.T, task *Task)
}{
{
name: "delete peer",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
peerID: mockPeerID,
expect: func(t *testing.T, task *Task) {
assert := assert.New(t)
@ -308,10 +297,6 @@ func TestTask_DeletePeer(t *testing.T) {
},
{
name: "delete key is empty",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
peerID: "",
expect: func(t *testing.T, task *Task) {
assert := assert.New(t)
@ -327,7 +312,7 @@ func TestTask_DeletePeer(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := NewTask(tc.id, tc.url, commonv2.TaskType_DFDAEMON, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit))
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, task, mockHost)
task.StorePeer(mockPeer)
@ -366,7 +351,7 @@ func TestTask_PeerCount(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta)
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, task, mockHost)
tc.expect(t, mockPeer, task)
@ -464,7 +449,7 @@ func TestTask_AddPeerEdge(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta)
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
tc.expect(t, mockHost, task)
})
@ -568,7 +553,7 @@ func TestTask_DeletePeerInEdges(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta)
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
tc.expect(t, mockHost, task)
})
@ -670,7 +655,7 @@ func TestTask_DeletePeerOutEdges(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta)
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
tc.expect(t, mockHost, task)
})
@ -757,7 +742,7 @@ func TestTask_CanAddPeerEdge(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta)
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
tc.expect(t, mockHost, task)
})
@ -820,7 +805,7 @@ func TestTask_PeerDegree(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta)
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
tc.expect(t, mockHost, task)
})
@ -883,7 +868,7 @@ func TestTask_PeerInDegree(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta)
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
tc.expect(t, mockHost, task)
})
@ -946,7 +931,7 @@ func TestTask_PeerOutDegree(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta)
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
tc.expect(t, mockHost, task)
})
@ -956,18 +941,10 @@ func TestTask_PeerOutDegree(t *testing.T) {
func TestTask_HasAvailablePeer(t *testing.T) {
tests := []struct {
name string
id string
urlMeta *commonv1.UrlMeta
url string
backToSourceLimit int32
expect func(t *testing.T, task *Task, mockPeer *Peer)
}{
{
name: "blocklist includes peer",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
expect: func(t *testing.T, task *Task, mockPeer *Peer) {
assert := assert.New(t)
mockPeer.FSM.SetState(PeerStatePending)
@ -980,14 +957,10 @@ func TestTask_HasAvailablePeer(t *testing.T) {
},
{
name: "peer state is PeerStatePending",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
expect: func(t *testing.T, task *Task, mockPeer *Peer) {
assert := assert.New(t)
task.StorePeer(mockPeer)
mockPeer.ID = idgen.PeerIDV1("0.0.0.0")
mockPeer.ID = idgen.PeerIDV2()
mockPeer.FSM.SetState(PeerStatePending)
task.StorePeer(mockPeer)
assert.Equal(task.HasAvailablePeer(set.NewSafeSet[string]()), true)
@ -995,14 +968,10 @@ func TestTask_HasAvailablePeer(t *testing.T) {
},
{
name: "peer state is PeerStateSucceeded",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
expect: func(t *testing.T, task *Task, mockPeer *Peer) {
assert := assert.New(t)
task.StorePeer(mockPeer)
mockPeer.ID = idgen.PeerIDV1("0.0.0.0")
mockPeer.ID = idgen.PeerIDV2()
mockPeer.FSM.SetState(PeerStateSucceeded)
task.StorePeer(mockPeer)
assert.Equal(task.HasAvailablePeer(set.NewSafeSet[string]()), true)
@ -1010,14 +979,10 @@ func TestTask_HasAvailablePeer(t *testing.T) {
},
{
name: "peer state is PeerStateRunning",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
expect: func(t *testing.T, task *Task, mockPeer *Peer) {
assert := assert.New(t)
task.StorePeer(mockPeer)
mockPeer.ID = idgen.PeerIDV1("0.0.0.0")
mockPeer.ID = idgen.PeerIDV2()
mockPeer.FSM.SetState(PeerStateRunning)
task.StorePeer(mockPeer)
assert.Equal(task.HasAvailablePeer(set.NewSafeSet[string]()), true)
@ -1025,14 +990,10 @@ func TestTask_HasAvailablePeer(t *testing.T) {
},
{
name: "peer state is PeerStateBackToSource",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
expect: func(t *testing.T, task *Task, mockPeer *Peer) {
assert := assert.New(t)
task.StorePeer(mockPeer)
mockPeer.ID = idgen.PeerIDV1("0.0.0.0")
mockPeer.ID = idgen.PeerIDV2()
mockPeer.FSM.SetState(PeerStateBackToSource)
task.StorePeer(mockPeer)
assert.Equal(task.HasAvailablePeer(set.NewSafeSet[string]()), true)
@ -1040,10 +1001,6 @@ func TestTask_HasAvailablePeer(t *testing.T) {
},
{
name: "peer does not exist",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
expect: func(t *testing.T, task *Task, mockPeer *Peer) {
assert := assert.New(t)
assert.Equal(task.HasAvailablePeer(set.NewSafeSet[string]()), false)
@ -1056,7 +1013,7 @@ func TestTask_HasAvailablePeer(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit))
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, task, mockHost)
tc.expect(t, task, mockPeer)
@ -1123,7 +1080,7 @@ func TestTask_LoadSeedPeer(t *testing.T) {
mockSeedHost := NewHost(
mockRawSeedHost.ID, mockRawSeedHost.IP, mockRawSeedHost.Hostname,
mockRawSeedHost.Port, mockRawSeedHost.DownloadPort, mockRawSeedHost.Type)
task := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, task, mockHost)
mockSeedPeer := NewPeer(mockSeedPeerID, task, mockSeedHost)
@ -1190,7 +1147,7 @@ func TestTask_IsSeedPeerFailed(t *testing.T) {
mockSeedHost := NewHost(
mockRawSeedHost.ID, mockRawSeedHost.IP, mockRawSeedHost.Hostname,
mockRawSeedHost.Port, mockRawSeedHost.DownloadPort, mockRawSeedHost.Type)
task := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, task, mockHost)
mockSeedPeer := NewPeer(mockSeedPeerID, task, mockSeedHost)
@ -1202,20 +1159,12 @@ func TestTask_IsSeedPeerFailed(t *testing.T) {
func TestTask_LoadPiece(t *testing.T) {
tests := []struct {
name string
id string
urlMeta *commonv1.UrlMeta
url string
backToSourceLimit int32
pieceInfo *commonv1.PieceInfo
pieceNum int32
expect func(t *testing.T, piece *commonv1.PieceInfo, loaded bool)
}{
{
name: "load piece",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
pieceInfo: mockPieceInfo,
pieceNum: mockPieceInfo.PieceNum,
expect: func(t *testing.T, piece *commonv1.PieceInfo, loaded bool) {
@ -1226,10 +1175,6 @@ func TestTask_LoadPiece(t *testing.T) {
},
{
name: "piece does not exist",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
pieceInfo: mockPieceInfo,
pieceNum: 2,
expect: func(t *testing.T, piece *commonv1.PieceInfo, loaded bool) {
@ -1239,10 +1184,6 @@ func TestTask_LoadPiece(t *testing.T) {
},
{
name: "load key is zero",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
pieceInfo: mockPieceInfo,
pieceNum: 0,
expect: func(t *testing.T, piece *commonv1.PieceInfo, loaded bool) {
@ -1254,7 +1195,7 @@ func TestTask_LoadPiece(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
task := NewTask(tc.id, tc.url, commonv2.TaskType_DFDAEMON, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit))
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
task.StorePiece(tc.pieceInfo)
piece, loaded := task.LoadPiece(tc.pieceNum)
@ -1266,20 +1207,12 @@ func TestTask_LoadPiece(t *testing.T) {
func TestTask_StorePiece(t *testing.T) {
tests := []struct {
name string
id string
urlMeta *commonv1.UrlMeta
url string
backToSourceLimit int32
pieceInfo *commonv1.PieceInfo
pieceNum int32
expect func(t *testing.T, piece *commonv1.PieceInfo, loaded bool)
}{
{
name: "store piece",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
pieceInfo: mockPieceInfo,
pieceNum: mockPieceInfo.PieceNum,
expect: func(t *testing.T, piece *commonv1.PieceInfo, loaded bool) {
@ -1292,7 +1225,7 @@ func TestTask_StorePiece(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
task := NewTask(tc.id, tc.url, commonv2.TaskType_DFDAEMON, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit))
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
task.StorePiece(tc.pieceInfo)
piece, loaded := task.LoadPiece(tc.pieceNum)
@ -1304,20 +1237,12 @@ func TestTask_StorePiece(t *testing.T) {
func TestTask_DeletePiece(t *testing.T) {
tests := []struct {
name string
id string
urlMeta *commonv1.UrlMeta
url string
backToSourceLimit int32
pieceInfo *commonv1.PieceInfo
pieceNum int32
expect func(t *testing.T, task *Task)
}{
{
name: "delete piece",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
pieceInfo: mockPieceInfo,
pieceNum: mockPieceInfo.PieceNum,
expect: func(t *testing.T, task *Task) {
@ -1328,10 +1253,6 @@ func TestTask_DeletePiece(t *testing.T) {
},
{
name: "delete key does not exist",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
pieceInfo: mockPieceInfo,
pieceNum: 0,
expect: func(t *testing.T, task *Task) {
@ -1345,7 +1266,7 @@ func TestTask_DeletePiece(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
task := NewTask(tc.id, tc.url, commonv2.TaskType_DFDAEMON, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit))
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
task.StorePiece(tc.pieceInfo)
task.DeletePiece(tc.pieceNum)
@ -1357,20 +1278,12 @@ func TestTask_DeletePiece(t *testing.T) {
func TestTask_SizeScope(t *testing.T) {
tests := []struct {
name string
id string
urlMeta *commonv1.UrlMeta
url string
backToSourceLimit int32
contentLength int64
totalPieceCount int32
expect func(t *testing.T, task *Task)
}{
{
name: "scope size is tiny",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
contentLength: TinyFileSize,
totalPieceCount: 1,
expect: func(t *testing.T, task *Task) {
@ -1382,10 +1295,6 @@ func TestTask_SizeScope(t *testing.T) {
},
{
name: "scope size is empty",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
contentLength: 0,
totalPieceCount: 0,
expect: func(t *testing.T, task *Task) {
@ -1397,10 +1306,6 @@ func TestTask_SizeScope(t *testing.T) {
},
{
name: "scope size is small",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
contentLength: TinyFileSize + 1,
totalPieceCount: 1,
expect: func(t *testing.T, task *Task) {
@ -1412,10 +1317,6 @@ func TestTask_SizeScope(t *testing.T) {
},
{
name: "scope size is normal",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
contentLength: TinyFileSize + 1,
totalPieceCount: 2,
expect: func(t *testing.T, task *Task) {
@ -1427,10 +1328,6 @@ func TestTask_SizeScope(t *testing.T) {
},
{
name: "invalid content length",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
contentLength: -1,
totalPieceCount: 2,
expect: func(t *testing.T, task *Task) {
@ -1441,10 +1338,6 @@ func TestTask_SizeScope(t *testing.T) {
},
{
name: "invalid total piece count",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: mockTaskBackToSourceLimit,
contentLength: TinyFileSize + 1,
totalPieceCount: -1,
expect: func(t *testing.T, task *Task) {
@ -1457,7 +1350,7 @@ func TestTask_SizeScope(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
task := NewTask(tc.id, tc.url, commonv2.TaskType_DFDAEMON, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit))
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
task.ContentLength.Store(tc.contentLength)
task.TotalPieceCount.Store(tc.totalPieceCount)
tc.expect(t, task)
@ -1476,9 +1369,6 @@ func TestTask_CanBackToSource(t *testing.T) {
}{
{
name: "task can back-to-source",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: 1,
expect: func(t *testing.T, task *Task) {
assert := assert.New(t)
@ -1487,9 +1377,6 @@ func TestTask_CanBackToSource(t *testing.T) {
},
{
name: "task can not back-to-source",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: -1,
expect: func(t *testing.T, task *Task) {
assert := assert.New(t)
@ -1498,9 +1385,6 @@ func TestTask_CanBackToSource(t *testing.T) {
},
{
name: "task can back-to-source and task type is DFSTORE",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: 1,
expect: func(t *testing.T, task *Task) {
assert := assert.New(t)
@ -1510,9 +1394,6 @@ func TestTask_CanBackToSource(t *testing.T) {
},
{
name: "task type is DFCACHE",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: 1,
expect: func(t *testing.T, task *Task) {
assert := assert.New(t)
@ -1524,7 +1405,7 @@ func TestTask_CanBackToSource(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
task := NewTask(tc.id, tc.url, commonv2.TaskType_DFDAEMON, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit))
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, tc.backToSourceLimit)
tc.expect(t, task)
})
}
@ -1533,18 +1414,10 @@ func TestTask_CanBackToSource(t *testing.T) {
func TestTask_CanReuseDirectPiece(t *testing.T) {
tests := []struct {
name string
id string
urlMeta *commonv1.UrlMeta
url string
backToSourceLimit int32
expect func(t *testing.T, task *Task)
}{
{
name: "task can reuse direct piece",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: 1,
expect: func(t *testing.T, task *Task) {
assert := assert.New(t)
task.DirectPiece = []byte{1}
@ -1554,10 +1427,6 @@ func TestTask_CanReuseDirectPiece(t *testing.T) {
},
{
name: "direct piece is empty",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: 1,
expect: func(t *testing.T, task *Task) {
assert := assert.New(t)
task.ContentLength.Store(1)
@ -1566,10 +1435,6 @@ func TestTask_CanReuseDirectPiece(t *testing.T) {
},
{
name: "content length is error",
id: mockTaskID,
urlMeta: mockTaskURLMeta,
url: mockTaskURL,
backToSourceLimit: 1,
expect: func(t *testing.T, task *Task) {
assert := assert.New(t)
task.DirectPiece = []byte{1}
@ -1581,7 +1446,7 @@ func TestTask_CanReuseDirectPiece(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
task := NewTask(tc.id, tc.url, commonv2.TaskType_DFDAEMON, tc.urlMeta, WithBackToSourceLimit(tc.backToSourceLimit))
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
tc.expect(t, task)
})
}
@ -1662,7 +1527,7 @@ func TestTask_ReportPieceResultToPeers(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, task, mockHost)
task.StorePeer(mockPeer)
tc.run(t, task, mockPeer, stream, stream.EXPECT())
@ -1745,7 +1610,7 @@ func TestTask_AnnouncePeers(t *testing.T) {
mockHost := NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, WithBackToSourceLimit(mockTaskBackToSourceLimit))
task := NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit)
mockPeer := NewPeer(mockPeerID, task, mockHost)
task.StorePeer(mockPeer)
tc.run(t, task, mockPeer, stream, stream.EXPECT())


@ -56,22 +56,13 @@ func (s *schedulerServerV1) RegisterPeerTask(ctx context.Context, req *scheduler
req.TaskId = idgen.TaskIDV1(req.Url, req.UrlMeta)
}
tag := resource.DefaultTag
if req.UrlMeta.Tag != "" {
tag = req.UrlMeta.Tag
}
application := resource.DefaultApplication
if req.UrlMeta.Application != "" {
application = req.UrlMeta.Application
}
metrics.RegisterPeerTaskCount.WithLabelValues(tag, application).Inc()
tag := req.UrlMeta.Tag
application := req.UrlMeta.Application
metrics.RegisterTaskCount.WithLabelValues(tag, application).Inc()
resp, err := s.service.RegisterPeerTask(ctx, req)
if err != nil {
metrics.RegisterPeerTaskFailureCount.WithLabelValues(tag, application).Inc()
} else {
metrics.PeerTaskCounter.WithLabelValues(tag, application, resp.SizeScope.String()).Inc()
metrics.RegisterTaskFailureCount.WithLabelValues(tag, application).Inc()
}
return resp, err
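
For orientation, the renamed counters are presumably prometheus CounterVec values keyed only by the raw tag and application labels now that the default fallbacks are gone (empty metadata simply becomes empty label values). A minimal sketch of such a definition; the metric name, namespace, and label names are assumptions, not read from the metrics package:

var RegisterTaskCount = promauto.NewCounterVec(prometheus.CounterOpts{
	Namespace: "dragonfly",
	Subsystem: "scheduler",
	Name:      "register_task_total",
	Help:      "Counter of the number of register task requests.",
}, []string{"tag", "application"})

// Usage mirrors the handler above:
//   RegisterTaskCount.WithLabelValues(tag, application).Inc()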


@ -22,7 +22,6 @@ import (
"github.com/stretchr/testify/assert"
commonv1 "d7y.io/api/pkg/apis/common/v1"
commonv2 "d7y.io/api/pkg/apis/common/v2"
"d7y.io/dragonfly/v2/pkg/idgen"
@ -32,7 +31,7 @@ import (
var (
mockRawHost = resource.Host{
ID: idgen.HostIDV1("hostname", 8003),
ID: mockHostID,
Type: types.HostTypeNormal,
Hostname: "hostname",
IP: "127.0.0.1",
@ -51,7 +50,7 @@ var (
}
mockRawSeedHost = resource.Host{
ID: idgen.HostIDV1("hostname_seed", 8003),
ID: mockSeedHostID,
Type: types.HostTypeSuperSeed,
Hostname: "hostname_seed",
IP: "127.0.0.1",
@ -100,9 +99,9 @@ var (
mockNetwork = resource.Network{
TCPConnectionCount: 10,
UploadTCPConnectionCount: 1,
SecurityDomain: "security_domain",
Location: "location",
IDC: "idc",
SecurityDomain: mockHostSecurityDomain,
Location: mockHostLocation,
IDC: mockHostIDC,
}
mockDisk = resource.Disk{
@ -123,20 +122,21 @@ var (
Platform: "darwin",
}
mockTaskURLMeta = &commonv1.UrlMeta{
Digest: "digest",
Tag: "tag",
Range: "range",
Filter: "filter",
Header: map[string]string{
"content-length": "100",
},
}
mockTaskBackToSourceLimit int32 = 200
mockTaskURL = "http://example.com/foo"
mockTaskID = idgen.TaskIDV1(mockTaskURL, mockTaskURLMeta)
mockPeerID = idgen.PeerIDV1("127.0.0.1")
mockTaskID = idgen.TaskIDV2(mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, mockTaskFilters)
mockTaskDigest = "sha256:c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4"
mockTaskTag = "d7y"
mockTaskApplication = "foo"
mockTaskFilters = []string{"bar"}
mockTaskHeader = map[string]string{"content-length": "100"}
mockTaskPieceSize int32 = 2048
mockHostID = idgen.HostIDV2("127.0.0.1", "hostname", 8003)
mockSeedHostID = idgen.HostIDV2("127.0.0.1", "hostname_seed", 8003)
mockHostSecurityDomain = "security_domain"
mockHostLocation = "location"
mockHostIDC = "idc"
mockPeerID = idgen.PeerIDV2()
)
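
Peer ID generation changes shape as well: v1 embedded a host IP, while v2 takes no arguments (its derivation is not shown in this diff, so treat it as unspecified here):

oldID := idgen.PeerIDV1("127.0.0.1") // v1: seeded with the host IP
newID := idgen.PeerIDV2()            // v2: no arguments, per the fixture change above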
func TestEvaluatorBase_NewEvaluatorBase(t *testing.T) {
@ -172,12 +172,12 @@ func TestEvaluatorBase_Evaluate(t *testing.T) {
{
name: "security domain is not the same",
parent: resource.NewPeer(idgen.PeerIDV1("127.0.0.1"),
resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)),
resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize)),
resource.NewHost(
mockRawSeedHost.ID, mockRawSeedHost.IP, mockRawSeedHost.Hostname,
mockRawSeedHost.Port, mockRawSeedHost.DownloadPort, mockRawSeedHost.Type)),
child: resource.NewPeer(idgen.PeerIDV1("127.0.0.1"),
resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)),
resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize)),
resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)),
@ -194,12 +194,12 @@ func TestEvaluatorBase_Evaluate(t *testing.T) {
{
name: "security domain is same",
parent: resource.NewPeer(idgen.PeerIDV1("127.0.0.1"),
resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)),
resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize)),
resource.NewHost(
mockRawSeedHost.ID, mockRawSeedHost.IP, mockRawSeedHost.Hostname,
mockRawSeedHost.Port, mockRawSeedHost.DownloadPort, mockRawSeedHost.Type)),
child: resource.NewPeer(idgen.PeerIDV1("127.0.0.1"),
resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)),
resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize)),
resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)),
@ -217,12 +217,12 @@ func TestEvaluatorBase_Evaluate(t *testing.T) {
{
name: "parent security domain is empty",
parent: resource.NewPeer(idgen.PeerIDV1("127.0.0.1"),
resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)),
resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize)),
resource.NewHost(
mockRawSeedHost.ID, mockRawSeedHost.IP, mockRawSeedHost.Hostname,
mockRawSeedHost.Port, mockRawSeedHost.DownloadPort, mockRawSeedHost.Type)),
child: resource.NewPeer(idgen.PeerIDV1("127.0.0.1"),
resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)),
resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize)),
resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)),
@ -240,12 +240,12 @@ func TestEvaluatorBase_Evaluate(t *testing.T) {
{
name: "child security domain is empty",
parent: resource.NewPeer(idgen.PeerIDV1("127.0.0.1"),
resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)),
resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize)),
resource.NewHost(
mockRawSeedHost.ID, mockRawSeedHost.IP, mockRawSeedHost.Hostname,
mockRawSeedHost.Port, mockRawSeedHost.DownloadPort, mockRawSeedHost.Type)),
child: resource.NewPeer(idgen.PeerIDV1("127.0.0.1"),
resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit)),
resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize)),
resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)),
@ -275,7 +275,7 @@ func TestEvaluatorBase_calculatePieceScore(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
tests := []struct {
name string
@ -436,7 +436,7 @@ func TestEvaluatorBase_calculatehostUploadSuccessScore(t *testing.T) {
host := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
mockPeer := resource.NewPeer(mockPeerID, mockTask, host)
tc.mock(host)
tc.expect(t, calculateParentHostUploadSuccessScore(mockPeer))
@ -475,7 +475,7 @@ func TestEvaluatorBase_calculateFreeUploadScore(t *testing.T) {
host := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
mockPeer := resource.NewPeer(mockPeerID, mockTask, host)
tc.mock(host, mockPeer)
tc.expect(t, calculateFreeUploadScore(host))
@ -526,7 +526,7 @@ func TestEvaluatorBase_calculateHostTypeScore(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
peer := resource.NewPeer(mockPeerID, mockTask, mockHost)
tc.mock(peer)
tc.expect(t, calculateHostTypeScore(peer))
@ -737,7 +737,7 @@ func TestEvaluatorBase_IsBadNode(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
tests := []struct {
name string


@ -54,7 +54,7 @@ var (
}
mockRawHost = resource.Host{
ID: idgen.HostIDV1("hostname", 8003),
ID: mockHostID,
Type: pkgtypes.HostTypeNormal,
Hostname: "hostname",
IP: "127.0.0.1",
@ -73,7 +73,7 @@ var (
}
mockRawSeedHost = resource.Host{
ID: idgen.HostIDV1("hostname_seed", 8003),
ID: mockSeedHostID,
Type: pkgtypes.HostTypeSuperSeed,
Hostname: "hostname_seed",
IP: "127.0.0.1",
@ -122,9 +122,9 @@ var (
mockNetwork = resource.Network{
TCPConnectionCount: 10,
UploadTCPConnectionCount: 1,
SecurityDomain: "security_domain",
Location: "location",
IDC: "idc",
SecurityDomain: mockHostSecurityDomain,
Location: mockHostLocation,
IDC: mockHostIDC,
}
mockDisk = resource.Disk{
@ -145,21 +145,22 @@ var (
Platform: "darwin",
}
mockTaskURLMeta = &commonv1.UrlMeta{
Digest: "digest",
Tag: "tag",
Range: "range",
Filter: "filter",
Header: map[string]string{
"content-length": "100",
},
}
mockTaskBackToSourceLimit int32 = 200
mockTaskURL = "http://example.com/foo"
mockTaskID = idgen.TaskIDV1(mockTaskURL, mockTaskURLMeta)
mockPeerID = idgen.PeerIDV1("127.0.0.1")
mockSeedPeerID = idgen.SeedPeerIDV1("127.0.0.1")
mockTaskID = idgen.TaskIDV2(mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, mockTaskFilters)
mockTaskDigest = "sha256:c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4"
mockTaskTag = "d7y"
mockTaskApplication = "foo"
mockTaskFilters = []string{"bar"}
mockTaskHeader = map[string]string{"content-length": "100"}
mockTaskPieceSize int32 = 2048
mockHostID = idgen.HostIDV2("127.0.0.1", "hostname", 8003)
mockSeedHostID = idgen.HostIDV2("127.0.0.1", "hostname_seed", 8003)
mockHostSecurityDomain = "security_domain"
mockHostLocation = "location"
mockHostIDC = "idc"
mockPeerID = idgen.PeerIDV2()
mockSeedPeerID = idgen.PeerIDV2()
)
func TestScheduler_New(t *testing.T) {
@ -384,7 +385,7 @@ func TestScheduler_ScheduleParent(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
peer := resource.NewPeer(mockPeerID, mockTask, mockHost)
mockSeedHost := resource.NewHost(
mockRawSeedHost.ID, mockRawSeedHost.IP, mockRawSeedHost.Hostname,
@ -663,7 +664,7 @@ func TestScheduler_NotifyAndFindParent(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
peer := resource.NewPeer(mockPeerID, mockTask, mockHost)
mockPeer := resource.NewPeer(idgen.PeerIDV1("127.0.0.1"), mockTask, resource.NewHost(
idgen.HostIDV1(uuid.New().String(), 8003), mockRawHost.IP, mockRawHost.Hostname,
@ -934,7 +935,7 @@ func TestScheduler_FindParent(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
peer := resource.NewPeer(mockPeerID, mockTask, mockHost)
var mockPeers []*resource.Peer
@ -1027,7 +1028,7 @@ func TestScheduler_constructSuccessPeerPacket(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
peer := resource.NewPeer(mockPeerID, mockTask, mockHost)
parent := resource.NewPeer(idgen.PeerIDV1("127.0.0.1"), mockTask, mockHost)


@ -21,7 +21,8 @@ import (
"errors"
"fmt"
"io"
"net/url"
"math"
"strings"
"time"
"go.opentelemetry.io/otel/trace"
@ -36,6 +37,8 @@ import (
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/container/set"
"d7y.io/dragonfly/v2/pkg/digest"
"d7y.io/dragonfly/v2/pkg/idgen"
"d7y.io/dragonfly/v2/pkg/net/http"
"d7y.io/dragonfly/v2/pkg/rpc/common"
pkgtime "d7y.io/dragonfly/v2/pkg/time"
"d7y.io/dragonfly/v2/pkg/types"
@ -89,7 +92,7 @@ func (v *V1) RegisterPeerTask(ctx context.Context, req *schedulerv1.PeerTaskRequ
// Store resource.
task := v.storeTask(ctx, req, commonv2.TaskType_DFDAEMON)
host := v.storeHost(ctx, req.PeerHost)
peer := v.storePeer(ctx, req.PeerId, task, host, req.UrlMeta.Tag, req.UrlMeta.Application)
peer := v.storePeer(ctx, req.PeerId, req.UrlMeta.Priority, req.UrlMeta.Range, task, host)
// Trigger the first download of the task.
if err := v.triggerTask(ctx, req, task, host, peer, v.dynconfig); err != nil {
@ -234,10 +237,10 @@ func (v *V1) ReportPieceResult(stream schedulerv1.Scheduler_ReportPieceResultSer
v.handlePieceSuccess(ctx, peer, piece)
// Collect host traffic metrics.
if v.config.Metrics.Enable && v.config.Metrics.EnablePeerHost {
metrics.PeerHostTraffic.WithLabelValues(peer.Tag, peer.Application, metrics.PeerHostTrafficDownloadType, peer.Host.ID, peer.Host.IP).Add(float64(piece.PieceInfo.RangeSize))
if v.config.Metrics.Enable && v.config.Metrics.EnableHost {
metrics.HostTraffic.WithLabelValues(peer.Task.Tag, peer.Task.Application, metrics.HostTrafficDownloadType, peer.Host.ID, peer.Host.IP).Add(float64(piece.PieceInfo.RangeSize))
if parent, loaded := v.resource.PeerManager().Load(piece.DstPid); loaded {
metrics.PeerHostTraffic.WithLabelValues(peer.Tag, peer.Application, metrics.PeerHostTrafficUploadType, parent.Host.ID, parent.Host.IP).Add(float64(piece.PieceInfo.RangeSize))
metrics.HostTraffic.WithLabelValues(peer.Task.Tag, peer.Task.Application, metrics.HostTrafficUploadType, parent.Host.ID, parent.Host.IP).Add(float64(piece.PieceInfo.RangeSize))
} else if !resource.IsPieceBackToSource(piece.DstPid) {
peer.Log.Warnf("dst peer %s not found", piece.DstPid)
}
@ -245,9 +248,9 @@ func (v *V1) ReportPieceResult(stream schedulerv1.Scheduler_ReportPieceResultSer
// Collect traffic metrics.
if !resource.IsPieceBackToSource(piece.DstPid) {
metrics.Traffic.WithLabelValues(peer.Tag, peer.Application, metrics.TrafficP2PType).Add(float64(piece.PieceInfo.RangeSize))
metrics.Traffic.WithLabelValues(peer.Task.Tag, peer.Task.Application, commonv2.TrafficType_REMOTE_PEER.String()).Add(float64(piece.PieceInfo.RangeSize))
} else {
metrics.Traffic.WithLabelValues(peer.Tag, peer.Application, metrics.TrafficBackToSourceType).Add(float64(piece.PieceInfo.RangeSize))
metrics.Traffic.WithLabelValues(peer.Task.Tag, peer.Task.Application, commonv2.TrafficType_BACK_TO_SOURCE.String()).Add(float64(piece.PieceInfo.RangeSize))
}
continue
}
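
The hunk above moves the traffic counters onto the task's Tag and Application and keys the type label by the commonv2 traffic-type string instead of scheduler-local constants. Below is a minimal sketch of a counter with that label shape, assuming the promauto helper; the namespace, subsystem, and label names are illustrative, not the repository's actual definitions:

package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Traffic counts bytes moved, partitioned by task tag, task application,
// and traffic type ("REMOTE_PEER", "BACK_TO_SOURCE", ...).
var Traffic = promauto.NewCounterVec(prometheus.CounterOpts{
	Namespace: "dragonfly",
	Subsystem: "scheduler",
	Name:      "traffic",
	Help:      "Counter of scheduler traffic in bytes.",
}, []string{"tag", "application", "type"})

A call site then increments one series per piece, e.g. Traffic.WithLabelValues(tag, app, commonv2.TrafficType_REMOTE_PEER.String()).Add(float64(n)).
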
@ -279,25 +282,27 @@ func (v *V1) ReportPeerResult(ctx context.Context, req *schedulerv1.PeerResult)
logger.Error(msg)
return dferrors.New(commonv1.Code_SchedPeerNotFound, msg)
}
metrics.DownloadCount.WithLabelValues(peer.Tag, peer.Application).Inc()
metrics.DownloadTaskCount.WithLabelValues(peer.Task.Tag, peer.Task.Application, peer.Host.Type.Name()).Inc()
parents := peer.Parents()
if !req.Success {
peer.Log.Error("report failed peer")
if peer.FSM.Is(resource.PeerStateBackToSource) {
metrics.DownloadFailureCount.WithLabelValues(peer.Tag, peer.Application, metrics.DownloadFailureBackToSourceType, req.Code.String()).Inc()
metrics.DownloadTaskFailureCount.WithLabelValues(peer.Task.Tag, peer.Task.Application,
metrics.DownloadFailureBackToSourceType, req.Code.String(), peer.Host.Type.Name()).Inc()
go v.createRecord(peer, parents, req)
v.handleTaskFailure(ctx, peer.Task, req.GetSourceError(), nil)
v.handlePeerFailure(ctx, peer)
return nil
}
metrics.DownloadFailureCount.WithLabelValues(peer.Tag, peer.Application, metrics.DownloadFailureP2PType, req.Code.String()).Inc()
metrics.DownloadTaskFailureCount.WithLabelValues(peer.Task.Tag, peer.Task.Application,
metrics.DownloadFailureP2PType, req.Code.String(), peer.Host.Type.Name()).Inc()
go v.createRecord(peer, parents, req)
v.handlePeerFailure(ctx, peer)
return nil
}
metrics.PeerTaskDownloadDuration.WithLabelValues(peer.Tag, peer.Application).Observe(float64(req.Cost))
metrics.DownloadTaskDuration.WithLabelValues(peer.Task.Tag, peer.Task.Application, peer.Host.Type.Name()).Observe(float64(req.Cost))
peer.Log.Info("report success peer")
if peer.FSM.Is(resource.PeerStateBackToSource) {
@ -320,10 +325,11 @@ func (v *V1) AnnounceTask(ctx context.Context, req *schedulerv1.AnnounceTaskRequ
taskID := req.TaskId
peerID := req.PiecePacket.DstPid
task := resource.NewTask(taskID, req.Url, types.TaskTypeV1ToV2(req.TaskType), req.UrlMeta)
task := resource.NewTask(taskID, req.Url, req.UrlMeta.Digest, req.UrlMeta.Tag, req.UrlMeta.Application,
types.TaskTypeV1ToV2(req.TaskType), strings.Split(req.UrlMeta.Filter, idgen.URLFilterSeparator), req.UrlMeta.Header, int32(v.config.Scheduler.BackToSourceCount))
task, _ = v.resource.TaskManager().LoadOrStore(task)
host := v.storeHost(ctx, req.PeerHost)
peer := v.storePeer(ctx, peerID, task, host, req.UrlMeta.Tag, req.UrlMeta.Application)
peer := v.storePeer(ctx, peerID, req.UrlMeta.Priority, req.UrlMeta.Range, task, host)
// If the task state is not TaskStateSucceeded,
// advance the task state to TaskStateSucceeded.
@ -418,10 +424,10 @@ func (v *V1) LeaveTask(ctx context.Context, req *schedulerv1.PeerTarget) error {
logger.Error(msg)
return dferrors.New(commonv1.Code_SchedPeerNotFound, msg)
}
metrics.LeaveTaskCount.WithLabelValues(peer.Tag, peer.Application).Inc()
metrics.LeaveTaskCount.WithLabelValues(peer.Task.Tag, peer.Task.Application, peer.Host.Type.Name()).Inc()
if err := peer.FSM.Event(ctx, resource.PeerEventLeave); err != nil {
metrics.LeaveTaskFailureCount.WithLabelValues(peer.Tag, peer.Application).Inc()
metrics.LeaveTaskFailureCount.WithLabelValues(peer.Task.Tag, peer.Task.Application, peer.Host.Type.Name()).Inc()
msg := fmt.Sprintf("peer fsm event failed: %s", err.Error())
peer.Log.Error(msg)
return dferrors.New(commonv1.Code_SchedTaskStatusError, msg)
@ -658,16 +664,25 @@ func (v *V1) triggerTask(ctx context.Context, req *schedulerv1.PeerTaskRequest,
priority = req.UrlMeta.Priority
} else {
// Compatible with v1 version of priority enum.
priority = commonv1.Priority(peer.GetPriority(dynconfig))
priority = types.PriorityV2ToV1(peer.GetPriority(dynconfig))
}
peer.Log.Infof("peer priority is %d", priority)
switch priority {
case commonv1.Priority_LEVEL6, commonv1.Priority_LEVEL0:
if v.config.SeedPeer.Enable && !task.IsSeedPeerFailed() {
go v.triggerSeedPeerTask(ctx, task)
if len(req.UrlMeta.Range) > 0 {
if rg, err := http.ParseURLMetaRange(req.UrlMeta.Range, math.MaxInt64); err == nil {
go v.triggerSeedPeerTask(ctx, &rg, task)
return nil
}
peer.Log.Errorf("range %s is invalid", req.UrlMeta.Range)
} else {
go v.triggerSeedPeerTask(ctx, nil, task)
return nil
}
}
fallthrough
case commonv1.Priority_LEVEL5:
fallthrough
@ -689,11 +704,11 @@ func (v *V1) triggerTask(ctx context.Context, req *schedulerv1.PeerTaskRequest,
}
// triggerSeedPeerTask starts to trigger the seed peer task.
func (v *V1) triggerSeedPeerTask(ctx context.Context, task *resource.Task) {
func (v *V1) triggerSeedPeerTask(ctx context.Context, rg *http.Range, task *resource.Task) {
ctx = trace.ContextWithSpan(context.Background(), trace.SpanFromContext(ctx))
task.Log.Info("trigger seed peer")
peer, endOfPiece, err := v.resource.SeedPeer().TriggerTask(ctx, task)
peer, endOfPiece, err := v.resource.SeedPeer().TriggerTask(ctx, rg, task)
if err != nil {
task.Log.Errorf("trigger seed peer failed: %s", err.Error())
v.handleTaskFailure(ctx, task, nil, err)
@ -708,10 +723,12 @@ func (v *V1) triggerSeedPeerTask(ctx context.Context, task *resource.Task) {
// storeTask stores a new task or reuses a previous task.
func (v *V1) storeTask(ctx context.Context, req *schedulerv1.PeerTaskRequest, typ commonv2.TaskType) *resource.Task {
filters := strings.Split(req.UrlMeta.Filter, idgen.URLFilterSeparator)
task, loaded := v.resource.TaskManager().Load(req.TaskId)
if !loaded {
// Create a task for the first time.
task = resource.NewTask(req.TaskId, req.Url, typ, req.UrlMeta, resource.WithBackToSourceLimit(int32(v.config.Scheduler.BackToSourceCount)))
task := resource.NewTask(req.TaskId, req.Url, req.UrlMeta.Digest, req.UrlMeta.Tag, req.UrlMeta.Application,
typ, filters, req.UrlMeta.Header, int32(v.config.Scheduler.BackToSourceCount))
v.resource.TaskManager().Store(task)
task.Log.Info("create new task")
return task
@ -720,7 +737,8 @@ func (v *V1) storeTask(ctx context.Context, req *schedulerv1.PeerTaskRequest, ty
// Task is a pointer; if the task already exists, the next request
// updates the task's URL, filters, and header in the task manager.
task.URL = req.Url
task.URLMeta = req.UrlMeta
task.Filters = filters
task.Header = req.UrlMeta.Header
task.Log.Info("task already exists")
return task
}
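
storeTask now carries the v1 UrlMeta fields as explicit task fields: the filter string is split once into a Filters slice and the header map is stored directly, replacing the embedded URLMeta. A runnable sketch of the split follows, where the "&" separator is an assumption standing in for idgen.URLFilterSeparator, which this diff does not show:

package main

import (
	"fmt"
	"strings"
)

// urlFilterSeparator stands in for idgen.URLFilterSeparator; "&" is an
// assumption for illustration, not a value confirmed by this diff.
const urlFilterSeparator = "&"

func main() {
	// A v1 request joins filters into one string; the refactored
	// storeTask splits it into the task's Filters slice.
	filter := "signature&timestamp"
	fmt.Println(strings.Split(filter, urlFilterSeparator)) // [signature timestamp]
}
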
@ -729,7 +747,6 @@ func (v *V1) storeTask(ctx context.Context, req *schedulerv1.PeerTaskRequest, ty
func (v *V1) storeHost(ctx context.Context, peerHost *schedulerv1.PeerHost) *resource.Host {
host, loaded := v.resource.HostManager().Load(peerHost.Id)
if !loaded {
// Get scheduler cluster client config by manager.
options := []resource.HostOption{resource.WithNetwork(resource.Network{
SecurityDomain: peerHost.SecurityDomain,
Location: peerHost.Location,
@ -739,7 +756,7 @@ func (v *V1) storeHost(ctx context.Context, peerHost *schedulerv1.PeerHost) *res
options = append(options, resource.WithConcurrentUploadLimit(int32(clientConfig.LoadLimit)))
}
host = resource.NewHost(
host := resource.NewHost(
peerHost.Id, peerHost.Ip, peerHost.HostName,
peerHost.RpcPort, peerHost.DownPort, types.HostTypeNormal,
options...,
@ -755,18 +772,24 @@ func (v *V1) storeHost(ctx context.Context, peerHost *schedulerv1.PeerHost) *res
}
// storePeer stores a new peer or reuses a previous peer.
func (v *V1) storePeer(ctx context.Context, peerID string, task *resource.Task, host *resource.Host, tag, application string) *resource.Peer {
var options []resource.PeerOption
if tag != "" {
options = append(options, resource.WithTag(tag))
}
if application != "" {
options = append(options, resource.WithApplication(application))
}
peer, loaded := v.resource.PeerManager().LoadOrStore(resource.NewPeer(peerID, task, host, options...))
func (v *V1) storePeer(ctx context.Context, id string, priority commonv1.Priority, rg string, task *resource.Task, host *resource.Host) *resource.Peer {
peer, loaded := v.resource.PeerManager().Load(id)
if !loaded {
options := []resource.PeerOption{}
if priority != commonv1.Priority_LEVEL0 {
options = append(options, resource.WithPriority(types.PriorityV1ToV2(priority)))
}
if len(rg) > 0 {
if r, err := http.ParseURLMetaRange(rg, math.MaxInt64); err == nil {
options = append(options, resource.WithRange(r))
} else {
logger.WithPeer(host.ID, task.ID, id).Error(err)
}
}
peer := resource.NewPeer(id, task, host, options...)
v.resource.PeerManager().Store(peer)
peer.Log.Info("create new peer")
return peer
}
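
storePeer now assembles functional options conditionally: a non-default priority is mapped from the v1 enum, and a range string is parsed before being attached. The types below (Peer, PeerOption, WithPriority, WithRange) shadow the real resource package purely to illustrate the pattern; they are not the repository's definitions:

package main

import "fmt"

type Range struct{ Start, Length int64 }

// Peer is a stripped-down stand-in for resource.Peer.
type Peer struct {
	ID       string
	Priority int32
	Range    *Range
}

// PeerOption mutates a Peer during construction, mirroring the
// functional-options style the refactor relies on.
type PeerOption func(*Peer)

func WithPriority(p int32) PeerOption { return func(peer *Peer) { peer.Priority = p } }

func WithRange(r Range) PeerOption {
	return func(peer *Peer) {
		rr := r
		peer.Range = &rr
	}
}

func NewPeer(id string, options ...PeerOption) *Peer {
	peer := &Peer{ID: id}
	for _, opt := range options {
		opt(peer)
	}
	return peer
}

func main() {
	// Options are appended only when the request carries the field,
	// exactly like the new storePeer body above.
	opts := []PeerOption{WithPriority(1), WithRange(Range{Start: 0, Length: 10})}
	fmt.Printf("%+v\n", NewPeer("peer-1", opts...))
}
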
@ -1009,7 +1032,7 @@ func (v *V1) handlePieceFailure(ctx context.Context, peer *resource.Peer, piece
// Start to trigger the seed peer task.
if v.config.SeedPeer.Enable {
go v.triggerSeedPeerTask(ctx, parent.Task)
go v.triggerSeedPeerTask(ctx, peer.Range, parent.Task)
}
default:
}
@ -1147,20 +1170,7 @@ func (v *V1) handleTaskFailure(ctx context.Context, task *resource.Task, backToS
for _, detail := range st.Details() {
switch d := detail.(type) {
case *errordetailsv1.SourceError:
var proto = "unknown"
if u, err := url.Parse(task.URL); err == nil {
proto = u.Scheme
}
task.Log.Infof("source error: %#v", d)
// TODO currently, metrics.PeerTaskSourceErrorCounter is only updated for seed peer source error, need update for normal peer
if d.Metadata != nil {
metrics.PeerTaskSourceErrorCounter.WithLabelValues(
task.URLMeta.Tag, task.URLMeta.Application, proto, fmt.Sprintf("%d", d.Metadata.StatusCode)).Inc()
} else {
metrics.PeerTaskSourceErrorCounter.WithLabelValues(
task.URLMeta.Tag, task.URLMeta.Application, proto, "0").Inc()
}
task.Log.Infof("download back-to-source error: %#v", d)
if !d.Temporary {
task.ReportPieceResultToPeers(&schedulerv1.PeerPacket{
Code: commonv1.Code_BackToSourceAborted,
@ -1198,8 +1208,8 @@ func (v *V1) createRecord(peer *resource.Peer, parents []*resource.Peer, req *sc
for _, parent := range parents {
parentRecord := storage.Parent{
ID: parent.ID,
Tag: parent.Tag,
Application: parent.Application,
Tag: parent.Task.Tag,
Application: parent.Task.Application,
State: parent.FSM.Current(),
Cost: parent.Cost.Load().Nanoseconds(),
UploadPieceCount: 0,
@ -1291,8 +1301,8 @@ func (v *V1) createRecord(peer *resource.Peer, parents []*resource.Peer, req *sc
record := storage.Record{
ID: peer.ID,
Tag: peer.Tag,
Application: peer.Application,
Tag: peer.Task.Tag,
Application: peer.Task.Application,
State: peer.FSM.Current(),
Cost: peer.Cost.Load().Nanoseconds(),
Parents: parentRecords,

View File

@ -26,6 +26,7 @@ import (
"net/url"
"reflect"
"strconv"
"strings"
"sync"
"testing"
"time"
@ -46,6 +47,7 @@ import (
"d7y.io/dragonfly/v2/manager/types"
"d7y.io/dragonfly/v2/pkg/container/set"
"d7y.io/dragonfly/v2/pkg/idgen"
nethttp "d7y.io/dragonfly/v2/pkg/net/http"
"d7y.io/dragonfly/v2/pkg/rpc/common"
pkgtypes "d7y.io/dragonfly/v2/pkg/types"
"d7y.io/dragonfly/v2/scheduler/config"
@ -66,7 +68,7 @@ var (
}
mockRawHost = resource.Host{
ID: idgen.HostIDV1("hostname", 8003),
ID: mockHostID,
Type: pkgtypes.HostTypeNormal,
Hostname: "hostname",
IP: "127.0.0.1",
@ -85,7 +87,7 @@ var (
}
mockRawSeedHost = resource.Host{
ID: idgen.HostIDV1("hostname_seed", 8003),
ID: mockSeedHostID,
Type: pkgtypes.HostTypeSuperSeed,
Hostname: "hostname_seed",
IP: "127.0.0.1",
@ -134,9 +136,9 @@ var (
mockNetwork = resource.Network{
TCPConnectionCount: 10,
UploadTCPConnectionCount: 1,
SecurityDomain: "security_domain",
Location: "location",
IDC: "idc",
SecurityDomain: mockHostSecurityDomain,
Location: mockHostLocation,
IDC: mockHostIDC,
}
mockDisk = resource.Disk{
@ -158,33 +160,37 @@ var (
}
mockPeerHost = &schedulerv1.PeerHost{
Id: idgen.HostIDV1("hostname", 8003),
Id: mockHostID,
Ip: "127.0.0.1",
RpcPort: 8003,
DownPort: 8001,
HostName: "hostname",
SecurityDomain: "security_domain",
Location: "location",
Idc: "idc",
SecurityDomain: mockHostSecurityDomain,
Location: mockHostLocation,
Idc: mockHostIDC,
}
mockTaskURLMeta = &commonv1.UrlMeta{
Digest: "digest",
Tag: "tag",
Range: "range",
Filter: "filter",
Priority: commonv1.Priority_LEVEL0,
Header: map[string]string{
"content-length": "100",
},
}
mockTaskURL = "http://example.com/foo"
mockTaskBackToSourceLimit int32 = 200
mockTaskID = idgen.TaskIDV1(mockTaskURL, mockTaskURLMeta)
mockPeerID = idgen.PeerIDV1("127.0.0.1")
mockSeedPeerID = idgen.SeedPeerIDV1("127.0.0.1")
mockURL = "d7y://foo"
mockTaskURL = "http://example.com/foo"
mockTaskID = idgen.TaskIDV2(mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, mockTaskFilters)
mockTaskDigest = "sha256:c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4"
mockTaskTag = "d7y"
mockTaskApplication = "foo"
mockTaskFilters = []string{"bar"}
mockTaskHeader = map[string]string{"content-length": "100"}
mockTaskPieceSize int32 = 2048
mockHostID = idgen.HostIDV2("127.0.0.1", "hostname", 8003)
mockSeedHostID = idgen.HostIDV2("127.0.0.1", "hostname_seed", 8003)
mockHostSecurityDomain = "security_domain"
mockHostLocation = "location"
mockHostIDC = "idc"
mockPeerID = idgen.PeerIDV2()
mockSeedPeerID = idgen.PeerIDV2()
mockPeerRange = nethttp.Range{
Start: 0,
Length: 10,
}
mockURLMetaRange = "0-9"
)
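
The mockURLMetaRange string "0-9" and mockPeerRange {Start: 0, Length: 10} describe the same byte range: HTTP range bounds are inclusive, so the length is end - start + 1. Below is a simplified stand-in for http.ParseURLMetaRange (the real parser also takes a size cap and handles more range forms) that shows the mapping:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseRange maps an inclusive "start-end" string to a start offset and a
// length, which is why "0-9" pairs with Range{Start: 0, Length: 10}.
func parseRange(s string) (start, length int64, err error) {
	parts := strings.SplitN(s, "-", 2)
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("invalid range %q", s)
	}
	if start, err = strconv.ParseInt(parts[0], 10, 64); err != nil {
		return 0, 0, err
	}
	end, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return 0, 0, err
	}
	return start, end - start + 1, nil
}

func main() {
	start, length, _ := parseRange("0-9")
	fmt.Println(start, length) // 0 10
}
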
func TestService_NewV1(t *testing.T) {
@ -251,7 +257,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Eq(mockPeer.Host.ID)).Return(mockPeer.Host, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
)
},
expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) {
@ -279,14 +285,14 @@ func TestService_RegisterPeerTask(t *testing.T) {
mp *resource.MockPeerManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder,
) {
mockPeer.Task.FSM.SetState(resource.TaskStatePending)
mockPeer.Application = "baz"
mockPeer.Task.Application = "baz"
gomock.InOrder(
mr.TaskManager().Return(taskManager).Times(1),
mt.Load(gomock.Any()).Return(mockPeer.Task, true).Times(1),
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Eq(mockPeer.Host.ID)).Return(mockPeer.Host, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
md.GetApplications().Return([]*managerv2.Application{
{
Name: "baz",
@ -333,7 +339,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Eq(mockPeer.Host.ID)).Return(mockPeer.Host, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.Delete(gomock.Any()).Return().Times(1),
)
@ -373,7 +379,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Eq(mockPeer.Host.ID)).Return(mockPeer.Host, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
)
},
expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) {
@ -411,7 +417,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Eq(mockPeer.Host.ID)).Return(mockPeer.Host, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
)
},
expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) {
@ -453,7 +459,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Eq(mockPeer.Host.ID)).Return(mockPeer.Host, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
)
},
expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) {
@ -494,7 +500,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Eq(mockPeer.Host.ID)).Return(mockPeer.Host, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
)
},
expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) {
@ -534,7 +540,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Eq(mockPeer.Host.ID)).Return(mockPeer.Host, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.Delete(gomock.Any()).Return().Times(1),
)
@ -581,7 +587,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Eq(mockPeer.Host.ID)).Return(mockPeer.Host, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
ms.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockSeedPeer, true).Times(1),
)
},
@ -627,7 +633,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Eq(mockPeer.Host.ID)).Return(mockPeer.Host, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
ms.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockSeedPeer, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.Delete(gomock.Any()).Return().Times(1),
@ -675,7 +681,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Eq(mockPeer.Host.ID)).Return(mockPeer.Host, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
ms.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockSeedPeer, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.Delete(gomock.Any()).Return().Times(1),
@ -721,7 +727,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Eq(mockPeer.Host.ID)).Return(mockPeer.Host, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
ms.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockSeedPeer, true).Times(1),
)
},
@ -766,7 +772,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Eq(mockPeer.Host.ID)).Return(mockPeer.Host, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
ms.FindParent(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockSeedPeer, true).Times(1),
)
},
@ -807,7 +813,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Eq(mockPeer.Host.ID)).Return(mockPeer.Host, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.Delete(gomock.Any()).Return().Times(1),
)
@ -848,7 +854,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Eq(mockPeer.Host.ID)).Return(mockPeer.Host, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
)
},
expect: func(t *testing.T, peer *resource.Peer, result *schedulerv1.RegisterResult, err error) {
@ -878,7 +884,7 @@ func TestService_RegisterPeerTask(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost)
mockSeedHost := resource.NewHost(
mockRawSeedHost.ID, mockRawSeedHost.IP, mockRawSeedHost.Hostname,
@ -1142,7 +1148,7 @@ func TestService_ReportPieceResult(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost)
tc.mock(mockPeer, res, peerManager, res.EXPECT(), peerManager.EXPECT(), stream.EXPECT())
tc.expect(t, mockPeer, svc.ReportPieceResult(stream))
@ -1340,7 +1346,7 @@ func TestService_ReportPeerResult(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost)
tc.run(t, mockPeer, tc.req, svc, mockPeer, res, peerManager, res.EXPECT(), peerManager.EXPECT(), storage.EXPECT())
})
@ -1399,8 +1405,8 @@ func TestService_StatTask(t *testing.T) {
dynconfig := configmocks.NewMockDynconfigInterface(ctl)
storage := storagemocks.NewMockStorage(ctl)
taskManager := resource.NewMockTaskManager(ctl)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig, storage)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduler, dynconfig, storage)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
tc.mock(mockTask, taskManager, res.EXPECT(), taskManager.EXPECT())
task, err := svc.StatTask(context.Background(), &schedulerv1.StatTaskRequest{TaskId: mockTaskID})
@ -1422,7 +1428,7 @@ func TestService_AnnounceTask(t *testing.T) {
name: "task state is TaskStateSucceeded and peer state is PeerStateSucceeded",
req: &schedulerv1.AnnounceTaskRequest{
TaskId: mockTaskID,
Url: mockURL,
Url: mockTaskURL,
UrlMeta: &commonv1.UrlMeta{
Priority: commonv1.Priority_LEVEL0,
},
@ -1446,7 +1452,7 @@ func TestService_AnnounceTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Any()).Return(mockHost, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
)
},
expect: func(t *testing.T, mockTask *resource.Task, mockPeer *resource.Peer, err error) {
@ -1460,7 +1466,7 @@ func TestService_AnnounceTask(t *testing.T) {
name: "task state is TaskStatePending and peer state is PeerStateSucceeded",
req: &schedulerv1.AnnounceTaskRequest{
TaskId: mockTaskID,
Url: mockURL,
Url: mockTaskURL,
UrlMeta: &commonv1.UrlMeta{
Priority: commonv1.Priority_LEVEL0,
},
@ -1483,7 +1489,7 @@ func TestService_AnnounceTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Any()).Return(mockHost, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
)
},
expect: func(t *testing.T, mockTask *resource.Task, mockPeer *resource.Peer, err error) {
@ -1505,7 +1511,7 @@ func TestService_AnnounceTask(t *testing.T) {
name: "task state is TaskStateFailed and peer state is PeerStateSucceeded",
req: &schedulerv1.AnnounceTaskRequest{
TaskId: mockTaskID,
Url: mockURL,
Url: mockTaskURL,
UrlMeta: &commonv1.UrlMeta{
Priority: commonv1.Priority_LEVEL0,
},
@ -1528,7 +1534,7 @@ func TestService_AnnounceTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Any()).Return(mockHost, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
)
},
expect: func(t *testing.T, mockTask *resource.Task, mockPeer *resource.Peer, err error) {
@ -1550,7 +1556,7 @@ func TestService_AnnounceTask(t *testing.T) {
name: "task state is TaskStatePending and peer state is PeerStatePending",
req: &schedulerv1.AnnounceTaskRequest{
TaskId: mockTaskID,
Url: mockURL,
Url: mockTaskURL,
UrlMeta: &commonv1.UrlMeta{
Priority: commonv1.Priority_LEVEL0,
},
@ -1573,7 +1579,7 @@ func TestService_AnnounceTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Any()).Return(mockHost, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
)
},
expect: func(t *testing.T, mockTask *resource.Task, mockPeer *resource.Peer, err error) {
@ -1595,7 +1601,7 @@ func TestService_AnnounceTask(t *testing.T) {
name: "task state is TaskStatePending and peer state is PeerStateReceivedNormal",
req: &schedulerv1.AnnounceTaskRequest{
TaskId: mockTaskID,
Url: mockURL,
Url: mockTaskURL,
UrlMeta: &commonv1.UrlMeta{
Priority: commonv1.Priority_LEVEL0,
},
@ -1618,7 +1624,7 @@ func TestService_AnnounceTask(t *testing.T) {
mr.HostManager().Return(hostManager).Times(1),
mh.Load(gomock.Any()).Return(mockHost, true).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Any()).Return(mockPeer, true).Times(1),
)
},
expect: func(t *testing.T, mockTask *resource.Task, mockPeer *resource.Peer, err error) {
@ -1649,11 +1655,11 @@ func TestService_AnnounceTask(t *testing.T) {
hostManager := resource.NewMockHostManager(ctl)
taskManager := resource.NewMockTaskManager(ctl)
peerManager := resource.NewMockPeerManager(ctl)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig, storage)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduler, dynconfig, storage)
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost)
tc.mock(mockHost, mockTask, mockPeer, hostManager, taskManager, peerManager, res.EXPECT(), hostManager.EXPECT(), taskManager.EXPECT(), peerManager.EXPECT())
@ -1850,9 +1856,9 @@ func TestService_LeaveTask(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
peer := resource.NewPeer(mockSeedPeerID, mockTask, mockHost)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig, storage)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduler, dynconfig, storage)
tc.mock(peer, peerManager, scheduler.EXPECT(), res.EXPECT(), peerManager.EXPECT())
tc.expect(t, peer, svc.LeaveTask(context.Background(), &schedulerv1.PeerTarget{}))
@ -2065,9 +2071,9 @@ func TestService_LeaveHost(t *testing.T) {
host := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
mockPeer := resource.NewPeer(mockSeedPeerID, mockTask, host)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig, storage)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduler, dynconfig, storage)
tc.mock(host, mockPeer, hostManager, scheduler.EXPECT(), res.EXPECT(), hostManager.EXPECT())
tc.expect(t, mockPeer, svc.LeaveHost(context.Background(), &schedulerv1.LeaveHostRequest{
@ -2233,7 +2239,7 @@ func TestService_triggerTask(t *testing.T) {
},
run: func(t *testing.T, svc *V1, mockTask *resource.Task, mockHost *resource.Host, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, dynconfig config.DynconfigInterface, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mc *resource.MockSeedPeerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
mockTask.FSM.SetState(resource.TaskStatePending)
mockPeer.Application = "baw"
mockPeer.Task.Application = "baw"
var wg sync.WaitGroup
wg.Add(2)
@ -2249,7 +2255,7 @@ func TestService_triggerTask(t *testing.T) {
},
}, nil).Times(1),
mr.SeedPeer().Do(func() { wg.Done() }).Return(seedPeer).Times(1),
mc.TriggerTask(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, task *resource.Task) { wg.Done() }).Return(mockPeer, &schedulerv1.PeerResult{}, nil).Times(1),
mc.TriggerTask(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(ctx context.Context, rg *nethttp.Range, task *resource.Task) { wg.Done() }).Return(mockPeer, &schedulerv1.PeerResult{}, nil).Times(1),
)
err := svc.triggerTask(context.Background(), &schedulerv1.PeerTaskRequest{
@ -2299,7 +2305,7 @@ func TestService_triggerTask(t *testing.T) {
},
run: func(t *testing.T, svc *V1, mockTask *resource.Task, mockHost *resource.Host, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, dynconfig config.DynconfigInterface, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mc *resource.MockSeedPeerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
mockTask.FSM.SetState(resource.TaskStatePending)
mockPeer.Application = "bas"
mockPeer.Task.Application = "bas"
md.GetApplications().Return([]*managerv2.Application{
{
@ -2331,7 +2337,7 @@ func TestService_triggerTask(t *testing.T) {
},
run: func(t *testing.T, svc *V1, mockTask *resource.Task, mockHost *resource.Host, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, dynconfig config.DynconfigInterface, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mc *resource.MockSeedPeerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
mockTask.FSM.SetState(resource.TaskStatePending)
mockPeer.Application = "bas"
mockPeer.Task.Application = "bas"
md.GetApplications().Return([]*managerv2.Application{
{
@ -2363,7 +2369,7 @@ func TestService_triggerTask(t *testing.T) {
},
run: func(t *testing.T, svc *V1, mockTask *resource.Task, mockHost *resource.Host, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, dynconfig config.DynconfigInterface, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mc *resource.MockSeedPeerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
mockTask.FSM.SetState(resource.TaskStatePending)
mockPeer.Application = "bae"
mockPeer.Task.Application = "bae"
md.GetApplications().Return([]*managerv2.Application{
{
@ -2395,7 +2401,7 @@ func TestService_triggerTask(t *testing.T) {
},
run: func(t *testing.T, svc *V1, mockTask *resource.Task, mockHost *resource.Host, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, dynconfig config.DynconfigInterface, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mc *resource.MockSeedPeerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
mockTask.FSM.SetState(resource.TaskStatePending)
mockPeer.Application = "bae"
mockPeer.Task.Application = "bae"
md.GetApplications().Return([]*managerv2.Application{
{
@ -2425,7 +2431,7 @@ func TestService_triggerTask(t *testing.T) {
},
run: func(t *testing.T, svc *V1, mockTask *resource.Task, mockHost *resource.Host, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, dynconfig config.DynconfigInterface, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mc *resource.MockSeedPeerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
mockTask.FSM.SetState(resource.TaskStatePending)
mockPeer.Application = "bat"
mockPeer.Task.Application = "bat"
md.GetApplications().Return([]*managerv2.Application{
{
@ -2455,7 +2461,7 @@ func TestService_triggerTask(t *testing.T) {
},
run: func(t *testing.T, svc *V1, mockTask *resource.Task, mockHost *resource.Host, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, dynconfig config.DynconfigInterface, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mc *resource.MockSeedPeerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
mockTask.FSM.SetState(resource.TaskStatePending)
mockPeer.Application = "bat"
mockPeer.Task.Application = "bat"
var wg sync.WaitGroup
wg.Add(2)
@ -2471,7 +2477,7 @@ func TestService_triggerTask(t *testing.T) {
},
}, nil).Times(1),
mr.SeedPeer().Do(func() { wg.Done() }).Return(seedPeer).Times(1),
mc.TriggerTask(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, task *resource.Task) { wg.Done() }).Return(mockPeer, &schedulerv1.PeerResult{}, nil).Times(1),
mc.TriggerTask(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(ctx context.Context, rg *nethttp.Range, task *resource.Task) { wg.Done() }).Return(mockPeer, &schedulerv1.PeerResult{}, nil).Times(1),
)
err := svc.triggerTask(context.Background(), &schedulerv1.PeerTaskRequest{
@ -2495,7 +2501,7 @@ func TestService_triggerTask(t *testing.T) {
},
run: func(t *testing.T, svc *V1, mockTask *resource.Task, mockHost *resource.Host, mockPeer *resource.Peer, mockSeedPeer *resource.Peer, dynconfig config.DynconfigInterface, seedPeer resource.SeedPeer, mr *resource.MockResourceMockRecorder, mc *resource.MockSeedPeerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
mockTask.FSM.SetState(resource.TaskStatePending)
mockPeer.Task.URLMeta.Priority = commonv1.Priority_LEVEL6
mockPeer.Priority = commonv2.Priority_LEVEL6
var wg sync.WaitGroup
wg.Add(2)
@ -2503,7 +2509,7 @@ func TestService_triggerTask(t *testing.T) {
gomock.InOrder(
mr.SeedPeer().Do(func() { wg.Done() }).Return(seedPeer).Times(1),
mc.TriggerTask(gomock.Any(), gomock.Any()).Do(func(ctx context.Context, task *resource.Task) { wg.Done() }).Return(mockPeer, &schedulerv1.PeerResult{}, nil).Times(1),
mc.TriggerTask(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(ctx context.Context, rg *nethttp.Range, task *resource.Task) { wg.Done() }).Return(mockPeer, &schedulerv1.PeerResult{}, nil).Times(1),
)
err := svc.triggerTask(context.Background(), &schedulerv1.PeerTaskRequest{
@ -2535,7 +2541,7 @@ func TestService_triggerTask(t *testing.T) {
mockSeedHost := resource.NewHost(
mockRawSeedHost.ID, mockRawSeedHost.IP, mockRawSeedHost.Hostname,
mockRawSeedHost.Port, mockRawSeedHost.DownloadPort, mockRawSeedHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost)
mockSeedPeer := resource.NewPeer(mockSeedPeerID, mockTask, mockSeedHost)
seedPeer := resource.NewMockSeedPeer(ctl)
@ -2547,61 +2553,78 @@ func TestService_triggerTask(t *testing.T) {
func TestService_storeTask(t *testing.T) {
tests := []struct {
name string
req *schedulerv1.PeerTaskRequest
taskType commonv2.TaskType
mock func(mockTask *resource.Task, taskManager resource.TaskManager, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder)
expect func(t *testing.T, task *resource.Task, req *schedulerv1.PeerTaskRequest)
run func(t *testing.T, svc *V1, taskManager resource.TaskManager, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder)
}{
{
name: "task already exists",
req: &schedulerv1.PeerTaskRequest{
TaskId: mockTaskID,
Url: "https://example.com",
UrlMeta: &commonv1.UrlMeta{
Priority: commonv1.Priority_LEVEL0,
},
PeerHost: mockPeerHost,
},
taskType: commonv2.TaskType_DFDAEMON,
mock: func(mockTask *resource.Task, taskManager resource.TaskManager, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder) {
run: func(t *testing.T, svc *V1, taskManager resource.TaskManager, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder) {
mockTask := resource.NewTask(mockTaskID, "", mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, nil, nil, mockTaskBackToSourceLimit)
gomock.InOrder(
mr.TaskManager().Return(taskManager).Times(1),
mt.Load(gomock.Eq(mockTaskID)).Return(mockTask, true).Times(1),
)
task := svc.storeTask(context.Background(), &schedulerv1.PeerTaskRequest{
TaskId: mockTaskID,
Url: mockTaskURL,
UrlMeta: &commonv1.UrlMeta{
Priority: commonv1.Priority_LEVEL0,
Filter: strings.Join(mockTaskFilters, idgen.URLFilterSeparator),
Header: mockTaskHeader,
},
expect: func(t *testing.T, task *resource.Task, req *schedulerv1.PeerTaskRequest) {
PeerHost: mockPeerHost,
}, commonv2.TaskType_DFDAEMON)
assert := assert.New(t)
assert.Equal(task.ID, mockTaskID)
assert.Equal(task.URL, req.Url)
assert.Equal(task.Type, commonv2.TaskType_DFDAEMON)
assert.EqualValues(task.URLMeta, req.UrlMeta)
assert.EqualValues(task, mockTask)
},
},
{
name: "task does not exist",
req: &schedulerv1.PeerTaskRequest{
TaskId: mockTaskID,
Url: "https://example.com",
UrlMeta: &commonv1.UrlMeta{
Priority: commonv1.Priority_LEVEL0,
},
PeerHost: mockPeerHost,
},
taskType: commonv2.TaskType_DFCACHE,
mock: func(mockTask *resource.Task, taskManager resource.TaskManager, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder) {
run: func(t *testing.T, svc *V1, taskManager resource.TaskManager, mr *resource.MockResourceMockRecorder, mt *resource.MockTaskManagerMockRecorder) {
gomock.InOrder(
mr.TaskManager().Return(taskManager).Times(1),
mt.Load(gomock.Eq(mockTaskID)).Return(nil, false).Times(1),
mr.TaskManager().Return(taskManager).Times(1),
mt.Store(gomock.Any()).Return().Times(1),
)
task := svc.storeTask(context.Background(), &schedulerv1.PeerTaskRequest{
TaskId: mockTaskID,
Url: mockTaskURL,
UrlMeta: &commonv1.UrlMeta{
Priority: commonv1.Priority_LEVEL0,
Digest: mockTaskDigest,
Tag: mockTaskTag,
Application: mockTaskApplication,
Filter: strings.Join(mockTaskFilters, idgen.URLFilterSeparator),
Header: mockTaskHeader,
},
expect: func(t *testing.T, task *resource.Task, req *schedulerv1.PeerTaskRequest) {
PeerHost: mockPeerHost,
}, commonv2.TaskType_DFCACHE)
assert := assert.New(t)
assert.Equal(task.ID, mockTaskID)
assert.Equal(task.URL, req.Url)
assert.Equal(task.Type, commonv2.TaskType_DFCACHE)
assert.EqualValues(task.URLMeta, req.UrlMeta)
assert.Equal(task.URL, mockTaskURL)
assert.Equal(task.Digest, mockTaskDigest)
assert.Equal(task.Tag, mockTaskTag)
assert.Equal(task.Application, mockTaskApplication)
assert.EqualValues(task.Filters, mockTaskFilters)
assert.EqualValues(task.Header, mockTaskHeader)
assert.Equal(task.PieceSize, int32(0))
assert.Empty(task.DirectPiece)
assert.Equal(task.ContentLength.Load(), int64(-1))
assert.Equal(task.TotalPieceCount.Load(), int32(0))
assert.Equal(task.BackToSourceLimit.Load(), int32(200))
assert.Equal(task.BackToSourcePeers.Len(), uint(0))
assert.Equal(task.FSM.Current(), resource.TaskStatePending)
assert.Empty(task.Pieces)
assert.Equal(task.PeerCount(), 0)
assert.NotEqual(task.CreatedAt.Load(), 0)
assert.NotEqual(task.UpdatedAt.Load(), 0)
assert.NotNil(task.Log)
},
},
}
@ -2616,11 +2639,7 @@ func TestService_storeTask(t *testing.T) {
storage := storagemocks.NewMockStorage(ctl)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig, storage)
taskManager := resource.NewMockTaskManager(ctl)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta)
tc.mock(mockTask, taskManager, res.EXPECT(), taskManager.EXPECT())
task := svc.storeTask(context.Background(), tc.req, tc.taskType)
tc.expect(t, task, tc.req)
tc.run(t, svc, taskManager, res.EXPECT(), taskManager.EXPECT())
})
}
}
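
TestService_storeTask (and TestService_storePeer below) collapse the earlier separate req/mock/expect fields into a single run closure per case that builds its own fixtures, wires its mocks, and asserts inline. A minimal skeleton of that table shape, assuming nothing beyond the standard library:

package service_test

import (
	"strings"
	"testing"
)

func TestExample(t *testing.T) {
	tests := []struct {
		name string
		run  func(t *testing.T)
	}{
		{
			name: "case owns its fixtures and assertions",
			run: func(t *testing.T) {
				if got := strings.Split("a&b", "&"); len(got) != 2 {
					t.Fatalf("expected 2 filters, got %d", len(got))
				}
			},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) { tc.run(t) })
	}
}
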
@ -2628,17 +2647,13 @@ func TestService_storeTask(t *testing.T) {
func TestService_storeHost(t *testing.T) {
tests := []struct {
name string
req *schedulerv1.PeerTaskRequest
peerHost *schedulerv1.PeerHost
mock func(mockHost *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder)
expect func(t *testing.T, host *resource.Host)
}{
{
name: "host already exists",
req: &schedulerv1.PeerTaskRequest{
Url: mockTaskURL,
UrlMeta: mockTaskURLMeta,
PeerHost: mockPeerHost,
},
peerHost: mockPeerHost,
mock: func(mockHost *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
gomock.InOrder(
mr.HostManager().Return(hostManager).Times(1),
@ -2652,11 +2667,7 @@ func TestService_storeHost(t *testing.T) {
},
{
name: "host does not exist",
req: &schedulerv1.PeerTaskRequest{
Url: mockTaskURL,
UrlMeta: mockTaskURLMeta,
PeerHost: mockPeerHost,
},
peerHost: mockPeerHost,
mock: func(mockHost *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
gomock.InOrder(
mr.HostManager().Return(hostManager).Times(1),
@ -2674,11 +2685,7 @@ func TestService_storeHost(t *testing.T) {
},
{
name: "host does not exist and dynconfig get cluster client config failed",
req: &schedulerv1.PeerTaskRequest{
Url: mockTaskURL,
UrlMeta: mockTaskURLMeta,
PeerHost: mockPeerHost,
},
peerHost: mockPeerHost,
mock: func(mockHost *resource.Host, hostManager resource.HostManager, mr *resource.MockResourceMockRecorder, mh *resource.MockHostManagerMockRecorder, md *configmocks.MockDynconfigInterfaceMockRecorder) {
gomock.InOrder(
mr.HostManager().Return(hostManager).Times(1),
@ -2710,7 +2717,7 @@ func TestService_storeHost(t *testing.T) {
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
tc.mock(mockHost, hostManager, res.EXPECT(), hostManager.EXPECT(), dynconfig.EXPECT())
host := svc.storeHost(context.Background(), tc.req.PeerHost)
host := svc.storeHost(context.Background(), tc.peerHost)
tc.expect(t, host)
})
}
@ -2719,48 +2726,63 @@ func TestService_storeHost(t *testing.T) {
func TestService_storePeer(t *testing.T) {
tests := []struct {
name string
req *schedulerv1.PeerTaskRequest
mock func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder)
expect func(t *testing.T, peer *resource.Peer)
run func(t *testing.T, svc *V1, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder)
}{
{
name: "peer already exists",
req: &schedulerv1.PeerTaskRequest{
PeerId: mockPeerID,
UrlMeta: &commonv1.UrlMeta{
Priority: commonv1.Priority_LEVEL0,
},
},
mock: func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) {
run: func(t *testing.T, svc *V1, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost)
gomock.InOrder(
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, true).Times(1),
mp.Load(gomock.Eq(mockPeerID)).Return(mockPeer, true).Times(1),
)
},
expect: func(t *testing.T, peer *resource.Peer) {
peer := svc.storePeer(context.Background(), mockPeerID, commonv1.Priority_LEVEL0, mockURLMetaRange, mockTask, mockHost)
assert := assert.New(t)
assert.Equal(peer.ID, mockPeerID)
assert.Equal(peer.Tag, resource.DefaultTag)
assert.EqualValues(peer, mockPeer)
},
},
{
name: "peer does not exists",
req: &schedulerv1.PeerTaskRequest{
PeerId: mockPeerID,
UrlMeta: &commonv1.UrlMeta{
Priority: commonv1.Priority_LEVEL0,
},
},
mock: func(mockPeer *resource.Peer, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) {
run: func(t *testing.T, svc *V1, peerManager resource.PeerManager, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
gomock.InOrder(
mr.PeerManager().Return(peerManager).Times(1),
mp.LoadOrStore(gomock.Any()).Return(mockPeer, false).Times(1),
mp.Load(gomock.Eq(mockPeerID)).Return(nil, false).Times(1),
mr.PeerManager().Return(peerManager).Times(1),
mp.Store(gomock.Any()).Return().Times(1),
)
},
expect: func(t *testing.T, peer *resource.Peer) {
peer := svc.storePeer(context.Background(), mockPeerID, commonv1.Priority_LEVEL1, mockURLMetaRange, mockTask, mockHost)
assert := assert.New(t)
assert.Equal(peer.ID, mockPeerID)
assert.Equal(peer.Tag, resource.DefaultTag)
assert.EqualValues(peer.Range, &mockPeerRange)
assert.Equal(peer.Priority, commonv2.Priority_LEVEL1)
assert.Equal(peer.Pieces.Len(), uint(0))
assert.Empty(peer.FinishedPieces)
assert.Equal(len(peer.PieceCosts()), 0)
assert.Empty(peer.ReportPieceResultStream)
assert.Empty(peer.AnnouncePeerStream)
assert.Equal(peer.FSM.Current(), resource.PeerStatePending)
assert.EqualValues(peer.Task, mockTask)
assert.EqualValues(peer.Host, mockHost)
assert.Equal(peer.BlockParents.Len(), uint(0))
assert.Equal(peer.NeedBackToSource.Load(), false)
assert.Equal(peer.IsBackToSource.Load(), false)
assert.NotEqual(peer.PieceUpdatedAt.Load(), 0)
assert.NotEqual(peer.CreatedAt.Load(), 0)
assert.NotEqual(peer.UpdatedAt.Load(), 0)
assert.NotNil(peer.Log)
},
},
}
@ -2775,15 +2797,8 @@ func TestService_storePeer(t *testing.T) {
storage := storagemocks.NewMockStorage(ctl)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig, storage)
peerManager := resource.NewMockPeerManager(ctl)
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockPeer := resource.NewPeer(mockPeerID, mockTask, mockHost)
tc.mock(mockPeer, peerManager, res.EXPECT(), peerManager.EXPECT())
peer := svc.storePeer(context.Background(), tc.req.PeerId, mockTask, mockHost, tc.req.UrlMeta.Tag, tc.req.UrlMeta.Application)
tc.expect(t, peer)
tc.run(t, svc, peerManager, res.EXPECT(), peerManager.EXPECT())
})
}
}
@ -2801,7 +2816,7 @@ func TestService_triggerSeedPeerTask(t *testing.T) {
peer.FSM.SetState(resource.PeerStateRunning)
gomock.InOrder(
mr.SeedPeer().Return(seedPeer).Times(1),
mc.TriggerTask(gomock.Any(), gomock.Any()).Return(peer, &schedulerv1.PeerResult{
mc.TriggerTask(gomock.Any(), gomock.Any(), gomock.Any()).Return(peer, &schedulerv1.PeerResult{
TotalPieceCount: 3,
ContentLength: 1024,
}, nil).Times(1),
@ -2821,7 +2836,7 @@ func TestService_triggerSeedPeerTask(t *testing.T) {
task.FSM.SetState(resource.TaskStateRunning)
gomock.InOrder(
mr.SeedPeer().Return(seedPeer).Times(1),
mc.TriggerTask(gomock.Any(), gomock.Any()).Return(peer, &schedulerv1.PeerResult{}, errors.New("foo")).Times(1),
mc.TriggerTask(gomock.Any(), gomock.Any(), gomock.Any()).Return(peer, &schedulerv1.PeerResult{}, errors.New("foo")).Times(1),
)
},
expect: func(t *testing.T, task *resource.Task, peer *resource.Peer) {
@ -2843,12 +2858,12 @@ func TestService_triggerSeedPeerTask(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
task := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
task := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
peer := resource.NewPeer(mockPeerID, task, mockHost)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig, storage)
tc.mock(task, peer, seedPeer, res.EXPECT(), seedPeer.EXPECT())
svc.triggerSeedPeerTask(context.Background(), task)
svc.triggerSeedPeerTask(context.Background(), &mockPeerRange, task)
tc.expect(t, task, peer)
})
}
@ -2924,7 +2939,7 @@ func TestService_handleBeginOfPiece(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
peer := resource.NewPeer(mockPeerID, mockTask, mockHost)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig}, res, scheduler, dynconfig, storage)
@ -2939,7 +2954,7 @@ func TestService_handlePieceSuccess(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
now := time.Now()
tests := []struct {
@ -3007,7 +3022,7 @@ func TestService_handlePieceSuccess(t *testing.T) {
res := resource.NewMockResource(ctl)
dynconfig := configmocks.NewMockDynconfigInterface(ctl)
storage := storagemocks.NewMockStorage(ctl)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig, storage)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduler, dynconfig, storage)
tc.mock(tc.peer)
svc.handlePieceSuccess(context.Background(), tc.peer, tc.piece)
@ -3031,7 +3046,7 @@ func TestService_handlePieceFail(t *testing.T) {
config: &config.Config{
Scheduler: mockSchedulerConfig,
SeedPeer: config.SeedPeerConfig{Enable: true},
Metrics: config.MetricsConfig{EnablePeerHost: true},
Metrics: config.MetricsConfig{EnableHost: true},
},
piece: &schedulerv1.PieceResult{},
run: func(t *testing.T, svc *V1, peer *resource.Peer, parent *resource.Peer, piece *schedulerv1.PieceResult, peerManager resource.PeerManager, seedPeer resource.SeedPeer, ms *mocks.MockSchedulerMockRecorder, mr *resource.MockResourceMockRecorder, mp *resource.MockPeerManagerMockRecorder, mc *resource.MockSeedPeerMockRecorder) {
@ -3048,7 +3063,7 @@ func TestService_handlePieceFail(t *testing.T) {
config: &config.Config{
Scheduler: mockSchedulerConfig,
SeedPeer: config.SeedPeerConfig{Enable: true},
Metrics: config.MetricsConfig{EnablePeerHost: true},
Metrics: config.MetricsConfig{EnableHost: true},
},
piece: &schedulerv1.PieceResult{
Code: commonv1.Code_ClientWaitPieceReady,
@ -3075,7 +3090,7 @@ func TestService_handlePieceFail(t *testing.T) {
config: &config.Config{
Scheduler: mockSchedulerConfig,
SeedPeer: config.SeedPeerConfig{Enable: true},
Metrics: config.MetricsConfig{EnablePeerHost: true},
Metrics: config.MetricsConfig{EnableHost: true},
},
piece: &schedulerv1.PieceResult{
Code: commonv1.Code_PeerTaskNotFound,
@ -3104,7 +3119,7 @@ func TestService_handlePieceFail(t *testing.T) {
config: &config.Config{
Scheduler: mockSchedulerConfig,
SeedPeer: config.SeedPeerConfig{Enable: true},
Metrics: config.MetricsConfig{EnablePeerHost: true},
Metrics: config.MetricsConfig{EnableHost: true},
},
piece: &schedulerv1.PieceResult{
Code: commonv1.Code_ClientPieceNotFound,
@ -3132,7 +3147,7 @@ func TestService_handlePieceFail(t *testing.T) {
config: &config.Config{
Scheduler: mockSchedulerConfig,
SeedPeer: config.SeedPeerConfig{Enable: true},
Metrics: config.MetricsConfig{EnablePeerHost: true},
Metrics: config.MetricsConfig{EnableHost: true},
},
piece: &schedulerv1.PieceResult{
Code: commonv1.Code_ClientPieceRequestFail,
@ -3161,7 +3176,7 @@ func TestService_handlePieceFail(t *testing.T) {
config: &config.Config{
Scheduler: mockSchedulerConfig,
SeedPeer: config.SeedPeerConfig{Enable: true},
Metrics: config.MetricsConfig{EnablePeerHost: true},
Metrics: config.MetricsConfig{EnableHost: true},
},
piece: &schedulerv1.PieceResult{
Code: commonv1.Code_ClientPieceRequestFail,
@ -3199,7 +3214,7 @@ func TestService_handlePieceFail(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
peer := resource.NewPeer(mockPeerID, mockTask, mockHost)
parent := resource.NewPeer(mockSeedPeerID, mockTask, mockHost)
seedPeer := resource.NewMockSeedPeer(ctl)
@ -3324,9 +3339,9 @@ func TestService_handlePeerSuccess(t *testing.T) {
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
peer := resource.NewPeer(mockPeerID, mockTask, mockHost)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig, storage)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduler, dynconfig, storage)
tc.mock(peer)
svc.handlePeerSuccess(context.Background(), peer)
@ -3401,11 +3416,11 @@ func TestService_handlePeerFail(t *testing.T) {
res := resource.NewMockResource(ctl)
dynconfig := configmocks.NewMockDynconfigInterface(ctl)
storage := storagemocks.NewMockStorage(ctl)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig, storage)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduler, dynconfig, storage)
mockHost := resource.NewHost(
mockRawHost.ID, mockRawHost.IP, mockRawHost.Hostname,
mockRawHost.Port, mockRawHost.DownloadPort, mockRawHost.Type)
mockTask := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
mockTask := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
peer := resource.NewPeer(mockSeedPeerID, mockTask, mockHost)
child := resource.NewPeer(mockPeerID, mockTask, mockHost)
@ -3487,8 +3502,8 @@ func TestService_handleTaskSuccess(t *testing.T) {
res := resource.NewMockResource(ctl)
dynconfig := configmocks.NewMockDynconfigInterface(ctl)
storage := storagemocks.NewMockStorage(ctl)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig, storage)
task := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduler, dynconfig, storage)
task := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
tc.mock(task)
svc.handleTaskSuccess(context.Background(), task, tc.result)
@ -3626,8 +3641,8 @@ func TestService_handleTaskFail(t *testing.T) {
res := resource.NewMockResource(ctl)
dynconfig := configmocks.NewMockDynconfigInterface(ctl)
storage := storagemocks.NewMockStorage(ctl)
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnablePeerHost: true}}, res, scheduler, dynconfig, storage)
task := resource.NewTask(mockTaskID, mockTaskURL, commonv2.TaskType_DFDAEMON, mockTaskURLMeta, resource.WithBackToSourceLimit(mockTaskBackToSourceLimit))
svc := NewV1(&config.Config{Scheduler: mockSchedulerConfig, Metrics: config.MetricsConfig{EnableHost: true}}, res, scheduler, dynconfig, storage)
task := resource.NewTask(mockTaskID, mockTaskURL, mockTaskDigest, mockTaskTag, mockTaskApplication, commonv2.TaskType_DFDAEMON, mockTaskFilters, mockTaskHeader, mockTaskBackToSourceLimit, resource.WithPieceSize(mockTaskPieceSize))
tc.mock(task)
svc.handleTaskFailure(context.Background(), task, tc.backToSourceErr, tc.seedPeerErr)
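
For reference, the refactor widens resource.NewTask from the v1 URL-meta signature to explicit v2 fields (digest, tag, application, filters, header, back-to-source limit) and renames the metrics flag EnablePeerHost to EnableHost; triggerSeedPeerTask likewise gains a peer-range argument. A minimal sketch of the new constructor call, assuming the mock* fixtures these tests define elsewhere:

    // Sketch only: the parameter roles are inferred from the identifier
    // names in the diff, not from the constructor's documentation.
    task := resource.NewTask(
        mockTaskID,                 // task id
        mockTaskURL,                // download URL
        mockTaskDigest,             // content digest
        mockTaskTag,                // tag
        mockTaskApplication,        // application name
        commonv2.TaskType_DFDAEMON, // v2 task type
        mockTaskFilters,            // URL filters
        mockTaskHeader,             // request header
        mockTaskBackToSourceLimit,  // back-to-source limit
        resource.WithPieceSize(mockTaskPieceSize),
    )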


@ -20,6 +20,9 @@ scheduler:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
enableHost: true
config:
verbose: true
@ -46,6 +49,8 @@ seedPeer:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
download:
@ -72,10 +77,11 @@ dfdaemon:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
pprofPort: 9999
metrics: 127.0.0.1:8888
download:
prefetch: true
recursiveConcurrent:
@ -120,5 +126,7 @@ manager:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
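
In the e2e chart values above, metrics settings move under a dedicated per-component metrics key, replacing dfdaemon's old inline pprofPort/metrics address; a minimal sketch of the resulting scheduler block, assuming the chart layout shown:

    scheduler:
      metrics:
        enable: true      # turn the metrics service on
        enableHost: true  # replaces the earlier enablePeerHost flag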


@ -20,6 +20,9 @@ scheduler:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
enableHost: true
config:
verbose: true
@ -46,6 +49,8 @@ seedPeer:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
download:
@ -76,10 +81,11 @@ dfdaemon:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
pprofPort: 9999
metrics: 127.0.0.1:8888
download:
prefetch: true
concurrent:
@ -125,5 +131,7 @@ manager:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true


@ -20,6 +20,9 @@ scheduler:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
enableHost: true
config:
seedPeer:
enable: false
@ -49,10 +52,11 @@ dfdaemon:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
pprofPort: 9999
metrics: 127.0.0.1:8888
download:
prefetch: true
concurrent:
@ -96,5 +100,7 @@ manager:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true


@ -20,6 +20,9 @@ scheduler:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
enableHost: true
config:
verbose: true
security:
@ -85,6 +88,8 @@ seedPeer:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
download:
@ -154,10 +159,11 @@ dfdaemon:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
pprofPort: 9999
metrics: 127.0.0.1:8888
download:
prefetch: true
concurrent:
@ -242,6 +248,8 @@ manager:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
security:


@ -20,6 +20,9 @@ scheduler:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
enableHost: true
config:
verbose: true
security:
@ -83,6 +86,8 @@ seedPeer:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
download:
@ -150,10 +155,11 @@ dfdaemon:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
pprofPort: 9999
metrics: 127.0.0.1:8888
download:
prefetch: true
concurrent:
@ -236,6 +242,8 @@ manager:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
security:


@ -20,6 +20,9 @@ scheduler:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
enableHost: true
config:
verbose: true
network:
@ -48,6 +51,8 @@ seedPeer:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
download:
@ -76,10 +81,11 @@ dfdaemon:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
pprofPort: 9999
metrics: 127.0.0.1:8888
download:
prefetch: true
scheduler:
@ -123,6 +129,8 @@ manager:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
network:


@ -20,6 +20,9 @@ scheduler:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
enableHost: true
config:
verbose: true
@ -46,6 +49,8 @@ seedPeer:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
download:
@ -72,10 +77,11 @@ dfdaemon:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
pprofPort: 9999
metrics: 127.0.0.1:8888
download:
prefetch: true
splitRunningTasks: true
@ -118,5 +124,7 @@ manager:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true


@ -20,6 +20,9 @@ scheduler:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
enableHost: true
config:
verbose: true
@ -46,6 +49,8 @@ seedPeer:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
download:
@ -72,10 +77,11 @@ dfdaemon:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true
pprofPort: 9999
metrics: 127.0.0.1:8888
download:
prefetch: true
scheduler:
@ -117,6 +123,8 @@ manager:
- name: artifact
hostPath:
path: /tmp/artifact
metrics:
enable: true
config:
verbose: true