Adjust CDN module package (#893)

* adjust CDN code package structure

Signed-off-by: sunwp <244372610@qq.com>
This commit is contained in:
sunwp 2021-12-11 20:17:39 +08:00 committed by Gaius
parent 40a281a752
commit e252ef5880
No known key found for this signature in database
GPG Key ID: 8B4E5D1290FA2FFB
62 changed files with 2878 additions and 3375 deletions

View File

@ -19,33 +19,27 @@ package cdn
import (
"context"
"fmt"
"net/http"
"runtime"
"time"
"github.com/pkg/errors"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/gc"
"d7y.io/dragonfly/v2/cdn/metrics"
"d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/cdn/rpcserver"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/supervisor/gc"
"d7y.io/dragonfly/v2/cdn/supervisor/progress"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc"
"d7y.io/dragonfly/v2/pkg/rpc/manager"
managerclient "d7y.io/dragonfly/v2/pkg/rpc/manager/client"
managerClient "d7y.io/dragonfly/v2/pkg/rpc/manager/client"
"d7y.io/dragonfly/v2/pkg/util/hostutils"
"d7y.io/dragonfly/v2/pkg/util/net/iputils"
)
const (
gracefulStopTimeout = 10 * time.Second
)
type Server struct {
@ -53,79 +47,121 @@ type Server struct {
config *config.Config
// GRPC server
grpcServer *grpc.Server
grpcServer *rpcserver.Server
// Metrics server
metricsServer *http.Server
metricsServer *metrics.Server
// Manager client
managerClient managerclient.Client
configServer managerClient.Client
// gc Server
gcServer *gc.Server
}
// New creates a brand new server instance.
// New creates a brand-new server instance.
func New(cfg *config.Config) (*Server, error) {
s := &Server{config: cfg}
if ok := storage.IsSupport(cfg.StorageMode); !ok {
return nil, fmt.Errorf("os %s is not support storage mode %s", runtime.GOOS, cfg.StorageMode)
}
// Initialize plugins
if err := plugins.Initialize(cfg.Plugins); err != nil {
return nil, err
return nil, errors.Wrapf(err, "init plugins")
}
// Initialize task manager
taskManager, err := task.NewManager(cfg)
if err != nil {
return nil, errors.Wrapf(err, "create task manager")
}
// Initialize progress manager
progressMgr, err := progress.NewManager()
progressManager, err := progress.NewManager(taskManager)
if err != nil {
return nil, errors.Wrapf(err, "create progress manager")
}
// Initialize storage manager
storageMgr, ok := storage.Get(cfg.StorageMode)
storageManager, ok := storage.Get(cfg.StorageMode)
if !ok {
return nil, fmt.Errorf("can not find storage pattern %s", cfg.StorageMode)
}
storageManager.Initialize(taskManager)
// Initialize CDN manager
cdnMgr, err := cdn.NewManager(cfg, storageMgr, progressMgr)
cdnManager, err := cdn.NewManager(cfg, storageManager, progressManager, taskManager)
if err != nil {
return nil, errors.Wrapf(err, "create cdn manager")
}
// Initialize task manager
taskMgr, err := task.NewManager(cfg, cdnMgr, progressMgr)
// Initialize CDN service
service, err := supervisor.NewCDNService(taskManager, cdnManager, progressManager)
if err != nil {
return nil, errors.Wrapf(err, "create task manager")
return nil, errors.Wrapf(err, "create cdn service")
}
// Initialize storage manager
storageMgr.Initialize(taskMgr)
// Initialize storage manager
var opts []grpc.ServerOption
if s.config.Options.Telemetry.Jaeger != "" {
if cfg.Options.Telemetry.Jaeger != "" {
opts = append(opts, grpc.ChainUnaryInterceptor(otelgrpc.UnaryServerInterceptor()), grpc.ChainStreamInterceptor(otelgrpc.StreamServerInterceptor()))
}
grpcServer, err := rpcserver.New(cfg, taskMgr, opts...)
grpcServer, err := rpcserver.New(cfg, service, opts...)
if err != nil {
return nil, errors.Wrap(err, "create seedServer")
}
s.grpcServer = grpcServer
// Initialize prometheus
if cfg.Metrics != nil {
s.metricsServer = metrics.New(cfg.Metrics, grpcServer)
return nil, errors.Wrap(err, "create rpcServer")
}
// Initialize manager client
// Initialize gc server
gcServer, err := gc.New()
if err != nil {
return nil, errors.Wrap(err, "create gcServer")
}
var metricsServer *metrics.Server
if cfg.Metrics != nil && cfg.Metrics.Addr != "" {
// Initialize metrics server
metricsServer, err = metrics.New(cfg.Metrics, grpcServer.Server)
if err != nil {
return nil, errors.Wrap(err, "create metricsServer")
}
}
// Initialize configServer
var configServer managerClient.Client
if cfg.Manager.Addr != "" {
managerClient, err := managerclient.New(cfg.Manager.Addr)
configServer, err = managerClient.New(cfg.Manager.Addr)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "create configServer")
}
s.managerClient = managerClient
}
return &Server{
config: cfg,
grpcServer: grpcServer,
metricsServer: metricsServer,
configServer: configServer,
gcServer: gcServer,
}, nil
}
// Register to manager
if _, err := s.managerClient.UpdateCDN(&manager.UpdateCDNRequest{
func (s *Server) Serve() error {
go func() {
// Start GC
if err := s.gcServer.Serve(); err != nil {
logger.Fatalf("start gc task failed: %v", err)
}
}()
go func() {
if s.metricsServer != nil {
// Start metrics server
if err := s.metricsServer.ListenAndServe(s.metricsServer.Handler()); err != nil {
logger.Fatalf("start metrics server failed: %v", err)
}
}
}()
go func() {
if s.configServer != nil {
CDNInstance, err := s.configServer.UpdateCDN(&manager.UpdateCDNRequest{
SourceType: manager.SourceType_CDN_SOURCE,
HostName: hostutils.FQDNHostname,
Ip: s.config.AdvertiseIP,
@ -134,96 +170,45 @@ func New(cfg *config.Config) (*Server, error) {
Idc: s.config.Host.IDC,
Location: s.config.Host.Location,
CdnClusterId: uint64(s.config.Manager.CDNClusterID),
}); err != nil {
return nil, err
})
if err != nil {
logger.Fatalf("update cdn instance failed: %v", err)
}
}
return s, nil
}
func (s *Server) Serve() error {
// Start GC
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if err := gc.StartGC(ctx); err != nil {
return err
}
// Started metrics server
if s.metricsServer != nil {
go func() {
logger.Infof("started metrics server at %s", s.metricsServer.Addr)
if err := s.metricsServer.ListenAndServe(); err != nil {
if err == http.ErrServerClosed {
return
}
logger.Fatalf("metrics server closed unexpect: %#v", err)
}
}()
}
// Serve Keepalive
if s.managerClient != nil {
go func() {
logger.Info("start keepalive to manager")
s.managerClient.KeepAlive(s.config.Manager.KeepAlive.Interval, &manager.KeepAliveRequest{
logger.Infof("====starting keepalive cdn instance %#v to manager %s====", CDNInstance)
s.configServer.KeepAlive(s.config.Manager.KeepAlive.Interval, &manager.KeepAliveRequest{
HostName: hostutils.FQDNHostname,
SourceType: manager.SourceType_CDN_SOURCE,
ClusterId: uint64(s.config.Manager.CDNClusterID),
})
}
}()
}
// Generate GRPC listener
var listen = iputils.IPv4
if s.config.AdvertiseIP != "" {
listen = s.config.AdvertiseIP
}
lis, _, err := rpc.ListenWithPortRange(listen, s.config.ListenPort, s.config.ListenPort)
if err != nil {
logger.Fatalf("net listener failed to start: %v", err)
}
defer lis.Close()
// Started GRPC server
logger.Infof("started grpc server at %s://%s", lis.Addr().Network(), lis.Addr().String())
if err := s.grpcServer.Serve(lis); err != nil {
logger.Errorf("stoped grpc server: %v", err)
return err
}
return nil
// Start grpc server
return s.grpcServer.ListenAndServe()
}
func (s *Server) Stop() {
func (s *Server) Stop() error {
g, ctx := errgroup.WithContext(context.Background())
g.Go(func() error {
return s.gcServer.Shutdown()
})
if s.configServer != nil {
// Stop manager client
if s.managerClient != nil {
s.managerClient.Close()
logger.Info("manager client closed")
g.Go(func() error {
return s.configServer.Close()
})
}
g.Go(func() error {
// Stop metrics server
if s.metricsServer != nil {
if err := s.metricsServer.Shutdown(context.Background()); err != nil {
logger.Errorf("metrics server failed to stop: %#v", err)
}
logger.Info("metrics server closed under request")
}
return s.metricsServer.Shutdown(ctx)
})
// Stop GRPC server
stopped := make(chan struct{})
go func() {
s.grpcServer.GracefulStop()
logger.Info("grpc server closed under request")
close(stopped)
}()
t := time.NewTimer(gracefulStopTimeout)
select {
case <-t.C:
s.grpcServer.Stop()
case <-stopped:
t.Stop()
}
g.Go(func() error {
// Stop grpc server
return s.grpcServer.Shutdown()
})
return g.Wait()
}

View File

@ -21,12 +21,9 @@ import (
"gopkg.in/yaml.v3"
"d7y.io/dragonfly/v2/cdn/constants"
"d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/storedriver/local"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/disk"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/hybrid"
"d7y.io/dragonfly/v2/cmd/dependency/base"
"d7y.io/dragonfly/v2/pkg/unit"
"d7y.io/dragonfly/v2/pkg/util/net/iputils"
@ -60,13 +57,13 @@ func NewDefaultPlugins() map[plugins.PluginType][]*plugins.PluginProperties {
return map[plugins.PluginType][]*plugins.PluginProperties{
plugins.StorageDriverPlugin: {
{
Name: local.DiskDriverName,
Name: "disk",
Enable: true,
Config: &storedriver.Config{
BaseDir: DefaultDiskBaseDir,
},
}, {
Name: local.MemoryDriverName,
Name: "memory",
Enable: false,
Config: &storedriver.Config{
BaseDir: DefaultMemoryBaseDir,
@ -74,14 +71,14 @@ func NewDefaultPlugins() map[plugins.PluginType][]*plugins.PluginProperties {
},
}, plugins.StorageManagerPlugin: {
{
Name: disk.StorageMode,
Name: "disk",
Enable: true,
Config: &storage.Config{
Config: &StorageConfig{
GCInitialDelay: 0 * time.Second,
GCInterval: 15 * time.Second,
DriverConfigs: map[string]*storage.DriverConfig{
local.DiskDriverName: {
GCConfig: &storage.GCConfig{
DriverConfigs: map[string]*DriverConfig{
"disk": {
GCConfig: &GCConfig{
YoungGCThreshold: 100 * unit.GB,
FullGCThreshold: 5 * unit.GB,
CleanRatio: 1,
@ -90,22 +87,22 @@ func NewDefaultPlugins() map[plugins.PluginType][]*plugins.PluginProperties {
},
},
}, {
Name: hybrid.StorageMode,
Name: "hybrid",
Enable: false,
Config: &storage.Config{
Config: &StorageConfig{
GCInitialDelay: 0 * time.Second,
GCInterval: 15 * time.Second,
DriverConfigs: map[string]*storage.DriverConfig{
local.DiskDriverName: {
GCConfig: &storage.GCConfig{
DriverConfigs: map[string]*DriverConfig{
"disk": {
GCConfig: &GCConfig{
YoungGCThreshold: 100 * unit.GB,
FullGCThreshold: 5 * unit.GB,
CleanRatio: 1,
IntervalThreshold: 2 * time.Hour,
},
},
local.MemoryDriverName: {
GCConfig: &storage.GCConfig{
"memory": {
GCConfig: &GCConfig{
YoungGCThreshold: 100 * unit.GB,
FullGCThreshold: 5 * unit.GB,
CleanRatio: 3,
@ -122,25 +119,46 @@ func NewDefaultPlugins() map[plugins.PluginType][]*plugins.PluginProperties {
// NewDefaultBaseProperties creates an base properties instant with default values.
func NewDefaultBaseProperties() *BaseProperties {
return &BaseProperties{
ListenPort: DefaultListenPort,
DownloadPort: DefaultDownloadPort,
SystemReservedBandwidth: DefaultSystemReservedBandwidth,
MaxBandwidth: DefaultMaxBandwidth,
FailAccessInterval: DefaultFailAccessInterval,
GCInitialDelay: DefaultGCInitialDelay,
GCMetaInterval: DefaultGCMetaInterval,
TaskExpireTime: DefaultTaskExpireTime,
StorageMode: DefaultStorageMode,
ListenPort: constants.DefaultListenPort,
DownloadPort: constants.DefaultDownloadPort,
SystemReservedBandwidth: constants.DefaultSystemReservedBandwidth,
MaxBandwidth: constants.DefaultMaxBandwidth,
AdvertiseIP: iputils.IPv4,
FailAccessInterval: constants.DefaultFailAccessInterval,
GCInitialDelay: constants.DefaultGCInitialDelay,
GCMetaInterval: constants.DefaultGCMetaInterval,
TaskExpireTime: constants.DefaultTaskExpireTime,
StorageMode: constants.DefaultStorageMode,
Manager: ManagerConfig{
KeepAlive: KeepAliveConfig{
Interval: DefaultKeepAliveInterval,
Interval: constants.DefaultKeepAliveInterval,
},
},
Host: HostConfig{},
Metrics: &RestConfig{
Addr: ":8080",
},
}
}
// StorageConfig holds the gc scheduling parameters of a storage manager
// plugin together with its per-driver gc settings (see NewDefaultPlugins
// for the default "disk"/"hybrid" instances).
type StorageConfig struct {
	GCInitialDelay time.Duration            `yaml:"gcInitialDelay"` // delay before the first gc run
	GCInterval     time.Duration            `yaml:"gcInterval"`     // fixed interval between gc runs
	DriverConfigs  map[string]*DriverConfig `yaml:"driverConfigs"`  // keyed by store driver name, e.g. "disk", "memory"
}
// DriverConfig holds the per-storage-driver configuration; currently it
// only carries the driver's gc thresholds.
type DriverConfig struct {
	GCConfig *GCConfig `yaml:"gcConfig"`
}
// GCConfig gc config: thresholds steering a driver's storage gc. The
// exact eviction semantics live in the storage gc implementation (not
// shown here).
type GCConfig struct {
	YoungGCThreshold  unit.Bytes    `yaml:"youngGCThreshold"`  // byte threshold for a "young" gc pass
	FullGCThreshold   unit.Bytes    `yaml:"fullGCThreshold"`   // byte threshold for a full gc pass
	CleanRatio        int           `yaml:"cleanRatio"`        // proportion of candidates cleaned per run — TODO confirm units against gc logic
	IntervalThreshold time.Duration `yaml:"intervalThreshold"` // access-interval cutoff used when selecting gc candidates — TODO confirm
}
// BaseProperties contains all basic properties of cdn system.
type BaseProperties struct {
// ListenPort is the port cdn server listens on.

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package config
package constants
import (
"time"

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package config
package constants
import "go.opentelemetry.io/otel/attribute"
@ -24,7 +24,6 @@ const (
AttributePiecePacketResult = attribute.Key("d7y.piece.packet.result")
AttributeTaskID = attribute.Key("d7y.task.id")
AttributeTaskStatus = attribute.Key("d7y.task.status")
AttributeTaskURL = attribute.Key("d7y.task.url")
AttributeTaskInfo = attribute.Key("d7y.taskInfo")
AttributeIfReuseTask = attribute.Key("d7y.task.already.exist")
AttributeSeedPiece = attribute.Key("d7y.seed.piece")
@ -48,7 +47,7 @@ const (
)
const (
EventHitUnReachableURL = "hit-unReachableURL"
EventHitUnreachableURL = "hit-unreachableURL"
EventRequestSourceFileLength = "request-source-file-length"
EventDeleteUnReachableTask = "downloaded"
EventInitSeedProgress = "init-seed-progress"

View File

@ -1,161 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package errors
import (
"fmt"
"github.com/pkg/errors"
)
// ErrURLNotReachable indicates that the given URL cannot be reached.
type ErrURLNotReachable struct {
	URL string
}

// Error implements the error interface.
func (e ErrURLNotReachable) Error() string {
	return fmt.Sprint("url ", e.URL, " not reachable")
}
// ErrTaskIDDuplicate indicates that a task id is already in use; Cause
// carries the underlying conflict.
type ErrTaskIDDuplicate struct {
	TaskID string
	Cause  error
}

// Error implements the error interface.
func (e ErrTaskIDDuplicate) Error() string {
	return fmt.Sprint("taskId ", e.TaskID, " conflict: ", e.Cause)
}
// ErrInconsistentValues indicates that an observed value does not match
// the expected one (e.g. an inconsistent number of pieces).
type ErrInconsistentValues struct {
	Expected interface{} // the value that was expected
	Actual   interface{} // the value actually observed
}

// Error implements the error interface.
func (e ErrInconsistentValues) Error() string {
	// Use %v, not %s: Expected/Actual are interface{} and are typically
	// non-strings (piece counts), which %s would render as "%!s(int=3)".
	return fmt.Sprintf("inconsistent number of pieces, expected %v, actual: %v", e.Expected, e.Actual)
}
// ErrResourceExpired indicates that the resource previously downloaded
// from URL has since expired at the origin.
type ErrResourceExpired struct {
	URL string
}

// Error implements the error interface.
func (e ErrResourceExpired) Error() string {
	return fmt.Sprint("url ", e.URL, " expired")
}
// ErrResourceNotSupportRangeRequest indicates that the origin behind URL
// cannot serve HTTP Range downloads.
type ErrResourceNotSupportRangeRequest struct {
	URL string
}

// Error implements the error interface.
func (e ErrResourceNotSupportRangeRequest) Error() string {
	return fmt.Sprint("url ", e.URL, " does not support range request")
}
// ErrFileNotExist indicates that a file or directory is missing.
type ErrFileNotExist struct {
	File string
}

// Error implements the error interface.
func (e ErrFileNotExist) Error() string {
	return fmt.Sprint("file or dir ", e.File, " not exist")
}
var (
	// ErrSystemError represents the error is a system error.
	ErrSystemError = errors.New("system error")

	// ErrTaskDownloadFail represents an exception was encountered while downloading the file.
	ErrTaskDownloadFail = errors.New("resource download failed")

	// ErrDataNotFound represents the data cannot be found.
	ErrDataNotFound = errors.New("data not found")

	// ErrInvalidValue represents the value is invalid.
	ErrInvalidValue = errors.New("invalid value")

	// ErrConvertFailed represents failed to convert.
	ErrConvertFailed = errors.New("convert failed")

	// ErrResourcesLacked represents a lack of resources, for example, the disk does not have enough space.
	ErrResourcesLacked = errors.New("resources lacked")
)
// IsSystemError reports whether the root cause of err is ErrSystemError.
func IsSystemError(err error) bool {
	cause := errors.Cause(err)
	return cause == ErrSystemError
}
// IsURLNotReachable reports whether the root cause of err is an
// ErrURLNotReachable.
func IsURLNotReachable(err error) bool {
	_, matched := errors.Cause(err).(ErrURLNotReachable)
	return matched
}
// IsTaskIDDuplicate reports whether the root cause of err is an
// ErrTaskIDDuplicate.
func IsTaskIDDuplicate(err error) bool {
	_, matched := errors.Cause(err).(ErrTaskIDDuplicate)
	return matched
}
// IsInconsistentValues reports whether the root cause of err is an
// ErrInconsistentValues.
func IsInconsistentValues(err error) bool {
	_, matched := errors.Cause(err).(ErrInconsistentValues)
	return matched
}
// IsDownloadFail reports whether the root cause of err is
// ErrTaskDownloadFail.
func IsDownloadFail(err error) bool {
	cause := errors.Cause(err)
	return cause == ErrTaskDownloadFail
}
// IsResourceExpired reports whether the root cause of err is an
// ErrResourceExpired.
func IsResourceExpired(err error) bool {
	_, matched := errors.Cause(err).(ErrResourceExpired)
	return matched
}
// IsResourceNotSupportRangeRequest reports whether the root cause of err
// is an ErrResourceNotSupportRangeRequest.
func IsResourceNotSupportRangeRequest(err error) bool {
	_, matched := errors.Cause(err).(ErrResourceNotSupportRangeRequest)
	return matched
}
// IsDataNotFound reports whether the root cause of err is
// ErrDataNotFound.
func IsDataNotFound(err error) bool {
	cause := errors.Cause(err)
	return cause == ErrDataNotFound
}
// IsInvalidValue reports whether the root cause of err is
// ErrInvalidValue.
func IsInvalidValue(err error) bool {
	cause := errors.Cause(err)
	return cause == ErrInvalidValue
}
// IsConvertFailed reports whether the root cause of err is
// ErrConvertFailed.
func IsConvertFailed(err error) bool {
	cause := errors.Cause(err)
	return cause == ErrConvertFailed
}
// IsFileNotExist reports whether the root cause of err is an
// ErrFileNotExist.
func IsFileNotExist(err error) bool {
	_, matched := errors.Cause(err).(ErrFileNotExist)
	return matched
}
// IsResourcesLacked reports whether the root cause of err is
// ErrResourcesLacked.
func IsResourcesLacked(err error) bool {
	cause := errors.Cause(err)
	return cause == ErrResourcesLacked
}

View File

@ -1,438 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package errors
import (
"fmt"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/suite"
)
// TestErrorSuite runs every test method of ErrorTestSuite.
func TestErrorSuite(t *testing.T) {
	suite.Run(t, new(ErrorTestSuite))
}
// ErrorTestSuite groups the table-driven tests for the package's Is*
// error-classifier helpers.
type ErrorTestSuite struct {
	suite.Suite
}
// TestIsConvertFailed checks IsConvertFailed against the bare sentinel,
// a wrapped sentinel, and an unrelated wrapped error.
func (s *ErrorTestSuite) TestIsConvertFailed() {
	type args struct {
		err error
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "equal",
			args: args{
				err: ErrConvertFailed,
			},
			want: true,
		}, {
			name: "wrap",
			args: args{
				err: errors.Wrapf(ErrConvertFailed, "wrap err"),
			},
			want: true,
		}, {
			name: "notEqual",
			args: args{
				err: errors.Wrapf(ErrInvalidValue, "invaid"),
			},
			want: false,
		},
	}
	for _, tt := range tests {
		s.Run(tt.name, func() {
			s.Equal(tt.want, IsConvertFailed(tt.args.err))
		})
	}
}
// TestIsDataNotFound checks IsDataNotFound against the bare sentinel, a
// wrapped sentinel, and an unrelated wrapped error.
func (s *ErrorTestSuite) TestIsDataNotFound() {
	type args struct {
		err error
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "equal",
			args: args{
				err: ErrDataNotFound,
			},
			want: true,
		}, {
			name: "wrap",
			args: args{
				err: errors.Wrapf(ErrDataNotFound, "wrap err"),
			},
			want: true,
		}, {
			name: "notEqual",
			args: args{
				err: errors.Wrapf(ErrInvalidValue, "invaid"),
			},
			want: false,
		},
	}
	for _, tt := range tests {
		s.Run(tt.name, func() {
			s.Equal(tt.want, IsDataNotFound(tt.args.err))
		})
	}
}
// TestIsDownloadFail checks IsDownloadFail against the bare sentinel, a
// doubly-wrapped sentinel, and an unrelated wrapped error.
func (s *ErrorTestSuite) TestIsDownloadFail() {
	type args struct {
		err error
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "equal",
			args: args{
				err: ErrTaskDownloadFail,
			},
			want: true,
		}, {
			name: "wrap",
			args: args{
				err: errors.Wrapf(errors.Wrapf(ErrTaskDownloadFail, "wrap err"), "wapp err"),
			},
			want: true,
		}, {
			name: "notEqual",
			args: args{
				err: errors.Wrapf(ErrInvalidValue, "invaid"),
			},
			want: false,
		},
	}
	for _, tt := range tests {
		s.Run(tt.name, func() {
			s.Equal(tt.want, IsDownloadFail(tt.args.err))
		})
	}
}
// TestIsFileNotExist checks IsFileNotExist against the bare error type,
// a doubly-wrapped instance, and an unrelated wrapped error.
func (s *ErrorTestSuite) TestIsFileNotExist() {
	type args struct {
		err error
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "equal",
			args: args{
				err: ErrFileNotExist{},
			},
			want: true,
		}, {
			name: "wrap",
			args: args{
				err: errors.Wrapf(errors.Wrapf(ErrFileNotExist{}, "wrap err"), "wapp err"),
			},
			want: true,
		}, {
			name: "notEqual",
			args: args{
				err: errors.Wrapf(ErrInvalidValue, "invaid"),
			},
			want: false,
		},
	}
	for _, tt := range tests {
		s.Run(tt.name, func() {
			s.Equal(tt.want, IsFileNotExist(tt.args.err))
		})
	}
}
// TestIsInvalidValue checks IsInvalidValue against the bare sentinel, a
// doubly-wrapped sentinel, and an unrelated wrapped error.
func (s *ErrorTestSuite) TestIsInvalidValue() {
	type args struct {
		err error
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "equal",
			args: args{
				err: ErrInvalidValue,
			},
			want: true,
		}, {
			name: "wrap",
			args: args{
				err: errors.Wrapf(errors.Wrapf(ErrInvalidValue, "wrap err"), "wapp err"),
			},
			want: true,
		}, {
			// The original "notEqual" case wrapped ErrInvalidValue itself and
			// expected true, which duplicated the "wrap" case and never
			// exercised the negative path. Wrap an unrelated sentinel and
			// expect false, matching every sibling test in this suite.
			name: "notEqual",
			args: args{
				err: errors.Wrapf(ErrConvertFailed, "convert"),
			},
			want: false,
		},
	}
	for _, tt := range tests {
		s.Run(tt.name, func() {
			s.Equal(tt.want, IsInvalidValue(tt.args.err))
		})
	}
}
// TestIsInconsistentValues checks IsInconsistentValues against the bare
// error type, a doubly-wrapped instance, and an unrelated wrapped error.
func (s *ErrorTestSuite) TestIsInconsistentValues() {
	type args struct {
		err error
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "equal",
			args: args{
				err: ErrInconsistentValues{},
			},
			want: true,
		}, {
			name: "wrap",
			args: args{
				err: errors.Wrapf(errors.Wrapf(ErrInconsistentValues{}, "wrap err"), "wapp err"),
			},
			want: true,
		}, {
			name: "notEqual",
			args: args{
				err: errors.Wrapf(ErrInvalidValue, "invaid"),
			},
			want: false,
		},
	}
	for _, tt := range tests {
		s.Run(tt.name, func() {
			s.Equal(tt.want, IsInconsistentValues(tt.args.err))
		})
	}
}
// TestIsResourceExpired checks IsResourceExpired against the bare error
// type, a doubly-wrapped instance, and an unrelated wrapped error.
func (s *ErrorTestSuite) TestIsResourceExpired() {
	type args struct {
		err error
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "equal",
			args: args{
				err: ErrResourceExpired{},
			},
			want: true,
		}, {
			name: "wrap",
			args: args{
				err: errors.Wrapf(errors.Wrapf(ErrResourceExpired{}, "wrap err"), "wapp err"),
			},
			want: true,
		}, {
			name: "notEqual",
			args: args{
				err: errors.Wrapf(ErrInvalidValue, "invaid"),
			},
			want: false,
		},
	}
	for _, tt := range tests {
		s.Run(tt.name, func() {
			s.Equal(tt.want, IsResourceExpired(tt.args.err))
		})
	}
}
// TestIsResourceNotSupportRangeRequest checks the classifier against the
// bare error type, a doubly-wrapped instance, and an unrelated error.
func (s *ErrorTestSuite) TestIsResourceNotSupportRangeRequest() {
	type args struct {
		err error
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "equal",
			args: args{
				err: ErrResourceNotSupportRangeRequest{},
			},
			want: true,
		}, {
			name: "wrap",
			args: args{
				err: errors.Wrapf(errors.Wrapf(ErrResourceNotSupportRangeRequest{}, "wrap err"), "wapp err"),
			},
			want: true,
		}, {
			name: "notEqual",
			args: args{
				err: errors.Wrapf(ErrInvalidValue, "invaid"),
			},
			want: false,
		},
	}
	for _, tt := range tests {
		s.Run(tt.name, func() {
			s.Equal(tt.want, IsResourceNotSupportRangeRequest(tt.args.err))
		})
	}
}
// TestIsSystemError checks IsSystemError against the bare sentinel, a
// doubly-wrapped sentinel, and an unrelated wrapped error.
func (s *ErrorTestSuite) TestIsSystemError() {
	type args struct {
		err error
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "equal",
			args: args{
				err: ErrSystemError,
			},
			want: true,
		}, {
			name: "wrap",
			args: args{
				err: errors.Wrapf(errors.Wrapf(ErrSystemError, "wrap err"), "wapp err"),
			},
			want: true,
		}, {
			name: "notEqual",
			args: args{
				err: errors.Wrapf(ErrInvalidValue, "invaid"),
			},
			want: false,
		},
	}
	for _, tt := range tests {
		s.Run(tt.name, func() {
			s.Equal(tt.want, IsSystemError(tt.args.err))
		})
	}
}
// TestIsTaskIDDuplicate checks IsTaskIDDuplicate against a populated
// instance, a wrapped instance, and an unrelated wrapped error.
func (s *ErrorTestSuite) TestIsTaskIDDuplicate() {
	type args struct {
		err error
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "equal",
			args: args{
				err: ErrTaskIDDuplicate{
					TaskID: "test",
					Cause:  fmt.Errorf("test"),
				},
			},
			want: true,
		}, {
			name: "wrap",
			args: args{
				err: errors.Wrapf(ErrTaskIDDuplicate{
					TaskID: "test",
					Cause:  fmt.Errorf("test")}, "wapp err"),
			},
			want: true,
		}, {
			name: "notEqual",
			args: args{
				err: errors.Wrapf(ErrInvalidValue, "invaid"),
			},
			want: false,
		},
	}
	for _, tt := range tests {
		s.Run(tt.name, func() {
			s.Equal(tt.want, IsTaskIDDuplicate(tt.args.err))
		})
	}
}
// TestIsURLNotReachable checks IsURLNotReachable against a populated
// instance, a wrapped instance, and an unrelated wrapped error.
func (s *ErrorTestSuite) TestIsURLNotReachable() {
	type args struct {
		err error
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "equal",
			args: args{
				err: ErrURLNotReachable{
					URL: "test",
				},
			},
			want: true,
		}, {
			name: "wrap",
			args: args{
				err: errors.Wrapf(ErrURLNotReachable{
					URL: "test",
				}, "wapp err"),
			},
			want: true,
		}, {
			name: "notEqual",
			args: args{
				err: errors.Wrapf(ErrInvalidValue, "invaid"),
			},
			want: false,
		},
	}
	for _, tt := range tests {
		s.Run(tt.name, func() {
			s.Equal(tt.want, IsURLNotReachable(tt.args.err))
		})
	}
}

View File

@ -17,7 +17,6 @@
package gc
import (
"context"
"strings"
"sync"
"time"
@ -25,6 +24,54 @@ import (
logger "d7y.io/dragonfly/v2/internal/dflog"
)
// Server drives the registered gc executors: Serve runs each one on its
// own schedule until Shutdown is called.
type Server struct {
	// done signals the per-executor goroutines to stop.
	done chan struct{}
	// wg tracks one goroutine per registered gc executor.
	wg *sync.WaitGroup
}
// New constructs a gc Server that is ready to Serve.
func New() (*Server, error) {
	server := &Server{
		done: make(chan struct{}),
		wg:   &sync.WaitGroup{},
	}
	return server, nil
}
// Serve starts one goroutine per registered gc executor and blocks until
// all of them have exited (i.e. until Shutdown is called).
func (server *Server) Serve() error {
	logger.Info("====starting gc jobs====")
	for name, executorWrapper := range gcExecutorWrappers {
		server.wg.Add(1)
		// start a goroutine to gc
		go func(name string, wrapper *ExecutorWrapper) {
			defer server.wg.Done()
			logger.Debugf("start %s gc mission gc initialDelay: %s, gc initial interval: %s", name, wrapper.gcInitialDelay, wrapper.gcInterval)
			// delay executing GC after initialDelay
			time.Sleep(wrapper.gcInitialDelay)
			// execute the GC by fixed delay
			ticker := time.NewTicker(wrapper.gcInterval)
			// Stop the ticker when this goroutine exits so its underlying
			// timer is released (the original leaked it).
			defer ticker.Stop()
			for {
				select {
				case <-server.done:
					logger.Infof("exit %s gc task", name)
					return
				case <-ticker.C:
					if err := wrapper.gcExecutor.GC(); err != nil {
						logger.Errorf("%s gc task execute failed: %v", name, err)
					}
				}
			}
		}(name, executorWrapper)
	}
	server.wg.Wait()
	return nil
}
// Shutdown stops every gc goroutine started by Serve and waits for them
// all to exit.
func (server *Server) Shutdown() error {
	defer logger.Infof("====stopped gc server====")
	// close(done) broadcasts the stop signal to all gc goroutines. The
	// original single send (done <- struct{}{}) would wake only one of
	// them; with more than one registered executor the rest kept running
	// and wg.Wait blocked forever.
	close(server.done)
	server.wg.Wait()
	return nil
}
type Executor interface {
GC() error
}
@ -46,36 +93,5 @@ func Register(name string, gcInitialDelay time.Duration, gcInterval time.Duratio
gcInterval: gcInterval,
gcExecutor: gcExecutor,
}
}
// StartGC starts to do the gc jobs.
func StartGC(ctx context.Context) error {
logger.Debugf("====start the gc jobs====")
var wg sync.WaitGroup
for name, executorWrapper := range gcExecutorWrappers {
wg.Add(1)
// start a goroutine to gc
go func(name string, wrapper *ExecutorWrapper) {
logger.Debugf("start the %s gc task", name)
// delay to execute GC after initialDelay
time.Sleep(wrapper.gcInitialDelay)
wg.Done()
// execute the GC by fixed delay
ticker := time.NewTicker(wrapper.gcInterval)
for {
select {
case <-ctx.Done():
logger.Infof("exit %s gc task", name)
return
case <-ticker.C:
if err := wrapper.gcExecutor.GC(); err != nil {
logger.Errorf("%s gc task execute failed: %v", name, err)
}
}
}
}(name, executorWrapper)
}
wg.Wait()
logger.Debugf("====all gc jobs have been launched====")
return nil
logger.Infof("register %s gc task, gcInitialDelay %s, gcInterval %s", name, gcInitialDelay, gcInterval)
}

View File

@ -17,16 +17,21 @@
package metrics
import (
"context"
"net"
"net/http"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/grpc"
"gopkg.in/yaml.v3"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/internal/constants"
logger "d7y.io/dragonfly/v2/internal/dflog"
)
// Variables declared for metrics.
@ -60,14 +65,49 @@ var (
})
)
func New(cfg *config.RestConfig, grpcServer *grpc.Server) *http.Server {
grpc_prometheus.Register(grpcServer)
type Server struct {
config *config.RestConfig
httpServer *http.Server
}
func New(config *config.RestConfig, rpcServer *grpc.Server) (*Server, error) {
// scheduler config values
s, err := yaml.Marshal(config)
if err != nil {
return nil, errors.Wrap(err, "marshal metrics server config")
}
logger.Infof("metrics server config: \n%s", s)
grpc_prometheus.Register(rpcServer)
return &Server{
config: config,
httpServer: &http.Server{},
}, nil
}
// Handler returns an http handler for the blob server.
func (s *Server) Handler() http.Handler {
mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.Handler())
return &http.Server{
Addr: cfg.Addr,
Handler: mux,
}
return mux
}
// ListenAndServe binds the configured address and serves h until the
// server is shut down; a graceful shutdown is not reported as an error.
func (s *Server) ListenAndServe(h http.Handler) error {
	listener, err := net.Listen("tcp", s.config.Addr)
	if err != nil {
		return err
	}
	s.httpServer.Handler = h
	logger.Infof("====starting metrics server at %s====", s.config.Addr)
	if serveErr := s.httpServer.Serve(listener); !errors.Is(serveErr, http.ErrServerClosed) {
		return serveErr
	}
	return nil
}
// Shutdown gracefully stops the metrics HTTP server; ctx bounds how long
// in-flight requests are allowed to drain.
func (s *Server) Shutdown(ctx context.Context) error {
	defer logger.Infof("====stopped metrics server====")
	return s.httpServer.Shutdown(ctx)
}

View File

@ -38,7 +38,7 @@ func NewRepository() Repository {
// Manager manages all plugin builders and plugin instants.
type Manager interface {
// GetBuilder adds a Builder object with the giving plugin type and name.
// AddBuilder adds a Builder object with the giving plugin type and name.
AddBuilder(pt PluginType, name string, b Builder) error
// GetBuilder returns a Builder object with the giving plugin type and name.

View File

@ -18,63 +18,70 @@ package rpcserver
import (
"context"
"encoding/json"
"fmt"
"time"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc"
"google.golang.org/grpc/peer"
"d7y.io/dragonfly/v2/cdn/config"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors"
"d7y.io/dragonfly/v2/cdn/constants"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/internal/dferrors"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/internal/idgen"
"d7y.io/dragonfly/v2/pkg/rpc"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
cdnserver "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/server"
"d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/util/hostutils"
)
var tracer = otel.Tracer("cdn-server")
type server struct {
type Server struct {
*grpc.Server
taskMgr supervisor.SeedTaskMgr
cfg *config.Config
config *config.Config
service supervisor.CDNService
}
// New returns a new Manager Object.
func New(cfg *config.Config, taskMgr supervisor.SeedTaskMgr, opts ...grpc.ServerOption) (*grpc.Server, error) {
svr := &server{
taskMgr: taskMgr,
cfg: cfg,
func New(config *config.Config, cdnService supervisor.CDNService, opts ...grpc.ServerOption) (*Server, error) {
svr := &Server{
config: config,
service: cdnService,
}
svr.Server = cdnserver.New(svr, opts...)
return svr.Server, nil
return svr, nil
}
func (css *server) ObtainSeeds(ctx context.Context, req *cdnsystem.SeedRequest, psc chan<- *cdnsystem.PieceSeed) (err error) {
func (css *Server) ObtainSeeds(ctx context.Context, req *cdnsystem.SeedRequest, psc chan<- *cdnsystem.PieceSeed) (err error) {
clientAddr := "unknown"
if pe, ok := peer.FromContext(ctx); ok {
clientAddr = pe.Addr.String()
}
logger.Infof("trigger obtain seed for taskID: %s, url: %s, urlMeta: %+v client: %s", req.TaskId, req.Url, req.UrlMeta, clientAddr)
var span trace.Span
ctx, span = tracer.Start(ctx, config.SpanObtainSeeds, trace.WithSpanKind(trace.SpanKindServer))
ctx, span = tracer.Start(ctx, constants.SpanObtainSeeds, trace.WithSpanKind(trace.SpanKindServer))
defer span.End()
span.SetAttributes(config.AttributeObtainSeedsRequest.String(req.String()))
span.SetAttributes(config.AttributeTaskID.String(req.TaskId))
logger.Infof("obtain seeds request: %#v", req)
span.SetAttributes(constants.AttributeObtainSeedsRequest.String(req.String()))
span.SetAttributes(constants.AttributeTaskID.String(req.TaskId))
defer func() {
if r := recover(); r != nil {
err = dferrors.Newf(base.Code_UnknownError, "obtain task(%s) seeds encounter an panic: %v", req.TaskId, r)
span.RecordError(err)
logger.WithTaskID(req.TaskId).Errorf("%v", err)
}
logger.Infof("seeds task %s result success: %t", req.TaskId, err == nil)
}()
// register task
pieceChan, err := css.taskMgr.Register(ctx, types.NewSeedTask(req.TaskId, req.Url, req.UrlMeta))
// register seed task
pieceChan, err := css.service.RegisterSeedTask(ctx, clientAddr, task.NewSeedTask(req.TaskId, req.Url, req.UrlMeta))
if err != nil {
if cdnerrors.IsResourcesLacked(err) {
if supervisor.IsResourcesLacked(err) {
err = dferrors.Newf(base.Code_ResourceLacked, "resources lacked for task(%s): %v", req.TaskId, err)
span.RecordError(err)
return err
@ -83,90 +90,113 @@ func (css *server) ObtainSeeds(ctx context.Context, req *cdnsystem.SeedRequest,
span.RecordError(err)
return err
}
peerID := idgen.CDNPeerID(css.cfg.AdvertiseIP)
peerID := idgen.CDNPeerID(css.config.AdvertiseIP)
hostID := idgen.CDNHostID(hostutils.FQDNHostname, int32(css.config.ListenPort))
for piece := range pieceChan {
psc <- &cdnsystem.PieceSeed{
pieceSeed := &cdnsystem.PieceSeed{
PeerId: peerID,
HostUuid: idgen.CDNHostID(hostutils.FQDNHostname, int32(css.cfg.ListenPort)),
HostUuid: hostID,
PieceInfo: &base.PieceInfo{
PieceNum: int32(piece.PieceNum),
RangeStart: piece.PieceRange.StartIndex,
RangeSize: piece.PieceLen,
PieceMd5: piece.PieceMd5,
PieceOffset: piece.OriginRange.StartIndex,
PieceStyle: base.PieceStyle(piece.PieceStyle),
PieceStyle: piece.PieceStyle,
},
Done: false,
ContentLength: source.UnknownSourceFileLen,
TotalPieceCount: task.UnknownTotalPieceCount,
}
}
task, err := css.taskMgr.Get(req.TaskId)
psc <- pieceSeed
jsonPiece, err := json.Marshal(pieceSeed)
if err != nil {
logger.Errorf("failed to json marshal seed piece: %v", err)
}
logger.Debugf("send piece seed: %s to client: %s", jsonPiece, clientAddr)
}
seedTask, err := css.service.GetSeedTask(req.TaskId)
if err != nil {
err = dferrors.Newf(base.Code_CDNError, "failed to get task(%s): %v", req.TaskId, err)
if task.IsTaskNotFound(err) {
err = dferrors.Newf(base.Code_CDNTaskNotFound, "failed to get task(%s): %v", req.TaskId, err)
span.RecordError(err)
return err
}
err = dferrors.Newf(base.Code_CDNError, "failed to get task(%s): %v", req.TaskId, err)
span.RecordError(err)
return err
}
if !task.IsSuccess() {
err = dferrors.Newf(base.Code_CDNTaskDownloadFail, "task(%s) status error , status: %s", req.TaskId, task.CdnStatus)
if !seedTask.IsSuccess() {
err = dferrors.Newf(base.Code_CDNTaskDownloadFail, "task(%s) status error , status: %s", req.TaskId, seedTask.CdnStatus)
span.RecordError(err)
return err
}
psc <- &cdnsystem.PieceSeed{
pieceSeed := &cdnsystem.PieceSeed{
PeerId: peerID,
HostUuid: idgen.CDNHostID(hostutils.FQDNHostname, int32(css.cfg.ListenPort)),
HostUuid: hostID,
Done: true,
ContentLength: task.SourceFileLength,
TotalPieceCount: task.PieceTotal,
ContentLength: seedTask.SourceFileLength,
TotalPieceCount: seedTask.TotalPieceCount,
}
psc <- pieceSeed
jsonPiece, err := json.Marshal(pieceSeed)
if err != nil {
logger.Errorf("failed to json marshal seed piece: %v", err)
}
logger.Debugf("send piece seed: %s to client: %s", jsonPiece, clientAddr)
return nil
}
func (css *server) GetPieceTasks(ctx context.Context, req *base.PieceTaskRequest) (piecePacket *base.PiecePacket, err error) {
func (css *Server) GetPieceTasks(ctx context.Context, req *base.PieceTaskRequest) (piecePacket *base.PiecePacket, err error) {
var span trace.Span
ctx, span = tracer.Start(ctx, config.SpanGetPieceTasks, trace.WithSpanKind(trace.SpanKindServer))
_, span = tracer.Start(ctx, constants.SpanGetPieceTasks, trace.WithSpanKind(trace.SpanKindServer))
defer span.End()
span.SetAttributes(config.AttributeGetPieceTasksRequest.String(req.String()))
span.SetAttributes(config.AttributeTaskID.String(req.TaskId))
span.SetAttributes(constants.AttributeGetPieceTasksRequest.String(req.String()))
span.SetAttributes(constants.AttributeTaskID.String(req.TaskId))
logger.Infof("get piece tasks: %#v", req)
defer func() {
if r := recover(); r != nil {
err = dferrors.Newf(base.Code_UnknownError, "get task(%s) piece tasks encounter an panic: %v", req.TaskId, r)
span.RecordError(err)
logger.WithTaskID(req.TaskId).Errorf("%v", err)
logger.WithTaskID(req.TaskId).Errorf("get piece tasks failed: %v", err)
}
logger.WithTaskID(req.TaskId).Infof("get piece tasks result success: %t", err == nil)
}()
logger.Infof("get piece tasks: %#v", req)
task, err := css.taskMgr.Get(req.TaskId)
seedTask, err := css.service.GetSeedTask(req.TaskId)
if err != nil {
if cdnerrors.IsDataNotFound(err) {
err = dferrors.Newf(base.Code_CDNTaskNotFound, "failed to get task(%s) from cdn: %v", req.TaskId, err)
if task.IsTaskNotFound(err) {
err = dferrors.Newf(base.Code_CDNTaskNotFound, "failed to get task(%s): %v", req.TaskId, err)
span.RecordError(err)
return nil, err
}
err = dferrors.Newf(base.Code_CDNError, "failed to get task(%s) from cdn: %v", req.TaskId, err)
err = dferrors.Newf(base.Code_CDNError, "failed to get task(%s): %v", req.TaskId, err)
span.RecordError(err)
return nil, err
}
if task.IsError() {
err = dferrors.Newf(base.Code_CDNTaskDownloadFail, "fail to download task(%s), cdnStatus: %s", task.TaskID, task.CdnStatus)
if seedTask.IsError() {
err = dferrors.Newf(base.Code_CDNTaskDownloadFail, "task(%s) status is FAIL, cdnStatus: %s", seedTask.ID, seedTask.CdnStatus)
span.RecordError(err)
return nil, err
}
pieces, err := css.taskMgr.GetPieces(ctx, req.TaskId)
pieces, err := css.service.GetSeedPieces(req.TaskId)
if err != nil {
err = dferrors.Newf(base.Code_CDNError, "failed to get pieces of task(%s) from cdn: %v", task.TaskID, err)
err = dferrors.Newf(base.Code_CDNError, "failed to get pieces of task(%s) from cdn: %v", seedTask.ID, err)
span.RecordError(err)
return nil, err
}
pieceInfos := make([]*base.PieceInfo, 0)
pieceInfos := make([]*base.PieceInfo, 0, len(pieces))
var count uint32 = 0
for _, piece := range pieces {
if piece.PieceNum >= req.StartNum && (count < req.Limit || req.Limit == 0) {
if piece.PieceNum >= req.StartNum && (count < req.Limit || req.Limit <= 0) {
p := &base.PieceInfo{
PieceNum: int32(piece.PieceNum),
RangeStart: piece.PieceRange.StartIndex,
RangeSize: piece.PieceLen,
PieceMd5: piece.PieceMd5,
PieceOffset: piece.OriginRange.StartIndex,
PieceStyle: base.PieceStyle(piece.PieceStyle),
PieceStyle: piece.PieceStyle,
}
pieceInfos = append(pieceInfos, p)
count++
@ -175,12 +205,43 @@ func (css *server) GetPieceTasks(ctx context.Context, req *base.PieceTaskRequest
pp := &base.PiecePacket{
TaskId: req.TaskId,
DstPid: req.DstPid,
DstAddr: fmt.Sprintf("%s:%d", css.cfg.AdvertiseIP, css.cfg.DownloadPort),
DstAddr: fmt.Sprintf("%s:%d", css.config.AdvertiseIP, css.config.DownloadPort),
PieceInfos: pieceInfos,
TotalPiece: task.PieceTotal,
ContentLength: task.SourceFileLength,
PieceMd5Sign: task.PieceMd5Sign,
TotalPiece: seedTask.TotalPieceCount,
ContentLength: seedTask.SourceFileLength,
PieceMd5Sign: seedTask.PieceMd5Sign,
}
span.SetAttributes(config.AttributePiecePacketResult.String(pp.String()))
span.SetAttributes(constants.AttributePiecePacketResult.String(pp.String()))
return pp, nil
}
func (css *Server) ListenAndServe() error {
// Generate GRPC listener
lis, _, err := rpc.ListenWithPortRange(css.config.AdvertiseIP, css.config.ListenPort, css.config.ListenPort)
if err != nil {
return err
}
//Started GRPC server
logger.Infof("====starting grpc server at %s://%s====", lis.Addr().Network(), lis.Addr().String())
return css.Server.Serve(lis)
}
const (
gracefulStopTimeout = 10 * time.Second
)
func (css *Server) Shutdown() error {
defer logger.Infof("====stopped rpc server====")
stopped := make(chan struct{})
go func() {
css.Server.GracefulStop()
close(stopped)
}()
select {
case <-time.After(gracefulStopTimeout):
css.Server.Stop()
case <-stopped:
}
return nil
}

View File

@ -1,122 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rpcserver
import (
"context"
"reflect"
"testing"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
)
func TestCdnSeedServer_GetPieceTasks(t *testing.T) {
type fields struct {
taskMgr supervisor.SeedTaskMgr
cfg *config.Config
}
type args struct {
ctx context.Context
req *base.PieceTaskRequest
}
tests := []struct {
name string
fields fields
args args
wantPiecePacket *base.PiecePacket
wantErr bool
}{
// TODO: Add test cases.
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
css := &server{
taskMgr: tt.fields.taskMgr,
cfg: tt.fields.cfg,
}
gotPiecePacket, err := css.GetPieceTasks(tt.args.ctx, tt.args.req)
if (err != nil) != tt.wantErr {
t.Errorf("GetPieceTasks() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotPiecePacket, tt.wantPiecePacket) {
t.Errorf("GetPieceTasks() gotPiecePacket = %v, want %v", gotPiecePacket, tt.wantPiecePacket)
}
})
}
}
func TestCdnSeedServer_ObtainSeeds(t *testing.T) {
type fields struct {
taskMgr supervisor.SeedTaskMgr
cfg *config.Config
}
type args struct {
ctx context.Context
req *cdnsystem.SeedRequest
psc chan<- *cdnsystem.PieceSeed
}
tests := []struct {
name string
fields fields
args args
wantErr bool
}{
// TODO: Add test cases.
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
css := &server{
taskMgr: tt.fields.taskMgr,
cfg: tt.fields.cfg,
}
if err := css.ObtainSeeds(tt.args.ctx, tt.args.req, tt.args.psc); (err != nil) != tt.wantErr {
t.Errorf("ObtainSeeds() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestNewCdnSeedServer(t *testing.T) {
type args struct {
cfg *config.Config
taskMgr supervisor.SeedTaskMgr
}
tests := []struct {
name string
args args
want *server
wantErr bool
}{
// TODO: Add test cases.
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := New(tt.args.cfg, tt.args.taskMgr)
if (err != nil) != tt.wantErr {
t.Errorf("NewCdnSeedServer() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("NewCdnSeedServer() got = %v, want %v", got, tt.want)
}
})
}
}

View File

@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//go:generate mockgen -destination ./mock_driver.go -package storedriver d7y.io/dragonfly/v2/cdn/storedriver Driver
package storedriver
@ -21,16 +22,53 @@ import (
"fmt"
"io"
"path/filepath"
"strings"
"time"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors"
"d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/pkg/unit"
"d7y.io/dragonfly/v2/pkg/util/fileutils"
"d7y.io/dragonfly/v2/pkg/util/stringutils"
)
// DriverBuilder is a function that creates a new storage driver plugin instant with the giving Config.
type DriverBuilder func(cfg *Config) (Driver, error)
// Register defines an interface to register a driver with specified name.
// All drivers should call this function to register itself to the driverFactory.
func Register(name string, builder DriverBuilder) error {
name = strings.ToLower(name)
// plugin builder
var f = func(conf interface{}) (plugins.Plugin, error) {
cfg := &Config{}
if err := mapstructure.Decode(conf, cfg); err != nil {
return nil, fmt.Errorf("parse config: %v", err)
}
// prepare the base dir
if !filepath.IsAbs(cfg.BaseDir) {
return nil, fmt.Errorf("not absolute path: %s", cfg.BaseDir)
}
if err := fileutils.MkdirAll(cfg.BaseDir); err != nil {
return nil, fmt.Errorf("create baseDir %s: %v", cfg.BaseDir, err)
}
return newDriverPlugin(name, builder, cfg)
}
return plugins.RegisterPluginBuilder(plugins.StorageDriverPlugin, name, f)
}
// Get a store from manager with specified name.
func Get(name string) (Driver, bool) {
v, ok := plugins.GetPlugin(plugins.StorageDriverPlugin, strings.ToLower(name))
if !ok {
return nil, false
}
return v.(*driverPlugin).instance, true
}
// Driver defines an interface to manage the data stored in the driver.
//
// NOTE:
@ -44,7 +82,7 @@ type Driver interface {
// Otherwise, just return the data which starts from raw.offset and the length is raw.length.
Get(raw *Raw) (io.ReadCloser, error)
// Get data from the storage based on raw information.
// GetBytes data from the storage based on raw information.
// The data should be returned in bytes.
// If the length<=0, the storage driver should return all data from the raw.offset.
// Otherwise, just return the data which starts from raw.offset and the length is raw.length.
@ -68,33 +106,33 @@ type Driver interface {
// If not, return the ErrFileNotExist.
Stat(raw *Raw) (*StorageInfo, error)
// GetFreeSpace returns the available disk space in B.
// GetFreeSpace returns the free disk space in B.
GetFreeSpace() (unit.Bytes, error)
// GetTotalAndFreeSpace
// GetTotalAndFreeSpace returns the total and free disk space in B.
GetTotalAndFreeSpace() (unit.Bytes, unit.Bytes, error)
// GetTotalSpace
// GetTotalSpace returns the total disk space in B.
GetTotalSpace() (unit.Bytes, error)
// Walk walks the file tree rooted at root which determined by raw.Bucket and raw.Key,
// calling walkFn for each file or directory in the tree, including root.
Walk(raw *Raw) error
// CreateBaseDir
// CreateBaseDir create base dir
CreateBaseDir() error
// GetPath
// GetPath get path of raw
GetPath(raw *Raw) string
// MoveFile
// MoveFile rename src to dst
MoveFile(src string, dst string) error
// Exits
// Exits check if raw exists
Exits(raw *Raw) bool
// GetHomePath
GetHomePath() string
// GetBaseDir returns base dir
GetBaseDir() string
}
type Config struct {
@ -163,12 +201,10 @@ func (s *driverPlugin) Name() string {
return s.name
}
// GetTotalSpace
func (s *driverPlugin) GetTotalSpace() (unit.Bytes, error) {
return s.instance.GetTotalSpace()
}
// CreateBaseDir
func (s *driverPlugin) CreateBaseDir() error {
return s.instance.CreateBaseDir()
}
@ -225,7 +261,7 @@ func (s *driverPlugin) PutBytes(raw *Raw, data []byte) error {
func (s *driverPlugin) Remove(raw *Raw) error {
if raw == nil || (stringutils.IsBlank(raw.Key) &&
stringutils.IsBlank(raw.Bucket)) {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "cannot set both key and bucket empty at the same time")
return errors.New("both key and bucket are empty")
}
return s.instance.Remove(raw)
}
@ -259,13 +295,13 @@ func (s *driverPlugin) GetFreeSpace() (unit.Bytes, error) {
return s.instance.GetFreeSpace()
}
func (s *driverPlugin) GetHomePath() string {
return s.instance.GetHomePath()
func (s *driverPlugin) GetBaseDir() string {
return s.instance.GetBaseDir()
}
func checkEmptyKey(raw *Raw) error {
if raw == nil || stringutils.IsBlank(raw.Key) {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "raw key is empty")
return errors.New("raw key is empty")
}
return nil
}

View File

@ -22,7 +22,6 @@ import (
"os"
"path/filepath"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors"
"d7y.io/dragonfly/v2/cdn/storedriver"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/synclock"
@ -71,7 +70,7 @@ func (ds *driver) GetTotalSpace() (unit.Bytes, error) {
return fileutils.GetTotalSpace(path)
}
func (ds *driver) GetHomePath() string {
func (ds *driver) GetBaseDir() string {
return ds.BaseDir
}
@ -370,9 +369,6 @@ func (ds *driver) statPath(bucket, key string) (string, os.FileInfo, error) {
filePath := filepath.Join(ds.BaseDir, bucket, key)
f, err := os.Stat(filePath)
if err != nil {
if os.IsNotExist(err) {
return "", nil, cdnerrors.ErrFileNotExist{File: "filePath"}
}
return "", nil, err
}
return filePath, f, nil

View File

@ -27,7 +27,6 @@ import (
"github.com/stretchr/testify/suite"
"d7y.io/dragonfly/v2/cdn/errors"
"d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/pkg/unit"
@ -74,8 +73,8 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
putRaw *storedriver.Raw
getRaw *storedriver.Raw
data []byte
getErrCheck func(error) bool
putErrCheck func(error) bool
wantGetErr bool
wantPutErr bool
expected string
}{
{
@ -89,8 +88,8 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
Key: "foo1",
},
data: []byte("hello foo"),
putErrCheck: isNil,
getErrCheck: isNil,
wantPutErr: false,
wantGetErr: false,
expected: "hello foo",
}, {
name: "get specific length",
@ -104,8 +103,8 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
Offset: 0,
Length: 5,
},
putErrCheck: isNil,
getErrCheck: isNil,
wantPutErr: false,
wantGetErr: false,
data: []byte("hello foo"),
expected: "hello",
}, {
@ -120,8 +119,8 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
Offset: 0,
Length: 0,
},
putErrCheck: isNil,
getErrCheck: isNil,
wantPutErr: false,
wantGetErr: false,
data: []byte("hello foo"),
expected: "hello foo",
}, {
@ -136,8 +135,8 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
Offset: 0,
Length: -1,
},
putErrCheck: isNil,
getErrCheck: errors.IsInvalidValue,
wantPutErr: false,
wantGetErr: true,
data: []byte("hello foo"),
expected: "",
}, {
@ -151,8 +150,8 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
Bucket: "GetPut",
Key: "foo5",
},
putErrCheck: isNil,
getErrCheck: isNil,
wantPutErr: false,
wantGetErr: false,
data: []byte("hello foo"),
expected: "hello",
}, {
@ -167,8 +166,8 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
Offset: -1,
},
data: []byte("hello foo"),
putErrCheck: isNil,
getErrCheck: errors.IsInvalidValue,
wantPutErr: false,
wantGetErr: true,
expected: "",
}, {
name: "put/get data from specific offset",
@ -183,8 +182,8 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
Offset: 3,
},
data: []byte("hello foo"),
putErrCheck: isNil,
getErrCheck: isNil,
wantPutErr: false,
wantGetErr: false,
expected: "hello foo",
},
}
@ -193,10 +192,10 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
s.Run(v.name, func() {
// put
err := s.PutBytes(v.putRaw, v.data)
s.True(v.putErrCheck(err))
s.True(v.wantPutErr == (err != nil))
// get
result, err := s.GetBytes(v.getRaw)
s.True(v.getErrCheck(err))
s.True(v.wantGetErr == (err != nil))
s.Equal(v.expected, string(result))
// stat
s.checkStat(v.getRaw)
@ -212,7 +211,7 @@ func (s *LocalDriverTestSuite) TestGetPut() {
putRaw *storedriver.Raw
getRaw *storedriver.Raw
data io.Reader
getErrCheck func(error) bool
wantGetErr bool
expected string
}{
{
@ -224,7 +223,7 @@ func (s *LocalDriverTestSuite) TestGetPut() {
Key: "foo0.meta",
},
data: strings.NewReader("hello meta file"),
getErrCheck: isNil,
wantGetErr: false,
expected: "hello meta file",
}, {
putRaw: &storedriver.Raw{
@ -234,7 +233,7 @@ func (s *LocalDriverTestSuite) TestGetPut() {
Key: "foo1.meta",
},
data: strings.NewReader("hello meta file"),
getErrCheck: isNil,
wantGetErr: false,
expected: "hello meta file",
}, {
putRaw: &storedriver.Raw{
@ -245,7 +244,7 @@ func (s *LocalDriverTestSuite) TestGetPut() {
Key: "foo2.meta",
},
data: strings.NewReader("hello meta file"),
getErrCheck: isNil,
wantGetErr: false,
expected: "hello ",
}, {
putRaw: &storedriver.Raw{
@ -257,7 +256,7 @@ func (s *LocalDriverTestSuite) TestGetPut() {
Length: 5,
},
data: strings.NewReader("hello meta file"),
getErrCheck: isNil,
wantGetErr: false,
expected: "llo m",
}, {
putRaw: &storedriver.Raw{
@ -268,7 +267,7 @@ func (s *LocalDriverTestSuite) TestGetPut() {
Offset: 2,
Length: -1,
},
getErrCheck: errors.IsInvalidValue,
wantGetErr: true,
data: strings.NewReader("hello meta file"),
expected: "",
}, {
@ -280,7 +279,7 @@ func (s *LocalDriverTestSuite) TestGetPut() {
Offset: 30,
Length: 5,
},
getErrCheck: errors.IsInvalidValue,
wantGetErr: true,
data: strings.NewReader("hello meta file"),
expected: "",
},
@ -293,7 +292,7 @@ func (s *LocalDriverTestSuite) TestGetPut() {
s.Nil(err)
// get
r, err := s.Get(v.getRaw)
s.True(v.getErrCheck(err))
s.True(v.wantGetErr == (err != nil))
if err == nil {
result, err := io.ReadAll(r)
s.Nil(err)
@ -507,8 +506,8 @@ func (s *LocalDriverTestSuite) TestLocalDriverExitsAndRemove() {
s.False(s.Exits(raw))
}
func (s *LocalDriverTestSuite) TestLocalDriverGetHomePath() {
s.Equal(filepath.Join(s.workHome, "repo"), s.GetHomePath())
func (s *LocalDriverTestSuite) TestLocalDriverGetBaseDir() {
s.Equal(filepath.Join(s.workHome, "repo"), s.GetBaseDir())
}
func (s *LocalDriverTestSuite) TestLocalDriverGetPath() {
@ -522,7 +521,7 @@ func (s *LocalDriverTestSuite) TestLocalDriverGetPath() {
func (s *LocalDriverTestSuite) TestLocalDriverGetTotalAndFreeSpace() {
fs := syscall.Statfs_t{}
s.Nil(syscall.Statfs(s.GetHomePath(), &fs))
s.Nil(syscall.Statfs(s.GetBaseDir(), &fs))
total := unit.Bytes(fs.Blocks * uint64(fs.Bsize))
free := unit.Bytes(fs.Bavail * uint64(fs.Bsize))
got, got1, err := s.GetTotalAndFreeSpace()
@ -559,7 +558,7 @@ func (s *LocalDriverTestSuite) checkStat(raw *storedriver.Raw) {
info, err := s.Stat(raw)
s.Equal(isNil(err), true)
pathTemp := filepath.Join(s.Driver.GetHomePath(), raw.Bucket, raw.Key)
pathTemp := filepath.Join(s.Driver.GetBaseDir(), raw.Bucket, raw.Key)
f, _ := os.Stat(pathTemp)
s.EqualValues(info, &storedriver.StorageInfo{
@ -575,7 +574,7 @@ func (s *LocalDriverTestSuite) checkRemove(raw *storedriver.Raw) {
s.Equal(isNil(err), true)
_, err = s.Stat(raw)
s.Equal(errors.IsFileNotExist(err), true)
s.Equal(os.IsNotExist(err), true)
}
func isNil(err error) bool {

View File

@ -78,6 +78,20 @@ func (mr *MockDriverMockRecorder) Get(arg0 interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDriver)(nil).Get), arg0)
}
// GetBaseDir mocks base method.
func (m *MockDriver) GetBaseDir() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetBaseDir")
ret0, _ := ret[0].(string)
return ret0
}
// GetBaseDir indicates an expected call of GetBaseDir.
func (mr *MockDriverMockRecorder) GetBaseDir() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBaseDir", reflect.TypeOf((*MockDriver)(nil).GetBaseDir))
}
// GetBytes mocks base method.
func (m *MockDriver) GetBytes(arg0 *Raw) ([]byte, error) {
m.ctrl.T.Helper()
@ -108,20 +122,6 @@ func (mr *MockDriverMockRecorder) GetFreeSpace() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFreeSpace", reflect.TypeOf((*MockDriver)(nil).GetFreeSpace))
}
// GetHomePath mocks base method.
func (m *MockDriver) GetHomePath() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetHomePath")
ret0, _ := ret[0].(string)
return ret0
}
// GetHomePath indicates an expected call of GetHomePath.
func (mr *MockDriverMockRecorder) GetHomePath() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHomePath", reflect.TypeOf((*MockDriver)(nil).GetHomePath))
}
// GetPath mocks base method.
func (m *MockDriver) GetPath(arg0 *Raw) string {
m.ctrl.T.Helper()

View File

@ -1,63 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package storedriver
import (
"fmt"
"path/filepath"
"strings"
"github.com/mitchellh/mapstructure"
"d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/pkg/util/fileutils"
)
// DriverBuilder is a function that creates a new storage driver plugin instant with the giving Config.
type DriverBuilder func(cfg *Config) (Driver, error)
// Register defines an interface to register a driver with specified name.
// All drivers should call this function to register itself to the driverFactory.
func Register(name string, builder DriverBuilder) error {
name = strings.ToLower(name)
// plugin builder
var f = func(conf interface{}) (plugins.Plugin, error) {
cfg := &Config{}
if err := mapstructure.Decode(conf, cfg); err != nil {
return nil, fmt.Errorf("parse config: %v", err)
}
// prepare the base dir
if !filepath.IsAbs(cfg.BaseDir) {
return nil, fmt.Errorf("not absolute path: %s", cfg.BaseDir)
}
if err := fileutils.MkdirAll(cfg.BaseDir); err != nil {
return nil, fmt.Errorf("create baseDir %s: %v", cfg.BaseDir, err)
}
return newDriverPlugin(name, builder, cfg)
}
return plugins.RegisterPluginBuilder(plugins.StorageDriverPlugin, name, f)
}
// Get a store from manager with specified name.
func Get(name string) (Driver, bool) {
v, ok := plugins.GetPlugin(plugins.StorageDriverPlugin, strings.ToLower(name))
if !ok {
return nil, false
}
return v.(*driverPlugin).instance, true
}

View File

@ -93,7 +93,7 @@ func (m mockDriver) Exits(_ *Raw) bool {
panic("implement me")
}
func (m mockDriver) GetHomePath() string {
func (m mockDriver) GetBaseDir() string {
panic("implement me")
}

View File

@ -18,25 +18,23 @@ package storedriver
import (
"github.com/pkg/errors"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors"
)
// CheckGetRaw check before get Raw
func CheckGetRaw(raw *Raw, fileLength int64) error {
// if raw.Length < 0 ,read All data
if raw.Offset < 0 {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "the offset: %d is a negative integer", raw.Offset)
return errors.Errorf("the offset: %d is a negative integer", raw.Offset)
}
if raw.Length < 0 {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "the length: %d is a negative integer", raw.Length)
return errors.Errorf("the length: %d is a negative integer", raw.Length)
}
if fileLength < raw.Offset {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "the offset: %d is lager than the file length: %d", raw.Offset, fileLength)
return errors.Errorf("the offset: %d is lager than the file length: %d", raw.Offset, fileLength)
}
if fileLength < (raw.Offset + raw.Length) {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "the offset: %d and length: %d is lager than the file length: %d", raw.Offset, raw.Length, fileLength)
return errors.Errorf("the offset: %d and length: %d is lager than the file length: %d", raw.Offset, raw.Length, fileLength)
}
return nil
}
@ -44,10 +42,10 @@ func CheckGetRaw(raw *Raw, fileLength int64) error {
// CheckPutRaw check before put Raw
func CheckPutRaw(raw *Raw) error {
if raw.Offset < 0 {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "the offset: %d is a negative integer", raw.Offset)
return errors.Errorf("the offset: %d is a negative integer", raw.Offset)
}
if raw.Length < 0 {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "the length: %d is a negative integer", raw.Length)
return errors.Errorf("the length: %d is a negative integer", raw.Length)
}
return nil
}
@ -55,7 +53,7 @@ func CheckPutRaw(raw *Raw) error {
// CheckTrunc check before trunc file content
func CheckTrunc(raw *Raw) error {
if raw.Trunc && raw.TruncSize < 0 {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "the truncSize: %d is a negative integer", raw.Length)
return errors.Errorf("the truncSize: %d is a negative integer", raw.Length)
}
return nil
}

View File

@ -27,81 +27,68 @@ import (
"github.com/pkg/errors"
"go.opentelemetry.io/otel/trace"
"d7y.io/dragonfly/v2/cdn/config"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors"
"d7y.io/dragonfly/v2/cdn/constants"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/util/digestutils"
"d7y.io/dragonfly/v2/pkg/util/stringutils"
)
// cacheDetector detect task cache
type cacheDetector struct {
cacheDataManager *cacheDataManager
metadataManager *metadataManager
storageManager storage.Manager
}
// cacheResult cache result of detect
type cacheResult struct {
breakPoint int64 // break-point of task file
pieceMetaRecords []*storage.PieceMetaRecord // piece meta data records of task
fileMetadata *storage.FileMetadata // file meta data of task
}
func (s *cacheResult) String() string {
return fmt.Sprintf("{breakNum: %d, pieceMetaRecords: %#v, fileMetadata: %#v}", s.breakPoint, s.pieceMetaRecords, s.fileMetadata)
BreakPoint int64 `json:"break_point"` // break-point of task file
PieceMetaRecords []*storage.PieceMetaRecord `json:"piece_meta_records"` // piece metadata records of task
FileMetadata *storage.FileMetadata `json:"file_metadata"` // file meta data of task
}
// newCacheDetector create a new cache detector
func newCacheDetector(cacheDataManager *cacheDataManager) *cacheDetector {
func newCacheDetector(metadataManager *metadataManager, storageManager storage.Manager) *cacheDetector {
return &cacheDetector{
cacheDataManager: cacheDataManager,
metadataManager: metadataManager,
storageManager: storageManager,
}
}
func (cd *cacheDetector) detectCache(ctx context.Context, task *types.SeedTask, fileDigest hash.Hash) (result *cacheResult, err error) {
//err := cd.cacheStore.CreateUploadLink(ctx, task.TaskId)
//if err != nil {
// return nil, errors.Wrapf(err, "failed to create upload symbolic link")
//}
func (cd *cacheDetector) detectCache(ctx context.Context, seedTask *task.SeedTask, fileDigest hash.Hash) (result *cacheResult, err error) {
var span trace.Span
ctx, span = tracer.Start(ctx, config.SpanDetectCache)
ctx, span = tracer.Start(ctx, constants.SpanDetectCache)
defer span.End()
defer func() {
span.SetAttributes(config.AttributeDetectCacheResult.String(result.String()))
}()
result, err = cd.doDetect(ctx, task, fileDigest)
result, err = cd.doDetect(ctx, seedTask, fileDigest)
if err != nil {
task.Log().Infof("failed to detect cache, reset cache: %v", err)
metadata, err := cd.resetCache(task)
if err == nil {
result = &cacheResult{
fileMetadata: metadata,
metadata, err := cd.resetCache(seedTask)
if err != nil {
return nil, errors.Wrapf(err, "reset cache")
}
return result, nil
return &cacheResult{
FileMetadata: metadata,
}, nil
}
return result, err
}
if err := cd.cacheDataManager.updateAccessTime(task.TaskID, getCurrentTimeMillisFunc()); err != nil {
task.Log().Warnf("failed to update task access time ")
if err := cd.metadataManager.updateAccessTime(seedTask.ID, getCurrentTimeMillisFunc()); err != nil {
seedTask.Log().Warnf("failed to update task access time ")
}
return result, nil
}
// doDetect the actual detect action which detects file metadata and pieces metadata of specific task
func (cd *cacheDetector) doDetect(ctx context.Context, task *types.SeedTask, fileDigest hash.Hash) (result *cacheResult, err error) {
span := trace.SpanFromContext(ctx)
fileMetadata, err := cd.cacheDataManager.readFileMetadata(task.TaskID)
// doDetect do the actual detect action which detects file metadata and pieces metadata of specific task
func (cd *cacheDetector) doDetect(ctx context.Context, seedTask *task.SeedTask, fileDigest hash.Hash) (*cacheResult, error) {
if _, err := cd.storageManager.StatDownloadFile(seedTask.ID); err != nil {
return nil, err
}
fileMetadata, err := cd.metadataManager.readFileMetadata(seedTask.ID)
if err != nil {
span.RecordError(err)
return nil, errors.Wrapf(err, "read file meta data of task %s", task.TaskID)
return nil, errors.Wrapf(err, "read file metadata")
}
span.SetAttributes()
if err := checkSameFile(task, fileMetadata); err != nil {
return nil, errors.Wrapf(err, "check same file")
if ok, cause := checkMetadata(seedTask, fileMetadata); !ok {
return nil, errors.Errorf("fileMetadata is inconsistent with task: %s", cause)
}
checkExpiredRequest, err := source.NewRequestWithContext(ctx, task.URL, task.Header)
checkExpiredRequest, err := source.NewRequestWithContext(ctx, seedTask.RawURL, seedTask.Header)
if err != nil {
return nil, errors.Wrapf(err, "create request")
}
@ -111,21 +98,21 @@ func (cd *cacheDetector) doDetect(ctx context.Context, task *types.SeedTask, fil
})
if err != nil {
// If the check fails, the resource is regarded as not expired to prevent the source from being knocked down
task.Log().Warnf("failed to check whether the source is expired. To prevent the source from being suspended, "+
seedTask.Log().Warnf("failed to check whether the source is expired. To prevent the source from being suspended, "+
"assume that the source is not expired: %v", err)
}
task.Log().Debugf("task resource expired result: %t", expired)
seedTask.Log().Debugf("task resource expired result: %t", expired)
if expired {
return nil, errors.Errorf("resource %s has expired", task.TaskURL)
return nil, errors.Errorf("resource %s has expired", seedTask.TaskURL)
}
// not expired
if fileMetadata.Finish {
// quickly detect the cache situation through the metadata
return cd.detectByReadMetaFile(task.TaskID, fileMetadata)
return cd.detectByReadMetaFile(seedTask.ID, fileMetadata)
}
// check if the resource supports range request. if so,
// detect the cache situation by reading piece meta and data file
checkSupportRangeRequest, err := source.NewRequestWithContext(ctx, task.URL, task.Header)
checkSupportRangeRequest, err := source.NewRequestWithContext(ctx, seedTask.RawURL, seedTask.Header)
if err != nil {
return nil, errors.Wrapf(err, "create check support range request")
}
@ -135,61 +122,58 @@ func (cd *cacheDetector) doDetect(ctx context.Context, task *types.SeedTask, fil
return nil, errors.Wrap(err, "check if support range")
}
if !supportRange {
return nil, errors.Errorf("resource %s is not support range request", task.URL)
return nil, errors.Errorf("resource %s is not support range request", seedTask.TaskURL)
}
return cd.detectByReadFile(task.TaskID, fileMetadata, fileDigest)
return cd.detectByReadFile(seedTask.ID, fileMetadata, fileDigest)
}
// parseByReadMetaFile detect cache by read meta and pieceMeta files of task
// detectByReadMetaFile detect cache by read metadata and pieceMeta files of specific task
func (cd *cacheDetector) detectByReadMetaFile(taskID string, fileMetadata *storage.FileMetadata) (*cacheResult, error) {
if !fileMetadata.Success {
return nil, fmt.Errorf("success flag of taskID %s is false", taskID)
return nil, errors.New("metadata success flag is false")
}
pieceMetaRecords, err := cd.cacheDataManager.readAndCheckPieceMetaRecords(taskID, fileMetadata.PieceMd5Sign)
md5Sign, pieceMetaRecords, err := cd.metadataManager.getPieceMd5Sign(taskID)
if err != nil {
return nil, errors.Wrapf(err, "check piece meta integrity")
return nil, errors.Wrap(err, "get pieces md5 sign")
}
if fileMetadata.TotalPieceCount > 0 && len(pieceMetaRecords) != int(fileMetadata.TotalPieceCount) {
err := cdnerrors.ErrInconsistentValues{Expected: fileMetadata.TotalPieceCount, Actual: len(pieceMetaRecords)}
return nil, errors.Wrapf(err, "compare file piece count")
return nil, errors.Errorf("total piece count is inconsistent, expected is %d, but got %d", fileMetadata.TotalPieceCount, len(pieceMetaRecords))
}
storageInfo, err := cd.cacheDataManager.statDownloadFile(taskID)
if fileMetadata.PieceMd5Sign != "" && md5Sign != fileMetadata.PieceMd5Sign {
return nil, errors.Errorf("piece md5 sign is inconsistent, expected is %s, but got %s", fileMetadata.PieceMd5Sign, md5Sign)
}
storageInfo, err := cd.storageManager.StatDownloadFile(taskID)
if err != nil {
return nil, errors.Wrapf(err, "get cdn file length")
return nil, errors.Wrap(err, "stat download file info")
}
// check file data integrity by file size
if fileMetadata.CdnFileLength != storageInfo.Size {
err := cdnerrors.ErrInconsistentValues{
Expected: fileMetadata.CdnFileLength,
Actual: storageInfo.Size,
}
return nil, errors.Wrapf(err, "compare file cdn file length")
return nil, errors.Errorf("file size is inconsistent, expected is %d, but got %d", fileMetadata.CdnFileLength, storageInfo.Size)
}
// TODO For hybrid storage mode, synchronize disk data to memory
return &cacheResult{
breakPoint: -1,
pieceMetaRecords: pieceMetaRecords,
fileMetadata: fileMetadata,
BreakPoint: -1,
PieceMetaRecords: pieceMetaRecords,
FileMetadata: fileMetadata,
}, nil
}
// parseByReadFile detect cache by read pieceMeta and data files of task
func (cd *cacheDetector) detectByReadFile(taskID string, metadata *storage.FileMetadata, fileDigest hash.Hash) (*cacheResult, error) {
reader, err := cd.cacheDataManager.readDownloadFile(taskID)
reader, err := cd.storageManager.ReadDownloadFile(taskID)
if err != nil {
return nil, errors.Wrapf(err, "read download data file")
}
defer reader.Close()
tempRecords, err := cd.cacheDataManager.readPieceMetaRecords(taskID)
tempRecords, err := cd.metadataManager.readPieceMetaRecords(taskID)
if err != nil {
return nil, errors.Wrapf(err, "read piece meta records")
}
// sort piece meta records by pieceNum
sort.Slice(tempRecords, func(i, j int) bool {
return tempRecords[i].PieceNum < tempRecords[j].PieceNum
})
var breakPoint uint64 = 0
var breakPoint int64 = 0
pieceMetaRecords := make([]*storage.PieceMetaRecord, 0, len(tempRecords))
for index := range tempRecords {
if uint32(index) != tempRecords[index].PieceNum {
@ -197,14 +181,14 @@ func (cd *cacheDetector) detectByReadFile(taskID string, metadata *storage.FileM
}
// read content TODO concurrent by multi-goroutine
if err := checkPieceContent(reader, tempRecords[index], fileDigest); err != nil {
logger.WithTaskID(taskID).Errorf("read content of pieceNum %d failed: %v", tempRecords[index].PieceNum, err)
logger.WithTaskID(taskID).Errorf("check content of pieceNum %d failed: %v", tempRecords[index].PieceNum, err)
break
}
breakPoint = tempRecords[index].OriginRange.EndIndex + 1
breakPoint = int64(tempRecords[index].OriginRange.EndIndex + 1)
pieceMetaRecords = append(pieceMetaRecords, tempRecords[index])
}
if len(tempRecords) != len(pieceMetaRecords) {
if err := cd.cacheDataManager.writePieceMetaRecords(taskID, pieceMetaRecords); err != nil {
if err := cd.metadataManager.writePieceMetaRecords(taskID, pieceMetaRecords); err != nil {
return nil, errors.Wrapf(err, "write piece meta records failed")
}
}
@ -217,54 +201,66 @@ func (cd *cacheDetector) detectByReadFile(taskID string, metadata *storage.FileM
// fileMd5: fileMd5,
// }, nil
//}
// TODO 整理数据文件 truncate breakpoint之后的数据内容
// TODO 整理数据文件 truncate breakpoint 之后的数据内容
return &cacheResult{
breakPoint: int64(breakPoint),
pieceMetaRecords: pieceMetaRecords,
fileMetadata: metadata,
BreakPoint: breakPoint,
PieceMetaRecords: pieceMetaRecords,
FileMetadata: metadata,
}, nil
}
// resetCache
func (cd *cacheDetector) resetCache(task *types.SeedTask) (*storage.FileMetadata, error) {
err := cd.cacheDataManager.resetRepo(task)
// resetCache file
func (cd *cacheDetector) resetCache(seedTask *task.SeedTask) (*storage.FileMetadata, error) {
err := cd.storageManager.ResetRepo(seedTask)
if err != nil {
return nil, err
}
// initialize meta data file
return cd.cacheDataManager.writeFileMetadataByTask(task)
return cd.metadataManager.writeFileMetadataByTask(seedTask)
}
/*
helper functions
*/
// checkSameFile check whether meta file is modified
func checkSameFile(task *types.SeedTask, metadata *storage.FileMetadata) error {
if task == nil || metadata == nil {
return errors.Errorf("task or metadata is nil, task: %v, metadata: %v", task, metadata)
// checkMetadata check whether meta file is modified
func checkMetadata(seedTask *task.SeedTask, metadata *storage.FileMetadata) (bool, string) {
if seedTask == nil || metadata == nil {
return false, fmt.Sprintf("task or metadata is nil, task: %v, metadata: %v", seedTask, metadata)
}
if metadata.PieceSize != task.PieceSize {
return errors.Errorf("meta piece size(%d) is not equals with task piece size(%d)", metadata.PieceSize,
task.PieceSize)
if metadata.TaskID != seedTask.ID {
return false, fmt.Sprintf("metadata TaskID(%s) is not equals with task ID(%s)", metadata.TaskID, seedTask.ID)
}
if metadata.TaskID != task.TaskID {
return errors.Errorf("meta task TaskId(%s) is not equals with task TaskId(%s)", metadata.TaskID, task.TaskID)
if metadata.TaskURL != seedTask.TaskURL {
return false, fmt.Sprintf("metadata taskURL(%s) is not equals with task taskURL(%s)", metadata.TaskURL, seedTask.TaskURL)
}
if metadata.TaskURL != task.TaskURL {
return errors.Errorf("meta task taskUrl(%s) is not equals with task taskUrl(%s)", metadata.TaskURL, task.URL)
if metadata.PieceSize != seedTask.PieceSize {
return false, fmt.Sprintf("metadata piece size(%d) is not equals with task piece size(%d)", metadata.PieceSize, seedTask.PieceSize)
}
if !stringutils.IsBlank(metadata.SourceRealDigest) && !stringutils.IsBlank(task.RequestDigest) &&
metadata.SourceRealDigest != task.RequestDigest {
return errors.Errorf("meta task source digest(%s) is not equals with task request digest(%s)",
metadata.SourceRealDigest, task.RequestDigest)
if seedTask.Range != metadata.Range {
return false, fmt.Sprintf("metadata range(%s) is not equals with task range(%s)", metadata.Range, seedTask.Range)
}
return nil
if seedTask.Digest != metadata.Digest {
return false, fmt.Sprintf("meta digest(%s) is not equals with task request digest(%s)",
metadata.SourceRealDigest, seedTask.Digest)
}
if seedTask.Tag != metadata.Tag {
return false, fmt.Sprintf("metadata tag(%s) is not equals with task tag(%s)", metadata.Range, seedTask.Range)
}
if seedTask.Filter != metadata.Filter {
return false, fmt.Sprintf("metadata filter(%s) is not equals with task filter(%s)", metadata.Filter, seedTask.Filter)
}
return true, ""
}
//checkPieceContent read piece content from reader and check data integrity by pieceMetaRecord
// checkPieceContent read piece content from reader and check data integrity by pieceMetaRecord
func checkPieceContent(reader io.Reader, pieceRecord *storage.PieceMetaRecord, fileDigest hash.Hash) error {
// TODO Analyze the original data for the slice format to calculate fileMd5
pieceMd5 := md5.New()
@ -275,11 +271,7 @@ func checkPieceContent(reader io.Reader, pieceRecord *storage.PieceMetaRecord, f
realPieceMd5 := digestutils.ToHashString(pieceMd5)
// check piece content
if realPieceMd5 != pieceRecord.Md5 {
err := cdnerrors.ErrInconsistentValues{
Expected: pieceRecord.Md5,
Actual: realPieceMd5,
}
return errors.Wrap(err, "compare piece md5")
return errors.Errorf("piece md5 sign is inconsistent, expected is %s, but got %s", pieceRecord.Md5, realPieceMd5)
}
return nil
}

View File

@ -33,7 +33,7 @@ import (
"d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
storageMock "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/mock"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/source/httpprotocol"
sourceMock "d7y.io/dragonfly/v2/pkg/source/mock"
@ -56,8 +56,8 @@ func (suite *CacheDetectorTestSuite) SetupSuite() {
source.UnRegister("http")
suite.Require().Nil(source.Register("http", sourceClient, httpprotocol.Adapter))
storageManager := storageMock.NewMockManager(ctrl)
cacheDataManager := newCacheDataManager(storageManager)
suite.detector = newCacheDetector(cacheDataManager)
cacheDataManager := newMetadataManager(storageManager)
suite.detector = newCacheDetector(cacheDataManager, storageManager)
storageManager.EXPECT().ReadFileMetadata(fullExpiredCache.taskID).Return(fullExpiredCache.fileMeta, nil).AnyTimes()
storageManager.EXPECT().ReadFileMetadata(fullNoExpiredCache.taskID).Return(fullNoExpiredCache.fileMeta, nil).AnyTimes()
storageManager.EXPECT().ReadFileMetadata(partialNotSupportRangeCache.taskID).Return(partialNotSupportRangeCache.fileMeta, nil).AnyTimes()
@ -258,7 +258,7 @@ func newPartialFileMeta(taskID string, URL string) *storage.FileMetadata {
func (suite *CacheDetectorTestSuite) TestDetectCache() {
type args struct {
task *types.SeedTask
task *task.SeedTask
}
tests := []struct {
name string
@ -269,9 +269,9 @@ func (suite *CacheDetectorTestSuite) TestDetectCache() {
{
name: "no cache",
args: args{
task: &types.SeedTask{
TaskID: noCacheTask,
URL: noExpiredAndSupportURL,
task: &task.SeedTask{
ID: noCacheTask,
RawURL: noExpiredAndSupportURL,
TaskURL: noExpiredAndSupportURL,
},
},
@ -281,27 +281,27 @@ func (suite *CacheDetectorTestSuite) TestDetectCache() {
{
name: "partial cache and support range",
args: args{
task: &types.SeedTask{
TaskID: partialAndSupportCacheTask,
URL: noExpiredAndSupportURL,
task: &task.SeedTask{
ID: partialAndSupportCacheTask,
RawURL: noExpiredAndSupportURL,
TaskURL: noExpiredAndSupportURL,
SourceFileLength: 9789,
PieceSize: 2000,
},
},
want: &cacheResult{
breakPoint: 4000,
pieceMetaRecords: partialPieceMetaRecords,
fileMetadata: newPartialFileMeta(partialAndSupportCacheTask, noExpiredAndSupportURL),
BreakPoint: 4000,
PieceMetaRecords: partialPieceMetaRecords,
FileMetadata: newPartialFileMeta(partialAndSupportCacheTask, noExpiredAndSupportURL),
},
wantErr: false,
},
{
name: "partial cache and not support range",
args: args{
task: &types.SeedTask{
TaskID: partialAndNotSupportCacheTask,
URL: noExpiredAndNotSupportURL,
task: &task.SeedTask{
ID: partialAndNotSupportCacheTask,
RawURL: noExpiredAndNotSupportURL,
TaskURL: noExpiredAndNotSupportURL,
SourceFileLength: 9789,
PieceSize: 2000,
@ -313,27 +313,27 @@ func (suite *CacheDetectorTestSuite) TestDetectCache() {
{
name: "full cache and not expire",
args: args{
task: &types.SeedTask{
TaskID: fullCacheNotExpiredTask,
URL: noExpiredAndNotSupportURL,
task: &task.SeedTask{
ID: fullCacheNotExpiredTask,
RawURL: noExpiredAndNotSupportURL,
TaskURL: noExpiredAndNotSupportURL,
SourceFileLength: 9789,
PieceSize: 2000,
},
},
want: &cacheResult{
breakPoint: -1,
pieceMetaRecords: fullPieceMetaRecords,
fileMetadata: newCompletedFileMeta(fullCacheNotExpiredTask, noExpiredAndNotSupportURL, true),
BreakPoint: -1,
PieceMetaRecords: fullPieceMetaRecords,
FileMetadata: newCompletedFileMeta(fullCacheNotExpiredTask, noExpiredAndNotSupportURL, true),
},
wantErr: false,
},
{
name: "full cache and expired",
args: args{
task: &types.SeedTask{
TaskID: fullCacheExpiredTask,
URL: expiredAndSupportURL,
task: &task.SeedTask{
ID: fullCacheExpiredTask,
RawURL: expiredAndSupportURL,
TaskURL: expiredAndNotSupportURL,
},
},
@ -369,9 +369,9 @@ func (suite *CacheDetectorTestSuite) TestParseByReadFile() {
metadata: partialSupportRangeCache.fileMeta,
},
want: &cacheResult{
breakPoint: 4000,
pieceMetaRecords: partialSupportRangeCache.pieces,
fileMetadata: partialSupportRangeCache.fileMeta,
BreakPoint: 4000,
PieceMetaRecords: partialSupportRangeCache.pieces,
FileMetadata: partialSupportRangeCache.fileMeta,
},
wantErr: false,
},
@ -403,9 +403,9 @@ func (suite *CacheDetectorTestSuite) TestParseByReadMetaFile() {
fileMetadata: fullNoExpiredCache.fileMeta,
},
want: &cacheResult{
breakPoint: -1,
pieceMetaRecords: fullNoExpiredCache.pieces,
fileMetadata: fullNoExpiredCache.fileMeta,
BreakPoint: -1,
PieceMetaRecords: fullNoExpiredCache.pieces,
FileMetadata: fullNoExpiredCache.fileMeta,
},
wantErr: false,
},

View File

@ -21,24 +21,26 @@ import (
"context"
"crypto/md5"
"encoding/json"
"fmt"
"io"
"sync"
"github.com/pkg/errors"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/constants"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/types"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/pkg/ratelimiter/limitreader"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/util/digestutils"
"d7y.io/dragonfly/v2/pkg/util/rangeutils"
)
type piece struct {
taskID string
pieceNum int32
pieceSize int32
pieceNum uint32
pieceSize uint32
pieceContent *bytes.Buffer
}
@ -46,63 +48,66 @@ type downloadMetadata struct {
backSourceLength int64 // back to source download file length
realCdnFileLength int64 // the actual length of the stored file
realSourceFileLength int64 // actually read the length of the source
pieceTotalCount int32 // piece total count
totalPieceCount int32 // total number of pieces
pieceMd5Sign string
sourceRealDigest string
}
type cacheWriter struct {
cdnReporter *reporter
cacheDataManager *cacheDataManager
cacheStore storage.Manager
metadataManager *metadataManager
}
func newCacheWriter(cdnReporter *reporter, cacheDataManager *cacheDataManager) *cacheWriter {
func newCacheWriter(cdnReporter *reporter, metadataManager *metadataManager, cacheStore storage.Manager) *cacheWriter {
return &cacheWriter{
cdnReporter: cdnReporter,
cacheDataManager: cacheDataManager,
cacheStore: cacheStore,
metadataManager: metadataManager,
}
}
// startWriter writes the stream data from the reader to the underlying storage.
func (cw *cacheWriter) startWriter(ctx context.Context, reader io.Reader, task *types.SeedTask, detectResult *cacheResult) (*downloadMetadata, error) {
func (cw *cacheWriter) startWriter(ctx context.Context, reader *limitreader.LimitReader, seedTask *task.SeedTask, breakPoint int64) (*downloadMetadata,
error) {
var writeSpan trace.Span
ctx, writeSpan = tracer.Start(ctx, config.SpanWriteData)
ctx, writeSpan = tracer.Start(ctx, constants.SpanWriteData)
defer writeSpan.End()
if detectResult == nil {
detectResult = &cacheResult{}
}
// currentSourceFileLength is used to calculate the source file Length dynamically
currentSourceFileLength := detectResult.breakPoint
// the pieceNum currently have been processed
curPieceNum := len(detectResult.pieceMetaRecords)
routineCount := calculateRoutineCount(task.SourceFileLength-currentSourceFileLength, task.PieceSize)
writeSpan.SetAttributes(config.AttributeWriteGoroutineCount.Int(routineCount))
currentSourceFileLength := breakPoint
routineCount := calculateRoutineCount(seedTask.SourceFileLength-currentSourceFileLength, seedTask.PieceSize)
writeSpan.SetAttributes(constants.AttributeWriteGoroutineCount.Int(routineCount))
// start writer pool
backSourceLength, totalPieceCount, err := cw.doWrite(ctx, reader, task, routineCount, curPieceNum)
backSourceLength, totalPieceCount, err := cw.doWrite(ctx, reader, seedTask, routineCount, breakPoint)
if err != nil {
return &downloadMetadata{backSourceLength: backSourceLength}, fmt.Errorf("write data: %v", err)
return &downloadMetadata{backSourceLength: backSourceLength}, errors.Wrap(err, "do write data action")
}
storageInfo, err := cw.cacheDataManager.statDownloadFile(task.TaskID)
storageInfo, err := cw.cacheStore.StatDownloadFile(seedTask.ID)
if err != nil {
return &downloadMetadata{backSourceLength: backSourceLength}, fmt.Errorf("stat cdn download file: %v", err)
return &downloadMetadata{backSourceLength: backSourceLength}, errors.Wrap(err, "stat cdn download file")
}
storageInfoBytes, _ := json.Marshal(storageInfo)
writeSpan.SetAttributes(config.AttributeDownloadFileInfo.String(string(storageInfoBytes)))
writeSpan.SetAttributes(constants.AttributeDownloadFileInfo.String(string(storageInfoBytes)))
// TODO Try getting it from the ProgressManager first
pieceMd5Sign, _, err := cw.cacheDataManager.getPieceMd5Sign(task.TaskID)
pieceMd5Sign, _, err := cw.metadataManager.getPieceMd5Sign(seedTask.ID)
if err != nil {
return &downloadMetadata{backSourceLength: backSourceLength}, fmt.Errorf("get piece md5 sign: %v", err)
return &downloadMetadata{backSourceLength: backSourceLength}, errors.Wrap(err, "get piece md5 sign")
}
return &downloadMetadata{
backSourceLength: backSourceLength,
realCdnFileLength: storageInfo.Size,
realSourceFileLength: currentSourceFileLength + backSourceLength,
pieceTotalCount: int32(totalPieceCount),
totalPieceCount: totalPieceCount,
pieceMd5Sign: pieceMd5Sign,
sourceRealDigest: reader.Digest(),
}, nil
}
func (cw *cacheWriter) doWrite(ctx context.Context, reader io.Reader, task *types.SeedTask, routineCount int, curPieceNum int) (n int64, totalPiece int,
// doWrite do actual write data to storage
func (cw *cacheWriter) doWrite(ctx context.Context, reader io.Reader, seedTask *task.SeedTask, routineCount int, breakPoint int64) (n int64, totalPiece int32,
err error) {
// the pieceNum currently have been processed
curPieceNum := int32(breakPoint / int64(seedTask.PieceSize))
var bufPool = &sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
@ -111,63 +116,72 @@ func (cw *cacheWriter) doWrite(ctx context.Context, reader io.Reader, task *type
var backSourceLength int64
buf := make([]byte, 256*1024)
jobCh := make(chan *piece)
var wg = &sync.WaitGroup{}
cw.writerPool(ctx, wg, routineCount, jobCh, bufPool)
var g, writeCtx = errgroup.WithContext(ctx)
cw.writerPool(writeCtx, g, routineCount, jobCh, bufPool)
loop:
for {
select {
case <-writeCtx.Done():
break loop
default:
var bb = bufPool.Get().(*bytes.Buffer)
bb.Reset()
limitReader := io.LimitReader(reader, int64(task.PieceSize))
limitReader := io.LimitReader(reader, int64(seedTask.PieceSize))
n, err = io.CopyBuffer(bb, limitReader, buf)
if err != nil {
close(jobCh)
return backSourceLength, 0, fmt.Errorf("read source taskID %s pieceNum %d piece: %v", task.TaskID, curPieceNum, err)
return backSourceLength, 0, errors.Errorf("read taskID %s pieceNum %d piece from source failed: %v", seedTask.ID, curPieceNum, err)
}
if n == 0 {
break
break loop
}
backSourceLength += n
jobCh <- &piece{
taskID: task.TaskID,
pieceNum: int32(curPieceNum),
pieceSize: task.PieceSize,
taskID: seedTask.ID,
pieceNum: uint32(curPieceNum),
pieceSize: uint32(seedTask.PieceSize),
pieceContent: bb,
}
curPieceNum++
if n < int64(task.PieceSize) {
break
if n < int64(seedTask.PieceSize) {
break loop
}
}
}
close(jobCh)
wg.Wait()
if err := g.Wait(); err != nil {
return backSourceLength, 0, errors.Wrapf(err, "write pool")
}
return backSourceLength, curPieceNum, nil
}
func (cw *cacheWriter) writerPool(ctx context.Context, wg *sync.WaitGroup, routineCount int, pieceCh chan *piece, bufPool *sync.Pool) {
wg.Add(routineCount)
func (cw *cacheWriter) writerPool(ctx context.Context, g *errgroup.Group, routineCount int, pieceCh chan *piece, bufPool *sync.Pool) {
for i := 0; i < routineCount; i++ {
go func() {
defer wg.Done()
g.Go(func() error {
for p := range pieceCh {
select {
case <-ctx.Done():
return ctx.Err()
default:
// TODO Subsequent compression and other features are implemented through waitToWriteContent and pieceStyle
waitToWriteContent := p.pieceContent
originPieceLen := waitToWriteContent.Len() // the length of the original data that has not been processed
pieceLen := originPieceLen // the real length written to the storage medium after processing
pieceStyle := types.PlainUnspecified
pieceLen := originPieceLen // the real length written to the storage driver after processed
pieceStyle := int32(base.PieceStyle_PLAIN.Number())
pieceMd5 := md5.New()
err := cw.cacheDataManager.writeDownloadFile(
err := cw.cacheStore.WriteDownloadFile(
p.taskID, int64(p.pieceNum)*int64(p.pieceSize), int64(waitToWriteContent.Len()),
io.TeeReader(io.LimitReader(p.pieceContent, int64(waitToWriteContent.Len())), pieceMd5))
if err != nil {
return errors.Errorf("write taskID %s pieceNum %d to download file failed: %v", p.taskID, p.pieceNum, err)
}
// Recycle Buffer
bufPool.Put(waitToWriteContent)
if err != nil {
logger.Errorf("write taskID %s pieceNum %d file: %v", p.taskID, p.pieceNum, err)
continue
}
start := uint64(p.pieceNum) * uint64(p.pieceSize)
end := start + uint64(pieceLen) - 1
pieceRecord := &storage.PieceMetaRecord{
PieceNum: uint32(p.pieceNum),
PieceNum: p.pieceNum,
PieceLen: uint32(pieceLen),
Md5: digestutils.ToHashString(pieceMd5),
Range: &rangeutils.Range{
@ -181,28 +195,28 @@ func (cw *cacheWriter) writerPool(ctx context.Context, wg *sync.WaitGroup, routi
PieceStyle: pieceStyle,
}
// write piece meta to storage
if err = cw.cacheDataManager.appendPieceMetadata(p.taskID, pieceRecord); err != nil {
logger.Errorf("write piece meta file: %v", err)
continue
if err = cw.metadataManager.appendPieceMetadata(p.taskID, pieceRecord); err != nil {
return errors.Errorf("write piece meta to piece meta file failed: %v", err)
}
if cw.cdnReporter != nil {
// report piece info
if err = cw.cdnReporter.reportPieceMetaRecord(ctx, p.taskID, pieceRecord, DownloaderReport); err != nil {
// NOTE: should we do this job again?
logger.Errorf("report piece status, pieceNum %d pieceMetaRecord %s: %v", p.pieceNum, pieceRecord, err)
return errors.Errorf("report piece status, pieceNum %d pieceMetaRecord %s: %v", p.pieceNum, pieceRecord, err)
}
}
}
}()
return nil
})
}
}
/*
helper functions
max goroutine count is CDNWriterRoutineLimit
*/
// calculateRoutineCount max goroutine count is CDNWriterRoutineLimit
func calculateRoutineCount(remainingFileLength int64, pieceSize int32) int {
routineSize := config.CDNWriterRoutineLimit
routineSize := constants.CDNWriterRoutineLimit
if remainingFileLength < 0 || pieceSize <= 0 {
return routineSize
}

View File

@ -20,22 +20,24 @@ import (
"bufio"
"context"
"fmt"
"io"
"os"
"strings"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/suite"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/constants"
"d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/storedriver/local"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/disk"
"d7y.io/dragonfly/v2/cdn/supervisor/progress"
"d7y.io/dragonfly/v2/cdn/types"
progressMock "d7y.io/dragonfly/v2/cdn/supervisor/mocks/progress"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/pkg/ratelimiter/limitreader"
"d7y.io/dragonfly/v2/pkg/unit"
)
@ -63,12 +65,12 @@ func NewPlugins(workHome string) map[plugins.PluginType][]*plugins.PluginPropert
{
Name: disk.StorageMode,
Enable: true,
Config: &storage.Config{
Config: &config.StorageConfig{
GCInitialDelay: 0 * time.Second,
GCInterval: 15 * time.Second,
DriverConfigs: map[string]*storage.DriverConfig{
DriverConfigs: map[string]*config.DriverConfig{
local.DiskDriverName: {
GCConfig: &storage.GCConfig{
GCConfig: &config.GCConfig{
YoungGCThreshold: 100 * unit.GB,
FullGCThreshold: 5 * unit.GB,
CleanRatio: 1,
@ -85,14 +87,17 @@ func (suite *CacheWriterTestSuite) SetupSuite() {
suite.workHome, _ = os.MkdirTemp("/tmp", "cdn-CacheWriterDetectorTestSuite-")
suite.T().Log("workHome:", suite.workHome)
suite.Nil(plugins.Initialize(NewPlugins(suite.workHome)))
storeMgr, ok := storage.Get(config.DefaultStorageMode)
ctrl := gomock.NewController(suite.T())
progressManager := progressMock.NewMockManager(ctrl)
progressManager.EXPECT().PublishPiece(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
progressManager.EXPECT().PublishTask(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
storeManager, ok := storage.Get(constants.DefaultStorageMode)
if !ok {
suite.Failf("failed to get storage mode %s", config.DefaultStorageMode)
suite.Failf("failed to get storage mode %s", constants.DefaultStorageMode)
}
cacheDataManager := newCacheDataManager(storeMgr)
progressMgr, _ := progress.NewManager()
cdnReporter := newReporter(progressMgr)
suite.writer = newCacheWriter(cdnReporter, cacheDataManager)
metadataManager := newMetadataManager(storeManager)
cdnReporter := newReporter(progressManager)
suite.writer = newCacheWriter(cdnReporter, metadataManager, storeManager)
}
func (suite *CacheWriterTestSuite) TearDownSuite() {
@ -108,9 +113,9 @@ func (suite *CacheWriterTestSuite) TestStartWriter() {
suite.Nil(err)
contentLen := int64(len(content))
type args struct {
reader io.Reader
task *types.SeedTask
detectResult *cacheResult
reader *limitreader.LimitReader
task *task.SeedTask
breakPoint int64
}
tests := []struct {
@ -122,9 +127,9 @@ func (suite *CacheWriterTestSuite) TestStartWriter() {
{
name: "write with nil detectResult",
args: args{
reader: bufio.NewReader(strings.NewReader(string(content))),
task: &types.SeedTask{
TaskID: "5806501c3bb92f0b645918c5a4b15495a63259e3e0363008f97e186509e9e",
reader: limitreader.NewLimitReader(bufio.NewReader(strings.NewReader(string(content))), 100),
task: &task.SeedTask{
ID: "5806501c3bb92f0b645918c5a4b15495a63259e3e0363008f97e186509e9e",
PieceSize: 50,
},
},
@ -132,67 +137,56 @@ func (suite *CacheWriterTestSuite) TestStartWriter() {
backSourceLength: contentLen,
realCdnFileLength: contentLen,
realSourceFileLength: contentLen,
pieceTotalCount: int32((contentLen + 49) / 50),
totalPieceCount: int32((contentLen + 49) / 50),
pieceMd5Sign: "3f4585787609b0d7d4c9fc800db61655a74494f83507c8acd2818d0461d9cdc5",
},
}, {
name: "write with non nil detectResult",
args: args{
reader: bufio.NewReader(strings.NewReader(string(content))),
task: &types.SeedTask{
TaskID: "5816501c3bb92f0b645918c5a4b15495a63259e3e0363008f97e186509e9e",
reader: limitreader.NewLimitReader(bufio.NewReader(strings.NewReader(string(content))), 100),
task: &task.SeedTask{
ID: "5816501c3bb92f0b645918c5a4b15495a63259e3e0363008f97e186509e9e",
PieceSize: 50,
},
detectResult: &cacheResult{
breakPoint: 0,
pieceMetaRecords: nil,
fileMetadata: nil,
},
},
result: &downloadMetadata{
backSourceLength: contentLen,
realCdnFileLength: contentLen,
realSourceFileLength: contentLen,
pieceTotalCount: int32((contentLen + 49) / 50),
totalPieceCount: int32((contentLen + 49) / 50),
pieceMd5Sign: "3f4585787609b0d7d4c9fc800db61655a74494f83507c8acd2818d0461d9cdc5",
},
}, {
name: "write with task length",
args: args{
reader: bufio.NewReader(strings.NewReader(string(content))),
task: &types.SeedTask{
TaskID: "5826501c3bb92f0b645918c5a4b15495a63259e3e0363008f97e186509e93",
reader: limitreader.NewLimitReader(bufio.NewReader(strings.NewReader(string(content))), 100),
task: &task.SeedTask{
ID: "5826501c3bb92f0b645918c5a4b15495a63259e3e0363008f97e186509e93",
PieceSize: 50,
SourceFileLength: contentLen,
},
detectResult: &cacheResult{
breakPoint: 0,
pieceMetaRecords: nil,
fileMetadata: nil,
},
},
result: &downloadMetadata{
backSourceLength: contentLen,
realCdnFileLength: contentLen,
realSourceFileLength: contentLen,
pieceTotalCount: int32((contentLen + 49) / 50),
totalPieceCount: int32((contentLen + 49) / 50),
pieceMd5Sign: "3f4585787609b0d7d4c9fc800db61655a74494f83507c8acd2818d0461d9cdc5",
},
},
}
for _, tt := range tests {
suite.Run(tt.name, func() {
suite.writer.cdnReporter.progress.InitSeedProgress(context.Background(), tt.args.task.TaskID)
downloadMetadata, err := suite.writer.startWriter(context.Background(), tt.args.reader, tt.args.task, tt.args.detectResult)
downloadMetadata, err := suite.writer.startWriter(context.Background(), tt.args.reader, tt.args.task, tt.args.breakPoint)
suite.Equal(tt.wantErr, err != nil)
suite.Equal(tt.result, downloadMetadata)
suite.checkFileSize(suite.writer.cacheDataManager, tt.args.task.TaskID, contentLen)
suite.checkFileSize(suite.writer.cacheStore, tt.args.task.ID, contentLen)
})
}
}
func (suite *CacheWriterTestSuite) checkFileSize(cacheDataMgr *cacheDataManager, taskID string, expectedSize int64) {
storageInfo, err := cacheDataMgr.statDownloadFile(taskID)
func (suite *CacheWriterTestSuite) checkFileSize(cacheStore storage.Manager, taskID string, expectedSize int64) {
storageInfo, err := cacheStore.StatDownloadFile(taskID)
suite.Nil(err)
suite.Equal(expectedSize, storageInfo.Size)
}

View File

@ -23,24 +23,24 @@ import (
"github.com/pkg/errors"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/util/rangeutils"
"d7y.io/dragonfly/v2/pkg/util/stringutils"
)
func (cm *Manager) download(ctx context.Context, task *types.SeedTask, breakPoint int64) (io.ReadCloser, error) {
func (cm *manager) download(ctx context.Context, seedTask *task.SeedTask, breakPoint int64) (io.ReadCloser, error) {
var err error
breakRange := task.Range
breakRange := seedTask.Range
if breakPoint > 0 {
// todo replace task.SourceFileLength with totalSourceFileLength to get BreakRange
breakRange, err = getBreakRange(breakPoint, task.Range, task.SourceFileLength)
breakRange, err = getBreakRange(breakPoint, seedTask.Range, seedTask.SourceFileLength)
if err != nil {
return nil, errors.Wrapf(err, "calculate the breakRange")
}
}
task.Log().Infof("start downloading URL %s at range %s with header %s", task.URL, breakRange, task.Header)
downloadRequest, err := source.NewRequestWithContext(ctx, task.URL, task.Header)
seedTask.Log().Infof("start downloading URL %s at range %s with header %s", seedTask.RawURL, breakRange, seedTask.Header)
downloadRequest, err := source.NewRequestWithContext(ctx, seedTask.RawURL, seedTask.Header)
if err != nil {
return nil, errors.Wrap(err, "create download request")
}
@ -50,7 +50,7 @@ func (cm *Manager) download(ctx context.Context, task *types.SeedTask, breakPoin
body, expireInfo, err := source.DownloadWithExpireInfo(downloadRequest)
// update Expire info
if err == nil {
cm.updateExpireInfo(task.TaskID, map[string]string{
cm.updateExpireInfo(seedTask.ID, map[string]string{
source.LastModified: expireInfo.LastModified,
source.ETag: expireInfo.ETag,
})

View File

@ -19,6 +19,7 @@ package cdn
import (
"context"
"crypto/md5"
"encoding/json"
"fmt"
"time"
@ -27,11 +28,12 @@ import (
"go.opentelemetry.io/otel/trace"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/constants"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
_ "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/disk" // nolint
_ "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/hybrid" // nolint
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/cdn/supervisor/progress"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/ratelimiter/limitreader"
"d7y.io/dragonfly/v2/pkg/ratelimiter/ratelimiter"
@ -42,128 +44,150 @@ import (
"d7y.io/dragonfly/v2/pkg/util/timeutils"
)
// Ensure that Manager implements the CDNMgr interface
var _ supervisor.CDNMgr = (*Manager)(nil)
// Manager as an interface defines all operations against CDN and
// operates on the underlying files stored on the local disk, etc.
type Manager interface {
var tracer trace.Tracer
// TriggerCDN will trigger the download resource from sourceURL.
TriggerCDN(context.Context, *task.SeedTask) (*task.SeedTask, error)
func init() {
tracer = otel.Tracer("cdn-server")
// Delete the cdn meta with specified taskID.
// The file on the disk will be deleted when the force is true.
Delete(taskID string) error
// TryFreeSpace checks if the free space of the storage is larger than the fileLength.
TryFreeSpace(fileLength int64) (bool, error)
}
// Manager is an implementation of the interface of CDNMgr.
type Manager struct {
// Ensure that Manager implements the CDNManager interface
var _ Manager = (*manager)(nil)
var tracer = otel.Tracer("cdn-server")
// Manager is an implementation of the interface of Manager.
type manager struct {
cfg *config.Config
cacheStore storage.Manager
limiter *ratelimiter.RateLimiter
cdnLocker *synclock.LockerPool
cacheDataManager *cacheDataManager
progressMgr supervisor.SeedProgressMgr
metadataManager *metadataManager
progressManager progress.Manager
taskManager task.Manager
cdnReporter *reporter
detector *cacheDetector
writer *cacheWriter
}
// NewManager returns a new Manager.
func NewManager(cfg *config.Config, cacheStore storage.Manager, progressMgr supervisor.SeedProgressMgr) (supervisor.CDNMgr, error) {
return newManager(cfg, cacheStore, progressMgr)
func NewManager(cfg *config.Config, cacheStore storage.Manager, progressManager progress.Manager,
taskManager task.Manager) (Manager, error) {
return newManager(cfg, cacheStore, progressManager, taskManager)
}
func newManager(cfg *config.Config, cacheStore storage.Manager, progressMgr supervisor.SeedProgressMgr) (*Manager, error) {
func newManager(cfg *config.Config, cacheStore storage.Manager, progressManager progress.Manager, taskManager task.Manager) (Manager, error) {
rateLimiter := ratelimiter.NewRateLimiter(ratelimiter.TransRate(int64(cfg.MaxBandwidth-cfg.SystemReservedBandwidth)), 2)
cacheDataManager := newCacheDataManager(cacheStore)
cdnReporter := newReporter(progressMgr)
return &Manager{
metadataManager := newMetadataManager(cacheStore)
cdnReporter := newReporter(progressManager)
return &manager{
cfg: cfg,
cacheStore: cacheStore,
limiter: rateLimiter,
cdnLocker: synclock.NewLockerPool(),
cacheDataManager: cacheDataManager,
metadataManager: metadataManager,
cdnReporter: cdnReporter,
progressMgr: progressMgr,
detector: newCacheDetector(cacheDataManager),
writer: newCacheWriter(cdnReporter, cacheDataManager),
progressManager: progressManager,
taskManager: taskManager,
detector: newCacheDetector(metadataManager, cacheStore),
writer: newCacheWriter(cdnReporter, metadataManager, cacheStore),
cdnLocker: synclock.NewLockerPool(),
}, nil
}
func (cm *Manager) TriggerCDN(ctx context.Context, task *types.SeedTask) (seedTask *types.SeedTask, err error) {
func (cm *manager) TriggerCDN(ctx context.Context, seedTask *task.SeedTask) (*task.SeedTask, error) {
updateTaskInfo, err := cm.doTrigger(ctx, seedTask)
if err != nil {
seedTask.Log().Errorf("failed to trigger cdn: %v", err)
// todo source not reach error SOURCE_ERROR
updateTaskInfo = getUpdateTaskInfoWithStatusOnly(seedTask, task.StatusFailed)
}
err = cm.progressManager.PublishTask(ctx, seedTask.ID, updateTaskInfo)
return updateTaskInfo, err
}
func (cm *manager) doTrigger(ctx context.Context, seedTask *task.SeedTask) (*task.SeedTask, error) {
var span trace.Span
ctx, span = tracer.Start(ctx, config.SpanTriggerCDN)
ctx, span = tracer.Start(ctx, constants.SpanTriggerCDN)
defer span.End()
tempTask := *task
seedTask = &tempTask
// obtain taskId write lock
cm.cdnLocker.Lock(task.TaskID, false)
defer cm.cdnLocker.UnLock(task.TaskID, false)
cm.cdnLocker.Lock(seedTask.ID, false)
defer cm.cdnLocker.UnLock(seedTask.ID, false)
var fileDigest = md5.New()
var digestType = digestutils.Md5Hash.String()
if !stringutils.IsBlank(task.RequestDigest) {
requestDigest := digestutils.Parse(task.RequestDigest)
if !stringutils.IsBlank(seedTask.Digest) {
requestDigest := digestutils.Parse(seedTask.Digest)
digestType = requestDigest[0]
fileDigest = digestutils.CreateHash(digestType)
}
// first: detect Cache
detectResult, err := cm.detector.detectCache(ctx, task, fileDigest)
detectResult, err := cm.detector.detectCache(ctx, seedTask, fileDigest)
if err != nil {
seedTask.UpdateStatus(types.TaskInfoCdnStatusFailed)
return seedTask, errors.Wrapf(err, "failed to detect cache")
return nil, errors.Wrap(err, "detect task cache")
}
span.SetAttributes(config.AttributeCacheResult.String(detectResult.String()))
task.Log().Debugf("detects cache result: %#v", detectResult)
// second: report detect result
err = cm.cdnReporter.reportCache(ctx, task.TaskID, detectResult)
jsonResult, err := json.Marshal(detectResult)
if err != nil {
task.Log().Errorf("failed to report cache, reset detectResult: %v", err)
return nil, errors.Wrapf(err, "json marshal detectResult: %#v", detectResult)
}
seedTask.Log().Debugf("detects cache result: %s", jsonResult)
// second: report detect result
err = cm.cdnReporter.reportDetectResult(ctx, seedTask.ID, detectResult)
if err != nil {
seedTask.Log().Errorf("failed to report detect cache result: %v", err)
return nil, errors.Wrapf(err, "report detect cache result")
}
// full cache
if detectResult.breakPoint == -1 {
task.Log().Infof("cache full hit on local")
seedTask.UpdateTaskInfo(types.TaskInfoCdnStatusSuccess, detectResult.fileMetadata.SourceRealDigest, detectResult.fileMetadata.PieceMd5Sign,
detectResult.fileMetadata.SourceFileLen, detectResult.fileMetadata.CdnFileLength)
return seedTask, nil
if detectResult.BreakPoint == -1 {
seedTask.Log().Infof("cache full hit on local")
return getUpdateTaskInfo(seedTask, task.StatusSuccess, detectResult.FileMetadata.SourceRealDigest, detectResult.FileMetadata.PieceMd5Sign,
detectResult.FileMetadata.SourceFileLen, detectResult.FileMetadata.CdnFileLength, detectResult.FileMetadata.TotalPieceCount), nil
}
server.StatSeedStart(task.TaskID, task.URL)
start := time.Now()
// third: start to download the source file
var downloadSpan trace.Span
ctx, downloadSpan = tracer.Start(ctx, config.SpanDownloadSource)
ctx, downloadSpan = tracer.Start(ctx, constants.SpanDownloadSource)
downloadSpan.End()
body, err := cm.download(ctx, task, detectResult.breakPoint)
server.StatSeedStart(seedTask.ID, seedTask.RawURL)
respBody, err := cm.download(ctx, seedTask, detectResult.BreakPoint)
// download fail
if err != nil {
downloadSpan.RecordError(err)
server.StatSeedFinish(task.TaskID, task.URL, false, err, start, time.Now(), 0, 0)
seedTask.UpdateStatus(types.TaskInfoCdnStatusSourceError)
return seedTask, err
server.StatSeedFinish(seedTask.ID, seedTask.RawURL, false, err, start, time.Now(), 0, 0)
return nil, errors.Wrap(err, "download task file data")
}
defer body.Close()
reader := limitreader.NewLimitReaderWithLimiterAndDigest(body, cm.limiter, fileDigest, digestutils.Algorithms[digestType])
defer respBody.Close()
reader := limitreader.NewLimitReaderWithLimiterAndDigest(respBody, cm.limiter, fileDigest, digestutils.Algorithms[digestType])
// forth: write to storage
downloadMetadata, err := cm.writer.startWriter(ctx, reader, task, detectResult)
downloadMetadata, err := cm.writer.startWriter(ctx, reader, seedTask, detectResult.BreakPoint)
if err != nil {
server.StatSeedFinish(task.TaskID, task.URL, false, err, start, time.Now(), downloadMetadata.backSourceLength,
server.StatSeedFinish(seedTask.ID, seedTask.RawURL, false, err, start, time.Now(), downloadMetadata.backSourceLength,
downloadMetadata.realSourceFileLength)
task.Log().Errorf("failed to write for task: %v", err)
seedTask.UpdateStatus(types.TaskInfoCdnStatusFailed)
return seedTask, err
return nil, errors.Wrap(err, "write task file data")
}
server.StatSeedFinish(task.TaskID, task.URL, true, nil, start, time.Now(), downloadMetadata.backSourceLength,
server.StatSeedFinish(seedTask.ID, seedTask.RawURL, true, nil, start, time.Now(), downloadMetadata.backSourceLength,
downloadMetadata.realSourceFileLength)
sourceDigest := reader.Digest()
// fifth: handle CDN result
success, err := cm.handleCDNResult(task, sourceDigest, downloadMetadata)
if err != nil || !success {
seedTask.UpdateStatus(types.TaskInfoCdnStatusFailed)
return seedTask, err
err = cm.handleCDNResult(seedTask, downloadMetadata)
if err != nil {
return nil, err
}
seedTask.UpdateTaskInfo(types.TaskInfoCdnStatusSuccess, sourceDigest, downloadMetadata.pieceMd5Sign,
downloadMetadata.realSourceFileLength, downloadMetadata.realCdnFileLength)
return seedTask, nil
return getUpdateTaskInfo(seedTask, task.StatusSuccess, downloadMetadata.sourceRealDigest, downloadMetadata.pieceMd5Sign,
downloadMetadata.realSourceFileLength, downloadMetadata.realCdnFileLength, downloadMetadata.totalPieceCount), nil
}
func (cm *Manager) Delete(taskID string) error {
func (cm *manager) Delete(taskID string) error {
cm.cdnLocker.Lock(taskID, false)
defer cm.cdnLocker.UnLock(taskID, false)
err := cm.cacheStore.DeleteTask(taskID)
if err != nil {
return errors.Wrap(err, "failed to delete task files")
@ -171,67 +195,76 @@ func (cm *Manager) Delete(taskID string) error {
return nil
}
func (cm *Manager) TryFreeSpace(fileLength int64) (bool, error) {
func (cm *manager) TryFreeSpace(fileLength int64) (bool, error) {
return cm.cacheStore.TryFreeSpace(fileLength)
}
func (cm *Manager) handleCDNResult(task *types.SeedTask, sourceDigest string, downloadMetadata *downloadMetadata) (bool, error) {
task.Log().Debugf("handle cdn result, downloadMetadata: %#v", downloadMetadata)
var isSuccess = true
var errorMsg string
// TODO Different error representations are returned to the caller
func (cm *manager) handleCDNResult(seedTask *task.SeedTask, downloadMetadata *downloadMetadata) error {
seedTask.Log().Debugf("handle cdn result, downloadMetadata: %#v", downloadMetadata)
var success = true
var errMsg string
// check md5
if !stringutils.IsBlank(task.RequestDigest) && task.RequestDigest != sourceDigest {
errorMsg = fmt.Sprintf("file digest not match expected: %s real: %s", task.RequestDigest, sourceDigest)
isSuccess = false
if !stringutils.IsBlank(seedTask.Digest) && seedTask.Digest != downloadMetadata.sourceRealDigest {
errMsg = fmt.Sprintf("file digest not match expected: %s real: %s", seedTask.Digest, downloadMetadata.sourceRealDigest)
success = false
}
// check source length
if isSuccess && task.SourceFileLength >= 0 && task.SourceFileLength != downloadMetadata.realSourceFileLength {
errorMsg = fmt.Sprintf("file length not match expected: %d real: %d", task.SourceFileLength, downloadMetadata.realSourceFileLength)
isSuccess = false
if success && seedTask.SourceFileLength >= 0 && seedTask.SourceFileLength != downloadMetadata.realSourceFileLength {
errMsg = fmt.Sprintf("file length not match expected: %d real: %d", seedTask.SourceFileLength, downloadMetadata.realSourceFileLength)
success = false
}
if isSuccess && task.PieceTotal > 0 && downloadMetadata.pieceTotalCount != task.PieceTotal {
errorMsg = fmt.Sprintf("task total piece count not match expected: %d real: %d", task.PieceTotal, downloadMetadata.pieceTotalCount)
isSuccess = false
if success && seedTask.TotalPieceCount > 0 && downloadMetadata.totalPieceCount != seedTask.TotalPieceCount {
errMsg = fmt.Sprintf("task total piece count not match expected: %d real: %d", seedTask.TotalPieceCount, downloadMetadata.totalPieceCount)
success = false
}
sourceFileLen := task.SourceFileLength
if isSuccess && task.SourceFileLength <= 0 {
sourceFileLen := seedTask.SourceFileLength
if success && seedTask.SourceFileLength <= 0 {
sourceFileLen = downloadMetadata.realSourceFileLength
}
cdnFileLength := downloadMetadata.realCdnFileLength
pieceMd5Sign := downloadMetadata.pieceMd5Sign
// if validate fail
if !isSuccess {
cdnFileLength = 0
}
if err := cm.cacheDataManager.updateStatusAndResult(task.TaskID, &storage.FileMetadata{
if err := cm.metadataManager.updateStatusAndResult(seedTask.ID, &storage.FileMetadata{
Finish: true,
Success: isSuccess,
SourceRealDigest: sourceDigest,
PieceMd5Sign: pieceMd5Sign,
CdnFileLength: cdnFileLength,
Success: success,
SourceFileLen: sourceFileLen,
TotalPieceCount: downloadMetadata.pieceTotalCount,
CdnFileLength: downloadMetadata.realCdnFileLength,
SourceRealDigest: downloadMetadata.sourceRealDigest,
TotalPieceCount: downloadMetadata.totalPieceCount,
PieceMd5Sign: downloadMetadata.pieceMd5Sign,
}); err != nil {
return false, errors.Wrap(err, "failed to update task status and result")
return errors.Wrapf(err, "update metadata")
}
if !isSuccess {
return false, errors.New(errorMsg)
if !success {
return errors.New(errMsg)
}
task.Log().Infof("success to get task, downloadMetadata: %#v realDigest: %s", downloadMetadata, sourceDigest)
return true, nil
return nil
}
func (cm *Manager) updateExpireInfo(taskID string, expireInfo map[string]string) {
if err := cm.cacheDataManager.updateExpireInfo(taskID, expireInfo); err != nil {
func (cm *manager) updateExpireInfo(taskID string, expireInfo map[string]string) {
if err := cm.metadataManager.updateExpireInfo(taskID, expireInfo); err != nil {
logger.WithTaskID(taskID).Errorf("failed to update expireInfo(%s): %v", expireInfo, err)
}
logger.WithTaskID(taskID).Infof("success to update expireInfo(%s)", expireInfo)
logger.WithTaskID(taskID).Debugf("success to update metadata expireInfo(%s)", expireInfo)
}
/*
helper functions
*/
var getCurrentTimeMillisFunc = timeutils.CurrentTimeMillis
func getUpdateTaskInfoWithStatusOnly(seedTask *task.SeedTask, cdnStatus string) *task.SeedTask {
cloneTask := seedTask.Clone()
cloneTask.CdnStatus = cdnStatus
return cloneTask
}
func getUpdateTaskInfo(seedTask *task.SeedTask, cdnStatus, realMD5, pieceMd5Sign string, sourceFileLength, cdnFileLength int64,
totalPieceCount int32) *task.SeedTask {
cloneTask := seedTask.Clone()
cloneTask.SourceFileLength = sourceFileLength
cloneTask.CdnFileLength = cdnFileLength
cloneTask.CdnStatus = cdnStatus
cloneTask.TotalPieceCount = totalPieceCount
cloneTask.SourceRealDigest = realMD5
cloneTask.PieceMd5Sign = pieceMd5Sign
return cloneTask
}

View File

@ -28,10 +28,13 @@ import (
"github.com/stretchr/testify/suite"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/constants"
"d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/supervisor/mock"
"d7y.io/dragonfly/v2/cdn/types"
_ "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/disk"
progressMock "d7y.io/dragonfly/v2/cdn/supervisor/mocks/progress"
taskMock "d7y.io/dragonfly/v2/cdn/supervisor/mocks/task"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/internal/idgen"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/source"
@ -48,7 +51,7 @@ func TestCDNManagerSuite(t *testing.T) {
type CDNManagerTestSuite struct {
workHome string
cm *Manager
cm Manager
suite.Suite
}
@ -56,21 +59,24 @@ func (suite *CDNManagerTestSuite) SetupSuite() {
suite.workHome, _ = os.MkdirTemp("/tmp", "cdn-ManagerTestSuite-")
fmt.Printf("workHome: %s", suite.workHome)
suite.Nil(plugins.Initialize(NewPlugins(suite.workHome)))
storeMgr, ok := storage.Get(config.DefaultStorageMode)
storeMgr, ok := storage.Get(constants.DefaultStorageMode)
if !ok {
suite.Failf("failed to get storage mode %s", config.DefaultStorageMode)
suite.Failf("failed to get storage mode %s", constants.DefaultStorageMode)
}
ctrl := gomock.NewController(suite.T())
progressMgr := mock.NewMockSeedProgressMgr(ctrl)
progressMgr.EXPECT().PublishPiece(gomock.Any(), md5TaskID, gomock.Any()).Return(nil).Times(98 * 2)
progressMgr.EXPECT().PublishPiece(gomock.Any(), sha256TaskID, gomock.Any()).Return(nil).Times(98 * 2)
suite.cm, _ = newManager(config.New(), storeMgr, progressMgr)
taskManager := taskMock.NewMockManager(ctrl)
progressManager := progressMock.NewMockManager(ctrl)
progressManager.EXPECT().PublishPiece(gomock.Any(), md5TaskID, gomock.Any()).Return(nil).Times(98 * 2)
progressManager.EXPECT().PublishPiece(gomock.Any(), sha256TaskID, gomock.Any()).Return(nil).Times(98 * 2)
progressManager.EXPECT().PublishTask(gomock.Any(), md5TaskID, gomock.Any()).Return(nil).Times(2)
progressManager.EXPECT().PublishTask(gomock.Any(), sha256TaskID, gomock.Any()).Return(nil).Times(2)
suite.cm, _ = newManager(config.New(), storeMgr, progressManager, taskManager)
}
var (
dragonflyURL = "http://dragonfly.io.com?a=a&b=b&c=c"
md5TaskID = idgen.TaskID(dragonflyURL, &base.UrlMeta{Digest: "md5:f1e2488bba4d1267948d9e2f7008571c", Tag: "dragonfly", Filter: "a&b"})
sha256TaskID = idgen.TaskID(dragonflyURL, &base.UrlMeta{Digest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5", Tag: "dragonfly", Filter: "a&b"})
dragonflyRawURL = "http://dragonfly.io.com?a=a&b=b&c=c"
md5TaskID = idgen.TaskID(dragonflyRawURL, &base.UrlMeta{Digest: "md5:f1e2488bba4d1267948d9e2f7008571c", Tag: "dragonfly", Filter: "a&b"})
sha256TaskID = idgen.TaskID(dragonflyRawURL, &base.UrlMeta{Digest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5", Tag: "dragonfly", Filter: "a&b"})
)
func (suite *CDNManagerTestSuite) TearDownSuite() {
@ -133,67 +139,67 @@ func (suite *CDNManagerTestSuite) TestTriggerCDN() {
tests := []struct {
name string
sourceTask *types.SeedTask
targetTask *types.SeedTask
sourceTask *task.SeedTask
targetTask *task.SeedTask
}{
{
name: "trigger_md5",
sourceTask: &types.SeedTask{
TaskID: md5TaskID,
URL: dragonflyURL,
TaskURL: urlutils.FilterURLParam(dragonflyURL, []string{"a", "b"}),
sourceTask: &task.SeedTask{
ID: md5TaskID,
RawURL: dragonflyRawURL,
TaskURL: urlutils.FilterURLParam(dragonflyRawURL, []string{"a", "b"}),
SourceFileLength: 9789,
CdnFileLength: 0,
PieceSize: 100,
Header: map[string]string{"md5": "f1e2488bba4d1267948d9e2f7008571c"},
CdnStatus: types.TaskInfoCdnStatusRunning,
PieceTotal: 0,
RequestDigest: "md5:f1e2488bba4d1267948d9e2f7008571c",
CdnStatus: task.StatusRunning,
TotalPieceCount: 98,
Digest: "md5:f1e2488bba4d1267948d9e2f7008571c",
SourceRealDigest: "",
PieceMd5Sign: "",
},
targetTask: &types.SeedTask{
TaskID: md5TaskID,
URL: dragonflyURL,
TaskURL: urlutils.FilterURLParam(dragonflyURL, []string{"a", "b"}),
targetTask: &task.SeedTask{
ID: md5TaskID,
RawURL: dragonflyRawURL,
TaskURL: urlutils.FilterURLParam(dragonflyRawURL, []string{"a", "b"}),
SourceFileLength: 9789,
CdnFileLength: 9789,
PieceSize: 100,
Header: map[string]string{"md5": "f1e2488bba4d1267948d9e2f7008571c"},
CdnStatus: types.TaskInfoCdnStatusSuccess,
PieceTotal: 0,
RequestDigest: "md5:f1e2488bba4d1267948d9e2f7008571c",
CdnStatus: task.StatusSuccess,
TotalPieceCount: 98,
Digest: "md5:f1e2488bba4d1267948d9e2f7008571c",
SourceRealDigest: "md5:f1e2488bba4d1267948d9e2f7008571c",
PieceMd5Sign: "bb138842f338fff90af737e4a6b2c6f8e2a7031ca9d5900bc9b646f6406d890f",
},
},
{
name: "trigger_sha256",
sourceTask: &types.SeedTask{
TaskID: sha256TaskID,
URL: dragonflyURL,
TaskURL: urlutils.FilterURLParam(dragonflyURL, []string{"a", "b"}),
sourceTask: &task.SeedTask{
ID: sha256TaskID,
RawURL: dragonflyRawURL,
TaskURL: urlutils.FilterURLParam(dragonflyRawURL, []string{"a", "b"}),
SourceFileLength: 9789,
CdnFileLength: 0,
PieceSize: 100,
Header: map[string]string{"sha256": "b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5"},
CdnStatus: types.TaskInfoCdnStatusRunning,
PieceTotal: 0,
RequestDigest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5",
CdnStatus: task.StatusRunning,
TotalPieceCount: 98,
Digest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5",
SourceRealDigest: "",
PieceMd5Sign: "",
},
targetTask: &types.SeedTask{
TaskID: sha256TaskID,
URL: dragonflyURL,
TaskURL: urlutils.FilterURLParam(dragonflyURL, []string{"a", "b"}),
targetTask: &task.SeedTask{
ID: sha256TaskID,
RawURL: dragonflyRawURL,
TaskURL: urlutils.FilterURLParam(dragonflyRawURL, []string{"a", "b"}),
SourceFileLength: 9789,
CdnFileLength: 9789,
PieceSize: 100,
Header: map[string]string{"sha256": "b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5"},
CdnStatus: types.TaskInfoCdnStatusSuccess,
PieceTotal: 0,
RequestDigest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5",
CdnStatus: task.StatusSuccess,
TotalPieceCount: 98,
Digest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5",
SourceRealDigest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5",
PieceMd5Sign: "bb138842f338fff90af737e4a6b2c6f8e2a7031ca9d5900bc9b646f6406d890f",
},
@ -204,10 +210,10 @@ func (suite *CDNManagerTestSuite) TestTriggerCDN() {
suite.Run(tt.name, func() {
gotSeedTask, err := suite.cm.TriggerCDN(context.Background(), tt.sourceTask)
suite.Nil(err)
suite.Equal(tt.targetTask, gotSeedTask)
suite.True(task.IsEqual(*tt.targetTask, *gotSeedTask))
cacheSeedTask, err := suite.cm.TriggerCDN(context.Background(), gotSeedTask)
suite.Nil(err)
suite.Equal(tt.targetTask, cacheSeedTask)
suite.True(task.IsEqual(*tt.targetTask, *cacheSeedTask))
})
}

View File

@ -17,15 +17,12 @@
package cdn
import (
"fmt"
"io"
"sort"
"github.com/pkg/errors"
"d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/synclock"
"d7y.io/dragonfly/v2/pkg/util/digestutils"
@ -33,42 +30,46 @@ import (
"d7y.io/dragonfly/v2/pkg/util/timeutils"
)
// cacheDataManager manages the meta file and piece meta file of each TaskId.
type cacheDataManager struct {
// metadataManager manages the meta file and piece meta file of each TaskID.
type metadataManager struct {
storage storage.Manager
cacheLocker *synclock.LockerPool
}
func newCacheDataManager(storeMgr storage.Manager) *cacheDataManager {
return &cacheDataManager{
storeMgr,
func newMetadataManager(storageManager storage.Manager) *metadataManager {
return &metadataManager{
storageManager,
synclock.NewLockerPool(),
}
}
// writeFileMetadataByTask stores the metadata of task by task to storage.
func (mm *cacheDataManager) writeFileMetadataByTask(task *types.SeedTask) (*storage.FileMetadata, error) {
mm.cacheLocker.Lock(task.TaskID, false)
defer mm.cacheLocker.UnLock(task.TaskID, false)
// writeFileMetadataByTask stores metadata of task
func (mm *metadataManager) writeFileMetadataByTask(seedTask *task.SeedTask) (*storage.FileMetadata, error) {
mm.cacheLocker.Lock(seedTask.ID, false)
defer mm.cacheLocker.UnLock(seedTask.ID, false)
metadata := &storage.FileMetadata{
TaskID: task.TaskID,
TaskURL: task.TaskURL,
PieceSize: task.PieceSize,
SourceFileLen: task.SourceFileLength,
TaskID: seedTask.ID,
TaskURL: seedTask.TaskURL,
PieceSize: seedTask.PieceSize,
SourceFileLen: seedTask.SourceFileLength,
AccessTime: getCurrentTimeMillisFunc(),
CdnFileLength: task.CdnFileLength,
TotalPieceCount: task.PieceTotal,
CdnFileLength: seedTask.CdnFileLength,
Digest: seedTask.Digest,
Tag: seedTask.Tag,
TotalPieceCount: seedTask.TotalPieceCount,
Range: seedTask.Range,
Filter: seedTask.Filter,
}
if err := mm.storage.WriteFileMetadata(task.TaskID, metadata); err != nil {
return nil, errors.Wrapf(err, "write task %s metadata file", task.TaskID)
if err := mm.storage.WriteFileMetadata(seedTask.ID, metadata); err != nil {
return nil, errors.Wrapf(err, "write task metadata file")
}
return metadata, nil
}
// updateAccessTime update access and interval
func (mm *cacheDataManager) updateAccessTime(taskID string, accessTime int64) error {
func (mm *metadataManager) updateAccessTime(taskID string, accessTime int64) error {
mm.cacheLocker.Lock(taskID, false)
defer mm.cacheLocker.UnLock(taskID, false)
@ -89,7 +90,7 @@ func (mm *cacheDataManager) updateAccessTime(taskID string, accessTime int64) er
return mm.storage.WriteFileMetadata(taskID, originMetadata)
}
func (mm *cacheDataManager) updateExpireInfo(taskID string, expireInfo map[string]string) error {
func (mm *metadataManager) updateExpireInfo(taskID string, expireInfo map[string]string) error {
mm.cacheLocker.Lock(taskID, false)
defer mm.cacheLocker.UnLock(taskID, false)
@ -103,7 +104,7 @@ func (mm *cacheDataManager) updateExpireInfo(taskID string, expireInfo map[strin
return mm.storage.WriteFileMetadata(taskID, originMetadata)
}
func (mm *cacheDataManager) updateStatusAndResult(taskID string, metadata *storage.FileMetadata) error {
func (mm *metadataManager) updateStatusAndResult(taskID string, metadata *storage.FileMetadata) error {
mm.cacheLocker.Lock(taskID, false)
defer mm.cacheLocker.UnLock(taskID, false)
@ -130,8 +131,12 @@ func (mm *cacheDataManager) updateStatusAndResult(taskID string, metadata *stora
return mm.storage.WriteFileMetadata(taskID, originMetadata)
}
func (mm *metadataManager) readFileMetadata(taskID string) (*storage.FileMetadata, error) {
return mm.storage.ReadFileMetadata(taskID)
}
// appendPieceMetadata append piece meta info to storage
func (mm *cacheDataManager) appendPieceMetadata(taskID string, record *storage.PieceMetaRecord) error {
func (mm *metadataManager) appendPieceMetadata(taskID string, record *storage.PieceMetaRecord) error {
mm.cacheLocker.Lock(taskID, false)
defer mm.cacheLocker.UnLock(taskID, false)
// write to the storage
@ -139,36 +144,31 @@ func (mm *cacheDataManager) appendPieceMetadata(taskID string, record *storage.P
}
// appendPieceMetadata append piece meta info to storage
func (mm *cacheDataManager) writePieceMetaRecords(taskID string, records []*storage.PieceMetaRecord) error {
func (mm *metadataManager) writePieceMetaRecords(taskID string, records []*storage.PieceMetaRecord) error {
mm.cacheLocker.Lock(taskID, false)
defer mm.cacheLocker.UnLock(taskID, false)
// write to the storage
return mm.storage.WritePieceMetaRecords(taskID, records)
}
// readAndCheckPieceMetaRecords reads pieceMetaRecords from storage and check data integrity by the md5 file of the TaskId
func (mm *cacheDataManager) readAndCheckPieceMetaRecords(taskID, pieceMd5Sign string) ([]*storage.PieceMetaRecord, error) {
// readPieceMetaRecords reads pieceMetaRecords from storage and without check data integrity
func (mm *metadataManager) readPieceMetaRecords(taskID string) ([]*storage.PieceMetaRecord, error) {
mm.cacheLocker.Lock(taskID, true)
defer mm.cacheLocker.UnLock(taskID, true)
md5Sign, pieceMetaRecords, err := mm.getPieceMd5Sign(taskID)
pieceMetaRecords, err := mm.storage.ReadPieceMetaRecords(taskID)
if err != nil {
return nil, err
}
if md5Sign != pieceMd5Sign {
return nil, fmt.Errorf("check piece meta data integrity fail, expectMd5Sign: %s, actualMd5Sign: %s",
pieceMd5Sign, md5Sign)
return nil, errors.Wrapf(err, "read piece meta file")
}
// sort piece meta records by pieceNum
sort.Slice(pieceMetaRecords, func(i, j int) bool {
return pieceMetaRecords[i].PieceNum < pieceMetaRecords[j].PieceNum
})
return pieceMetaRecords, nil
}
// readPieceMetaRecords reads pieceMetaRecords from storage and without check data integrity
func (mm *cacheDataManager) readPieceMetaRecords(taskID string) ([]*storage.PieceMetaRecord, error) {
func (mm *metadataManager) getPieceMd5Sign(taskID string) (string, []*storage.PieceMetaRecord, error) {
mm.cacheLocker.Lock(taskID, true)
defer mm.cacheLocker.UnLock(taskID, true)
return mm.storage.ReadPieceMetaRecords(taskID)
}
func (mm *cacheDataManager) getPieceMd5Sign(taskID string) (string, []*storage.PieceMetaRecord, error) {
pieceMetaRecords, err := mm.storage.ReadPieceMetaRecords(taskID)
if err != nil {
return "", nil, errors.Wrapf(err, "read piece meta file")
@ -182,29 +182,3 @@ func (mm *cacheDataManager) getPieceMd5Sign(taskID string) (string, []*storage.P
}
return digestutils.Sha256(pieceMd5...), pieceMetaRecords, nil
}
func (mm *cacheDataManager) readFileMetadata(taskID string) (*storage.FileMetadata, error) {
fileMeta, err := mm.storage.ReadFileMetadata(taskID)
if err != nil {
return nil, errors.Wrapf(err, "read file metadata of task %s from storage", taskID)
}
return fileMeta, nil
}
func (mm *cacheDataManager) statDownloadFile(taskID string) (*storedriver.StorageInfo, error) {
return mm.storage.StatDownloadFile(taskID)
}
func (mm *cacheDataManager) readDownloadFile(taskID string) (io.ReadCloser, error) {
return mm.storage.ReadDownloadFile(taskID)
}
func (mm *cacheDataManager) resetRepo(task *types.SeedTask) error {
mm.cacheLocker.Lock(task.TaskID, false)
defer mm.cacheLocker.UnLock(task.TaskID, false)
return mm.storage.ResetRepo(task)
}
func (mm *cacheDataManager) writeDownloadFile(taskID string, offset int64, len int64, data io.Reader) error {
return mm.storage.WriteDownloadFile(taskID, offset, len, data)
}

View File

@ -22,14 +22,16 @@ import (
"github.com/pkg/errors"
"go.uber.org/zap"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/cdn/supervisor/progress"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc/base"
)
type reporter struct {
progress supervisor.SeedProgressMgr
progressManager progress.Manager
taskManager task.Manager
}
const (
@ -37,17 +39,17 @@ const (
DownloaderReport = "download"
)
func newReporter(publisher supervisor.SeedProgressMgr) *reporter {
func newReporter(publisher progress.Manager) *reporter {
return &reporter{
progress: publisher,
progressManager: publisher,
}
}
// report cache result
func (re *reporter) reportCache(ctx context.Context, taskID string, detectResult *cacheResult) error {
// reportDetectResult report detect cache result
func (re *reporter) reportDetectResult(ctx context.Context, taskID string, detectResult *cacheResult) error {
// report cache pieces status
if detectResult != nil && detectResult.pieceMetaRecords != nil {
for _, record := range detectResult.pieceMetaRecords {
if detectResult != nil && detectResult.PieceMetaRecords != nil {
for _, record := range detectResult.PieceMetaRecords {
if err := re.reportPieceMetaRecord(ctx, taskID, record, CacheReport); err != nil {
return errors.Wrapf(err, "publish pieceMetaRecord: %v, seedPiece: %v", record,
convertPieceMeta2SeedPiece(record))
@ -57,24 +59,23 @@ func (re *reporter) reportCache(ctx context.Context, taskID string, detectResult
return nil
}
// reportPieceMetaRecord
func (re *reporter) reportPieceMetaRecord(ctx context.Context, taskID string, record *storage.PieceMetaRecord,
from string) error {
// report cache pieces status
// reportPieceMetaRecord report piece meta record
func (re *reporter) reportPieceMetaRecord(ctx context.Context, taskID string, record *storage.PieceMetaRecord, from string) error {
// report cache piece status
logger.DownloaderLogger.Info(taskID,
zap.Uint32("pieceNum", record.PieceNum),
zap.String("md5", record.Md5),
zap.String("from", from))
return re.progress.PublishPiece(ctx, taskID, convertPieceMeta2SeedPiece(record))
return re.progressManager.PublishPiece(ctx, taskID, convertPieceMeta2SeedPiece(record))
}
/*
helper functions
*/
func convertPieceMeta2SeedPiece(record *storage.PieceMetaRecord) *types.SeedPiece {
return &types.SeedPiece{
PieceStyle: record.PieceStyle,
PieceNum: uint32(record.PieceNum),
func convertPieceMeta2SeedPiece(record *storage.PieceMetaRecord) *task.PieceInfo {
return &task.PieceInfo{
PieceStyle: base.PieceStyle(record.PieceStyle),
PieceNum: record.PieceNum,
PieceMd5: record.Md5,
PieceRange: record.Range,
OriginRange: record.OriginRange,

View File

@ -29,13 +29,12 @@ import (
"github.com/sirupsen/logrus"
"go.uber.org/atomic"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/gc"
"d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/storedriver/local"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/supervisor/gc"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/synclock"
"d7y.io/dragonfly/v2/pkg/unit"
@ -45,8 +44,8 @@ import (
const StorageMode = storage.DiskStorageMode
var (
_ gc.Executor = (*diskStorageMgr)(nil)
_ storage.Manager = (*diskStorageMgr)(nil)
_ gc.Executor = (*diskStorageManager)(nil)
_ storage.Manager = (*diskStorageManager)(nil)
)
func init() {
@ -55,7 +54,7 @@ func init() {
}
}
func newStorageManager(cfg *storage.Config) (storage.Manager, error) {
func newStorageManager(cfg *config.StorageConfig) (storage.Manager, error) {
if len(cfg.DriverConfigs) != 1 {
return nil, fmt.Errorf("disk storage manager should have only one disk driver, cfg's driver number is wrong config: %v", cfg)
}
@ -64,22 +63,22 @@ func newStorageManager(cfg *storage.Config) (storage.Manager, error) {
return nil, fmt.Errorf("can not find disk driver for disk storage manager, config is %#v", cfg)
}
storageMgr := &diskStorageMgr{
storageManager := &diskStorageManager{
cfg: cfg,
diskDriver: diskDriver,
}
gc.Register("diskStorage", cfg.GCInitialDelay, cfg.GCInterval, storageMgr)
return storageMgr, nil
gc.Register("diskStorage", cfg.GCInitialDelay, cfg.GCInterval, storageManager)
return storageManager, nil
}
type diskStorageMgr struct {
cfg *storage.Config
type diskStorageManager struct {
cfg *config.StorageConfig
diskDriver storedriver.Driver
cleaner *storage.Cleaner
taskMgr supervisor.SeedTaskMgr
cleaner storage.Cleaner
taskManager task.Manager
}
func (s *diskStorageMgr) getDefaultGcConfig() *storage.GCConfig {
func (s *diskStorageManager) getDefaultGcConfig() *config.GCConfig {
totalSpace, err := s.diskDriver.GetTotalSpace()
if err != nil {
logger.GcLogger.With("type", "disk").Errorf("get total space of disk: %v", err)
@ -88,7 +87,7 @@ func (s *diskStorageMgr) getDefaultGcConfig() *storage.GCConfig {
if totalSpace > 0 && totalSpace/4 < yongGcThreshold {
yongGcThreshold = totalSpace / 4
}
return &storage.GCConfig{
return &config.GCConfig{
YoungGCThreshold: yongGcThreshold,
FullGCThreshold: 25 * unit.GB,
IntervalThreshold: 2 * time.Hour,
@ -96,21 +95,21 @@ func (s *diskStorageMgr) getDefaultGcConfig() *storage.GCConfig {
}
}
func (s *diskStorageMgr) Initialize(taskMgr supervisor.SeedTaskMgr) {
s.taskMgr = taskMgr
func (s *diskStorageManager) Initialize(taskManager task.Manager) {
s.taskManager = taskManager
diskGcConfig := s.cfg.DriverConfigs[local.DiskDriverName].GCConfig
if diskGcConfig == nil {
diskGcConfig = s.getDefaultGcConfig()
logger.GcLogger.With("type", "disk").Warnf("disk gc config is nil, use default gcConfig: %v", diskGcConfig)
}
s.cleaner, _ = storage.NewStorageCleaner(diskGcConfig, s.diskDriver, s, taskMgr)
s.cleaner, _ = storage.NewStorageCleaner(diskGcConfig, s.diskDriver, s, taskManager)
}
func (s *diskStorageMgr) AppendPieceMetadata(taskID string, pieceRecord *storage.PieceMetaRecord) error {
func (s *diskStorageManager) AppendPieceMetadata(taskID string, pieceRecord *storage.PieceMetaRecord) error {
return s.diskDriver.PutBytes(storage.GetAppendPieceMetadataRaw(taskID), []byte(pieceRecord.String()+"\n"))
}
func (s *diskStorageMgr) ReadPieceMetaRecords(taskID string) ([]*storage.PieceMetaRecord, error) {
func (s *diskStorageManager) ReadPieceMetaRecords(taskID string) ([]*storage.PieceMetaRecord, error) {
readBytes, err := s.diskDriver.GetBytes(storage.GetPieceMetadataRaw(taskID))
if err != nil {
return nil, err
@ -127,7 +126,7 @@ func (s *diskStorageMgr) ReadPieceMetaRecords(taskID string) ([]*storage.PieceMe
return result, nil
}
func (s *diskStorageMgr) GC() error {
func (s *diskStorageManager) GC() error {
logger.GcLogger.With("type", "disk").Info("start the disk storage gc job")
gcTaskIDs, err := s.cleaner.GC("disk", false)
if err != nil {
@ -137,7 +136,7 @@ func (s *diskStorageMgr) GC() error {
for _, taskID := range gcTaskIDs {
synclock.Lock(taskID, false)
// try to ensure the taskID is not using again
if _, exist := s.taskMgr.Exist(taskID); exist {
if _, exist := s.taskManager.Exist(taskID); exist {
synclock.UnLock(taskID, false)
continue
}
@ -153,14 +152,14 @@ func (s *diskStorageMgr) GC() error {
return nil
}
func (s *diskStorageMgr) WriteDownloadFile(taskID string, offset int64, len int64, data io.Reader) error {
func (s *diskStorageManager) WriteDownloadFile(taskID string, offset int64, len int64, data io.Reader) error {
raw := storage.GetDownloadRaw(taskID)
raw.Offset = offset
raw.Length = len
return s.diskDriver.Put(raw, data)
}
func (s *diskStorageMgr) ReadFileMetadata(taskID string) (*storage.FileMetadata, error) {
func (s *diskStorageManager) ReadFileMetadata(taskID string) (*storage.FileMetadata, error) {
bytes, err := s.diskDriver.GetBytes(storage.GetTaskMetadataRaw(taskID))
if err != nil {
return nil, errors.Wrapf(err, "get metadata bytes")
@ -173,7 +172,7 @@ func (s *diskStorageMgr) ReadFileMetadata(taskID string) (*storage.FileMetadata,
return metadata, nil
}
func (s *diskStorageMgr) WriteFileMetadata(taskID string, metadata *storage.FileMetadata) error {
func (s *diskStorageManager) WriteFileMetadata(taskID string, metadata *storage.FileMetadata) error {
data, err := json.Marshal(metadata)
if err != nil {
return errors.Wrapf(err, "marshal metadata")
@ -181,7 +180,7 @@ func (s *diskStorageMgr) WriteFileMetadata(taskID string, metadata *storage.File
return s.diskDriver.PutBytes(storage.GetTaskMetadataRaw(taskID), data)
}
func (s *diskStorageMgr) WritePieceMetaRecords(taskID string, records []*storage.PieceMetaRecord) error {
func (s *diskStorageManager) WritePieceMetaRecords(taskID string, records []*storage.PieceMetaRecord) error {
recordStrs := make([]string, 0, len(records))
for i := range records {
recordStrs = append(recordStrs, records[i].String())
@ -192,19 +191,19 @@ func (s *diskStorageMgr) WritePieceMetaRecords(taskID string, records []*storage
return s.diskDriver.PutBytes(pieceRaw, []byte(strings.Join(recordStrs, "\n")))
}
func (s *diskStorageMgr) ReadPieceMetaBytes(taskID string) ([]byte, error) {
func (s *diskStorageManager) ReadPieceMetaBytes(taskID string) ([]byte, error) {
return s.diskDriver.GetBytes(storage.GetPieceMetadataRaw(taskID))
}
func (s *diskStorageMgr) ReadDownloadFile(taskID string) (io.ReadCloser, error) {
func (s *diskStorageManager) ReadDownloadFile(taskID string) (io.ReadCloser, error) {
return s.diskDriver.Get(storage.GetDownloadRaw(taskID))
}
func (s *diskStorageMgr) StatDownloadFile(taskID string) (*storedriver.StorageInfo, error) {
func (s *diskStorageManager) StatDownloadFile(taskID string) (*storedriver.StorageInfo, error) {
return s.diskDriver.Stat(storage.GetDownloadRaw(taskID))
}
func (s *diskStorageMgr) CreateUploadLink(taskID string) error {
func (s *diskStorageManager) CreateUploadLink(taskID string) error {
// create a soft link from the upload file to the download file
if err := fileutils.SymbolicLink(s.diskDriver.GetPath(storage.GetDownloadRaw(taskID)),
s.diskDriver.GetPath(storage.GetUploadRaw(taskID))); err != nil {
@ -213,31 +212,31 @@ func (s *diskStorageMgr) CreateUploadLink(taskID string) error {
return nil
}
func (s *diskStorageMgr) DeleteTask(taskID string) error {
if err := s.diskDriver.Remove(storage.GetTaskMetadataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) {
func (s *diskStorageManager) DeleteTask(taskID string) error {
if err := s.diskDriver.Remove(storage.GetTaskMetadataRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err
}
if err := s.diskDriver.Remove(storage.GetPieceMetadataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) {
if err := s.diskDriver.Remove(storage.GetPieceMetadataRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err
}
if err := s.diskDriver.Remove(storage.GetDownloadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) {
if err := s.diskDriver.Remove(storage.GetDownloadRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err
}
if err := s.diskDriver.Remove(storage.GetUploadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) {
if err := s.diskDriver.Remove(storage.GetUploadRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err
}
// try to clean the parent bucket
if err := s.diskDriver.Remove(storage.GetParentRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) {
if err := s.diskDriver.Remove(storage.GetParentRaw(taskID)); err != nil && !os.IsNotExist(err) {
logrus.Warnf("taskID: %s failed remove parent bucket: %v", taskID, err)
}
return nil
}
func (s *diskStorageMgr) ResetRepo(task *types.SeedTask) error {
return s.DeleteTask(task.TaskID)
func (s *diskStorageManager) ResetRepo(task *task.SeedTask) error {
return s.DeleteTask(task.ID)
}
func (s *diskStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
func (s *diskStorageManager) TryFreeSpace(fileLength int64) (bool, error) {
freeSpace, err := s.diskDriver.GetFreeSpace()
if err != nil {
return false, err
@ -251,7 +250,7 @@ func (s *diskStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
WalkFn: func(filePath string, info os.FileInfo, err error) error {
if fileutils.IsRegular(filePath) {
taskID := strings.Split(path.Base(filePath), ".")[0]
task, exist := s.taskMgr.Exist(taskID)
task, exist := s.taskManager.Exist(taskID)
if exist {
var totalLen int64 = 0
if task.CdnFileLength > 0 {

View File

@ -25,7 +25,7 @@ import (
"d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/supervisor/mock"
taskMock "d7y.io/dragonfly/v2/cdn/supervisor/mocks/task"
"d7y.io/dragonfly/v2/pkg/unit"
)
@ -34,20 +34,20 @@ func TestDiskStorageMgrSuite(t *testing.T) {
}
type DiskStorageMgrSuite struct {
m *diskStorageMgr
m *diskStorageManager
suite.Suite
}
func (suite *DiskStorageMgrSuite) TestTryFreeSpace() {
ctrl := gomock.NewController(suite.T())
diskDriver := storedriver.NewMockDriver(ctrl)
taskMgr := mock.NewMockSeedTaskMgr(ctrl)
suite.m = &diskStorageMgr{
taskManager := taskMock.NewMockManager(ctrl)
suite.m = &diskStorageManager{
diskDriver: diskDriver,
taskMgr: taskMgr,
taskManager: taskManager,
}
diskDriver.EXPECT().GetTotalSpace().Return(100*unit.GB, nil)
cleaner, _ := storage.NewStorageCleaner(suite.m.getDefaultGcConfig(), diskDriver, suite.m, taskMgr)
cleaner, _ := storage.NewStorageCleaner(suite.m.getDefaultGcConfig(), diskDriver, suite.m, taskManager)
suite.m.cleaner = cleaner
tests := []struct {

View File

@ -29,13 +29,12 @@ import (
"github.com/pkg/errors"
"go.uber.org/atomic"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/gc"
"d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/storedriver/local"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/supervisor/gc"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/synclock"
"d7y.io/dragonfly/v2/pkg/unit"
@ -46,8 +45,8 @@ const StorageMode = storage.HybridStorageMode
const secureLevel = 500 * unit.MB
var _ storage.Manager = (*hybridStorageMgr)(nil)
var _ gc.Executor = (*hybridStorageMgr)(nil)
var _ storage.Manager = (*hybridStorageManager)(nil)
var _ gc.Executor = (*hybridStorageManager)(nil)
func init() {
if err := storage.Register(StorageMode, newStorageManager); err != nil {
@ -56,7 +55,7 @@ func init() {
}
// NewStorageManager performs initialization for storage manager and return a storage Manager.
func newStorageManager(cfg *storage.Config) (storage.Manager, error) {
func newStorageManager(cfg *config.StorageConfig) (storage.Manager, error) {
if len(cfg.DriverConfigs) != 2 {
return nil, fmt.Errorf("disk storage manager should have two driver, cfg's driver number is wrong : %v", cfg)
}
@ -68,36 +67,36 @@ func newStorageManager(cfg *storage.Config) (storage.Manager, error) {
if !ok {
return nil, fmt.Errorf("can not find memory driver for hybrid storage manager, config %v", cfg)
}
storageMgr := &hybridStorageMgr{
storageManager := &hybridStorageManager{
cfg: cfg,
memoryDriver: memoryDriver,
diskDriver: diskDriver,
hasShm: true,
shmSwitch: newShmSwitch(),
}
gc.Register("hybridStorage", cfg.GCInitialDelay, cfg.GCInterval, storageMgr)
return storageMgr, nil
gc.Register("hybridStorage", cfg.GCInitialDelay, cfg.GCInterval, storageManager)
return storageManager, nil
}
func (h *hybridStorageMgr) Initialize(taskMgr supervisor.SeedTaskMgr) {
h.taskMgr = taskMgr
func (h *hybridStorageManager) Initialize(taskManager task.Manager) {
h.taskManager = taskManager
diskGcConfig := h.cfg.DriverConfigs[local.DiskDriverName].GCConfig
if diskGcConfig == nil {
diskGcConfig = h.getDiskDefaultGcConfig()
logger.GcLogger.With("type", "hybrid").Warnf("disk gc config is nil, use default gcConfig: %v", diskGcConfig)
}
h.diskDriverCleaner, _ = storage.NewStorageCleaner(diskGcConfig, h.diskDriver, h, taskMgr)
h.diskDriverCleaner, _ = storage.NewStorageCleaner(diskGcConfig, h.diskDriver, h, taskManager)
memoryGcConfig := h.cfg.DriverConfigs[local.MemoryDriverName].GCConfig
if memoryGcConfig == nil {
memoryGcConfig = h.getMemoryDefaultGcConfig()
logger.GcLogger.With("type", "hybrid").Warnf("memory gc config is nil, use default gcConfig: %v", diskGcConfig)
}
h.memoryDriverCleaner, _ = storage.NewStorageCleaner(memoryGcConfig, h.memoryDriver, h, taskMgr)
h.memoryDriverCleaner, _ = storage.NewStorageCleaner(memoryGcConfig, h.memoryDriver, h, taskManager)
logger.GcLogger.With("type", "hybrid").Info("success initialize hybrid cleaners")
}
func (h *hybridStorageMgr) getDiskDefaultGcConfig() *storage.GCConfig {
func (h *hybridStorageManager) getDiskDefaultGcConfig() *config.GCConfig {
totalSpace, err := h.diskDriver.GetTotalSpace()
if err != nil {
logger.GcLogger.With("type", "hybrid").Errorf("failed to get total space of disk: %v", err)
@ -106,7 +105,7 @@ func (h *hybridStorageMgr) getDiskDefaultGcConfig() *storage.GCConfig {
if totalSpace > 0 && totalSpace/4 < yongGcThreshold {
yongGcThreshold = totalSpace / 4
}
return &storage.GCConfig{
return &config.GCConfig{
YoungGCThreshold: yongGcThreshold,
FullGCThreshold: 25 * unit.GB,
IntervalThreshold: 2 * time.Hour,
@ -114,7 +113,7 @@ func (h *hybridStorageMgr) getDiskDefaultGcConfig() *storage.GCConfig {
}
}
func (h *hybridStorageMgr) getMemoryDefaultGcConfig() *storage.GCConfig {
func (h *hybridStorageManager) getMemoryDefaultGcConfig() *config.GCConfig {
// determine whether the shared cache can be used
diff := unit.Bytes(0)
totalSpace, err := h.memoryDriver.GetTotalSpace()
@ -127,7 +126,7 @@ func (h *hybridStorageMgr) getMemoryDefaultGcConfig() *storage.GCConfig {
if diff >= totalSpace {
h.hasShm = false
}
return &storage.GCConfig{
return &config.GCConfig{
YoungGCThreshold: 10*unit.GB + diff,
FullGCThreshold: 2*unit.GB + diff,
CleanRatio: 3,
@ -135,90 +134,34 @@ func (h *hybridStorageMgr) getMemoryDefaultGcConfig() *storage.GCConfig {
}
}
type hybridStorageMgr struct {
cfg *storage.Config
type hybridStorageManager struct {
cfg *config.StorageConfig
memoryDriver storedriver.Driver
diskDriver storedriver.Driver
diskDriverCleaner *storage.Cleaner
memoryDriverCleaner *storage.Cleaner
taskMgr supervisor.SeedTaskMgr
diskDriverCleaner storage.Cleaner
memoryDriverCleaner storage.Cleaner
taskManager task.Manager
shmSwitch *shmSwitch
// whether enable shm
hasShm bool
}
func (h *hybridStorageMgr) GC() error {
logger.GcLogger.With("type", "hybrid").Info("start the hybrid storage gc job")
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
gcTaskIDs, err := h.diskDriverCleaner.GC("hybrid", false)
if err != nil {
logger.GcLogger.With("type", "hybrid").Error("gc disk: failed to get gcTaskIds")
}
realGCCount := h.gcTasks(gcTaskIDs, true)
logger.GcLogger.With("type", "hybrid").Infof("at most %d tasks can be cleaned up from disk, actual gc %d tasks", len(gcTaskIDs), realGCCount)
}()
if h.hasShm {
wg.Add(1)
go func() {
defer wg.Done()
gcTaskIDs, err := h.memoryDriverCleaner.GC("hybrid", false)
logger.GcLogger.With("type", "hybrid").Infof("at most %d tasks can be cleaned up from memory", len(gcTaskIDs))
if err != nil {
logger.GcLogger.With("type", "hybrid").Error("gc memory: failed to get gcTaskIds")
}
h.gcTasks(gcTaskIDs, false)
}()
}
wg.Wait()
return nil
}
func (h *hybridStorageMgr) gcTasks(gcTaskIDs []string, isDisk bool) int {
var realGCCount int
for _, taskID := range gcTaskIDs {
synclock.Lock(taskID, false)
// try to ensure the taskID is not using again
if _, exist := h.taskMgr.Exist(taskID); exist {
synclock.UnLock(taskID, false)
continue
}
realGCCount++
if isDisk {
if err := h.deleteDiskFiles(taskID); err != nil {
logger.GcLogger.With("type", "hybrid").Errorf("gc disk: failed to delete disk files with taskID(%s): %v", taskID, err)
synclock.UnLock(taskID, false)
continue
}
} else {
if err := h.deleteMemoryFiles(taskID); err != nil {
logger.GcLogger.With("type", "hybrid").Errorf("gc memory: failed to delete memory files with taskID(%s): %v", taskID, err)
synclock.UnLock(taskID, false)
continue
}
}
synclock.UnLock(taskID, false)
}
return realGCCount
}
func (h *hybridStorageMgr) WriteDownloadFile(taskID string, offset int64, len int64, data io.Reader) error {
func (h *hybridStorageManager) WriteDownloadFile(taskID string, offset int64, len int64, data io.Reader) error {
raw := storage.GetDownloadRaw(taskID)
raw.Offset = offset
raw.Length = len
return h.diskDriver.Put(raw, data)
}
func (h *hybridStorageMgr) DeleteTask(taskID string) error {
return h.deleteTaskFiles(taskID, true, true)
func (h *hybridStorageManager) DeleteTask(taskID string) error {
return h.deleteTaskFiles(taskID, true)
}
func (h *hybridStorageMgr) ReadDownloadFile(taskID string) (io.ReadCloser, error) {
func (h *hybridStorageManager) ReadDownloadFile(taskID string) (io.ReadCloser, error) {
return h.diskDriver.Get(storage.GetDownloadRaw(taskID))
}
func (h *hybridStorageMgr) ReadPieceMetaRecords(taskID string) ([]*storage.PieceMetaRecord, error) {
func (h *hybridStorageManager) ReadPieceMetaRecords(taskID string) ([]*storage.PieceMetaRecord, error) {
readBytes, err := h.diskDriver.GetBytes(storage.GetPieceMetadataRaw(taskID))
if err != nil {
return nil, err
@ -235,7 +178,7 @@ func (h *hybridStorageMgr) ReadPieceMetaRecords(taskID string) ([]*storage.Piece
return result, nil
}
func (h *hybridStorageMgr) ReadFileMetadata(taskID string) (*storage.FileMetadata, error) {
func (h *hybridStorageManager) ReadFileMetadata(taskID string) (*storage.FileMetadata, error) {
readBytes, err := h.diskDriver.GetBytes(storage.GetTaskMetadataRaw(taskID))
if err != nil {
return nil, errors.Wrapf(err, "get metadata bytes")
@ -248,11 +191,11 @@ func (h *hybridStorageMgr) ReadFileMetadata(taskID string) (*storage.FileMetadat
return metadata, nil
}
func (h *hybridStorageMgr) AppendPieceMetadata(taskID string, record *storage.PieceMetaRecord) error {
func (h *hybridStorageManager) AppendPieceMetadata(taskID string, record *storage.PieceMetaRecord) error {
return h.diskDriver.PutBytes(storage.GetAppendPieceMetadataRaw(taskID), []byte(record.String()+"\n"))
}
func (h *hybridStorageMgr) WriteFileMetadata(taskID string, metadata *storage.FileMetadata) error {
func (h *hybridStorageManager) WriteFileMetadata(taskID string, metadata *storage.FileMetadata) error {
data, err := json.Marshal(metadata)
if err != nil {
return errors.Wrapf(err, "marshal metadata")
@ -260,7 +203,7 @@ func (h *hybridStorageMgr) WriteFileMetadata(taskID string, metadata *storage.Fi
return h.diskDriver.PutBytes(storage.GetTaskMetadataRaw(taskID), data)
}
func (h *hybridStorageMgr) WritePieceMetaRecords(taskID string, records []*storage.PieceMetaRecord) error {
func (h *hybridStorageManager) WritePieceMetaRecords(taskID string, records []*storage.PieceMetaRecord) error {
recordStrings := make([]string, 0, len(records))
for i := range records {
recordStrings = append(recordStrings, records[i].String())
@ -268,36 +211,37 @@ func (h *hybridStorageMgr) WritePieceMetaRecords(taskID string, records []*stora
return h.diskDriver.PutBytes(storage.GetPieceMetadataRaw(taskID), []byte(strings.Join(recordStrings, "\n")))
}
func (h *hybridStorageMgr) CreateUploadLink(taskID string) error {
func (h *hybridStorageManager) ResetRepo(seedTask *task.SeedTask) error {
if err := h.deleteTaskFiles(seedTask.ID, true); err != nil {
return errors.Errorf("delete task %s files: %v", seedTask.ID, err)
}
// 判断是否有足够空间存放
if shmPath, err := h.tryShmSpace(seedTask.RawURL, seedTask.ID, seedTask.SourceFileLength); err != nil {
if _, err := os.Create(h.diskDriver.GetPath(storage.GetDownloadRaw(seedTask.ID))); err != nil {
return err
}
} else {
if err := fileutils.SymbolicLink(shmPath, h.diskDriver.GetPath(storage.GetDownloadRaw(seedTask.ID))); err != nil {
return err
}
}
// create a soft link from the upload file to the download file
if err := fileutils.SymbolicLink(h.diskDriver.GetPath(storage.GetDownloadRaw(taskID)),
h.diskDriver.GetPath(storage.GetUploadRaw(taskID))); err != nil {
if err := fileutils.SymbolicLink(h.diskDriver.GetPath(storage.GetDownloadRaw(seedTask.ID)),
h.diskDriver.GetPath(storage.GetUploadRaw(seedTask.ID))); err != nil {
return err
}
return nil
}
func (h *hybridStorageMgr) ResetRepo(task *types.SeedTask) error {
if err := h.deleteTaskFiles(task.TaskID, false, true); err != nil {
task.Log().Errorf("reset repo: failed to delete task files: %v", err)
}
// 判断是否有足够空间存放
shmPath, err := h.tryShmSpace(task.URL, task.TaskID, task.SourceFileLength)
if err == nil {
return fileutils.SymbolicLink(shmPath, h.diskDriver.GetPath(storage.GetDownloadRaw(task.TaskID)))
}
return nil
}
func (h *hybridStorageMgr) GetDownloadPath(rawFunc *storedriver.Raw) string {
func (h *hybridStorageManager) GetDownloadPath(rawFunc *storedriver.Raw) string {
return h.diskDriver.GetPath(rawFunc)
}
func (h *hybridStorageMgr) StatDownloadFile(taskID string) (*storedriver.StorageInfo, error) {
func (h *hybridStorageManager) StatDownloadFile(taskID string) (*storedriver.StorageInfo, error) {
return h.diskDriver.Stat(storage.GetDownloadRaw(taskID))
}
func (h *hybridStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
func (h *hybridStorageManager) TryFreeSpace(fileLength int64) (bool, error) {
diskFreeSpace, err := h.diskDriver.GetFreeSpace()
if err != nil {
return false, err
@ -311,13 +255,13 @@ func (h *hybridStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
WalkFn: func(filePath string, info os.FileInfo, err error) error {
if fileutils.IsRegular(filePath) {
taskID := strings.Split(path.Base(filePath), ".")[0]
task, exist := h.taskMgr.Exist(taskID)
seedTask, exist := h.taskManager.Exist(taskID)
if exist {
var totalLen int64 = 0
if task.CdnFileLength > 0 {
totalLen = task.CdnFileLength
if seedTask.CdnFileLength > 0 {
totalLen = seedTask.CdnFileLength
} else {
totalLen = task.SourceFileLength
totalLen = seedTask.SourceFileLength
}
if totalLen > 0 {
remainder.Add(totalLen - info.Size())
@ -331,7 +275,7 @@ func (h *hybridStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
return false, err
}
enoughSpace := diskFreeSpace.ToNumber()-remainder.Load() > fileLength
enoughSpace := diskFreeSpace.ToNumber()-remainder.Load() > (fileLength + int64(5*unit.GB))
if !enoughSpace {
if _, err := h.diskDriverCleaner.GC("hybrid", true); err != nil {
return false, err
@ -345,7 +289,7 @@ func (h *hybridStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
if err != nil {
return false, err
}
enoughSpace = diskFreeSpace.ToNumber()-remainder.Load() > fileLength
enoughSpace = diskFreeSpace.ToNumber()-remainder.Load() > (fileLength + int64(5*unit.GB))
}
if !enoughSpace {
return false, nil
@ -354,69 +298,67 @@ func (h *hybridStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
return true, nil
}
func (h *hybridStorageMgr) deleteDiskFiles(taskID string) error {
return h.deleteTaskFiles(taskID, true, true)
func (h *hybridStorageManager) deleteDiskFiles(taskID string) error {
return h.deleteTaskFiles(taskID, true)
}
func (h *hybridStorageMgr) deleteMemoryFiles(taskID string) error {
return h.deleteTaskFiles(taskID, true, false)
func (h *hybridStorageManager) deleteMemoryFiles(taskID string) error {
return h.deleteTaskFiles(taskID, false)
}
func (h *hybridStorageMgr) deleteTaskFiles(taskID string, deleteUploadPath bool, deleteHardLink bool) error {
func (h *hybridStorageManager) deleteTaskFiles(taskID string, deleteHardLink bool) error {
// delete task file data
if err := h.diskDriver.Remove(storage.GetDownloadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) {
if err := h.diskDriver.Remove(storage.GetDownloadRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err
}
// delete memory file
if err := h.memoryDriver.Remove(storage.GetDownloadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) {
if err := h.memoryDriver.Remove(storage.GetDownloadRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err
}
if deleteUploadPath {
if err := h.diskDriver.Remove(storage.GetUploadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) {
// delete upload file
if err := h.diskDriver.Remove(storage.GetUploadRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err
}
}
exists := h.diskDriver.Exits(getHardLinkRaw(taskID))
if !deleteHardLink && exists {
if err := h.diskDriver.MoveFile(h.diskDriver.GetPath(getHardLinkRaw(taskID)), h.diskDriver.GetPath(storage.GetDownloadRaw(taskID))); err != nil {
return err
}
} else {
if err := h.diskDriver.Remove(getHardLinkRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) {
if err := h.diskDriver.Remove(getHardLinkRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err
}
// deleteTaskFiles delete files associated with taskID
if err := h.diskDriver.Remove(storage.GetTaskMetadataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) {
if err := h.diskDriver.Remove(storage.GetTaskMetadataRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err
}
// delete piece meta data
if err := h.diskDriver.Remove(storage.GetPieceMetadataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) {
if err := h.diskDriver.Remove(storage.GetPieceMetadataRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err
}
}
// try to clean the parent bucket
if err := h.diskDriver.Remove(storage.GetParentRaw(taskID)); err != nil &&
!cdnerrors.IsFileNotExist(err) {
!os.IsNotExist(err) {
logger.WithTaskID(taskID).Warnf("failed to remove parent bucket: %v", err)
}
return nil
}
func (h *hybridStorageMgr) tryShmSpace(url, taskID string, fileLength int64) (string, error) {
func (h *hybridStorageManager) tryShmSpace(url, taskID string, fileLength int64) (string, error) {
if h.shmSwitch.check(url, fileLength) && h.hasShm {
remainder := atomic.NewInt64(0)
if err := h.memoryDriver.Walk(&storedriver.Raw{
WalkFn: func(filePath string, info os.FileInfo, err error) error {
if fileutils.IsRegular(filePath) {
taskID := strings.Split(path.Base(filePath), ".")[0]
task, exist := h.taskMgr.Exist(taskID)
seedTask, exist := h.taskManager.Exist(taskID)
if exist {
var totalLen int64 = 0
if task.CdnFileLength > 0 {
totalLen = task.CdnFileLength
if seedTask.CdnFileLength > 0 {
totalLen = seedTask.CdnFileLength
} else {
totalLen = task.SourceFileLength
totalLen = seedTask.SourceFileLength
}
if totalLen > 0 {
remainder.Add(totalLen - info.Size())
@ -451,7 +393,63 @@ func (h *hybridStorageMgr) tryShmSpace(url, taskID string, fileLength int64) (st
return "", fmt.Errorf("shared memory is not allowed")
}
func (h *hybridStorageMgr) getMemoryUsableSpace() unit.Bytes {
func (h *hybridStorageManager) GC() error {
logger.GcLogger.With("type", "hybrid").Info("start the hybrid storage gc job")
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
gcTaskIDs, err := h.diskDriverCleaner.GC("hybrid", false)
if err != nil {
logger.GcLogger.With("type", "hybrid").Error("gc disk: failed to get gcTaskIds")
}
realGCCount := h.gcTasks(gcTaskIDs, true)
logger.GcLogger.With("type", "hybrid").Infof("at most %d tasks can be cleaned up from disk, actual gc %d tasks", len(gcTaskIDs), realGCCount)
}()
if h.hasShm {
wg.Add(1)
go func() {
defer wg.Done()
gcTaskIDs, err := h.memoryDriverCleaner.GC("hybrid", false)
logger.GcLogger.With("type", "hybrid").Infof("at most %d tasks can be cleaned up from memory", len(gcTaskIDs))
if err != nil {
logger.GcLogger.With("type", "hybrid").Error("gc memory: failed to get gcTaskIds")
}
h.gcTasks(gcTaskIDs, false)
}()
}
wg.Wait()
return nil
}
func (h *hybridStorageManager) gcTasks(gcTaskIDs []string, isDisk bool) int {
var realGCCount int
for _, taskID := range gcTaskIDs {
// try to ensure the taskID is not using again
if _, exist := h.taskManager.Exist(taskID); exist {
continue
}
realGCCount++
synclock.Lock(taskID, false)
if isDisk {
if err := h.deleteDiskFiles(taskID); err != nil {
logger.GcLogger.With("type", "hybrid").Errorf("gc disk: failed to delete disk files with taskID(%s): %v", taskID, err)
synclock.UnLock(taskID, false)
continue
}
} else {
if err := h.deleteMemoryFiles(taskID); err != nil {
logger.GcLogger.With("type", "hybrid").Errorf("gc memory: failed to delete memory files with taskID(%s): %v", taskID, err)
synclock.UnLock(taskID, false)
continue
}
}
synclock.UnLock(taskID, false)
}
return realGCCount
}
func (h *hybridStorageManager) getMemoryUsableSpace() unit.Bytes {
totalSize, freeSize, err := h.memoryDriver.GetTotalAndFreeSpace()
if err != nil {
logger.GcLogger.With("type", "hybrid").Errorf("failed to get total and free space of memory: %v", err)

View File

@ -9,9 +9,8 @@ import (
reflect "reflect"
storedriver "d7y.io/dragonfly/v2/cdn/storedriver"
supervisor "d7y.io/dragonfly/v2/cdn/supervisor"
storage "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
types "d7y.io/dragonfly/v2/cdn/types"
task "d7y.io/dragonfly/v2/cdn/supervisor/task"
gomock "github.com/golang/mock/gomock"
)
@ -67,7 +66,7 @@ func (mr *MockManagerMockRecorder) DeleteTask(arg0 interface{}) *gomock.Call {
}
// Initialize mocks base method.
func (m *MockManager) Initialize(arg0 supervisor.SeedTaskMgr) {
func (m *MockManager) Initialize(arg0 task.Manager) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "Initialize", arg0)
}
@ -124,7 +123,7 @@ func (mr *MockManagerMockRecorder) ReadPieceMetaRecords(arg0 interface{}) *gomoc
}
// ResetRepo mocks base method.
func (m *MockManager) ResetRepo(arg0 *types.SeedTask) error {
func (m *MockManager) ResetRepo(arg0 *task.SeedTask) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ResetRepo", arg0)
ret0, _ := ret[0].(error)

View File

@ -28,6 +28,8 @@ const (
// which is a relative path.
DownloadHome = "download"
// UploadHome is the parent directory where the upload files are stored
// which is a relative path
UploadHome = "upload"
)

View File

@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//go:generate mockgen -destination ./mock/mock_storage_mgr.go -package mock d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage Manager
//go:generate mockgen -destination ./mock/mock_storage_manager.go -package mock d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage Manager
package storage
@ -29,21 +29,21 @@ import (
"github.com/pkg/errors"
"gopkg.in/yaml.v3"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/pkg/unit"
"d7y.io/dragonfly/v2/pkg/util/rangeutils"
)
type Manager interface {
Initialize(taskMgr supervisor.SeedTaskMgr)
Initialize(taskManager task.Manager)
// ResetRepo reset the storage of task
ResetRepo(*types.SeedTask) error
ResetRepo(*task.SeedTask) error
// StatDownloadFile stat download file info
// StatDownloadFile stat download file info, if task file is not exist on storage, return errTaskNotPersisted
StatDownloadFile(taskID string) (*storedriver.StorageInfo, error)
// WriteDownloadFile write data to download file
@ -83,23 +83,32 @@ type FileMetadata struct {
AccessTime int64 `json:"accessTime"`
Interval int64 `json:"interval"`
CdnFileLength int64 `json:"cdnFileLength"`
Digest string `json:"digest"`
SourceRealDigest string `json:"sourceRealDigest"`
PieceMd5Sign string `json:"pieceMd5Sign"`
Tag string `json:"tag"`
ExpireInfo map[string]string `json:"expireInfo"`
Finish bool `json:"finish"`
Success bool `json:"success"`
TotalPieceCount int32 `json:"totalPieceCount"`
//PieceMetadataSign string `json:"pieceMetadataSign"`
PieceMd5Sign string `json:"pieceMd5Sign"`
Range string `json:"range"`
Filter string `json:"filter"`
}
// PieceMetaRecord meta data of piece
type PieceMetaRecord struct {
PieceNum uint32 `json:"pieceNum"` // piece Num start from 0
PieceLen uint32 `json:"pieceLen"` // 存储到存储介质的真实长度
Md5 string `json:"md5"` // for transported piece content不是origin source 的 md5是真是存储到存储介质后的md5为了读取数据文件时方便校验完整性
Range *rangeutils.Range `json:"range"` // 下载存储到磁盘的range不是origin source的range.提供给客户端发送下载请求,for transported piece content
OriginRange *rangeutils.Range `json:"originRange"` // piece's real offset in the file
PieceStyle types.PieceFormat `json:"pieceStyle"` // 1: PlainUnspecified
// piece Num start from 0
PieceNum uint32 `json:"pieceNum"`
// 存储到存储介质的真实长度
PieceLen uint32 `json:"pieceLen"`
// for transported piece content不是origin source 的 md5是真是存储到存储介质后的md5为了读取数据文件时方便校验完整性
Md5 string `json:"md5"`
// 下载存储到磁盘的range不是origin source的range.提供给客户端发送下载请求,for transported piece content
Range *rangeutils.Range `json:"range"`
// piece's real offset in the file
OriginRange *rangeutils.Range `json:"originRange"`
// 0: PlainUnspecified
PieceStyle int32 `json:"pieceStyle"`
}
const fieldSeparator = ":"
@ -116,11 +125,11 @@ func ParsePieceMetaRecord(value string) (record *PieceMetaRecord, err error) {
}
}()
fields := strings.Split(value, fieldSeparator)
pieceNum, err := strconv.ParseInt(fields[0], 10, 32)
pieceNum, err := strconv.ParseUint(fields[0], 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "invalid pieceNum: %s", fields[0])
}
pieceLen, err := strconv.ParseInt(fields[1], 10, 32)
pieceLen, err := strconv.ParseUint(fields[1], 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "invalid pieceLen: %s", fields[1])
}
@ -143,7 +152,7 @@ func ParsePieceMetaRecord(value string) (record *PieceMetaRecord, err error) {
Md5: md5,
Range: pieceRange,
OriginRange: originRange,
PieceStyle: types.PieceFormat(pieceStyle),
PieceStyle: int32(pieceStyle),
}, nil
}
@ -162,7 +171,7 @@ func (m *managerPlugin) Name() string {
return m.name
}
func (m *managerPlugin) ResetRepo(task *types.SeedTask) error {
func (m *managerPlugin) ResetRepo(task *task.SeedTask) error {
return m.instance.ResetRepo(task)
}
@ -203,7 +212,7 @@ func (m *managerPlugin) DeleteTask(taskID string) error {
}
// ManagerBuilder is a function that creates a new storage manager plugin instant with the giving conf.
type ManagerBuilder func(cfg *Config) (Manager, error)
type ManagerBuilder func(cfg *config.StorageConfig) (Manager, error)
// Register defines an interface to register a storage manager with specified name.
// All storage managers should call this function to register itself to the storage manager factory.
@ -211,7 +220,7 @@ func Register(name string, builder ManagerBuilder) error {
name = strings.ToLower(name)
// plugin builder
var f = func(conf interface{}) (plugins.Plugin, error) {
cfg := &Config{}
cfg := &config.StorageConfig{}
decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.ComposeDecodeHookFunc(func(from, to reflect.Type, v interface{}) (interface{}, error) {
switch to {
@ -241,7 +250,7 @@ func Register(name string, builder ManagerBuilder) error {
return plugins.RegisterPluginBuilder(plugins.StorageManagerPlugin, name, f)
}
func newManagerPlugin(name string, builder ManagerBuilder, cfg *Config) (plugins.Plugin, error) {
func newManagerPlugin(name string, builder ManagerBuilder, cfg *config.StorageConfig) (plugins.Plugin, error) {
if name == "" || builder == nil {
return nil, fmt.Errorf("storage manager plugin's name and builder cannot be nil")
}
@ -266,24 +275,6 @@ func Get(name string) (Manager, bool) {
return v.(*managerPlugin).instance, true
}
type Config struct {
GCInitialDelay time.Duration `yaml:"gcInitialDelay"`
GCInterval time.Duration `yaml:"gcInterval"`
DriverConfigs map[string]*DriverConfig `yaml:"driverConfigs"`
}
type DriverConfig struct {
GCConfig *GCConfig `yaml:"gcConfig"`
}
// GCConfig gc config
type GCConfig struct {
YoungGCThreshold unit.Bytes `yaml:"youngGCThreshold"`
FullGCThreshold unit.Bytes `yaml:"fullGCThreshold"`
CleanRatio int `yaml:"cleanRatio"`
IntervalThreshold time.Duration `yaml:"intervalThreshold"`
}
const (
HybridStorageMode = "hybrid"
DiskStorageMode = "disk"

View File

@ -25,33 +25,37 @@ import (
"github.com/emirpasic/gods/maps/treemap"
godsutils "github.com/emirpasic/gods/utils"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/util/timeutils"
)
type Cleaner struct {
cfg *GCConfig
driver storedriver.Driver
taskMgr supervisor.SeedTaskMgr
storageMgr Manager
type Cleaner interface {
GC(storagePattern string, force bool) ([]string, error)
}
func NewStorageCleaner(cfg *GCConfig, driver storedriver.Driver, storageMgr Manager, taskMgr supervisor.SeedTaskMgr) (*Cleaner, error) {
return &Cleaner{
type cleaner struct {
cfg *config.GCConfig
driver storedriver.Driver
taskManager task.Manager
storageManager Manager
}
func NewStorageCleaner(cfg *config.GCConfig, driver storedriver.Driver, storageManager Manager, taskManager task.Manager) (Cleaner, error) {
return &cleaner{
cfg: cfg,
driver: driver,
taskMgr: taskMgr,
storageMgr: storageMgr,
taskManager: taskManager,
storageManager: storageManager,
}, nil
}
func (cleaner *Cleaner) GC(storagePattern string, force bool) ([]string, error) {
func (cleaner *cleaner) GC(storagePattern string, force bool) ([]string, error) {
freeSpace, err := cleaner.driver.GetFreeSpace()
if err != nil {
if cdnerrors.IsFileNotExist(err) {
if os.IsNotExist(err) {
err = cleaner.driver.CreateBaseDir()
if err != nil {
return nil, err
@ -74,7 +78,7 @@ func (cleaner *Cleaner) GC(storagePattern string, force bool) ([]string, error)
}
}
logger.GcLogger.With("type", storagePattern).Debugf("start to exec gc with fullGC: %t", fullGC)
logger.GcLogger.With("type", storagePattern).Debugf("storage is insufficient, start to exec gc with fullGC: %t", fullGC)
gapTasks := treemap.NewWith(godsutils.Int64Comparator)
intervalTasks := treemap.NewWith(godsutils.Int64Comparator)
@ -100,8 +104,8 @@ func (cleaner *Cleaner) GC(storagePattern string, force bool) ([]string, error)
}
walkTaskIds[taskID] = true
// we should return directly when we success to get info which means it is being used
if _, exist := cleaner.taskMgr.Exist(taskID); exist {
// we should return directly when success to get info which means it is being used
if _, exist := cleaner.taskManager.Exist(taskID); exist {
return nil
}
@ -111,13 +115,13 @@ func (cleaner *Cleaner) GC(storagePattern string, force bool) ([]string, error)
return nil
}
metadata, err := cleaner.storageMgr.ReadFileMetadata(taskID)
metadata, err := cleaner.storageManager.ReadFileMetadata(taskID)
if err != nil || metadata == nil {
logger.GcLogger.With("type", storagePattern).Debugf("taskID: %s, failed to get metadata: %v", taskID, err)
gcTaskIDs = append(gcTaskIDs, taskID)
return nil
}
// put taskId into gapTasks or intervalTasks which will sort by some rules
// put taskID into gapTasks or intervalTasks which will sort by some rules
if err := cleaner.sortInert(gapTasks, intervalTasks, metadata); err != nil {
logger.GcLogger.With("type", storagePattern).Errorf("failed to parse inert metadata(%#v): %v", metadata, err)
}
@ -138,12 +142,12 @@ func (cleaner *Cleaner) GC(storagePattern string, force bool) ([]string, error)
return gcTaskIDs, nil
}
func (cleaner *Cleaner) sortInert(gapTasks, intervalTasks *treemap.Map, metadata *FileMetadata) error {
func (cleaner *cleaner) sortInert(gapTasks, intervalTasks *treemap.Map, metadata *FileMetadata) error {
gap := timeutils.CurrentTimeMillis() - metadata.AccessTime
if metadata.Interval > 0 &&
gap <= metadata.Interval+(int64(cleaner.cfg.IntervalThreshold.Seconds())*int64(time.Millisecond)) {
info, err := cleaner.storageMgr.StatDownloadFile(metadata.TaskID)
info, err := cleaner.storageManager.StatDownloadFile(metadata.TaskID)
if err != nil {
return err
}
@ -168,7 +172,7 @@ func (cleaner *Cleaner) sortInert(gapTasks, intervalTasks *treemap.Map, metadata
return nil
}
func (cleaner *Cleaner) getGCTasks(gapTasks, intervalTasks *treemap.Map) []string {
func (cleaner *cleaner) getGCTasks(gapTasks, intervalTasks *treemap.Map) []string {
var gcTasks = make([]string, 0)
for _, v := range gapTasks.Values() {

View File

@ -1,40 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//go:generate mockgen -destination ./mock/mock_cdn_mgr.go -package mock d7y.io/dragonfly/v2/cdn/supervisor CDNMgr
package supervisor
import (
"context"
"d7y.io/dragonfly/v2/cdn/types"
)
// CDNMgr as an interface defines all operations against CDN and
// operates on the underlying files stored on the local disk, etc.
type CDNMgr interface {
// TriggerCDN will trigger CDN to download the resource from sourceUrl.
TriggerCDN(context.Context, *types.SeedTask) (*types.SeedTask, error)
// Delete the cdn meta with specified taskID.
// The file on the disk will be deleted when the force is true.
Delete(string) error
// TryFreeSpace checks if the free space of the storage is larger than the fileLength.
TryFreeSpace(fileLength int64) (bool, error)
}

View File

@ -1,80 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: d7y.io/dragonfly/v2/cdn/supervisor (interfaces: CDNMgr)
// Package mock is a generated GoMock package.
package mock
import (
context "context"
reflect "reflect"
types "d7y.io/dragonfly/v2/cdn/types"
gomock "github.com/golang/mock/gomock"
)
// MockCDNMgr is a mock of CDNMgr interface.
type MockCDNMgr struct {
ctrl *gomock.Controller
recorder *MockCDNMgrMockRecorder
}
// MockCDNMgrMockRecorder is the mock recorder for MockCDNMgr.
type MockCDNMgrMockRecorder struct {
mock *MockCDNMgr
}
// NewMockCDNMgr creates a new mock instance.
func NewMockCDNMgr(ctrl *gomock.Controller) *MockCDNMgr {
mock := &MockCDNMgr{ctrl: ctrl}
mock.recorder = &MockCDNMgrMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCDNMgr) EXPECT() *MockCDNMgrMockRecorder {
return m.recorder
}
// Delete mocks base method.
func (m *MockCDNMgr) Delete(arg0 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockCDNMgrMockRecorder) Delete(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockCDNMgr)(nil).Delete), arg0)
}
// TriggerCDN mocks base method.
func (m *MockCDNMgr) TriggerCDN(arg0 context.Context, arg1 *types.SeedTask) (*types.SeedTask, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TriggerCDN", arg0, arg1)
ret0, _ := ret[0].(*types.SeedTask)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// TriggerCDN indicates an expected call of TriggerCDN.
func (mr *MockCDNMgrMockRecorder) TriggerCDN(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TriggerCDN", reflect.TypeOf((*MockCDNMgr)(nil).TriggerCDN), arg0, arg1)
}
// TryFreeSpace mocks base method.
func (m *MockCDNMgr) TryFreeSpace(arg0 int64) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TryFreeSpace", arg0)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// TryFreeSpace indicates an expected call of TryFreeSpace.
func (mr *MockCDNMgrMockRecorder) TryFreeSpace(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TryFreeSpace", reflect.TypeOf((*MockCDNMgr)(nil).TryFreeSpace), arg0)
}

View File

@ -1,63 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: d7y.io/dragonfly/v2/cdn/supervisor/mgr (interfaces: GCMgr)
// Package mock is a generated GoMock package.
package mock
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockGCMgr is a mock of GCMgr interface.
type MockGCMgr struct {
ctrl *gomock.Controller
recorder *MockGCMgrMockRecorder
}
// MockGCMgrMockRecorder is the mock recorder for MockGCMgr.
type MockGCMgrMockRecorder struct {
mock *MockGCMgr
}
// NewMockGCMgr creates a new mock instance.
func NewMockGCMgr(ctrl *gomock.Controller) *MockGCMgr {
mock := &MockGCMgr{ctrl: ctrl}
mock.recorder = &MockGCMgrMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockGCMgr) EXPECT() *MockGCMgrMockRecorder {
return m.recorder
}
// GCTask mocks base method.
func (m *MockGCMgr) GCTask(arg0 string, arg1 bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GCTask", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// GCTask indicates an expected call of GCTask.
func (mr *MockGCMgrMockRecorder) GCTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GCTask", reflect.TypeOf((*MockGCMgr)(nil).GCTask), arg0, arg1)
}
// StartGC mocks base method.
func (m *MockGCMgr) StartGC(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StartGC", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// StartGC indicates an expected call of StartGC.
func (mr *MockGCMgrMockRecorder) StartGC(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartGC", reflect.TypeOf((*MockGCMgr)(nil).StartGC), arg0)
}

View File

@ -1,133 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: d7y.io/dragonfly/v2/cdn/supervisor (interfaces: SeedProgressMgr)
// Package mock is a generated GoMock package.
package mock
import (
context "context"
reflect "reflect"
supervisor "d7y.io/dragonfly/v2/cdn/supervisor"
types "d7y.io/dragonfly/v2/cdn/types"
gomock "github.com/golang/mock/gomock"
)
// MockSeedProgressMgr is a mock of SeedProgressMgr interface.
type MockSeedProgressMgr struct {
ctrl *gomock.Controller
recorder *MockSeedProgressMgrMockRecorder
}
// MockSeedProgressMgrMockRecorder is the mock recorder for MockSeedProgressMgr.
type MockSeedProgressMgrMockRecorder struct {
mock *MockSeedProgressMgr
}
// NewMockSeedProgressMgr creates a new mock instance.
func NewMockSeedProgressMgr(ctrl *gomock.Controller) *MockSeedProgressMgr {
mock := &MockSeedProgressMgr{ctrl: ctrl}
mock.recorder = &MockSeedProgressMgrMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSeedProgressMgr) EXPECT() *MockSeedProgressMgrMockRecorder {
return m.recorder
}
// Clear mocks base method.
func (m *MockSeedProgressMgr) Clear(arg0 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Clear", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Clear indicates an expected call of Clear.
func (mr *MockSeedProgressMgrMockRecorder) Clear(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clear", reflect.TypeOf((*MockSeedProgressMgr)(nil).Clear), arg0)
}
// GetPieces mocks base method.
func (m *MockSeedProgressMgr) GetPieces(arg0 context.Context, arg1 string) ([]*types.SeedPiece, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPieces", arg0, arg1)
ret0, _ := ret[0].([]*types.SeedPiece)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPieces indicates an expected call of GetPieces.
func (mr *MockSeedProgressMgrMockRecorder) GetPieces(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieces", reflect.TypeOf((*MockSeedProgressMgr)(nil).GetPieces), arg0, arg1)
}
// InitSeedProgress mocks base method.
func (m *MockSeedProgressMgr) InitSeedProgress(arg0 context.Context, arg1 string) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "InitSeedProgress", arg0, arg1)
}
// InitSeedProgress indicates an expected call of InitSeedProgress.
func (mr *MockSeedProgressMgrMockRecorder) InitSeedProgress(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitSeedProgress", reflect.TypeOf((*MockSeedProgressMgr)(nil).InitSeedProgress), arg0, arg1)
}
// PublishPiece mocks base method.
func (m *MockSeedProgressMgr) PublishPiece(arg0 context.Context, arg1 string, arg2 *types.SeedPiece) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PublishPiece", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// PublishPiece indicates an expected call of PublishPiece.
func (mr *MockSeedProgressMgrMockRecorder) PublishPiece(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishPiece", reflect.TypeOf((*MockSeedProgressMgr)(nil).PublishPiece), arg0, arg1, arg2)
}
// PublishTask mocks base method.
func (m *MockSeedProgressMgr) PublishTask(arg0 context.Context, arg1 string, arg2 *types.SeedTask) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PublishTask", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// PublishTask indicates an expected call of PublishTask.
func (mr *MockSeedProgressMgrMockRecorder) PublishTask(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishTask", reflect.TypeOf((*MockSeedProgressMgr)(nil).PublishTask), arg0, arg1, arg2)
}
// SetTaskMgr mocks base method.
func (m *MockSeedProgressMgr) SetTaskMgr(arg0 supervisor.SeedTaskMgr) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "SetTaskMgr", arg0)
}
// SetTaskMgr indicates an expected call of SetTaskMgr.
func (mr *MockSeedProgressMgrMockRecorder) SetTaskMgr(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTaskMgr", reflect.TypeOf((*MockSeedProgressMgr)(nil).SetTaskMgr), arg0)
}
// WatchSeedProgress mocks base method.
func (m *MockSeedProgressMgr) WatchSeedProgress(arg0 context.Context, arg1 string) (<-chan *types.SeedPiece, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WatchSeedProgress", arg0, arg1)
ret0, _ := ret[0].(<-chan *types.SeedPiece)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// WatchSeedProgress indicates an expected call of WatchSeedProgress.
func (mr *MockSeedProgressMgrMockRecorder) WatchSeedProgress(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchSeedProgress", reflect.TypeOf((*MockSeedProgressMgr)(nil).WatchSeedProgress), arg0, arg1)
}

View File

@ -1,110 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: d7y.io/dragonfly/v2/cdn/supervisor (interfaces: SeedTaskMgr)
// Package mock is a generated GoMock package.
package mock
import (
context "context"
reflect "reflect"
types "d7y.io/dragonfly/v2/cdn/types"
gomock "github.com/golang/mock/gomock"
)
// MockSeedTaskMgr is a mock of SeedTaskMgr interface.
type MockSeedTaskMgr struct {
ctrl *gomock.Controller
recorder *MockSeedTaskMgrMockRecorder
}
// MockSeedTaskMgrMockRecorder is the mock recorder for MockSeedTaskMgr.
type MockSeedTaskMgrMockRecorder struct {
mock *MockSeedTaskMgr
}
// NewMockSeedTaskMgr creates a new mock instance.
func NewMockSeedTaskMgr(ctrl *gomock.Controller) *MockSeedTaskMgr {
mock := &MockSeedTaskMgr{ctrl: ctrl}
mock.recorder = &MockSeedTaskMgrMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSeedTaskMgr) EXPECT() *MockSeedTaskMgrMockRecorder {
return m.recorder
}
// Delete mocks base method.
func (m *MockSeedTaskMgr) Delete(arg0 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockSeedTaskMgrMockRecorder) Delete(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockSeedTaskMgr)(nil).Delete), arg0)
}
// Exist mocks base method.
func (m *MockSeedTaskMgr) Exist(arg0 string) (*types.SeedTask, bool) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Exist", arg0)
ret0, _ := ret[0].(*types.SeedTask)
ret1, _ := ret[1].(bool)
return ret0, ret1
}
// Exist indicates an expected call of Exist.
func (mr *MockSeedTaskMgrMockRecorder) Exist(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exist", reflect.TypeOf((*MockSeedTaskMgr)(nil).Exist), arg0)
}
// Get mocks base method.
func (m *MockSeedTaskMgr) Get(arg0 string) (*types.SeedTask, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", arg0)
ret0, _ := ret[0].(*types.SeedTask)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Get indicates an expected call of Get.
func (mr *MockSeedTaskMgrMockRecorder) Get(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSeedTaskMgr)(nil).Get), arg0)
}
// GetPieces mocks base method.
func (m *MockSeedTaskMgr) GetPieces(arg0 context.Context, arg1 string) ([]*types.SeedPiece, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPieces", arg0, arg1)
ret0, _ := ret[0].([]*types.SeedPiece)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPieces indicates an expected call of GetPieces.
func (mr *MockSeedTaskMgrMockRecorder) GetPieces(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieces", reflect.TypeOf((*MockSeedTaskMgr)(nil).GetPieces), arg0, arg1)
}
// Register mocks base method.
func (m *MockSeedTaskMgr) Register(arg0 context.Context, arg1 *types.SeedTask) (<-chan *types.SeedPiece, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Register", arg0, arg1)
ret0, _ := ret[0].(<-chan *types.SeedPiece)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Register indicates an expected call of Register.
func (mr *MockSeedTaskMgrMockRecorder) Register(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockSeedTaskMgr)(nil).Register), arg0, arg1)
}

View File

@ -0,0 +1,80 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: d7y.io/dragonfly/v2/cdn/supervisor/cdn (interfaces: Manager)
// Package cdn is a generated GoMock package.
package cdn
import (
context "context"
reflect "reflect"
task "d7y.io/dragonfly/v2/cdn/supervisor/task"
gomock "github.com/golang/mock/gomock"
)
// MockManager is a mock of Manager interface.
type MockManager struct {
ctrl *gomock.Controller
recorder *MockManagerMockRecorder
}
// MockManagerMockRecorder is the mock recorder for MockManager.
type MockManagerMockRecorder struct {
mock *MockManager
}
// NewMockManager creates a new mock instance.
func NewMockManager(ctrl *gomock.Controller) *MockManager {
mock := &MockManager{ctrl: ctrl}
mock.recorder = &MockManagerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManager) EXPECT() *MockManagerMockRecorder {
return m.recorder
}
// Delete mocks base method.
func (m *MockManager) Delete(arg0 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockManagerMockRecorder) Delete(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockManager)(nil).Delete), arg0)
}
// TriggerCDN mocks base method.
func (m *MockManager) TriggerCDN(arg0 context.Context, arg1 *task.SeedTask) (*task.SeedTask, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TriggerCDN", arg0, arg1)
ret0, _ := ret[0].(*task.SeedTask)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// TriggerCDN indicates an expected call of TriggerCDN.
func (mr *MockManagerMockRecorder) TriggerCDN(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TriggerCDN", reflect.TypeOf((*MockManager)(nil).TriggerCDN), arg0, arg1)
}
// TryFreeSpace mocks base method.
func (m *MockManager) TryFreeSpace(arg0 int64) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TryFreeSpace", arg0)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// TryFreeSpace indicates an expected call of TryFreeSpace.
func (mr *MockManagerMockRecorder) TryFreeSpace(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TryFreeSpace", reflect.TypeOf((*MockManager)(nil).TryFreeSpace), arg0)
}

View File

@ -0,0 +1,79 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: d7y.io/dragonfly/v2/cdn/supervisor/progress (interfaces: Manager)
// Package progress is a generated GoMock package.
package progress
import (
context "context"
reflect "reflect"
task "d7y.io/dragonfly/v2/cdn/supervisor/task"
gomock "github.com/golang/mock/gomock"
)
// MockManager is a mock of Manager interface.
type MockManager struct {
ctrl *gomock.Controller
recorder *MockManagerMockRecorder
}
// MockManagerMockRecorder is the mock recorder for MockManager.
type MockManagerMockRecorder struct {
mock *MockManager
}
// NewMockManager creates a new mock instance.
func NewMockManager(ctrl *gomock.Controller) *MockManager {
mock := &MockManager{ctrl: ctrl}
mock.recorder = &MockManagerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManager) EXPECT() *MockManagerMockRecorder {
return m.recorder
}
// PublishPiece mocks base method.
func (m *MockManager) PublishPiece(arg0 context.Context, arg1 string, arg2 *task.PieceInfo) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PublishPiece", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// PublishPiece indicates an expected call of PublishPiece.
func (mr *MockManagerMockRecorder) PublishPiece(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishPiece", reflect.TypeOf((*MockManager)(nil).PublishPiece), arg0, arg1, arg2)
}
// PublishTask mocks base method.
func (m *MockManager) PublishTask(arg0 context.Context, arg1 string, arg2 *task.SeedTask) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "PublishTask", arg0, arg1, arg2)
ret0, _ := ret[0].(error)
return ret0
}
// PublishTask indicates an expected call of PublishTask.
func (mr *MockManagerMockRecorder) PublishTask(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishTask", reflect.TypeOf((*MockManager)(nil).PublishTask), arg0, arg1, arg2)
}
// WatchSeedProgress mocks base method.
func (m *MockManager) WatchSeedProgress(arg0 context.Context, arg1, arg2 string) (<-chan *task.PieceInfo, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "WatchSeedProgress", arg0, arg1, arg2)
ret0, _ := ret[0].(<-chan *task.PieceInfo)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// WatchSeedProgress indicates an expected call of WatchSeedProgress.
func (mr *MockManagerMockRecorder) WatchSeedProgress(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchSeedProgress", reflect.TypeOf((*MockManager)(nil).WatchSeedProgress), arg0, arg1, arg2)
}

View File

@ -0,0 +1,135 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: d7y.io/dragonfly/v2/cdn/supervisor/task (interfaces: Manager)
// Package task is a generated GoMock package.
package task
import (
reflect "reflect"
task "d7y.io/dragonfly/v2/cdn/supervisor/task"
gomock "github.com/golang/mock/gomock"
)
// MockManager is a mock of Manager interface.
type MockManager struct {
ctrl *gomock.Controller
recorder *MockManagerMockRecorder
}
// MockManagerMockRecorder is the mock recorder for MockManager.
type MockManagerMockRecorder struct {
mock *MockManager
}
// NewMockManager creates a new mock instance.
func NewMockManager(ctrl *gomock.Controller) *MockManager {
mock := &MockManager{ctrl: ctrl}
mock.recorder = &MockManagerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManager) EXPECT() *MockManagerMockRecorder {
return m.recorder
}
// AddOrUpdate mocks base method.
func (m *MockManager) AddOrUpdate(arg0 *task.SeedTask) (*task.SeedTask, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddOrUpdate", arg0)
ret0, _ := ret[0].(*task.SeedTask)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// NOTE(review): the methods below appear to be mockgen-generated stubs for the
// task.Manager interface — regenerate with `go generate` / mockgen rather than
// editing by hand.

// AddOrUpdate indicates an expected call of AddOrUpdate.
func (mr *MockManagerMockRecorder) AddOrUpdate(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddOrUpdate", reflect.TypeOf((*MockManager)(nil).AddOrUpdate), arg0)
}

// Delete mocks base method.
func (m *MockManager) Delete(arg0 string) {
	m.ctrl.T.Helper()
	m.ctrl.Call(m, "Delete", arg0)
}

// Delete indicates an expected call of Delete.
func (mr *MockManagerMockRecorder) Delete(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockManager)(nil).Delete), arg0)
}

// Exist mocks base method.
func (m *MockManager) Exist(arg0 string) (*task.SeedTask, bool) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Exist", arg0)
	ret0, _ := ret[0].(*task.SeedTask)
	ret1, _ := ret[1].(bool)
	return ret0, ret1
}

// Exist indicates an expected call of Exist.
func (mr *MockManagerMockRecorder) Exist(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exist", reflect.TypeOf((*MockManager)(nil).Exist), arg0)
}

// Get mocks base method.
func (m *MockManager) Get(arg0 string) (*task.SeedTask, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Get", arg0)
	ret0, _ := ret[0].(*task.SeedTask)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Get indicates an expected call of Get.
func (mr *MockManagerMockRecorder) Get(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockManager)(nil).Get), arg0)
}

// GetProgress mocks base method.
func (m *MockManager) GetProgress(arg0 string) (map[uint32]*task.PieceInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetProgress", arg0)
	ret0, _ := ret[0].(map[uint32]*task.PieceInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetProgress indicates an expected call of GetProgress.
func (mr *MockManagerMockRecorder) GetProgress(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProgress", reflect.TypeOf((*MockManager)(nil).GetProgress), arg0)
}

// Update mocks base method.
func (m *MockManager) Update(arg0 string, arg1 *task.SeedTask) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Update", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// Update indicates an expected call of Update.
func (mr *MockManagerMockRecorder) Update(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockManager)(nil).Update), arg0, arg1)
}

// UpdateProgress mocks base method.
func (m *MockManager) UpdateProgress(arg0 string, arg1 *task.PieceInfo) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "UpdateProgress", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// UpdateProgress indicates an expected call of UpdateProgress.
func (mr *MockManagerMockRecorder) UpdateProgress(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateProgress", reflect.TypeOf((*MockManager)(nil).UpdateProgress), arg0, arg1)
}

View File

@ -14,231 +14,131 @@
* limitations under the License.
*/
//go:generate mockgen -destination ../mocks/progress/mock_progress_manager.go -package progress d7y.io/dragonfly/v2/cdn/supervisor/progress Manager
package progress
import (
"container/list"
"context"
"encoding/json"
"fmt"
"sort"
"strconv"
"sync"
"time"
"github.com/pkg/errors"
"go.opentelemetry.io/otel/trace"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/internal/dferrors"
"d7y.io/dragonfly/v2/cdn/constants"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/synclock"
"d7y.io/dragonfly/v2/pkg/syncmap"
)
var _ supervisor.SeedProgressMgr = (*Manager)(nil)
// Manager as an interface defines all operations about seed progress
type Manager interface {
type Manager struct {
seedSubscribers *syncmap.SyncMap
taskPieceMetaRecords *syncmap.SyncMap
taskMgr supervisor.SeedTaskMgr
// WatchSeedProgress watch task seed progress
WatchSeedProgress(ctx context.Context, clientAddr string, taskID string) (<-chan *task.PieceInfo, error)
// PublishPiece publish piece seed
PublishPiece(ctx context.Context, taskID string, piece *task.PieceInfo) error
// PublishTask publish task seed
PublishTask(ctx context.Context, taskID string, task *task.SeedTask) error
}
var _ Manager = (*manager)(nil)
type manager struct {
mu *synclock.LockerPool
timeout time.Duration
buffer int
taskManager task.Manager
seedTaskSubjects map[string]*publisher
}
func (pm *Manager) SetTaskMgr(taskMgr supervisor.SeedTaskMgr) {
pm.taskMgr = taskMgr
func NewManager(taskManager task.Manager) (Manager, error) {
return newManager(taskManager)
}
func NewManager() (supervisor.SeedProgressMgr, error) {
return &Manager{
seedSubscribers: syncmap.NewSyncMap(),
taskPieceMetaRecords: syncmap.NewSyncMap(),
func newManager(taskManager task.Manager) (*manager, error) {
return &manager{
mu: synclock.NewLockerPool(),
timeout: 3 * time.Second,
buffer: 4,
taskManager: taskManager,
seedTaskSubjects: make(map[string]*publisher),
}, nil
}
func (pm *Manager) InitSeedProgress(ctx context.Context, taskID string) {
span := trace.SpanFromContext(ctx)
span.AddEvent(config.EventInitSeedProgress)
pm.mu.Lock(taskID, true)
if _, ok := pm.seedSubscribers.Load(taskID); ok {
logger.WithTaskID(taskID).Debugf("the task seedSubscribers already exist")
if _, ok := pm.taskPieceMetaRecords.Load(taskID); ok {
logger.WithTaskID(taskID).Debugf("the task taskPieceMetaRecords already exist")
pm.mu.UnLock(taskID, true)
return
}
}
pm.mu.UnLock(taskID, true)
func (pm *manager) WatchSeedProgress(ctx context.Context, clientAddr string, taskID string) (<-chan *task.PieceInfo, error) {
pm.mu.Lock(taskID, false)
defer pm.mu.UnLock(taskID, false)
if _, loaded := pm.seedSubscribers.LoadOrStore(taskID, list.New()); loaded {
logger.WithTaskID(taskID).Info("the task seedSubscribers already exist")
span := trace.SpanFromContext(ctx)
span.AddEvent(constants.EventWatchSeedProgress)
seedTask, err := pm.taskManager.Get(taskID)
if err != nil {
return nil, err
}
if _, loaded := pm.taskPieceMetaRecords.LoadOrStore(taskID, syncmap.NewSyncMap()); loaded {
logger.WithTaskID(taskID).Info("the task taskPieceMetaRecords already exist")
if seedTask.IsDone() {
pieceChan := make(chan *task.PieceInfo)
go func(pieceChan chan *task.PieceInfo) {
defer func() {
logger.Debugf("subscriber %s starts watching task %s seed progress", clientAddr, taskID)
close(pieceChan)
}()
pieceNums := make([]uint32, 0, len(seedTask.Pieces))
for pieceNum := range seedTask.Pieces {
pieceNums = append(pieceNums, pieceNum)
}
sort.Slice(pieceNums, func(i, j int) bool {
return pieceNums[i] < pieceNums[j]
})
for _, pieceNum := range pieceNums {
logger.Debugf("notifies subscriber %s about %d piece info of taskID %s", clientAddr, pieceNum, taskID)
pieceChan <- seedTask.Pieces[pieceNum]
}
}(pieceChan)
return pieceChan, nil
}
var progressPublisher, ok = pm.seedTaskSubjects[taskID]
if !ok {
progressPublisher = newProgressPublisher(taskID)
pm.seedTaskSubjects[taskID] = progressPublisher
}
observer := newProgressSubscriber(ctx, clientAddr, seedTask.ID, seedTask.Pieces)
progressPublisher.AddSubscriber(observer)
return observer.Receiver(), nil
}
func (pm *Manager) WatchSeedProgress(ctx context.Context, taskID string) (<-chan *types.SeedPiece, error) {
span := trace.SpanFromContext(ctx)
span.AddEvent(config.EventWatchSeedProgress)
logger.Debugf("watch seed progress begin for taskID: %s", taskID)
pm.mu.Lock(taskID, true)
defer pm.mu.UnLock(taskID, true)
chanList, err := pm.seedSubscribers.GetAsList(taskID)
if err != nil {
return nil, fmt.Errorf("get seed subscribers: %v", err)
}
pieceMetadataRecords, err := pm.getPieceMetaRecordsByTaskID(taskID)
if err != nil {
return nil, fmt.Errorf("get piece meta records by taskID: %v", err)
}
ch := make(chan *types.SeedPiece, pm.buffer)
ele := chanList.PushBack(ch)
go func(seedCh chan *types.SeedPiece, ele *list.Element) {
for _, pieceMetaRecord := range pieceMetadataRecords {
logger.Debugf("seed piece meta record %#v", pieceMetaRecord)
select {
case seedCh <- pieceMetaRecord:
case <-time.After(pm.timeout):
}
}
if task, err := pm.taskMgr.Get(taskID); err == nil && task.IsDone() {
chanList.Remove(ele)
close(seedCh)
}
}(ch, ele)
return ch, nil
}
// PublishPiece publish seedPiece
func (pm *Manager) PublishPiece(ctx context.Context, taskID string, record *types.SeedPiece) error {
span := trace.SpanFromContext(ctx)
recordBytes, _ := json.Marshal(record)
span.AddEvent(config.EventPublishPiece, trace.WithAttributes(config.AttributeSeedPiece.String(string(recordBytes))))
logger.Debugf("seed piece meta record %#v", record)
func (pm *manager) PublishPiece(ctx context.Context, taskID string, record *task.PieceInfo) (err error) {
pm.mu.Lock(taskID, false)
defer pm.mu.UnLock(taskID, false)
// update task access time
if pm.taskMgr != nil {
if _, err := pm.taskMgr.Get(taskID); err != nil {
span := trace.SpanFromContext(ctx)
jsonRecord, err := json.Marshal(record)
if err != nil {
return errors.Wrapf(err, "json marshal piece record: %#v", record)
}
span.AddEvent(constants.EventPublishPiece, trace.WithAttributes(constants.AttributeSeedPiece.String(string(jsonRecord))))
logger.Debugf("publish task %s seed piece record: %s", taskID, jsonRecord)
var progressPublisher, ok = pm.seedTaskSubjects[taskID]
if ok {
progressPublisher.NotifySubscribers(record)
}
return pm.taskManager.UpdateProgress(taskID, record)
}
func (pm *manager) PublishTask(ctx context.Context, taskID string, seedTask *task.SeedTask) error {
jsonTask, err := json.Marshal(seedTask)
if err != nil {
return errors.Wrapf(err, "json marshal seedTask: %#v", seedTask)
}
logger.Debugf("publish task %s seed piece record: %s", taskID, jsonTask)
pm.mu.Lock(taskID, false)
defer pm.mu.UnLock(taskID, false)
span := trace.SpanFromContext(ctx)
recordBytes, _ := json.Marshal(seedTask)
span.AddEvent(constants.EventPublishTask, trace.WithAttributes(constants.AttributeSeedTask.String(string(recordBytes))))
if err := pm.taskManager.Update(taskID, seedTask); err != nil {
return err
}
}
err := pm.setPieceMetaRecord(taskID, record)
if err != nil {
return fmt.Errorf("set piece meta record: %v", err)
}
chanList, err := pm.seedSubscribers.GetAsList(taskID)
if err != nil {
return fmt.Errorf("get seed subscribers: %v", err)
}
var wg sync.WaitGroup
for e := chanList.Front(); e != nil; e = e.Next() {
wg.Add(1)
sub := e.Value.(chan *types.SeedPiece)
go func(sub chan *types.SeedPiece, record *types.SeedPiece) {
defer wg.Done()
select {
case sub <- record:
case <-time.After(pm.timeout):
}
}(sub, record)
}
wg.Wait()
return nil
}
func (pm *Manager) PublishTask(ctx context.Context, taskID string, task *types.SeedTask) error {
span := trace.SpanFromContext(ctx)
taskBytes, _ := json.Marshal(task)
span.AddEvent(config.EventPublishTask, trace.WithAttributes(config.AttributeSeedTask.String(string(taskBytes))))
logger.Debugf("publish task record %#v", task)
pm.mu.Lock(taskID, false)
defer pm.mu.UnLock(taskID, false)
chanList, err := pm.seedSubscribers.GetAsList(taskID)
if err != nil {
return fmt.Errorf("get seed subscribers: %v", err)
}
// unwatch
for e := chanList.Front(); e != nil; e = e.Next() {
chanList.Remove(e)
sub, ok := e.Value.(chan *types.SeedPiece)
if !ok {
logger.Warnf("failed to convert chan seedPiece, e.Value: %v", e.Value)
continue
}
close(sub)
if progressPublisher, ok := pm.seedTaskSubjects[taskID]; ok {
progressPublisher.RemoveAllSubscribers()
delete(pm.seedTaskSubjects, taskID)
}
return nil
}
func (pm *Manager) Clear(taskID string) error {
pm.mu.Lock(taskID, false)
defer pm.mu.UnLock(taskID, false)
chanList, err := pm.seedSubscribers.GetAsList(taskID)
if err != nil && errors.Cause(err) != dferrors.ErrDataNotFound {
return errors.Wrap(err, "get seed subscribers")
}
if chanList != nil {
for e := chanList.Front(); e != nil; e = e.Next() {
chanList.Remove(e)
sub, ok := e.Value.(chan *types.SeedPiece)
if !ok {
logger.Warnf("failed to convert chan seedPiece, e.Value: %v", e.Value)
continue
}
close(sub)
}
chanList = nil
}
err = pm.seedSubscribers.Remove(taskID)
if err != nil && dferrors.ErrDataNotFound != errors.Cause(err) {
return errors.Wrap(err, "clear seed subscribes")
}
err = pm.taskPieceMetaRecords.Remove(taskID)
if err != nil && dferrors.ErrDataNotFound != errors.Cause(err) {
return errors.Wrap(err, "clear piece meta records")
}
return nil
}
func (pm *Manager) GetPieces(ctx context.Context, taskID string) (records []*types.SeedPiece, err error) {
pm.mu.Lock(taskID, true)
defer pm.mu.UnLock(taskID, true)
return pm.getPieceMetaRecordsByTaskID(taskID)
}
// setPieceMetaRecord
func (pm *Manager) setPieceMetaRecord(taskID string, record *types.SeedPiece) error {
pieceRecords, err := pm.taskPieceMetaRecords.GetAsMap(taskID)
if err != nil {
return err
}
return pieceRecords.Add(strconv.Itoa(int(record.PieceNum)), record)
}
// getPieceMetaRecordsByTaskID
func (pm *Manager) getPieceMetaRecordsByTaskID(taskID string) (records []*types.SeedPiece, err error) {
pieceRecords, err := pm.taskPieceMetaRecords.GetAsMap(taskID)
if err != nil {
return nil, errors.Wrap(err, "failed to get piece meta records")
}
pieceNums := pieceRecords.ListKeyAsIntSlice()
sort.Ints(pieceNums)
for i := 0; i < len(pieceNums); i++ {
v, _ := pieceRecords.Get(strconv.Itoa(pieceNums[i]))
if value, ok := v.(*types.SeedPiece); ok {
records = append(records, value)
}
}
return records, nil
}

View File

@ -0,0 +1,207 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package progress
import (
"context"
"sync"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/suite"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/source/httpprotocol"
sourcemock "d7y.io/dragonfly/v2/pkg/source/mock"
"d7y.io/dragonfly/v2/pkg/util/rangeutils"
)
// TestProgressManagerSuite runs the progress-manager test suite.
func TestProgressManagerSuite(t *testing.T) {
	suite.Run(t, new(ProgressManagerTestSuite))
}

// ProgressManagerTestSuite wires a real task manager (backed by a mocked
// source client) to the progress manager under test.
type ProgressManagerTestSuite struct {
	manager *manager // the concrete progress manager built in SetupSuite
	suite.Suite
}

var (
	testTaskID = "testTaskID"
	// Fixture task URL; the "drgonfly" spelling is a test-only URL that is
	// matched against the mocked source client in SetupSuite, so it never
	// hits the network.
	testTask = task.NewSeedTask(testTaskID, "https://www.drgonfly.com", nil)
	// taskPieces holds four contiguous 100-byte pieces (offsets 0-399) used
	// as the canonical publish/verify fixture throughout the suite.
	taskPieces = map[uint32]*task.PieceInfo{
		0: {
			PieceNum: 0,
			PieceMd5: "md50",
			PieceRange: &rangeutils.Range{
				StartIndex: 0,
				EndIndex:   99,
			},
			OriginRange: &rangeutils.Range{
				StartIndex: 0,
				EndIndex:   99,
			},
			PieceLen:   100,
			PieceStyle: 0,
		},
		1: {
			PieceNum: 1,
			PieceMd5: "md51",
			PieceRange: &rangeutils.Range{
				StartIndex: 100,
				EndIndex:   199,
			},
			OriginRange: &rangeutils.Range{
				StartIndex: 100,
				EndIndex:   199,
			},
			PieceLen:   100,
			PieceStyle: 0,
		},
		2: {
			PieceNum: 2,
			PieceMd5: "md52",
			PieceRange: &rangeutils.Range{
				StartIndex: 200,
				EndIndex:   299,
			},
			OriginRange: &rangeutils.Range{
				StartIndex: 200,
				EndIndex:   299,
			},
			PieceLen:   100,
			PieceStyle: 0,
		},
		3: {
			PieceNum: 3,
			PieceMd5: "md53",
			PieceRange: &rangeutils.Range{
				StartIndex: 300,
				EndIndex:   399,
			},
			OriginRange: &rangeutils.Range{
				StartIndex: 300,
				EndIndex:   399,
			},
			PieceLen:   100,
			PieceStyle: 0,
		},
	}
)

// SetupSuite registers a mocked "https" resource client so the task manager
// can resolve the fixture task's content length offline, registers the
// fixture task, and builds the progress manager under test.
func (suite *ProgressManagerTestSuite) SetupSuite() {
	ctl := gomock.NewController(suite.T())
	sourceClient := sourcemock.NewMockResourceClient(ctl)
	source.UnRegister("https")
	suite.Nil(source.Register("https", sourceClient, httpprotocol.Adapter))
	// Exactly one content-length probe is expected, during AddOrUpdate below.
	sourceClient.EXPECT().GetContentLength(source.RequestEq(testTask.RawURL)).Return(int64(1024*1024*500+1000), nil).Times(1)
	taskManager, err := task.NewManager(config.New())
	suite.Nil(err)
	seedTask, err := taskManager.AddOrUpdate(testTask)
	suite.Nil(err)
	suite.Equal(int64(1024*1024*500+1000), seedTask.SourceFileLength)
	manager, err := newManager(taskManager)
	suite.Nil(err)
	suite.manager = manager
}
// TestWatchSeedProgress exercises WatchSeedProgress at every stage of a
// task's life: before any piece is published, after each of the four pieces,
// and after the task is marked done. Every watcher — no matter when it
// subscribed — must receive all pieces in ascending order.
func (suite *ProgressManagerTestSuite) TestWatchSeedProgress() {
	// Watching a task that was never registered must fail.
	got, err := suite.manager.WatchSeedProgress(context.Background(), "clientAddr", "notExistTask")
	suite.NotNil(err)
	suite.Nil(got)

	wg := sync.WaitGroup{}
	wg.Add(5)

	// Subscribe before any piece is published.
	suite.watchAndVerify(&wg, "clientAddr1")
	// Publish the first piece; subscribe with one piece already recorded.
	suite.Nil(suite.manager.PublishPiece(context.Background(), testTaskID, taskPieces[0]))
	suite.watchAndVerify(&wg, "clientAddr2")
	// Publish the second piece; subscribe with two pieces recorded.
	suite.Nil(suite.manager.PublishPiece(context.Background(), testTaskID, taskPieces[1]))
	suite.watchAndVerify(&wg, "clientAddr3")
	// Publish the third piece; subscribe with three pieces recorded.
	suite.Nil(suite.manager.PublishPiece(context.Background(), testTaskID, taskPieces[2]))
	suite.watchAndVerify(&wg, "clientAddr4")
	// Publish the fourth piece, then publish the completed task.
	suite.Nil(suite.manager.PublishPiece(context.Background(), testTaskID, taskPieces[3]))
	testTask.CdnStatus = task.StatusSuccess
	suite.Nil(suite.manager.PublishTask(context.Background(), testTaskID, testTask))
	// Subscribing after completion must still replay every piece.
	suite.watchAndVerify(&wg, "clientAddr5")

	wg.Wait()
}

// watchAndVerify subscribes clientAddr to the fixture task's seed progress
// and asserts, in a background goroutine (completing wg on exit), that all
// pieces in taskPieces arrive in ascending piece-number order.
func (suite *ProgressManagerTestSuite) watchAndVerify(wg *sync.WaitGroup, clientAddr string) {
	pieceChan, err := suite.manager.WatchSeedProgress(context.Background(), clientAddr, testTaskID)
	suite.Nil(err)
	suite.NotNil(pieceChan)
	go func() {
		defer wg.Done()
		var pieceCount uint32 = 0
		for info := range pieceChan {
			suite.Equal(taskPieces[pieceCount], info)
			pieceCount++
		}
		suite.Equal(len(taskPieces), int(pieceCount))
	}()
}

View File

@ -0,0 +1,167 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package progress
import (
"container/list"
"context"
"sort"
"sync"
"go.uber.org/atomic"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog"
)
// subscriber consumes seed-progress events for one task on behalf of a
// single remote client.
type subscriber struct {
	ctx       context.Context
	scheduler string // remote client address this subscriber serves
	taskID    string
	done      chan struct{} // closed exactly once by Close
	once      sync.Once
	pieces    map[uint32]*task.PieceInfo // pending pieces awaiting delivery; guarded by cond.L
	pieceChan chan *task.PieceInfo       // delivery channel; closed only by readLoop
	cond      *sync.Cond
	closed    *atomic.Bool // set by Close; checked by readLoop's wait condition
}
// newProgressSubscriber builds a subscriber pre-seeded with a copy of the
// pieces already known for the task and starts its delivery loop.
func newProgressSubscriber(ctx context.Context, clientAddr, taskID string, taskPieces map[uint32]*task.PieceInfo) *subscriber {
	// Copy the caller's map so later mutations by either side stay isolated.
	known := make(map[uint32]*task.PieceInfo, len(taskPieces))
	for num, piece := range taskPieces {
		known[num] = piece
	}
	s := &subscriber{
		ctx:       ctx,
		scheduler: clientAddr,
		taskID:    taskID,
		pieces:    known,
		done:      make(chan struct{}),
		pieceChan: make(chan *task.PieceInfo, 100),
		cond:      sync.NewCond(&sync.Mutex{}),
		closed:    atomic.NewBool(false),
	}
	go s.readLoop()
	return s
}
// readLoop drains sub.pieces into sub.pieceChan until the subscriber is
// cancelled (ctx) or closed (done). It is started once per subscriber by
// newProgressSubscriber and is the only goroutine that closes pieceChan.
func (sub *subscriber) readLoop() {
	logger.Debugf("subscriber %s starts watching task %s seed progress", sub.scheduler, sub.taskID)
	defer func() {
		close(sub.pieceChan)
		logger.Debugf("subscriber %s stopped watch task %s seed progress", sub.scheduler, sub.taskID)
	}()
	for {
		select {
		case <-sub.ctx.Done():
			return
		case <-sub.done:
			// done is closed: flush pieces that arrived before Close, then
			// exit once the backlog is empty (this case re-fires each loop).
			// NOTE(review): len(sub.pieces) is read here without holding
			// cond.L — confirm Close/Notify ordering makes this safe.
			if len(sub.pieces) == 0 {
				return
			}
			logger.Debugf("sub has been closed, there are still has %d pieces waiting to be sent", len(sub.pieces))
			sub.cond.L.Lock()
			sub.sendPieces()
			sub.cond.L.Unlock()
		default:
			// Block until Notify adds pieces or Close sets the closed flag,
			// then deliver everything currently pending.
			sub.cond.L.Lock()
			for len(sub.pieces) == 0 && !sub.closed.Load() {
				sub.cond.Wait()
			}
			sub.sendPieces()
			sub.cond.L.Unlock()
		}
	}
}
// sendPieces delivers every pending piece in ascending piece-number order,
// removing each one from the pending map. The caller must hold sub.cond.L.
func (sub *subscriber) sendPieces() {
	ordered := make([]uint32, 0, len(sub.pieces))
	for num := range sub.pieces {
		ordered = append(ordered, num)
	}
	sort.Slice(ordered, func(a, b int) bool { return ordered[a] < ordered[b] })
	for _, num := range ordered {
		logger.Debugf("subscriber %s send %d piece info of taskID %s", sub.scheduler, num, sub.taskID)
		sub.pieceChan <- sub.pieces[num]
		delete(sub.pieces, num)
	}
}
// Notify records a newly published piece and wakes the delivery loop.
func (sub *subscriber) Notify(seedPiece *task.PieceInfo) {
	logger.Debugf("notifies subscriber %s about %d piece info of taskID %s", sub.scheduler, seedPiece.PieceNum, sub.taskID)
	sub.cond.L.Lock()
	// Deferred calls run LIFO: the lock is released first, then the waiting
	// readLoop is signalled — same order as the original.
	defer sub.cond.Signal()
	defer sub.cond.L.Unlock()
	sub.pieces[seedPiece.PieceNum] = seedPiece
}
// Receiver returns the read side of the delivery channel; it is closed by
// readLoop when the subscriber terminates.
func (sub *subscriber) Receiver() <-chan *task.PieceInfo {
	return sub.pieceChan
}
// Close terminates the subscriber at most once: it marks the subscriber
// closed, wakes a waiting readLoop, and closes the done channel so the loop
// can flush its backlog and exit.
func (sub *subscriber) Close() {
	sub.once.Do(func() {
		logger.Debugf("close subscriber %s from taskID %s", sub.scheduler, sub.taskID)
		// CAS result is ignored: once.Do already guarantees this body runs
		// a single time.
		sub.closed.CAS(false, true)
		sub.cond.Signal()
		close(sub.done)
	})
}
// publisher fans seed-progress events for one task out to its subscribers.
type publisher struct {
	taskID      string
	subscribers *list.List // element values are *subscriber
}

// newProgressPublisher returns a publisher for taskID with no subscribers.
func newProgressPublisher(taskID string) *publisher {
	return &publisher{
		taskID:      taskID,
		subscribers: list.New(),
	}
}
// AddSubscriber appends sub to the publisher's subscriber list.
func (pub *publisher) AddSubscriber(sub *subscriber) {
	pub.subscribers.PushBack(sub)
	// Fix: log the publisher's own taskID. The original passed sub.taskID,
	// which is misleading when a subscriber was created under a different
	// task ID than the publisher it is added to.
	logger.Debugf("subscriber %s has been added into subscribers of publisher %s, list size is %d", sub.scheduler, pub.taskID, pub.subscribers.Len())
}
// RemoveSubscriber closes sub and unlinks it from the subscriber list.
// sub is closed even when it is not found in the list.
func (pub *publisher) RemoveSubscriber(sub *subscriber) {
	sub.Close()
	for e := pub.subscribers.Front(); e != nil; e = e.Next() {
		if e.Value == sub {
			pub.subscribers.Remove(e)
			logger.Debugf("subscriber %s has been removed from subscribers of publisher %s, list size is %d", sub.scheduler, sub.taskID, pub.subscribers.Len())
			return
		}
	}
}
// NotifySubscribers forwards seedPiece to every registered subscriber.
func (pub *publisher) NotifySubscribers(seedPiece *task.PieceInfo) {
	for element := pub.subscribers.Front(); element != nil; element = element.Next() {
		s := element.Value.(*subscriber)
		s.Notify(seedPiece)
	}
}
// RemoveAllSubscribers closes and removes every subscriber. The next element
// is captured before each removal because RemoveSubscriber unlinks the
// current element from the list, invalidating e.Next().
func (pub *publisher) RemoveAllSubscribers() {
	var next *list.Element
	for e := pub.subscribers.Front(); e != nil; e = next {
		next = e.Next()
		pub.RemoveSubscriber(e.Value.(*subscriber))
	}
}

View File

@ -0,0 +1,152 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package progress
import (
"context"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/pkg/util/rangeutils"
)
// Test_publisher_NotifySubscribers verifies that published pieces fan out to
// every subscriber, and that pieces pre-seeded on a subscriber at creation
// time are delivered in addition to the notified ones.
func Test_publisher_NotifySubscribers(t *testing.T) {
	assert := assert.New(t)
	publisher := newProgressPublisher("testTask")
	notifyPieces := []*task.PieceInfo{
		{
			PieceNum: 0,
			PieceMd5: "pieceMd51",
			PieceRange: &rangeutils.Range{
				StartIndex: 0,
				EndIndex:   99,
			},
			OriginRange: &rangeutils.Range{
				StartIndex: 0,
				EndIndex:   99,
			},
			PieceLen:   100,
			PieceStyle: 0,
		}, {
			PieceNum: 1,
			PieceMd5: "pieceMd52",
			PieceRange: &rangeutils.Range{
				StartIndex: 100,
				EndIndex:   199,
			},
			OriginRange: &rangeutils.Range{
				StartIndex: 100,
				EndIndex:   199,
			},
			PieceLen:   100,
			PieceStyle: 0,
		},
	}
	wg := sync.WaitGroup{}
	// sub1 and sub2 start with no pre-seeded pieces.
	sub1 := newProgressSubscriber(context.Background(), "client1", "testTask", nil)
	publisher.AddSubscriber(sub1)
	sub2 := newProgressSubscriber(context.Background(), "client2", "testTask", nil)
	publisher.AddSubscriber(sub2)
	additionPieceInfo1 := &task.PieceInfo{
		PieceNum:    100,
		PieceMd5:    "xxxxx",
		PieceRange:  &rangeutils.Range{},
		OriginRange: &rangeutils.Range{},
		PieceLen:    0,
		PieceStyle:  0,
	}
	// NOTE(review): "taskTask" (vs the publisher's "testTask") looks like a
	// typo in the fixture, but it only affects log output — left as-is.
	sub3 := newProgressSubscriber(context.Background(), "client3", "taskTask", map[uint32]*task.PieceInfo{
		100: additionPieceInfo1,
	})
	additionPieceInfo2 := &task.PieceInfo{
		PieceNum:    200,
		PieceMd5:    "xxxxx",
		PieceRange:  &rangeutils.Range{},
		OriginRange: &rangeutils.Range{},
		PieceLen:    0,
		PieceStyle:  0,
	}
	publisher.AddSubscriber(sub3)
	sub4 := newProgressSubscriber(context.Background(), "client4", "taskTask", map[uint32]*task.PieceInfo{
		100: additionPieceInfo1,
		200: additionPieceInfo2,
	})
	publisher.AddSubscriber(sub4)
	chan1 := sub1.Receiver()
	chan2 := sub2.Receiver()
	chan3 := sub3.Receiver()
	chan4 := sub4.Receiver()
	wg.Add(1)
	go func(pieceChan <-chan *task.PieceInfo) {
		defer wg.Done()
		var pieceCount = 0
		for info := range pieceChan {
			pieceCount++
			assert.EqualValues(notifyPieces[info.PieceNum], info)
		}
		assert.Equal(2, pieceCount)
	}(chan1)
	wg.Add(1)
	go func(pieceChan <-chan *task.PieceInfo) {
		defer wg.Done()
		var pieceCount = 0
		for info := range pieceChan {
			pieceCount++
			assert.EqualValues(notifyPieces[info.PieceNum], info)
		}
		assert.Equal(2, pieceCount)
	}(chan2)
	wg.Add(1)
	go func(pieceChan <-chan *task.PieceInfo) {
		defer wg.Done()
		var pieceCount = 0
		for info := range pieceChan {
			pieceCount++
			if info.PieceNum == 100 {
				assert.EqualValues(additionPieceInfo1, info)
			} else {
				assert.EqualValues(notifyPieces[info.PieceNum], info)
			}
		}
		// Fix: the original counted pieces here but never asserted the count
		// (unlike the other three watchers). sub3 must see its 1 pre-seeded
		// piece plus the 2 notified pieces.
		assert.Equal(3, pieceCount)
	}(chan3)
	wg.Add(1)
	go func(pieceChan <-chan *task.PieceInfo) {
		defer wg.Done()
		var pieceCount = 0
		for info := range pieceChan {
			pieceCount++
			if info.PieceNum == 100 {
				assert.EqualValues(additionPieceInfo1, info)
			} else if info.PieceNum == 200 {
				assert.EqualValues(additionPieceInfo2, info)
			} else {
				assert.EqualValues(notifyPieces[info.PieceNum], info)
			}
		}
		assert.Equal(4, pieceCount)
	}(chan4)
	for i := range notifyPieces {
		publisher.NotifySubscribers(notifyPieces[i])
	}
	// RemoveAllSubscribers closes each subscriber, which flushes remaining
	// pieces and closes the receiver channels, ending the range loops above.
	publisher.RemoveAllSubscribers()
	wg.Wait()
}

View File

@ -1,48 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//go:generate mockgen -destination ./mock/mock_progress_mgr.go -package mock d7y.io/dragonfly/v2/cdn/supervisor SeedProgressMgr
package supervisor
import (
"context"
"d7y.io/dragonfly/v2/cdn/types"
)
// SeedProgressMgr as an interface defines all operations about seed progress
type SeedProgressMgr interface {
	// InitSeedProgress init task seed progress
	InitSeedProgress(ctx context.Context, taskID string)
	// WatchSeedProgress watch task seed progress
	WatchSeedProgress(ctx context.Context, taskID string) (<-chan *types.SeedPiece, error)
	// PublishPiece publish piece seed
	PublishPiece(ctx context.Context, taskID string, piece *types.SeedPiece) error
	// PublishTask publish task seed
	PublishTask(ctx context.Context, taskID string, task *types.SeedTask) error
	// GetPieces get pieces by taskID
	GetPieces(ctx context.Context, taskID string) (records []*types.SeedPiece, err error)
	// Clear meta info of task
	Clear(taskID string) error
	// SetTaskMgr injects the task manager after construction — presumably to
	// break a construction-time cycle between managers; verify against callers.
	SetTaskMgr(taskMgr SeedTaskMgr)
}

138
cdn/supervisor/service.go Normal file
View File

@ -0,0 +1,138 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package supervisor
import (
"context"
"encoding/json"
"sort"
"github.com/pkg/errors"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn"
"d7y.io/dragonfly/v2/cdn/supervisor/progress"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/pkg/synclock"
)
var (
	// errResourcesLacked represents a lack of resources, for example, the disk does not have enough space.
	errResourcesLacked = errors.New("resources lacked")
)

// IsResourcesLacked reports whether err is (or wraps) errResourcesLacked.
func IsResourcesLacked(err error) bool {
	return errors.Is(err, errResourcesLacked)
}
// CDNService is the high-level facade over the task, cdn and progress
// managers used by the RPC layer.
type CDNService interface {
	// RegisterSeedTask registers seed task
	RegisterSeedTask(ctx context.Context, clientAddr string, registerTask *task.SeedTask) (<-chan *task.PieceInfo, error)
	// GetSeedPieces returns pieces associated with taskID, which are sorted by pieceNum
	GetSeedPieces(taskID string) (pieces []*task.PieceInfo, err error)
	// GetSeedTask returns seed task associated with taskID
	GetSeedTask(taskID string) (seedTask *task.SeedTask, err error)
}

// cdnService is the only implementation of CDNService; it delegates to the
// three managers injected by NewCDNService.
type cdnService struct {
	taskManager     task.Manager
	cdnManager      cdn.Manager
	progressManager progress.Manager
}
// NewCDNService assembles a CDNService from its three collaborating managers.
// The error return is always nil today but is kept for interface stability.
func NewCDNService(taskManager task.Manager, cdnManager cdn.Manager, progressManager progress.Manager) (CDNService, error) {
	service := &cdnService{
		taskManager:     taskManager,
		cdnManager:      cdnManager,
		progressManager: progressManager,
	}
	return service, nil
}
// RegisterSeedTask registers (or refreshes) a seed task, triggers the CDN
// sync if needed, and returns a channel streaming the task's piece progress.
func (service *cdnService) RegisterSeedTask(ctx context.Context, clientAddr string, registerTask *task.SeedTask) (<-chan *task.PieceInfo, error) {
	_, err := service.taskManager.AddOrUpdate(registerTask)
	if err != nil {
		return nil, err
	}
	if err = service.triggerCdnSyncAction(ctx, registerTask.ID); err != nil {
		return nil, err
	}
	return service.progressManager.WatchSeedProgress(ctx, clientAddr, registerTask.ID)
}
// triggerCdnSyncAction decides whether the CDN needs to start seeding taskID
// and, if so, starts the download in a detached goroutine. It returns
// errResourcesLacked when the disk cannot hold the source file.
func (service *cdnService) triggerCdnSyncAction(ctx context.Context, taskID string) error {
	seedTask, err := service.taskManager.Get(taskID)
	if err != nil {
		return err
	}
	synclock.Lock(taskID, true)
	if seedTask.SourceFileLength > 0 {
		if ok, err := service.cdnManager.TryFreeSpace(seedTask.SourceFileLength); err != nil {
			// Best-effort: a failed space probe is logged, not fatal.
			seedTask.Log().Errorf("failed to try free space: %v", err)
		} else if !ok {
			// Fix: the read lock acquired above was leaked on this return
			// path in the original.
			synclock.UnLock(taskID, true)
			return errResourcesLacked
		}
	}
	if !seedTask.IsFrozen() {
		// Fix: the original format string ran the status straight into the
		// message ("%sno need trigger again").
		seedTask.Log().Infof("seedTask status is %s, no need trigger again", seedTask.CdnStatus)
		synclock.UnLock(seedTask.ID, true)
		return nil
	}
	synclock.UnLock(seedTask.ID, true)

	synclock.Lock(seedTask.ID, false)
	defer synclock.UnLock(seedTask.ID, false)
	// Reconfirm under the write lock: another goroutine may have started the
	// trigger between releasing the read lock and acquiring the write lock.
	if !seedTask.IsFrozen() {
		seedTask.Log().Infof("reconfirm seedTask status is not frozen, no need trigger again, current status: %s", seedTask.CdnStatus)
		return nil
	}
	seedTask.StartTrigger()
	// Run the CDN trigger asynchronously; callers observe progress through
	// the progress manager's watch channel, not through this goroutine.
	go func() {
		updateTaskInfo, err := service.cdnManager.TriggerCDN(context.Background(), seedTask)
		if err != nil {
			seedTask.Log().Errorf("failed to trigger cdn: %v", err)
		}
		jsonTaskInfo, err := json.Marshal(updateTaskInfo)
		if err != nil {
			seedTask.Log().Errorf("failed to json marshal updateTaskInfo: %#v: %v", updateTaskInfo, err)
			return
		}
		seedTask.Log().Infof("trigger cdn result: %s", jsonTaskInfo)
	}()
	return nil
}
// GetSeedPieces returns the task's downloaded pieces sorted by piece number.
func (service *cdnService) GetSeedPieces(taskID string) ([]*task.PieceInfo, error) {
	pieceMap, err := service.taskManager.GetProgress(taskID)
	if err != nil {
		return nil, err
	}
	pieces := make([]*task.PieceInfo, 0, len(pieceMap))
	for _, piece := range pieceMap {
		pieces = append(pieces, piece)
	}
	sort.Slice(pieces, func(a, b int) bool {
		return pieces[a].PieceNum < pieces[b].PieceNum
	})
	return pieces, nil
}
// GetSeedTask returns the seed task registered under taskID, delegating
// directly to the task manager.
func (service *cdnService) GetSeedTask(taskID string) (*task.SeedTask, error) {
	return service.taskManager.Get(taskID)
}

View File

@ -14,206 +14,128 @@
* limitations under the License.
*/
//go:generate mockgen -destination ../mocks/task/mock_task_manager.go -package task d7y.io/dragonfly/v2/cdn/supervisor/task Manager
package task
import (
"context"
"encoding/json"
"sync"
"time"
"github.com/pkg/errors"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"d7y.io/dragonfly/v2/cdn/config"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/supervisor/gc"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/internal/dferrors"
"d7y.io/dragonfly/v2/cdn/gc"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/internal/util"
"d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/synclock"
"d7y.io/dragonfly/v2/pkg/syncmap"
"d7y.io/dragonfly/v2/pkg/unit"
"d7y.io/dragonfly/v2/pkg/util/stringutils"
)
// Ensure that Manager implements the SeedTaskMgr and gcExecutor interfaces
var _ supervisor.SeedTaskMgr = (*Manager)(nil)
var _ gc.Executor = (*Manager)(nil)
// Manager as an interface defines all operations against SeedTask.
// A SeedTask will store some meta info about the taskFile, pieces and something else.
// A seedTask corresponds to three files on the disk, which are identified by taskId, the data file meta file piece file
type Manager interface {
	// AddOrUpdate stores registerTask if no task with the same ID is
	// present; otherwise it returns the task already registered under
	// that ID. The returned err is non-nil when the new task conflicts
	// with the existing one or the source cannot be probed.
	AddOrUpdate(registerTask *SeedTask) (seedTask *SeedTask, err error)
	// Get returns the task info with specified taskID, or an error if no
	// value is present.
	Get(taskID string) (seedTask *SeedTask, err error)
	// Update merges updateTask into the task stored under taskID.
	Update(taskID string, updateTask *SeedTask) (err error)
	// UpdateProgress records a downloaded piece belonging to the task.
	UpdateProgress(taskID string, piece *PieceInfo) (err error)
	// GetProgress returns the downloaded pieces belonging to the task,
	// keyed by piece number.
	GetProgress(taskID string) (map[uint32]*PieceInfo, error)
	// Exist checks task existence with specified taskID.
	// It returns the task with specified taskID, or nil if none is present.
	// The ok result indicates whether the value was found in the taskManager.
	Exist(taskID string) (seedTask *SeedTask, ok bool)
	// Delete removes the task with specified taskID together with its
	// bookkeeping state.
	Delete(taskID string)
}
// Ensure that manager implements the Manager and gc.Executor interfaces
var (
_ Manager = (*manager)(nil)
_ gc.Executor = (*manager)(nil)
)
var (
errTaskNotFound = errors.New("task is not found")
errURLUnreachable = errors.New("url is unreachable")
errTaskIDConflict = errors.New("taskID is conflict")
)
var tracer trace.Tracer
func init() {
tracer = otel.Tracer("cdn-task-manager")
func IsTaskNotFound(err error) bool {
return errors.Is(err, errTaskNotFound)
}
// Manager is an implementation of the interface of TaskMgr.
type Manager struct {
cfg *config.Config
taskStore *syncmap.SyncMap
accessTimeMap *syncmap.SyncMap
taskURLUnReachableStore *syncmap.SyncMap
cdnMgr supervisor.CDNMgr
progressMgr supervisor.SeedProgressMgr
// manager is an implementation of the interface of Manager.
type manager struct {
config *config.Config
taskStore sync.Map
accessTimeMap sync.Map
taskURLUnreachableStore sync.Map
}
// NewManager returns a new Manager Object.
func NewManager(cfg *config.Config, cdnMgr supervisor.CDNMgr, progressMgr supervisor.SeedProgressMgr) (*Manager, error) {
taskMgr := &Manager{
cfg: cfg,
taskStore: syncmap.NewSyncMap(),
accessTimeMap: syncmap.NewSyncMap(),
taskURLUnReachableStore: syncmap.NewSyncMap(),
cdnMgr: cdnMgr,
progressMgr: progressMgr,
func NewManager(config *config.Config) (Manager, error) {
manager := &manager{
config: config,
}
progressMgr.SetTaskMgr(taskMgr)
gc.Register("task", cfg.GCInitialDelay, cfg.GCMetaInterval, taskMgr)
return taskMgr, nil
gc.Register("task", config.GCInitialDelay, config.GCMetaInterval, manager)
return manager, nil
}
func (tm *Manager) Register(ctx context.Context, registerTask *types.SeedTask) (pieceChan <-chan *types.SeedPiece, err error) {
var span trace.Span
ctx, span = tracer.Start(ctx, config.SpanTaskRegister)
defer span.End()
task, err := tm.AddOrUpdate(registerTask)
if err != nil {
span.RecordError(err)
logger.WithTaskID(registerTask.TaskID).Infof("failed to add or update task with req: %#v: %v", registerTask, err)
return nil, err
}
taskBytes, _ := json.Marshal(task)
span.SetAttributes(config.AttributeTaskInfo.String(string(taskBytes)))
task.Log().Debugf("success get task info: %#v", task)
// update accessTime for taskId
if err := tm.accessTimeMap.Add(task.TaskID, time.Now()); err != nil {
task.Log().Warnf("failed to update accessTime: %v", err)
}
// trigger CDN
if err := tm.triggerCdnSyncAction(ctx, task); err != nil {
return nil, errors.Wrapf(err, "trigger cdn")
}
task.Log().Infof("successfully trigger cdn sync action")
// watch seed progress
return tm.progressMgr.WatchSeedProgress(ctx, task.TaskID)
}
// triggerCdnSyncAction
func (tm *Manager) triggerCdnSyncAction(ctx context.Context, task *types.SeedTask) error {
var span trace.Span
ctx, span = tracer.Start(ctx, config.SpanTriggerCDNSyncAction)
defer span.End()
synclock.Lock(task.TaskID, true)
if !task.IsFrozen() {
span.SetAttributes(config.AttributeTaskStatus.String(task.CdnStatus))
task.Log().Infof("seedTask is running or has been downloaded successfully, status: %s", task.CdnStatus)
synclock.UnLock(task.TaskID, true)
return nil
}
synclock.UnLock(task.TaskID, true)
synclock.Lock(task.TaskID, false)
defer synclock.UnLock(task.TaskID, false)
// reconfirm
span.SetAttributes(config.AttributeTaskStatus.String(task.CdnStatus))
if !task.IsFrozen() {
task.Log().Infof("reconfirm find seedTask is running or has been downloaded successfully, status: %s", task.CdnStatus)
return nil
}
if task.IsWait() {
tm.progressMgr.InitSeedProgress(ctx, task.TaskID)
task.Log().Infof("successfully init seed progress for task")
}
updatedTask, err := tm.updateTask(task.TaskID, &types.SeedTask{
CdnStatus: types.TaskInfoCdnStatusRunning,
})
if err != nil {
return errors.Wrapf(err, "update task")
}
// triggerCDN goroutine
go func() {
updateTaskInfo, err := tm.cdnMgr.TriggerCDN(context.Background(), task)
if err != nil {
task.Log().Errorf("trigger cdn get error: %v", err)
}
updatedTask, err = tm.updateTask(task.TaskID, updateTaskInfo)
go func() {
if err := tm.progressMgr.PublishTask(ctx, task.TaskID, updatedTask); err != nil {
task.Log().Errorf("failed to publish task: %v", err)
}
}()
if err != nil {
task.Log().Errorf("failed to update task: %v", err)
}
task.Log().Infof("successfully update task cdn updatedTask: %#v", updatedTask)
}()
return nil
}
func (tm *Manager) getTask(taskID string) (*types.SeedTask, error) {
if stringutils.IsBlank(taskID) {
return nil, errors.Wrap(cdnerrors.ErrInvalidValue, "taskID is empty")
}
v, err := tm.taskStore.Get(taskID)
if err != nil {
if errors.Cause(err) == dferrors.ErrDataNotFound {
return nil, errors.Wrapf(cdnerrors.ErrDataNotFound, "task not found")
}
return nil, err
}
// type assertion
if info, ok := v.(*types.SeedTask); ok {
return info, nil
}
return nil, errors.Wrapf(cdnerrors.ErrConvertFailed, "origin object: %#v", v)
}
func (tm *Manager) AddOrUpdate(registerTask *types.SeedTask) (seedTask *types.SeedTask, err error) {
func (tm *manager) AddOrUpdate(registerTask *SeedTask) (seedTask *SeedTask, err error) {
defer func() {
if err != nil {
tm.accessTimeMap.Store(registerTask.TaskID, time.Now())
tm.accessTimeMap.Store(registerTask.ID, time.Now())
}
}()
synclock.Lock(registerTask.TaskID, true)
if unreachableTime, ok := tm.getTaskUnreachableTime(registerTask.TaskID); ok {
if time.Since(unreachableTime) < tm.cfg.FailAccessInterval {
synclock.UnLock(registerTask.TaskID, true)
synclock.Lock(registerTask.ID, true)
if unreachableTime, ok := tm.getTaskUnreachableTime(registerTask.ID); ok {
if time.Since(unreachableTime) < tm.config.FailAccessInterval {
synclock.UnLock(registerTask.ID, true)
// TODO 校验Header
return nil, errURLUnreachable
}
logger.Debugf("delete taskID: %s from unreachable url list", registerTask.TaskID)
tm.taskURLUnReachableStore.Delete(registerTask.TaskID)
logger.Debugf("delete taskID: %s from unreachable url list", registerTask.ID)
tm.taskURLUnreachableStore.Delete(registerTask.ID)
}
actual, loaded := tm.taskStore.LoadOrStore(registerTask.TaskID, registerTask)
seedTask = actual.(*types.SeedTask)
actual, loaded := tm.taskStore.LoadOrStore(registerTask.ID, registerTask)
seedTask = actual.(*SeedTask)
if loaded && !IsSame(seedTask, registerTask) {
synclock.UnLock(registerTask.TaskID, true)
synclock.UnLock(registerTask.ID, true)
return nil, errors.Wrapf(errTaskIDConflict, "register task %#v is conflict with exist task %#v", registerTask, seedTask)
}
if seedTask.SourceFileLength != source.UnknownSourceFileLen {
synclock.UnLock(registerTask.TaskID, true)
synclock.UnLock(registerTask.ID, true)
return seedTask, nil
}
synclock.UnLock(registerTask.TaskID, true)
synclock.Lock(registerTask.TaskID, false)
defer synclock.UnLock(registerTask.TaskID, false)
synclock.UnLock(registerTask.ID, true)
synclock.Lock(registerTask.ID, false)
defer synclock.UnLock(registerTask.ID, false)
if seedTask.SourceFileLength != source.UnknownSourceFileLen {
return seedTask, nil
}
// get sourceContentLength with req.Header
contentLengthRequest, err := source.NewRequestWithHeader(registerTask.URL, registerTask.Header)
contentLengthRequest, err := source.NewRequestWithHeader(registerTask.RawURL, registerTask.Header)
if err != nil {
return nil, err
}
@ -223,9 +145,9 @@ func (tm *Manager) AddOrUpdate(registerTask *types.SeedTask) (seedTask *types.Se
}
sourceFileLength, err := source.GetContentLength(contentLengthRequest)
if err != nil {
registerTask.Log().Errorf("get url (%s) content length failed: %v", registerTask.URL, err)
registerTask.Log().Errorf("get url (%s) content length failed: %v", registerTask.RawURL, err)
if source.IsResourceNotReachableError(err) {
tm.taskURLUnReachableStore.Store(registerTask, time.Now())
tm.taskURLUnreachableStore.Store(registerTask, time.Now())
}
return seedTask, err
}
@ -246,34 +168,62 @@ func (tm *Manager) AddOrUpdate(registerTask *types.SeedTask) (seedTask *types.Se
return seedTask, nil
}
func (tm Manager) Get(taskID string) (*types.SeedTask, error) {
task, err := tm.getTask(taskID)
// update accessTime for taskID
if err := tm.accessTimeMap.Add(taskID, time.Now()); err != nil {
logger.WithTaskID(taskID).Warnf("failed to update accessTime: %v", err)
func (tm *manager) Get(taskID string) (*SeedTask, error) {
synclock.Lock(taskID, true)
defer synclock.UnLock(taskID, true)
// only update access when get task success
if task, ok := tm.getTask(taskID); ok {
tm.accessTimeMap.Store(taskID, time.Now())
return task, nil
}
return task, err
return nil, errTaskNotFound
}
func (tm Manager) Exist(taskID string) (*types.SeedTask, bool) {
task, err := tm.getTask(taskID)
return task, err == nil
}
func (tm *manager) Update(taskID string, taskInfo *SeedTask) error {
synclock.Lock(taskID, false)
defer synclock.UnLock(taskID, false)
func (tm Manager) Delete(taskID string) error {
tm.accessTimeMap.Delete(taskID)
tm.taskURLUnReachableStore.Delete(taskID)
tm.taskStore.Delete(taskID)
if err := tm.progressMgr.Clear(taskID); err != nil {
if err := tm.updateTask(taskID, taskInfo); err != nil {
return err
}
// only update access when update task success
tm.accessTimeMap.Store(taskID, time.Now())
return nil
}
func (tm *Manager) GetPieces(ctx context.Context, taskID string) (pieces []*types.SeedPiece, err error) {
synclock.Lock(taskID, true)
defer synclock.UnLock(taskID, true)
return tm.progressMgr.GetPieces(ctx, taskID)
// UpdateProgress records a downloaded piece for the task identified by
// taskID and refreshes the task's access time.
func (tm *manager) UpdateProgress(taskID string, info *PieceInfo) error {
	synclock.Lock(taskID, false)
	defer synclock.UnLock(taskID, false)
	seedTask, found := tm.getTask(taskID)
	if !found {
		return errTaskNotFound
	}
	seedTask.Pieces[info.PieceNum] = info
	// Touch the access time only after the piece was recorded successfully.
	tm.accessTimeMap.Store(taskID, time.Now())
	return nil
}
// GetProgress returns the pieces downloaded so far for the task identified
// by taskID, keyed by piece number. The returned map is the task's internal
// piece table; callers should treat it as read-only.
func (tm *manager) GetProgress(taskID string) (map[uint32]*PieceInfo, error) {
	synclock.Lock(taskID, false)
	defer synclock.UnLock(taskID, false)
	seedTask, found := tm.getTask(taskID)
	if !found {
		return nil, errTaskNotFound
	}
	// Reading progress counts as an access for GC purposes.
	tm.accessTimeMap.Store(taskID, time.Now())
	return seedTask.Pieces, nil
}
// Exist reports whether a task with the given taskID is registered and, if
// so, returns it. Unlike Get, it does not refresh the task's access time.
func (tm *manager) Exist(taskID string) (*SeedTask, bool) {
	seedTask, ok := tm.getTask(taskID)
	return seedTask, ok
}
// Delete removes the task identified by taskID together with its access
// time and unreachable-URL records, under the task's write lock.
func (tm *manager) Delete(taskID string) {
	synclock.Lock(taskID, false)
	defer synclock.UnLock(taskID, false)
	tm.deleteTask(taskID)
}
const (
@ -282,38 +232,34 @@ const (
gcTasksTimeout = 2.0 * time.Second
)
func (tm *Manager) GC() error {
logger.Debugf("start the task meta gc job")
var removedTaskCount int
func (tm *manager) GC() error {
logger.Info("start the task meta gc job")
startTime := time.Now()
// get all taskIDs and the corresponding accessTime
taskAccessMap := tm.accessTimeMap
// range all tasks and determine whether they are expired
taskIDs := taskAccessMap.ListKeyAsStringSlice()
totalTaskNums := len(taskIDs)
for _, taskID := range taskIDs {
atime, err := taskAccessMap.GetAsTime(taskID)
if err != nil {
logger.GcLogger.With("type", "meta").Errorf("gc tasks: failed to get access time taskID(%s): %v", taskID, err)
continue
}
if time.Since(atime) < tm.cfg.TaskExpireTime {
continue
totalTaskNums := 0
removedTaskCount := 0
tm.accessTimeMap.Range(func(key, value interface{}) bool {
totalTaskNums++
taskID := key.(string)
synclock.Lock(taskID, false)
defer synclock.UnLock(taskID, false)
atime := value.(time.Time)
if time.Since(atime) < tm.config.TaskExpireTime {
return true
}
// gc task memory data
logger.GcLogger.With("type", "meta").Infof("gc task: start to deal with task: %s", taskID)
if err := tm.Delete(taskID); err != nil {
logger.GcLogger.With("type", "meta").Infof("gc task: failed to delete task: %s", taskID)
continue
}
tm.deleteTask(taskID)
removedTaskCount++
}
return true
})
// slow GC detected, report it with a log warning
if timeDuring := time.Since(startTime); timeDuring > gcTasksTimeout {
logger.GcLogger.With("type", "meta").Warnf("gc tasks: %d cost: %.3f", removedTaskCount, timeDuring.Seconds())
}
logger.GcLogger.With("type", "meta").Infof("gc tasks: successfully full gc task count(%d), remainder count(%d)", removedTaskCount, totalTaskNums-removedTaskCount)
logger.GcLogger.With("type", "meta").Infof("%d tasks were successfully cleared, leaving %d tasks remaining", removedTaskCount,
totalTaskNums-removedTaskCount)
return nil
}

View File

@ -17,89 +17,115 @@
package task
import (
"context"
"net/url"
"os"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/suite"
"github.com/jarcoal/httpmock"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/supervisor/mock"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/internal/idgen"
"d7y.io/dragonfly/v2/internal/util"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/source/httpprotocol"
sourcemock "d7y.io/dragonfly/v2/pkg/source/mock"
)
func TestTaskManagerSuite(t *testing.T) {
suite.Run(t, new(TaskManagerTestSuite))
func TestMain(m *testing.M) {
os.Exit(m.Run())
}
type TaskManagerTestSuite struct {
tm *Manager
suite.Suite
}
func (suite *TaskManagerTestSuite) TestRegister() {
dragonflyURL := "http://dragonfly.io.com?a=a&b=b&c=c"
ctrl := gomock.NewController(suite.T())
cdnMgr := mock.NewMockCDNMgr(ctrl)
progressMgr := mock.NewMockSeedProgressMgr(ctrl)
progressMgr.EXPECT().SetTaskMgr(gomock.Any()).Times(1)
tm, err := NewManager(config.New(), cdnMgr, progressMgr)
suite.Nil(err)
suite.NotNil(tm)
func TestIsTaskNotFound(t *testing.T) {
type args struct {
ctx context.Context
req *types.TaskRegisterRequest
err error
}
tests := []struct {
name string
args args
wantPieceChan <-chan *types.SeedPiece
wantErr bool
want bool
}{
{
name: "register_md5",
name: "wrap task not found error",
args: args{
ctx: context.Background(),
req: &types.TaskRegisterRequest{
URL: dragonflyURL,
TaskID: idgen.TaskID(dragonflyURL, &base.UrlMeta{Filter: "a&b", Tag: "dragonfly", Digest: "md5:f1e2488bba4d1267948d9e2f7008571c"}),
Digest: "md5:f1e2488bba4d1267948d9e2f7008571c",
Filter: []string{"a", "b"},
Header: nil,
err: errors.Wrap(errTaskNotFound, "wrap error"),
},
},
wantPieceChan: nil,
wantErr: false,
},
{
name: "register_sha256",
want: true,
}, {
name: "wrap task two layers",
args: args{
ctx: context.Background(),
req: &types.TaskRegisterRequest{
URL: dragonflyURL,
TaskID: idgen.TaskID(dragonflyURL, &base.UrlMeta{Filter: "a&b", Tag: "dragonfly", Digest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5"}),
Digest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5",
Filter: []string{"a", "b"},
Header: nil,
err: errors.Wrap(errors.Wrap(errTaskNotFound, "wrap error"), "wrap error again"),
},
want: true,
}, {
name: "native err",
args: args{
err: errTaskNotFound,
},
wantPieceChan: nil,
wantErr: false,
want: true,
},
}
for _, tt := range tests {
suite.Run(tt.name, func() {
//gotPieceChan, err := tm.Register(tt.args.ctx, tt.args.req)
//
//if (err != nil) != tt.wantErr {
// suite.T().Errorf("Register() error = %v, wantErr %v", err, tt.wantErr)
// return
//}
//if !reflect.DeepEqual(gotPieceChan, tt.wantPieceChan) {
// suite.T().Errorf("Register() gotPieceChan = %v, want %v", gotPieceChan, tt.wantPieceChan)
//}
t.Run(tt.name, func(t *testing.T) {
if got := IsTaskNotFound(tt.args.err); got != tt.want {
t.Errorf("IsTaskNotFound() = %v, want %v", got, tt.want)
}
})
}
}
func Test_manager_Exist(t *testing.T) {
httpmock.Activate()
tm, err := NewManager(config.New())
require := require.New(t)
require.Nil(err)
ctl := gomock.NewController(t)
sourceClient := sourcemock.NewMockResourceClient(ctl)
testURL, err := url.Parse("https://dragonfly.com")
require.Nil(err)
source.UnRegister("https")
require.Nil(source.Register("https", sourceClient, httpprotocol.Adapter))
sourceClient.EXPECT().GetContentLength(source.RequestEq(testURL.String())).Return(int64(1024*1024*500+1000), nil).Times(1)
seedTask := NewSeedTask("taskID", testURL.String(), nil)
addedTask, err := tm.AddOrUpdate(seedTask)
require.Nil(err)
existTask, ok := tm.Exist("taskID")
require.True(ok)
require.EqualValues(addedTask, existTask)
require.EqualValues(1024*1024*500+1000, existTask.SourceFileLength)
require.EqualValues(1024*1024*7, existTask.PieceSize)
}
func Test_manager_AddOrUpdate(t *testing.T) {
tm, err := NewManager(config.New())
require := require.New(t)
require.Nil(err)
ctl := gomock.NewController(t)
sourceClient := sourcemock.NewMockResourceClient(ctl)
testURL, err := url.Parse("https://dragonfly.com")
require.Nil(err)
source.UnRegister("https")
require.Nil(source.Register("https", sourceClient, httpprotocol.Adapter))
sourceClient.EXPECT().GetContentLength(source.RequestEq(testURL.String())).Return(int64(1024*1024*500+1000), nil).Times(1)
registerTask := NewSeedTask("dragonfly", testURL.String(), &base.UrlMeta{
Digest: "sha256:xxxxx",
Tag: "dragonfly",
Range: "0-3",
Filter: "",
Header: map[string]string{"key1": "value1"},
})
existTask, ok := tm.Exist("dragonfly")
require.Nil(existTask)
require.False(ok)
seedTask, err := tm.AddOrUpdate(registerTask)
require.Nil(err)
existTask, ok = tm.Exist("dragonfly")
require.NotNil(existTask)
require.True(ok)
require.EqualValues(registerTask, seedTask)
require.Equal(util.ComputePieceSize(int64(1024*1024*500+1000)), uint32(seedTask.PieceSize))
require.Equal(int64(1024*1024*500+1000), seedTask.SourceFileLength)
require.EqualValues(map[string]string{"key1": "value1"}, seedTask.Header)
}

View File

@ -21,57 +21,38 @@ import (
"github.com/pkg/errors"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/pkg/util/stringutils"
)
// getTaskUnreachableTime get unreachable time of task and convert it to time.Time type
func (tm *Manager) getTaskUnreachableTime(taskID string) (time.Time, bool) {
unreachableTime, ok := tm.taskURLUnReachableStore.Load(taskID)
if !ok {
return time.Time{}, false
}
return unreachableTime.(time.Time), true
}
// updateTask
func (tm *Manager) updateTask(taskID string, updateTaskInfo *types.SeedTask) (*types.SeedTask, error) {
if stringutils.IsBlank(taskID) {
return nil, errors.Wrap(cdnerrors.ErrInvalidValue, "taskID is empty")
}
// updateTask updates task
func (tm *manager) updateTask(taskID string, updateTaskInfo *SeedTask) error {
if updateTaskInfo == nil {
return nil, errors.Wrap(cdnerrors.ErrInvalidValue, "updateTaskInfo is nil")
return errors.New("updateTaskInfo is nil")
}
if stringutils.IsBlank(updateTaskInfo.CdnStatus) {
return nil, errors.Wrap(cdnerrors.ErrInvalidValue, "status of task is empty")
return errors.New("status of updateTaskInfo is empty")
}
// get origin task
task, err := tm.getTask(taskID)
if err != nil {
return nil, err
task, ok := tm.getTask(taskID)
if !ok {
return errTaskNotFound
}
if !updateTaskInfo.IsSuccess() {
// when the origin CDNStatus equals success, do not update it to unsuccessful
if task.IsSuccess() {
return task, nil
task.Log().Warnf("origin task status is success, but update task status is %s, return origin task", task.CdnStatus)
return nil
}
// only update the task CdnStatus when the new task CDNStatus and
// the origin CDNStatus both not equals success
task.CdnStatus = updateTaskInfo.CdnStatus
return task, nil
return nil
}
// only update the task info when the new CDNStatus equals success
// only update the task info when the updateTaskInfo CDNStatus equals success
// and the origin CDNStatus not equals success.
if updateTaskInfo.CdnFileLength != 0 {
if updateTaskInfo.CdnFileLength > 0 {
task.CdnFileLength = updateTaskInfo.CdnFileLength
}
if !stringutils.IsBlank(updateTaskInfo.SourceRealDigest) {
task.SourceRealDigest = updateTaskInfo.SourceRealDigest
}
@ -79,36 +60,75 @@ func (tm *Manager) updateTask(taskID string, updateTaskInfo *types.SeedTask) (*t
if !stringutils.IsBlank(updateTaskInfo.PieceMd5Sign) {
task.PieceMd5Sign = updateTaskInfo.PieceMd5Sign
}
var pieceTotal int32
if updateTaskInfo.SourceFileLength > 0 {
pieceTotal = int32((updateTaskInfo.SourceFileLength + int64(task.PieceSize-1)) / int64(task.PieceSize))
if updateTaskInfo.SourceFileLength >= 0 {
task.TotalPieceCount = updateTaskInfo.TotalPieceCount
task.SourceFileLength = updateTaskInfo.SourceFileLength
}
if pieceTotal != 0 {
task.PieceTotal = pieceTotal
}
task.CdnStatus = updateTaskInfo.CdnStatus
return task, nil
return nil
}
// IsSame check whether the two task provided are the same
func IsSame(task1, task2 *types.SeedTask) bool {
// getTask loads the value stored under taskID and asserts it back to
// *SeedTask. The second result is false when no task is registered.
func (tm *manager) getTask(taskID string) (*SeedTask, bool) {
	value, ok := tm.taskStore.Load(taskID)
	if !ok {
		return nil, false
	}
	return value.(*SeedTask), true
}
// deleteTask drops every trace of taskID: its access time, its
// unreachable-URL record and the task itself. Both callers (Delete and GC)
// hold the task's write lock before invoking it.
func (tm *manager) deleteTask(taskID string) {
	tm.accessTimeMap.Delete(taskID)
	tm.taskURLUnreachableStore.Delete(taskID)
	tm.taskStore.Delete(taskID)
}
// getTaskAccessTime returns the last recorded access time for taskID, or
// (zero time, false) when the task has never been accessed.
func (tm *manager) getTaskAccessTime(taskID string) (time.Time, bool) {
	value, ok := tm.accessTimeMap.Load(taskID)
	if !ok {
		return time.Time{}, false
	}
	return value.(time.Time), true
}
// getTaskUnreachableTime returns the moment at which the task's source URL
// was last found unreachable, or (zero time, false) if it never was.
func (tm *manager) getTaskUnreachableTime(taskID string) (time.Time, bool) {
	value, ok := tm.taskURLUnreachableStore.Load(taskID)
	if !ok {
		return time.Time{}, false
	}
	return value.(time.Time), true
}
// IsSame check if task1 is same with task2
func IsSame(task1, task2 *SeedTask) bool {
if task1 == task2 {
return true
}
if task1.ID != task2.ID {
return false
}
if task1.TaskURL != task2.TaskURL {
return false
}
if !stringutils.IsBlank(task1.RequestDigest) && !stringutils.IsBlank(task2.RequestDigest) {
if task1.RequestDigest != task2.RequestDigest {
if task1.Range != task2.Range {
return false
}
if task1.Tag != task2.Tag {
return false
}
if !stringutils.IsBlank(task1.RequestDigest) && !stringutils.IsBlank(task2.SourceRealDigest) {
return task1.SourceRealDigest == task2.RequestDigest
if task1.Digest != task2.Digest {
return false
}
if task1.Filter != task2.Filter {
return false
}
return true
}

215
cdn/supervisor/task/task.go Normal file
View File

@ -0,0 +1,215 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package task
import (
"strings"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/util/net/urlutils"
"d7y.io/dragonfly/v2/pkg/util/rangeutils"
)
// SeedTask holds the lifecycle state of one seeding job: where the content
// comes from, how it is split into pieces, and how far seeding has
// progressed.
type SeedTask struct {
	// ID of the task
	ID string `json:"ID,omitempty"`
	// RawURL is the resource's URL which user uses dfget to download. The location of URL can be anywhere, LAN or WAN.
	// For image distribution, this is image layer's URL in image registry.
	// The resource url is provided by dfget command line parameter.
	RawURL string `json:"rawURL,omitempty"`
	// TaskURL is generated from rawURL. rawURL may contain some queries or parameter, dfget will filter some queries via
	// --filter parameter of dfget. The usage of it is that different rawURL may generate the same taskID.
	TaskURL string `json:"taskURL,omitempty"`
	// SourceFileLength is the length of the source file in bytes.
	SourceFileLength int64 `json:"sourceFileLength,omitempty"`
	// CdnFileLength is the length of the file stored on CDN
	CdnFileLength int64 `json:"cdnFileLength,omitempty"`
	// PieceSize is the size of pieces in bytes
	PieceSize int32 `json:"pieceSize,omitempty"`
	// CdnStatus is the status of the created task related to CDN functionality.
	//
	// Enum: [WAITING RUNNING FAILED SUCCESS SOURCE_ERROR]
	CdnStatus string `json:"cdnStatus,omitempty"`
	// TotalPieceCount is the total number of pieces, or
	// UnknownTotalPieceCount (-1) until it has been determined.
	TotalPieceCount int32 `json:"totalPieceCount,omitempty"`
	// SourceRealDigest when CDN finishes downloading file/image from the source location,
	// the md5 sum of the source file will be calculated as the value of the SourceRealDigest.
	// And it will be used to compare with RequestDigest value to check whether file is complete.
	SourceRealDigest string `json:"sourceRealDigest,omitempty"`
	// PieceMd5Sign Is the SHA256 signature of all pieces md5 signature
	PieceMd5Sign string `json:"pieceMd5Sign,omitempty"`
	// Digest checks integrity of url content, for example md5:xxx or sha256:yyy
	Digest string `json:"digest,omitempty"`
	// Tag identifies different task for same url, conflict with digest
	Tag string `json:"tag,omitempty"`
	// Range content range for url
	Range string `json:"range,omitempty"`
	// Filter url used to generate task id
	Filter string `json:"filter,omitempty"`
	// Header other url header infos
	Header map[string]string `json:"header,omitempty"`
	// Pieces maps piece number to the downloaded piece's metadata.
	// Excluded from JSON output.
	Pieces map[uint32]*PieceInfo `json:"-"`
	// logger is a per-task logger pre-bound to the task ID; set by
	// NewSeedTask and created lazily by Log() otherwise.
	logger *logger.SugaredLoggerOnWith
}
// PieceInfo describes a single downloaded piece of a seed task.
type PieceInfo struct {
	// PieceNum is the sequence number of the piece within the task.
	PieceNum uint32 `json:"piece_num"`
	// PieceMd5 is the md5 digest of the piece content.
	PieceMd5 string `json:"piece_md5"`
	// PieceRange is the byte range of the piece in the CDN-stored file —
	// presumed from the name; TODO confirm against the storage writer.
	PieceRange *rangeutils.Range `json:"piece_range"`
	// OriginRange is the byte range of the piece in the origin file —
	// presumed from the name; TODO confirm against the storage writer.
	OriginRange *rangeutils.Range `json:"origin_range"`
	// PieceLen is the length of the piece in bytes.
	PieceLen uint32 `json:"piece_len"`
	// PieceStyle indicates the piece's storage format.
	PieceStyle base.PieceStyle `json:"piece_style"`
}
const (
	// UnknownTotalPieceCount marks a task whose total piece count has not
	// been determined yet.
	UnknownTotalPieceCount = -1
)
// NewSeedTask builds a SeedTask in the WAITING state for the given raw URL.
// A nil urlMeta is treated as empty metadata.
func NewSeedTask(taskID string, rawURL string, urlMeta *base.UrlMeta) *SeedTask {
	meta := urlMeta
	if meta == nil {
		meta = &base.UrlMeta{}
	}
	// TaskURL strips the filtered query parameters from rawURL so that
	// equivalent raw URLs map to the same task URL.
	taskURL := urlutils.FilterURLParam(rawURL, strings.Split(meta.Filter, "&"))
	return &SeedTask{
		ID:               taskID,
		RawURL:           rawURL,
		TaskURL:          taskURL,
		SourceFileLength: source.UnknownSourceFileLen,
		CdnStatus:        StatusWaiting,
		TotalPieceCount:  UnknownTotalPieceCount,
		Digest:           meta.Digest,
		Tag:              meta.Tag,
		Range:            meta.Range,
		Filter:           meta.Filter,
		Header:           meta.Header,
		Pieces:           make(map[uint32]*PieceInfo),
		logger:           logger.WithTaskID(taskID),
	}
}
// Clone returns a copy of the task whose Header and Pieces maps are
// duplicated, so mutating the clone does not affect the original task.
func (task *SeedTask) Clone() *SeedTask {
	cloneTask := new(SeedTask)
	*cloneTask = *task
	// The shallow copy above aliases the original's map headers: without
	// allocating fresh maps first, the copy loops below would write each
	// entry back into the very same map (a no-op) and the "clone" would
	// share Header and Pieces with the original.
	if task.Header != nil {
		cloneTask.Header = make(map[string]string, len(task.Header))
		for key, value := range task.Header {
			cloneTask.Header[key] = value
		}
	}
	if len(task.Pieces) > 0 {
		cloneTask.Pieces = make(map[uint32]*PieceInfo, len(task.Pieces))
		for pieceNum, piece := range task.Pieces {
			cloneTask.Pieces[pieceNum] = piece
		}
	}
	return cloneTask
}
// IsSuccess reports whether the task finished seeding successfully.
func (task *SeedTask) IsSuccess() bool {
	switch task.CdnStatus {
	case StatusSuccess:
		return true
	default:
		return false
	}
}
// IsFrozen reports whether the task is in a state from which seeding can
// be (re)triggered: not yet started, failed, or source error.
func (task *SeedTask) IsFrozen() bool {
	switch task.CdnStatus {
	case StatusFailed, StatusWaiting, StatusSourceError:
		return true
	default:
		return false
	}
}
// IsWait reports whether the task is still waiting to be triggered.
func (task *SeedTask) IsWait() bool {
	switch task.CdnStatus {
	case StatusWaiting:
		return true
	default:
		return false
	}
}
// IsError reports whether the task ended in a failure state (FAILED or
// SOURCE_ERROR).
func (task *SeedTask) IsError() bool {
	switch task.CdnStatus {
	case StatusFailed, StatusSourceError:
		return true
	default:
		return false
	}
}
// IsDone reports whether the task has reached a terminal state: FAILED,
// SUCCESS or SOURCE_ERROR.
func (task *SeedTask) IsDone() bool {
	switch task.CdnStatus {
	case StatusFailed, StatusSuccess, StatusSourceError:
		return true
	default:
		return false
	}
}
// UpdateStatus overwrites the task's CDN status with cdnStatus.
func (task *SeedTask) UpdateStatus(cdnStatus string) {
	task.CdnStatus = cdnStatus
}
// UpdateTaskInfo stores a seeding result on the task: the digests computed
// while downloading, the source/CDN file lengths, and the new CDN status.
func (task *SeedTask) UpdateTaskInfo(cdnStatus, realDigest, pieceMd5Sign string, sourceFileLength, cdnFileLength int64) {
	task.SourceRealDigest = realDigest
	task.PieceMd5Sign = pieceMd5Sign
	task.SourceFileLength = sourceFileLength
	task.CdnFileLength = cdnFileLength
	task.CdnStatus = cdnStatus
}
// Log returns a logger bound to the task ID, creating it lazily for tasks
// that were not built via NewSeedTask.
func (task *SeedTask) Log() *logger.SugaredLoggerOnWith {
	if task.logger != nil {
		return task.logger
	}
	task.logger = logger.WithTaskID(task.ID)
	return task.logger
}
// StartTrigger marks the task as RUNNING and resets its piece table for a
// fresh seeding round.
func (task *SeedTask) StartTrigger() {
	task.CdnStatus = StatusRunning
	task.Pieces = make(map[uint32]*PieceInfo)
}
// CdnStatus values a SeedTask moves through. A task starts in WAITING,
// switches to RUNNING when seeding is triggered (see StartTrigger), and
// ends in one of the terminal states FAILED, SUCCESS or SOURCE_ERROR.
const (
	// StatusWaiting captures enum value "WAITING"
	StatusWaiting string = "WAITING"
	// StatusRunning captures enum value "RUNNING"
	StatusRunning string = "RUNNING"
	// StatusFailed captures enum value "FAILED"
	StatusFailed string = "FAILED"
	// StatusSuccess captures enum value "SUCCESS"
	StatusSuccess string = "SUCCESS"
	// StatusSourceError captures enum value "SOURCE_ERROR"
	StatusSourceError string = "SOURCE_ERROR"
)
// IsEqual reports whether two tasks are equal, ignoring the Pieces map and
// unexported fields (such as the cached logger).
func IsEqual(task1, task2 SeedTask) bool {
	return cmp.Equal(task1, task2, cmpopts.IgnoreFields(SeedTask{}, "Pieces"), cmpopts.IgnoreUnexported(SeedTask{}))
}

View File

@ -1,45 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//go:generate mockgen -destination ./mock/mock_task_mgr.go -package mock d7y.io/dragonfly/v2/cdn/supervisor SeedTaskMgr
package supervisor
import (
"context"
"d7y.io/dragonfly/v2/cdn/types"
)
// SeedTaskMgr as an interface defines all operations against SeedTask.
// A SeedTask will store some meta info about the taskFile, pieces and something else.
// A seedTask corresponds to three files on the disk, which are identified by taskId, the data file meta file piece file
type SeedTaskMgr interface {
// Register register seed task
Register(context.Context, *types.SeedTask) (pieceCh <-chan *types.SeedPiece, err error)
// Get get task Info with specified taskId.
Get(string) (*types.SeedTask, error)
// Exist check task existence with specified taskId.
Exist(string) (*types.SeedTask, bool)
// Delete delete a task.
Delete(string) error
// GetPieces
GetPieces(context.Context, string) (pieces []*types.SeedPiece, err error)
}

View File

@ -1,34 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package types
import "d7y.io/dragonfly/v2/pkg/util/rangeutils"
type SeedPiece struct {
PieceStyle PieceFormat `json:"piece_style"` // 0: PlainUnspecified
PieceNum uint32 `json:"piece_num"`
PieceMd5 string `json:"piece_md_5"`
PieceRange *rangeutils.Range `json:"piece_range"`
OriginRange *rangeutils.Range `json:"origin_range"`
PieceLen uint32 `json:"piece_len"`
}
// PieceFormat identifies how a piece's payload is encoded.
type PieceFormat int8

const (
	// PlainUnspecified is the plain (unencoded) piece format.
	// NOTE(review): declared as 1, while SeedPiece's field comment implied 0 — confirm.
	PlainUnspecified PieceFormat = 1
)

View File

@ -1,127 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package types
import (
"strings"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/util/net/urlutils"
)
// SeedTask holds the metadata of a single seed (back-to-source) task.
type SeedTask struct {
	TaskID           string            `json:"taskId,omitempty"`
	URL              string            `json:"url,omitempty"`              // raw source URL
	TaskURL          string            `json:"taskUrl,omitempty"`          // URL with filtered query parameters
	SourceFileLength int64             `json:"sourceFileLength,omitempty"` // length reported by the source; may be unknown
	CdnFileLength    int64             `json:"cdnFileLength,omitempty"`
	PieceSize        int32             `json:"pieceSize,omitempty"`
	Header           map[string]string `json:"header,omitempty"`
	CdnStatus        string            `json:"cdnStatus,omitempty"` // one of the TaskInfoCdnStatus* constants
	PieceTotal       int32             `json:"pieceTotal,omitempty"`
	RequestDigest    string            `json:"requestDigest,omitempty"`
	SourceRealDigest string            `json:"sourceRealDigest,omitempty"`
	Range            string            `json:"range,omitempty"`
	PieceMd5Sign     string            `json:"pieceMd5Sign,omitempty"`
	// logger is the task-scoped logger, lazily initialized via Log().
	logger *logger.SugaredLoggerOnWith
}
const (
	// IllegalSourceFileLen is a sentinel marking an illegal source file length.
	IllegalSourceFileLen = -100
)
// NewSeedTask builds a SeedTask for the given task ID and raw URL.
// A nil urlMeta is treated as empty metadata. The new task starts in the
// WAITING state with an unknown source file length.
func NewSeedTask(taskID string, rawURL string, urlMeta *base.UrlMeta) *SeedTask {
	meta := urlMeta
	if meta == nil {
		meta = &base.UrlMeta{}
	}
	// Strip the filtered query parameters to obtain the canonical task URL.
	filters := strings.Split(meta.Filter, "&")
	taskURL := urlutils.FilterURLParam(rawURL, filters)
	task := &SeedTask{
		TaskID:           taskID,
		Header:           meta.Header,
		RequestDigest:    meta.Digest,
		URL:              rawURL,
		TaskURL:          taskURL,
		SourceFileLength: source.UnknownSourceFileLen,
		CdnFileLength:    0,
		PieceSize:        0,
		Range:            meta.Range,
		CdnStatus:        TaskInfoCdnStatusWaiting,
		logger:           logger.WithTaskID(taskID),
	}
	return task
}
// IsSuccess determines whether the CDN status of the task is SUCCESS.
func (task *SeedTask) IsSuccess() bool {
	return task.CdnStatus == TaskInfoCdnStatusSuccess
}
// IsFrozen reports whether the task is frozen, i.e. it is still waiting
// to run or a previous attempt ended in FAILED/SOURCE_ERROR.
func (task *SeedTask) IsFrozen() bool {
	switch task.CdnStatus {
	case TaskInfoCdnStatusFailed, TaskInfoCdnStatusWaiting, TaskInfoCdnStatusSourceError:
		return true
	}
	return false
}
// IsWait reports whether the task is still in the WAITING state.
func (task *SeedTask) IsWait() bool {
	return task.CdnStatus == TaskInfoCdnStatusWaiting
}
// IsError reports whether the task ended in an error state
// (FAILED or SOURCE_ERROR).
func (task *SeedTask) IsError() bool {
	switch task.CdnStatus {
	case TaskInfoCdnStatusFailed, TaskInfoCdnStatusSourceError:
		return true
	}
	return false
}
// IsDone reports whether the task reached a terminal state
// (FAILED, SUCCESS or SOURCE_ERROR).
func (task *SeedTask) IsDone() bool {
	switch task.CdnStatus {
	case TaskInfoCdnStatusFailed, TaskInfoCdnStatusSuccess, TaskInfoCdnStatusSourceError:
		return true
	}
	return false
}
// UpdateStatus sets the task's CDN status to cdnStatus.
func (task *SeedTask) UpdateStatus(cdnStatus string) {
	task.CdnStatus = cdnStatus
}
// UpdateTaskInfo records the outcome of a seeding attempt on the task:
// its final CDN status, source digest, piece md5 signature and both lengths.
func (task *SeedTask) UpdateTaskInfo(cdnStatus, realDigest, pieceMd5Sign string, sourceFileLength, cdnFileLength int64) {
	task.CdnStatus = cdnStatus
	task.SourceRealDigest = realDigest
	task.PieceMd5Sign = pieceMd5Sign
	task.SourceFileLength = sourceFileLength
	task.CdnFileLength = cdnFileLength
}
// Log returns the task-scoped logger, lazily creating one keyed by the
// task ID on first use.
func (task *SeedTask) Log() *logger.SugaredLoggerOnWith {
	if task.logger != nil {
		return task.logger
	}
	task.logger = logger.WithTaskID(task.TaskID)
	return task.logger
}
// CDN task status values stored in SeedTask.CdnStatus.
const (
	// TaskInfoCdnStatusWaiting captures enum value "WAITING".
	TaskInfoCdnStatusWaiting string = "WAITING"
	// TaskInfoCdnStatusRunning captures enum value "RUNNING".
	TaskInfoCdnStatusRunning string = "RUNNING"
	// TaskInfoCdnStatusFailed captures enum value "FAILED".
	TaskInfoCdnStatusFailed string = "FAILED"
	// TaskInfoCdnStatusSuccess captures enum value "SUCCESS".
	TaskInfoCdnStatusSuccess string = "SUCCESS"
	// TaskInfoCdnStatusSourceError captures enum value "SOURCE_ERROR".
	TaskInfoCdnStatusSourceError string = "SOURCE_ERROR"
)

View File

@ -1,26 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package types
// TaskRegisterRequest carries the parameters of a seed task registration call.
type TaskRegisterRequest struct {
	URL    string            `json:"rawURL,omitempty"` // raw source URL to seed
	TaskID string            `json:"taskId,omitempty"`
	Digest string            `json:"digest,omitempty"`
	Filter []string          `json:"filter,omitempty"` // query parameters to strip from URL
	Header map[string]string `json:"header,omitempty"`
}

View File

@ -102,6 +102,6 @@ func runCdnSystem() error {
return err
}
dependency.SetupQuitSignalHandler(func() { svr.Stop() })
dependency.SetupQuitSignalHandler(func() { logger.Fatalf("stop server failed: %v", svr.Stop()) })
return svr.Serve()
}

View File

@ -59,6 +59,12 @@ func TestComputePieceSize(t *testing.T) {
length: 3100 * 1024 * 1024,
},
want: DefaultPieceSizeLimit,
}, {
name: "500M+ length",
args: args{
length: 552562021,
},
want: DefaultPieceSize + 3*1024*1024,
},
}
for _, tt := range tests {

View File

@ -1,230 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package syncmap
import (
"container/list"
"strconv"
"sync"
"time"
"github.com/pkg/errors"
"go.uber.org/atomic"
"d7y.io/dragonfly/v2/internal/dferrors"
"d7y.io/dragonfly/v2/pkg/util/stringutils"
)
// SyncMap is a thread-safe map providing generic support.
// It embeds *sync.Map, so all of sync.Map's methods are available as well.
type SyncMap struct {
	*sync.Map
}

// NewSyncMap returns a new, empty SyncMap.
func NewSyncMap() *SyncMap {
	inner := &sync.Map{}
	return &SyncMap{Map: inner}
}
// Add adds a key-value pair into the *sync.Map.
// The ErrEmptyValue error will be returned if the key is blank.
// An existing value under the same key is overwritten.
func (mmap *SyncMap) Add(key string, value interface{}) error {
	if stringutils.IsBlank(key) {
		return errors.Wrap(dferrors.ErrEmptyValue, "key")
	}
	mmap.Store(key, value)
	return nil
}
// Get returns the value stored under key as an interface{}.
// The ErrEmptyValue error will be returned if the key is blank,
// and the ErrDataNotFound error if the key is absent.
func (mmap *SyncMap) Get(key string) (interface{}, error) {
	if stringutils.IsBlank(key) {
		return nil, errors.Wrap(dferrors.ErrEmptyValue, "key")
	}
	v, ok := mmap.Load(key)
	if !ok {
		return nil, errors.Wrapf(dferrors.ErrDataNotFound, "get key %s from map", key)
	}
	return v, nil
}
// GetAsMap returns the value stored under key as a *SyncMap.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsMap(key string) (*SyncMap, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return nil, errors.Wrapf(err, "get key %s from map", key)
	}
	value, ok := v.(*SyncMap)
	if !ok {
		return nil, errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
	}
	return value, nil
}
// GetAsList returns the value stored under key as a *list.List.
// The ErrConvertFailed error will be returned if the assertion fails.
// On any error the returned list is nil, consistent with the conversion
// failure path and with the other GetAsX getters (which return their
// type's zero value). Previously the lookup-error path returned a fresh
// empty list while the conversion path returned nil.
func (mmap *SyncMap) GetAsList(key string) (*list.List, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return nil, errors.Wrapf(err, "get key %s from map", key)
	}
	value, ok := v.(*list.List)
	if !ok {
		return nil, errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
	}
	return value, nil
}
// GetAsInt returns the value stored under key as an int.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsInt(key string) (int, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return 0, errors.Wrapf(err, "get key %s from map", key)
	}
	value, ok := v.(int)
	if !ok {
		return 0, errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
	}
	return value, nil
}
// GetAsInt64 returns the value stored under key as an int64.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsInt64(key string) (int64, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return 0, errors.Wrapf(err, "get key %s from map", key)
	}
	value, ok := v.(int64)
	if !ok {
		return 0, errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
	}
	return value, nil
}
// GetAsString returns the value stored under key as a string.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsString(key string) (string, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return "", errors.Wrapf(err, "get key %s from map", key)
	}
	value, ok := v.(string)
	if !ok {
		return "", errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
	}
	return value, nil
}
// GetAsBool returns the value stored under key as a bool.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsBool(key string) (bool, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return false, errors.Wrapf(err, "get key %s from map", key)
	}
	value, ok := v.(bool)
	if !ok {
		return false, errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
	}
	return value, nil
}
// GetAsAtomicInt returns the value stored under key as an *atomic.Int32.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsAtomicInt(key string) (*atomic.Int32, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return nil, errors.Wrapf(err, "get key %s from map", key)
	}
	value, ok := v.(*atomic.Int32)
	if !ok {
		return nil, errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
	}
	return value, nil
}
// GetAsTime returns the value stored under key as a time.Time.
// The ErrConvertFailed error will be returned if the assertion fails.
// NOTE(review): on error this returns time.Now() rather than the zero
// time — surprising for callers that ignore err; confirm intent.
func (mmap *SyncMap) GetAsTime(key string) (time.Time, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return time.Now(), errors.Wrapf(err, "get key %s from map", key)
	}
	value, ok := v.(time.Time)
	if !ok {
		return time.Now(), errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
	}
	return value, nil
}
// Remove deletes the key-value pair from the map.
// The ErrEmptyValue error will be returned if the key is blank,
// and the ErrDataNotFound error if the key is absent.
func (mmap *SyncMap) Remove(key string) error {
	if stringutils.IsBlank(key) {
		return errors.Wrap(dferrors.ErrEmptyValue, "key")
	}
	_, ok := mmap.Load(key)
	if !ok {
		return errors.Wrapf(dferrors.ErrDataNotFound, "get key %s from map", key)
	}
	mmap.Delete(key)
	return nil
}
// ListKeyAsStringSlice returns the map's keys as a string slice.
// Non-string keys are skipped. A nil receiver yields an empty slice.
// The original range callback had an if/else where both branches
// returned true; the redundant branch is folded away.
func (mmap *SyncMap) ListKeyAsStringSlice() (result []string) {
	if mmap == nil {
		return []string{}
	}
	mmap.Range(func(key, value interface{}) bool {
		if v, ok := key.(string); ok {
			result = append(result, v)
		}
		// Always continue iterating, even past non-string keys.
		return true
	})
	return
}
// ListKeyAsIntSlice returns the map's keys as an int slice.
// Keys that are not strings, or strings that do not parse as integers,
// are skipped. A nil receiver yields an empty slice.
func (mmap *SyncMap) ListKeyAsIntSlice() (result []int) {
	if mmap == nil {
		return []int{}
	}
	mmap.Range(func(key, value interface{}) bool {
		k, ok := key.(string)
		if !ok {
			return true
		}
		if n, err := strconv.Atoi(k); err == nil {
			result = append(result, n)
		}
		return true
	})
	return
}

View File

@ -14,14 +14,11 @@
* limitations under the License.
*/
package maputils
package gc
// DeepCopy copies the src to dst and return a non-nil dst map.
func DeepCopy(src map[string]string) map[string]string {
dst := make(map[string]string)
for k, v := range src {
dst[k] = v
}
return dst
// Config holds the gc server configuration. It currently has no options.
type Config struct {
}

// applyDefaults fills in default values; with no options yet, it returns
// the config unchanged.
func (config Config) applyDefaults() Config {
	return config
}

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
// Package timeutils provides utilities supplementing the standard 'time' package.
// Package structutils provides utilities for working with structs.
package structutils
import (

View File

@ -42,7 +42,7 @@ type TaskManager interface {
Get(string) (*Task, bool)
// Delete task
Delete(string)
// Get or add task
// GetOrAdd returns the existing task if present, otherwise adds the given task
GetOrAdd(*Task) (*Task, bool)
}