Adjust CDN module package (#893)

* adjust CDN code package structure

Signed-off-by: sunwp <244372610@qq.com>
This commit is contained in:
sunwp 2021-12-11 20:17:39 +08:00 committed by Gaius
parent 40a281a752
commit e252ef5880
No known key found for this signature in database
GPG Key ID: 8B4E5D1290FA2FFB
62 changed files with 2878 additions and 3375 deletions

View File

@ -19,33 +19,27 @@ package cdn
import ( import (
"context" "context"
"fmt" "fmt"
"net/http"
"runtime" "runtime"
"time"
"github.com/pkg/errors" "github.com/pkg/errors"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc" "google.golang.org/grpc"
"d7y.io/dragonfly/v2/cdn/config" "d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/gc"
"d7y.io/dragonfly/v2/cdn/metrics" "d7y.io/dragonfly/v2/cdn/metrics"
"d7y.io/dragonfly/v2/cdn/plugins" "d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/cdn/rpcserver" "d7y.io/dragonfly/v2/cdn/rpcserver"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn" "d7y.io/dragonfly/v2/cdn/supervisor/cdn"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage" "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/supervisor/gc"
"d7y.io/dragonfly/v2/cdn/supervisor/progress" "d7y.io/dragonfly/v2/cdn/supervisor/progress"
"d7y.io/dragonfly/v2/cdn/supervisor/task" "d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc"
"d7y.io/dragonfly/v2/pkg/rpc/manager" "d7y.io/dragonfly/v2/pkg/rpc/manager"
managerclient "d7y.io/dragonfly/v2/pkg/rpc/manager/client" managerClient "d7y.io/dragonfly/v2/pkg/rpc/manager/client"
"d7y.io/dragonfly/v2/pkg/util/hostutils" "d7y.io/dragonfly/v2/pkg/util/hostutils"
"d7y.io/dragonfly/v2/pkg/util/net/iputils"
)
const (
gracefulStopTimeout = 10 * time.Second
) )
type Server struct { type Server struct {
@ -53,177 +47,168 @@ type Server struct {
config *config.Config config *config.Config
// GRPC server // GRPC server
grpcServer *grpc.Server grpcServer *rpcserver.Server
// Metrics server // Metrics server
metricsServer *http.Server metricsServer *metrics.Server
// Manager client // Manager client
managerClient managerclient.Client configServer managerClient.Client
// gc Server
gcServer *gc.Server
} }
// New creates a brand new server instance. // New creates a brand-new server instance.
func New(cfg *config.Config) (*Server, error) { func New(cfg *config.Config) (*Server, error) {
s := &Server{config: cfg}
if ok := storage.IsSupport(cfg.StorageMode); !ok { if ok := storage.IsSupport(cfg.StorageMode); !ok {
return nil, fmt.Errorf("os %s is not support storage mode %s", runtime.GOOS, cfg.StorageMode) return nil, fmt.Errorf("os %s is not support storage mode %s", runtime.GOOS, cfg.StorageMode)
} }
// Initialize plugins
if err := plugins.Initialize(cfg.Plugins); err != nil { if err := plugins.Initialize(cfg.Plugins); err != nil {
return nil, err return nil, errors.Wrapf(err, "init plugins")
}
// Initialize task manager
taskManager, err := task.NewManager(cfg)
if err != nil {
return nil, errors.Wrapf(err, "create task manager")
} }
// Initialize progress manager // Initialize progress manager
progressMgr, err := progress.NewManager() progressManager, err := progress.NewManager(taskManager)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "create progress manager") return nil, errors.Wrapf(err, "create progress manager")
} }
// Initialize storage manager // Initialize storage manager
storageMgr, ok := storage.Get(cfg.StorageMode) storageManager, ok := storage.Get(cfg.StorageMode)
if !ok { if !ok {
return nil, fmt.Errorf("can not find storage pattern %s", cfg.StorageMode) return nil, fmt.Errorf("can not find storage pattern %s", cfg.StorageMode)
} }
storageManager.Initialize(taskManager)
// Initialize CDN manager // Initialize CDN manager
cdnMgr, err := cdn.NewManager(cfg, storageMgr, progressMgr) cdnManager, err := cdn.NewManager(cfg, storageManager, progressManager, taskManager)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "create cdn manager") return nil, errors.Wrapf(err, "create cdn manager")
} }
// Initialize task manager // Initialize CDN service
taskMgr, err := task.NewManager(cfg, cdnMgr, progressMgr) service, err := supervisor.NewCDNService(taskManager, cdnManager, progressManager)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "create task manager") return nil, errors.Wrapf(err, "create cdn service")
} }
// Initialize storage manager
storageMgr.Initialize(taskMgr)
// Initialize storage manager // Initialize storage manager
var opts []grpc.ServerOption var opts []grpc.ServerOption
if s.config.Options.Telemetry.Jaeger != "" { if cfg.Options.Telemetry.Jaeger != "" {
opts = append(opts, grpc.ChainUnaryInterceptor(otelgrpc.UnaryServerInterceptor()), grpc.ChainStreamInterceptor(otelgrpc.StreamServerInterceptor())) opts = append(opts, grpc.ChainUnaryInterceptor(otelgrpc.UnaryServerInterceptor()), grpc.ChainStreamInterceptor(otelgrpc.StreamServerInterceptor()))
} }
grpcServer, err := rpcserver.New(cfg, taskMgr, opts...) grpcServer, err := rpcserver.New(cfg, service, opts...)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "create seedServer") return nil, errors.Wrap(err, "create rpcServer")
}
s.grpcServer = grpcServer
// Initialize prometheus
if cfg.Metrics != nil {
s.metricsServer = metrics.New(cfg.Metrics, grpcServer)
} }
// Initialize manager client // Initialize gc server
if cfg.Manager.Addr != "" { gcServer, err := gc.New()
managerClient, err := managerclient.New(cfg.Manager.Addr) if err != nil {
return nil, errors.Wrap(err, "create gcServer")
}
var metricsServer *metrics.Server
if cfg.Metrics != nil && cfg.Metrics.Addr != "" {
// Initialize metrics server
metricsServer, err = metrics.New(cfg.Metrics, grpcServer.Server)
if err != nil { if err != nil {
return nil, err return nil, errors.Wrap(err, "create metricsServer")
}
s.managerClient = managerClient
// Register to manager
if _, err := s.managerClient.UpdateCDN(&manager.UpdateCDNRequest{
SourceType: manager.SourceType_CDN_SOURCE,
HostName: hostutils.FQDNHostname,
Ip: s.config.AdvertiseIP,
Port: int32(s.config.ListenPort),
DownloadPort: int32(s.config.DownloadPort),
Idc: s.config.Host.IDC,
Location: s.config.Host.Location,
CdnClusterId: uint64(s.config.Manager.CDNClusterID),
}); err != nil {
return nil, err
} }
} }
return s, nil // Initialize configServer
var configServer managerClient.Client
if cfg.Manager.Addr != "" {
configServer, err = managerClient.New(cfg.Manager.Addr)
if err != nil {
return nil, errors.Wrap(err, "create configServer")
}
}
return &Server{
config: cfg,
grpcServer: grpcServer,
metricsServer: metricsServer,
configServer: configServer,
gcServer: gcServer,
}, nil
} }
func (s *Server) Serve() error { func (s *Server) Serve() error {
// Start GC go func() {
ctx, cancel := context.WithCancel(context.Background()) // Start GC
defer cancel() if err := s.gcServer.Serve(); err != nil {
if err := gc.StartGC(ctx); err != nil { logger.Fatalf("start gc task failed: %v", err)
return err }
} }()
// Started metrics server go func() {
if s.metricsServer != nil { if s.metricsServer != nil {
go func() { // Start metrics server
logger.Infof("started metrics server at %s", s.metricsServer.Addr) if err := s.metricsServer.ListenAndServe(s.metricsServer.Handler()); err != nil {
if err := s.metricsServer.ListenAndServe(); err != nil { logger.Fatalf("start metrics server failed: %v", err)
if err == http.ErrServerClosed {
return
}
logger.Fatalf("metrics server closed unexpect: %#v", err)
} }
}() }
} }()
// Serve Keepalive go func() {
if s.managerClient != nil { if s.configServer != nil {
go func() { CDNInstance, err := s.configServer.UpdateCDN(&manager.UpdateCDNRequest{
logger.Info("start keepalive to manager") SourceType: manager.SourceType_CDN_SOURCE,
s.managerClient.KeepAlive(s.config.Manager.KeepAlive.Interval, &manager.KeepAliveRequest{ HostName: hostutils.FQDNHostname,
Ip: s.config.AdvertiseIP,
Port: int32(s.config.ListenPort),
DownloadPort: int32(s.config.DownloadPort),
Idc: s.config.Host.IDC,
Location: s.config.Host.Location,
CdnClusterId: uint64(s.config.Manager.CDNClusterID),
})
if err != nil {
logger.Fatalf("update cdn instance failed: %v", err)
}
// Serve Keepalive
logger.Infof("====starting keepalive cdn instance %#v to manager %s====", CDNInstance)
s.configServer.KeepAlive(s.config.Manager.KeepAlive.Interval, &manager.KeepAliveRequest{
HostName: hostutils.FQDNHostname, HostName: hostutils.FQDNHostname,
SourceType: manager.SourceType_CDN_SOURCE, SourceType: manager.SourceType_CDN_SOURCE,
ClusterId: uint64(s.config.Manager.CDNClusterID), ClusterId: uint64(s.config.Manager.CDNClusterID),
}) })
}()
}
// Generate GRPC listener
var listen = iputils.IPv4
if s.config.AdvertiseIP != "" {
listen = s.config.AdvertiseIP
}
lis, _, err := rpc.ListenWithPortRange(listen, s.config.ListenPort, s.config.ListenPort)
if err != nil {
logger.Fatalf("net listener failed to start: %v", err)
}
defer lis.Close()
// Started GRPC server
logger.Infof("started grpc server at %s://%s", lis.Addr().Network(), lis.Addr().String())
if err := s.grpcServer.Serve(lis); err != nil {
logger.Errorf("stoped grpc server: %v", err)
return err
}
return nil
}
func (s *Server) Stop() {
// Stop manager client
if s.managerClient != nil {
s.managerClient.Close()
logger.Info("manager client closed")
}
// Stop metrics server
if s.metricsServer != nil {
if err := s.metricsServer.Shutdown(context.Background()); err != nil {
logger.Errorf("metrics server failed to stop: %#v", err)
} }
logger.Info("metrics server closed under request")
}
// Stop GRPC server
stopped := make(chan struct{})
go func() {
s.grpcServer.GracefulStop()
logger.Info("grpc server closed under request")
close(stopped)
}() }()
t := time.NewTimer(gracefulStopTimeout) // Start grpc server
select { return s.grpcServer.ListenAndServe()
case <-t.C: }
s.grpcServer.Stop()
case <-stopped: func (s *Server) Stop() error {
t.Stop() g, ctx := errgroup.WithContext(context.Background())
}
g.Go(func() error {
return s.gcServer.Shutdown()
})
if s.configServer != nil {
// Stop manager client
g.Go(func() error {
return s.configServer.Close()
})
}
g.Go(func() error {
// Stop metrics server
return s.metricsServer.Shutdown(ctx)
})
g.Go(func() error {
// Stop grpc server
return s.grpcServer.Shutdown()
})
return g.Wait()
} }

View File

@ -21,12 +21,9 @@ import (
"gopkg.in/yaml.v3" "gopkg.in/yaml.v3"
"d7y.io/dragonfly/v2/cdn/constants"
"d7y.io/dragonfly/v2/cdn/plugins" "d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/cdn/storedriver" "d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/storedriver/local"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/disk"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/hybrid"
"d7y.io/dragonfly/v2/cmd/dependency/base" "d7y.io/dragonfly/v2/cmd/dependency/base"
"d7y.io/dragonfly/v2/pkg/unit" "d7y.io/dragonfly/v2/pkg/unit"
"d7y.io/dragonfly/v2/pkg/util/net/iputils" "d7y.io/dragonfly/v2/pkg/util/net/iputils"
@ -60,13 +57,13 @@ func NewDefaultPlugins() map[plugins.PluginType][]*plugins.PluginProperties {
return map[plugins.PluginType][]*plugins.PluginProperties{ return map[plugins.PluginType][]*plugins.PluginProperties{
plugins.StorageDriverPlugin: { plugins.StorageDriverPlugin: {
{ {
Name: local.DiskDriverName, Name: "disk",
Enable: true, Enable: true,
Config: &storedriver.Config{ Config: &storedriver.Config{
BaseDir: DefaultDiskBaseDir, BaseDir: DefaultDiskBaseDir,
}, },
}, { }, {
Name: local.MemoryDriverName, Name: "memory",
Enable: false, Enable: false,
Config: &storedriver.Config{ Config: &storedriver.Config{
BaseDir: DefaultMemoryBaseDir, BaseDir: DefaultMemoryBaseDir,
@ -74,14 +71,14 @@ func NewDefaultPlugins() map[plugins.PluginType][]*plugins.PluginProperties {
}, },
}, plugins.StorageManagerPlugin: { }, plugins.StorageManagerPlugin: {
{ {
Name: disk.StorageMode, Name: "disk",
Enable: true, Enable: true,
Config: &storage.Config{ Config: &StorageConfig{
GCInitialDelay: 0 * time.Second, GCInitialDelay: 0 * time.Second,
GCInterval: 15 * time.Second, GCInterval: 15 * time.Second,
DriverConfigs: map[string]*storage.DriverConfig{ DriverConfigs: map[string]*DriverConfig{
local.DiskDriverName: { "disk": {
GCConfig: &storage.GCConfig{ GCConfig: &GCConfig{
YoungGCThreshold: 100 * unit.GB, YoungGCThreshold: 100 * unit.GB,
FullGCThreshold: 5 * unit.GB, FullGCThreshold: 5 * unit.GB,
CleanRatio: 1, CleanRatio: 1,
@ -90,22 +87,22 @@ func NewDefaultPlugins() map[plugins.PluginType][]*plugins.PluginProperties {
}, },
}, },
}, { }, {
Name: hybrid.StorageMode, Name: "hybrid",
Enable: false, Enable: false,
Config: &storage.Config{ Config: &StorageConfig{
GCInitialDelay: 0 * time.Second, GCInitialDelay: 0 * time.Second,
GCInterval: 15 * time.Second, GCInterval: 15 * time.Second,
DriverConfigs: map[string]*storage.DriverConfig{ DriverConfigs: map[string]*DriverConfig{
local.DiskDriverName: { "disk": {
GCConfig: &storage.GCConfig{ GCConfig: &GCConfig{
YoungGCThreshold: 100 * unit.GB, YoungGCThreshold: 100 * unit.GB,
FullGCThreshold: 5 * unit.GB, FullGCThreshold: 5 * unit.GB,
CleanRatio: 1, CleanRatio: 1,
IntervalThreshold: 2 * time.Hour, IntervalThreshold: 2 * time.Hour,
}, },
}, },
local.MemoryDriverName: { "memory": {
GCConfig: &storage.GCConfig{ GCConfig: &GCConfig{
YoungGCThreshold: 100 * unit.GB, YoungGCThreshold: 100 * unit.GB,
FullGCThreshold: 5 * unit.GB, FullGCThreshold: 5 * unit.GB,
CleanRatio: 3, CleanRatio: 3,
@ -122,25 +119,46 @@ func NewDefaultPlugins() map[plugins.PluginType][]*plugins.PluginProperties {
// NewDefaultBaseProperties creates an base properties instant with default values. // NewDefaultBaseProperties creates an base properties instant with default values.
func NewDefaultBaseProperties() *BaseProperties { func NewDefaultBaseProperties() *BaseProperties {
return &BaseProperties{ return &BaseProperties{
ListenPort: DefaultListenPort, ListenPort: constants.DefaultListenPort,
DownloadPort: DefaultDownloadPort, DownloadPort: constants.DefaultDownloadPort,
SystemReservedBandwidth: DefaultSystemReservedBandwidth, SystemReservedBandwidth: constants.DefaultSystemReservedBandwidth,
MaxBandwidth: DefaultMaxBandwidth, MaxBandwidth: constants.DefaultMaxBandwidth,
FailAccessInterval: DefaultFailAccessInterval,
GCInitialDelay: DefaultGCInitialDelay,
GCMetaInterval: DefaultGCMetaInterval,
TaskExpireTime: DefaultTaskExpireTime,
StorageMode: DefaultStorageMode,
AdvertiseIP: iputils.IPv4, AdvertiseIP: iputils.IPv4,
FailAccessInterval: constants.DefaultFailAccessInterval,
GCInitialDelay: constants.DefaultGCInitialDelay,
GCMetaInterval: constants.DefaultGCMetaInterval,
TaskExpireTime: constants.DefaultTaskExpireTime,
StorageMode: constants.DefaultStorageMode,
Manager: ManagerConfig{ Manager: ManagerConfig{
KeepAlive: KeepAliveConfig{ KeepAlive: KeepAliveConfig{
Interval: DefaultKeepAliveInterval, Interval: constants.DefaultKeepAliveInterval,
}, },
}, },
Host: HostConfig{}, Host: HostConfig{},
Metrics: &RestConfig{
Addr: ":8080",
},
} }
} }
type StorageConfig struct {
GCInitialDelay time.Duration `yaml:"gcInitialDelay"`
GCInterval time.Duration `yaml:"gcInterval"`
DriverConfigs map[string]*DriverConfig `yaml:"driverConfigs"`
}
type DriverConfig struct {
GCConfig *GCConfig `yaml:"gcConfig"`
}
// GCConfig gc config
type GCConfig struct {
YoungGCThreshold unit.Bytes `yaml:"youngGCThreshold"`
FullGCThreshold unit.Bytes `yaml:"fullGCThreshold"`
CleanRatio int `yaml:"cleanRatio"`
IntervalThreshold time.Duration `yaml:"intervalThreshold"`
}
// BaseProperties contains all basic properties of cdn system. // BaseProperties contains all basic properties of cdn system.
type BaseProperties struct { type BaseProperties struct {
// ListenPort is the port cdn server listens on. // ListenPort is the port cdn server listens on.

View File

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package config package constants
import ( import (
"time" "time"

View File

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package config package constants
import "go.opentelemetry.io/otel/attribute" import "go.opentelemetry.io/otel/attribute"
@ -24,7 +24,6 @@ const (
AttributePiecePacketResult = attribute.Key("d7y.piece.packet.result") AttributePiecePacketResult = attribute.Key("d7y.piece.packet.result")
AttributeTaskID = attribute.Key("d7y.task.id") AttributeTaskID = attribute.Key("d7y.task.id")
AttributeTaskStatus = attribute.Key("d7y.task.status") AttributeTaskStatus = attribute.Key("d7y.task.status")
AttributeTaskURL = attribute.Key("d7y.task.url")
AttributeTaskInfo = attribute.Key("d7y.taskInfo") AttributeTaskInfo = attribute.Key("d7y.taskInfo")
AttributeIfReuseTask = attribute.Key("d7y.task.already.exist") AttributeIfReuseTask = attribute.Key("d7y.task.already.exist")
AttributeSeedPiece = attribute.Key("d7y.seed.piece") AttributeSeedPiece = attribute.Key("d7y.seed.piece")
@ -48,7 +47,7 @@ const (
) )
const ( const (
EventHitUnReachableURL = "hit-unReachableURL" EventHitUnreachableURL = "hit-unreachableURL"
EventRequestSourceFileLength = "request-source-file-length" EventRequestSourceFileLength = "request-source-file-length"
EventDeleteUnReachableTask = "downloaded" EventDeleteUnReachableTask = "downloaded"
EventInitSeedProgress = "init-seed-progress" EventInitSeedProgress = "init-seed-progress"

View File

@ -1,161 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package errors
import (
"fmt"
"github.com/pkg/errors"
)
// ErrURLNotReachable indicates that the given URL cannot be accessed.
type ErrURLNotReachable struct {
	URL string
}

// Error implements the error interface.
func (e ErrURLNotReachable) Error() string {
	const format = "url %s not reachable"
	return fmt.Sprintf(format, e.URL)
}
// ErrTaskIDDuplicate reports a conflict on a task id, carrying the underlying cause.
type ErrTaskIDDuplicate struct {
	TaskID string
	Cause  error
}

// Error implements the error interface.
func (e ErrTaskIDDuplicate) Error() string {
	const format = "taskId %s conflict: %v"
	return fmt.Sprintf(format, e.TaskID, e.Cause)
}
// ErrInconsistentValues reports a mismatch between an expected and an actual
// value (e.g. an inconsistent number of pieces).
type ErrInconsistentValues struct {
	Expected interface{}
	Actual   interface{}
}

// Error implements the error interface.
// The fields are interface{} values, so they must be formatted with %v:
// the original %s verb rendered non-string values as "%!s(int=…)" noise.
func (e ErrInconsistentValues) Error() string {
	return fmt.Sprintf("inconsistent number of pieces, expected %v, actual: %v", e.Expected, e.Actual)
}
// ErrResourceExpired signals that the downloaded resource at URL has expired.
type ErrResourceExpired struct {
	URL string
}

// Error implements the error interface.
func (e ErrResourceExpired) Error() string {
	const format = "url %s expired"
	return fmt.Sprintf(format, e.URL)
}
// ErrResourceNotSupportRangeRequest signals that the resource at URL cannot
// be downloaded with HTTP Range requests.
type ErrResourceNotSupportRangeRequest struct {
	URL string
}

// Error implements the error interface.
func (e ErrResourceNotSupportRangeRequest) Error() string {
	const format = "url %s does not support range request"
	return fmt.Sprintf(format, e.URL)
}
// ErrFileNotExist signals that the named file or directory does not exist.
type ErrFileNotExist struct {
	File string
}

// Error implements the error interface.
func (e ErrFileNotExist) Error() string {
	const format = "file or dir %s not exist"
	return fmt.Sprintf(format, e.File)
}
var (
// ErrSystemError represents a generic system error; compare with IsSystemError.
ErrSystemError = errors.New("system error")
// ErrTaskDownloadFail represents that an exception was encountered while downloading the file.
ErrTaskDownloadFail = errors.New("resource download failed")
// ErrDataNotFound represents the data cannot be found.
ErrDataNotFound = errors.New("data not found")
// ErrInvalidValue represents the value is invalid.
ErrInvalidValue = errors.New("invalid value")
// ErrConvertFailed represents a failure to convert between types.
ErrConvertFailed = errors.New("convert failed")
// ErrResourcesLacked represents a lack of resources, for example, the disk does not have enough space.
ErrResourcesLacked = errors.New("resources lacked")
)
// IsSystemError reports whether err's root cause (via errors.Cause) is ErrSystemError.
func IsSystemError(err error) bool {
return errors.Cause(err) == ErrSystemError
}
// IsURLNotReachable reports whether err's root cause is an ErrURLNotReachable.
func IsURLNotReachable(err error) bool {
	_, ok := errors.Cause(err).(ErrURLNotReachable)
	return ok
}
// IsTaskIDDuplicate reports whether err's root cause is an ErrTaskIDDuplicate.
func IsTaskIDDuplicate(err error) bool {
	_, ok := errors.Cause(err).(ErrTaskIDDuplicate)
	return ok
}
// IsInconsistentValues reports whether err's root cause is an ErrInconsistentValues.
func IsInconsistentValues(err error) bool {
	_, ok := errors.Cause(err).(ErrInconsistentValues)
	return ok
}
// IsDownloadFail reports whether err's root cause (via errors.Cause) is ErrTaskDownloadFail.
func IsDownloadFail(err error) bool {
return errors.Cause(err) == ErrTaskDownloadFail
}
// IsResourceExpired reports whether err's root cause is an ErrResourceExpired.
func IsResourceExpired(err error) bool {
	_, ok := errors.Cause(err).(ErrResourceExpired)
	return ok
}
// IsResourceNotSupportRangeRequest reports whether err's root cause is an
// ErrResourceNotSupportRangeRequest.
func IsResourceNotSupportRangeRequest(err error) bool {
	_, ok := errors.Cause(err).(ErrResourceNotSupportRangeRequest)
	return ok
}
// IsDataNotFound reports whether err's root cause (via errors.Cause) is ErrDataNotFound.
func IsDataNotFound(err error) bool {
return errors.Cause(err) == ErrDataNotFound
}
// IsInvalidValue reports whether err's root cause (via errors.Cause) is ErrInvalidValue.
func IsInvalidValue(err error) bool {
return errors.Cause(err) == ErrInvalidValue
}
// IsConvertFailed reports whether err's root cause (via errors.Cause) is ErrConvertFailed.
func IsConvertFailed(err error) bool {
return errors.Cause(err) == ErrConvertFailed
}
// IsFileNotExist reports whether err's root cause is an ErrFileNotExist.
func IsFileNotExist(err error) bool {
	_, ok := errors.Cause(err).(ErrFileNotExist)
	return ok
}
// IsResourcesLacked reports whether err's root cause (via errors.Cause) is ErrResourcesLacked.
func IsResourcesLacked(err error) bool {
return errors.Cause(err) == ErrResourcesLacked
}

View File

@ -1,438 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package errors
import (
"fmt"
"testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/suite"
)
// TestErrorSuite runs ErrorTestSuite via testify's suite runner.
func TestErrorSuite(t *testing.T) {
suite.Run(t, new(ErrorTestSuite))
}
// ErrorTestSuite groups the table-driven tests for the errors package predicates.
type ErrorTestSuite struct {
suite.Suite
}
func (s *ErrorTestSuite) TestIsConvertFailed() {
type args struct {
err error
}
tests := []struct {
name string
args args
want bool
}{
{
name: "equal",
args: args{
err: ErrConvertFailed,
},
want: true,
}, {
name: "wrap",
args: args{
err: errors.Wrapf(ErrConvertFailed, "wrap err"),
},
want: true,
}, {
name: "notEqual",
args: args{
err: errors.Wrapf(ErrInvalidValue, "invaid"),
},
want: false,
},
}
for _, tt := range tests {
s.Run(tt.name, func() {
s.Equal(tt.want, IsConvertFailed(tt.args.err))
})
}
}
func (s *ErrorTestSuite) TestIsDataNotFound() {
type args struct {
err error
}
tests := []struct {
name string
args args
want bool
}{
{
name: "equal",
args: args{
err: ErrDataNotFound,
},
want: true,
}, {
name: "wrap",
args: args{
err: errors.Wrapf(ErrDataNotFound, "wrap err"),
},
want: true,
}, {
name: "notEqual",
args: args{
err: errors.Wrapf(ErrInvalidValue, "invaid"),
},
want: false,
},
}
for _, tt := range tests {
s.Run(tt.name, func() {
s.Equal(tt.want, IsDataNotFound(tt.args.err))
})
}
}
func (s *ErrorTestSuite) TestIsDownloadFail() {
type args struct {
err error
}
tests := []struct {
name string
args args
want bool
}{
{
name: "equal",
args: args{
err: ErrTaskDownloadFail,
},
want: true,
}, {
name: "wrap",
args: args{
err: errors.Wrapf(errors.Wrapf(ErrTaskDownloadFail, "wrap err"), "wapp err"),
},
want: true,
}, {
name: "notEqual",
args: args{
err: errors.Wrapf(ErrInvalidValue, "invaid"),
},
want: false,
},
}
for _, tt := range tests {
s.Run(tt.name, func() {
s.Equal(tt.want, IsDownloadFail(tt.args.err))
})
}
}
func (s *ErrorTestSuite) TestIsFileNotExist() {
type args struct {
err error
}
tests := []struct {
name string
args args
want bool
}{
{
name: "equal",
args: args{
err: ErrFileNotExist{},
},
want: true,
}, {
name: "wrap",
args: args{
err: errors.Wrapf(errors.Wrapf(ErrFileNotExist{}, "wrap err"), "wapp err"),
},
want: true,
}, {
name: "notEqual",
args: args{
err: errors.Wrapf(ErrInvalidValue, "invaid"),
},
want: false,
},
}
for _, tt := range tests {
s.Run(tt.name, func() {
s.Equal(tt.want, IsFileNotExist(tt.args.err))
})
}
}
func (s *ErrorTestSuite) TestIsInvalidValue() {
type args struct {
err error
}
tests := []struct {
name string
args args
want bool
}{
{
name: "equal",
args: args{
err: ErrInvalidValue,
},
want: true,
}, {
name: "wrap",
args: args{
err: errors.Wrapf(errors.Wrapf(ErrInvalidValue, "wrap err"), "wapp err"),
},
want: true,
}, {
name: "notEqual",
args: args{
err: errors.Wrapf(ErrInvalidValue, "invaid"),
},
want: true,
},
}
for _, tt := range tests {
s.Run(tt.name, func() {
s.Equal(tt.want, IsInvalidValue(tt.args.err))
})
}
}
func (s *ErrorTestSuite) TestIsInconsistentValues() {
type args struct {
err error
}
tests := []struct {
name string
args args
want bool
}{
{
name: "equal",
args: args{
err: ErrInconsistentValues{},
},
want: true,
}, {
name: "wrap",
args: args{
err: errors.Wrapf(errors.Wrapf(ErrInconsistentValues{}, "wrap err"), "wapp err"),
},
want: true,
}, {
name: "notEqual",
args: args{
err: errors.Wrapf(ErrInvalidValue, "invaid"),
},
want: false,
},
}
for _, tt := range tests {
s.Run(tt.name, func() {
s.Equal(tt.want, IsInconsistentValues(tt.args.err))
})
}
}
func (s *ErrorTestSuite) TestIsResourceExpired() {
type args struct {
err error
}
tests := []struct {
name string
args args
want bool
}{
{
name: "equal",
args: args{
err: ErrResourceExpired{},
},
want: true,
}, {
name: "wrap",
args: args{
err: errors.Wrapf(errors.Wrapf(ErrResourceExpired{}, "wrap err"), "wapp err"),
},
want: true,
}, {
name: "notEqual",
args: args{
err: errors.Wrapf(ErrInvalidValue, "invaid"),
},
want: false,
},
}
for _, tt := range tests {
s.Run(tt.name, func() {
s.Equal(tt.want, IsResourceExpired(tt.args.err))
})
}
}
func (s *ErrorTestSuite) TestIsResourceNotSupportRangeRequest() {
type args struct {
err error
}
tests := []struct {
name string
args args
want bool
}{
{
name: "equal",
args: args{
err: ErrResourceNotSupportRangeRequest{},
},
want: true,
}, {
name: "wrap",
args: args{
err: errors.Wrapf(errors.Wrapf(ErrResourceNotSupportRangeRequest{}, "wrap err"), "wapp err"),
},
want: true,
}, {
name: "notEqual",
args: args{
err: errors.Wrapf(ErrInvalidValue, "invaid"),
},
want: false,
},
}
for _, tt := range tests {
s.Run(tt.name, func() {
s.Equal(tt.want, IsResourceNotSupportRangeRequest(tt.args.err))
})
}
}
func (s *ErrorTestSuite) TestIsSystemError() {
type args struct {
err error
}
tests := []struct {
name string
args args
want bool
}{
{
name: "equal",
args: args{
err: ErrSystemError,
},
want: true,
}, {
name: "wrap",
args: args{
err: errors.Wrapf(errors.Wrapf(ErrSystemError, "wrap err"), "wapp err"),
},
want: true,
}, {
name: "notEqual",
args: args{
err: errors.Wrapf(ErrInvalidValue, "invaid"),
},
want: false,
},
}
for _, tt := range tests {
s.Run(tt.name, func() {
s.Equal(tt.want, IsSystemError(tt.args.err))
})
}
}
func (s *ErrorTestSuite) TestIsTaskIDDuplicate() {
type args struct {
err error
}
tests := []struct {
name string
args args
want bool
}{
{
name: "equal",
args: args{
err: ErrTaskIDDuplicate{
TaskID: "test",
Cause: fmt.Errorf("test"),
},
},
want: true,
}, {
name: "wrap",
args: args{
err: errors.Wrapf(ErrTaskIDDuplicate{
TaskID: "test",
Cause: fmt.Errorf("test")}, "wapp err"),
},
want: true,
}, {
name: "notEqual",
args: args{
err: errors.Wrapf(ErrInvalidValue, "invaid"),
},
want: false,
},
}
for _, tt := range tests {
s.Run(tt.name, func() {
s.Equal(tt.want, IsTaskIDDuplicate(tt.args.err))
})
}
}
func (s *ErrorTestSuite) TestIsURLNotReachable() {
type args struct {
err error
}
tests := []struct {
name string
args args
want bool
}{
{
name: "equal",
args: args{
err: ErrURLNotReachable{
URL: "test",
},
},
want: true,
}, {
name: "wrap",
args: args{
err: errors.Wrapf(ErrURLNotReachable{
URL: "test",
}, "wapp err"),
},
want: true,
}, {
name: "notEqual",
args: args{
err: errors.Wrapf(ErrInvalidValue, "invaid"),
},
want: false,
},
}
for _, tt := range tests {
s.Run(tt.name, func() {
s.Equal(tt.want, IsURLNotReachable(tt.args.err))
})
}
}

View File

@ -17,7 +17,6 @@
package gc package gc
import ( import (
"context"
"strings" "strings"
"sync" "sync"
"time" "time"
@ -25,6 +24,54 @@ import (
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
) )
// Server drives the registered gc executors until it is shut down.
type Server struct {
	done chan struct{} // closed/signalled to stop the gc goroutines
	wg   *sync.WaitGroup
}

// New constructs a gc Server that is ready to Serve.
func New() (*Server, error) {
	s := &Server{
		done: make(chan struct{}),
		wg:   &sync.WaitGroup{},
	}
	return s, nil
}
// Serve launches one goroutine per registered gc executor and blocks until
// all of them have exited (i.e. until Shutdown signals server.done).
func (server *Server) Serve() error {
	logger.Info("====starting gc jobs====")
	for name, executorWrapper := range gcExecutorWrappers {
		server.wg.Add(1)
		// One goroutine per executor so a slow GC cannot delay the others.
		// name/wrapper are passed as arguments to avoid loop-variable capture.
		go func(name string, wrapper *ExecutorWrapper) {
			defer server.wg.Done()
			logger.Debugf("start %s gc mission gc initialDelay: %s, gc initial interval: %s", name, wrapper.gcInitialDelay, wrapper.gcInterval)
			// delay executing GC after initialDelay
			time.Sleep(wrapper.gcInitialDelay)
			// execute the GC by fixed delay
			ticker := time.NewTicker(wrapper.gcInterval)
			// Release the ticker when the goroutine exits; the original
			// version leaked it after shutdown.
			defer ticker.Stop()
			for {
				select {
				case <-server.done:
					logger.Infof("exit %s gc task", name)
					return
				case <-ticker.C:
					if err := wrapper.gcExecutor.GC(); err != nil {
						logger.Errorf("%s gc task execute failed: %v", name, err)
					}
				}
			}
		}(name, executorWrapper)
	}
	server.wg.Wait()
	return nil
}
// Shutdown stops every gc goroutine and waits for them to finish.
// It closes server.done instead of performing a single send: a send wakes at
// most one goroutine (leaving the rest running) and blocks forever when no
// gc executor is registered. Shutdown must be called at most once.
func (server *Server) Shutdown() error {
	defer logger.Infof("====stopped gc server====")
	close(server.done)
	server.wg.Wait()
	return nil
}
type Executor interface { type Executor interface {
GC() error GC() error
} }
@ -46,36 +93,5 @@ func Register(name string, gcInitialDelay time.Duration, gcInterval time.Duratio
gcInterval: gcInterval, gcInterval: gcInterval,
gcExecutor: gcExecutor, gcExecutor: gcExecutor,
} }
} logger.Infof("register %s gc task, gcInitialDelay %s, gcInterval %s", name, gcInitialDelay, gcInterval)
// StartGC starts to do the gc jobs.
func StartGC(ctx context.Context) error {
logger.Debugf("====start the gc jobs====")
var wg sync.WaitGroup
for name, executorWrapper := range gcExecutorWrappers {
wg.Add(1)
// start a goroutine to gc
go func(name string, wrapper *ExecutorWrapper) {
logger.Debugf("start the %s gc task", name)
// delay to execute GC after initialDelay
time.Sleep(wrapper.gcInitialDelay)
wg.Done()
// execute the GC by fixed delay
ticker := time.NewTicker(wrapper.gcInterval)
for {
select {
case <-ctx.Done():
logger.Infof("exit %s gc task", name)
return
case <-ticker.C:
if err := wrapper.gcExecutor.GC(); err != nil {
logger.Errorf("%s gc task execute failed: %v", name, err)
}
}
}
}(name, executorWrapper)
}
wg.Wait()
logger.Debugf("====all gc jobs have been launched====")
return nil
} }

View File

@ -17,16 +17,21 @@
package metrics package metrics
import ( import (
"context"
"net"
"net/http" "net/http"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/grpc" "google.golang.org/grpc"
"gopkg.in/yaml.v3"
"d7y.io/dragonfly/v2/cdn/config" "d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/internal/constants" "d7y.io/dragonfly/v2/internal/constants"
logger "d7y.io/dragonfly/v2/internal/dflog"
) )
// Variables declared for metrics. // Variables declared for metrics.
@ -60,14 +65,49 @@ var (
}) })
) )
func New(cfg *config.RestConfig, grpcServer *grpc.Server) *http.Server { type Server struct {
grpc_prometheus.Register(grpcServer) config *config.RestConfig
httpServer *http.Server
}
func New(config *config.RestConfig, rpcServer *grpc.Server) (*Server, error) {
// scheduler config values
s, err := yaml.Marshal(config)
if err != nil {
return nil, errors.Wrap(err, "marshal metrics server config")
}
logger.Infof("metrics server config: \n%s", s)
grpc_prometheus.Register(rpcServer)
return &Server{
config: config,
httpServer: &http.Server{},
}, nil
}
// Handler returns an http handler for the blob server.
func (s *Server) Handler() http.Handler {
mux := http.NewServeMux() mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.Handler()) mux.Handle("/metrics", promhttp.Handler())
return mux
return &http.Server{ }
Addr: cfg.Addr,
Handler: mux, // ListenAndServe is a blocking call which runs s.
} func (s *Server) ListenAndServe(h http.Handler) error {
l, err := net.Listen("tcp", s.config.Addr)
if err != nil {
return err
}
s.httpServer.Handler = h
logger.Infof("====starting metrics server at %s====", s.config.Addr)
err = s.httpServer.Serve(l)
if errors.Is(err, http.ErrServerClosed) {
return nil
}
return err
}
func (s *Server) Shutdown(ctx context.Context) error {
defer logger.Infof("====stopped metrics server====")
return s.httpServer.Shutdown(ctx)
} }

View File

@ -38,7 +38,7 @@ func NewRepository() Repository {
// Manager manages all plugin builders and plugin instants. // Manager manages all plugin builders and plugin instants.
type Manager interface { type Manager interface {
// GetBuilder adds a Builder object with the giving plugin type and name. // AddBuilder adds a Builder object with the giving plugin type and name.
AddBuilder(pt PluginType, name string, b Builder) error AddBuilder(pt PluginType, name string, b Builder) error
// GetBuilder returns a Builder object with the giving plugin type and name. // GetBuilder returns a Builder object with the giving plugin type and name.

View File

@ -18,63 +18,70 @@ package rpcserver
import ( import (
"context" "context"
"encoding/json"
"fmt" "fmt"
"time"
"go.opentelemetry.io/otel" "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/peer"
"d7y.io/dragonfly/v2/cdn/config" "d7y.io/dragonfly/v2/cdn/config"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors" "d7y.io/dragonfly/v2/cdn/constants"
"d7y.io/dragonfly/v2/cdn/supervisor" "d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/types" "d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/internal/dferrors" "d7y.io/dragonfly/v2/internal/dferrors"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/internal/idgen" "d7y.io/dragonfly/v2/internal/idgen"
"d7y.io/dragonfly/v2/pkg/rpc"
"d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem" "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
cdnserver "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/server" cdnserver "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/server"
"d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/util/hostutils" "d7y.io/dragonfly/v2/pkg/util/hostutils"
) )
var tracer = otel.Tracer("cdn-server") var tracer = otel.Tracer("cdn-server")
type server struct { type Server struct {
*grpc.Server *grpc.Server
taskMgr supervisor.SeedTaskMgr config *config.Config
cfg *config.Config service supervisor.CDNService
} }
// New returns a new Manager Object. // New returns a new Manager Object.
func New(cfg *config.Config, taskMgr supervisor.SeedTaskMgr, opts ...grpc.ServerOption) (*grpc.Server, error) { func New(config *config.Config, cdnService supervisor.CDNService, opts ...grpc.ServerOption) (*Server, error) {
svr := &server{ svr := &Server{
taskMgr: taskMgr, config: config,
cfg: cfg, service: cdnService,
} }
svr.Server = cdnserver.New(svr, opts...) svr.Server = cdnserver.New(svr, opts...)
return svr.Server, nil return svr, nil
} }
func (css *server) ObtainSeeds(ctx context.Context, req *cdnsystem.SeedRequest, psc chan<- *cdnsystem.PieceSeed) (err error) { func (css *Server) ObtainSeeds(ctx context.Context, req *cdnsystem.SeedRequest, psc chan<- *cdnsystem.PieceSeed) (err error) {
clientAddr := "unknown"
if pe, ok := peer.FromContext(ctx); ok {
clientAddr = pe.Addr.String()
}
logger.Infof("trigger obtain seed for taskID: %s, url: %s, urlMeta: %+v client: %s", req.TaskId, req.Url, req.UrlMeta, clientAddr)
var span trace.Span var span trace.Span
ctx, span = tracer.Start(ctx, config.SpanObtainSeeds, trace.WithSpanKind(trace.SpanKindServer)) ctx, span = tracer.Start(ctx, constants.SpanObtainSeeds, trace.WithSpanKind(trace.SpanKindServer))
defer span.End() defer span.End()
span.SetAttributes(config.AttributeObtainSeedsRequest.String(req.String())) span.SetAttributes(constants.AttributeObtainSeedsRequest.String(req.String()))
span.SetAttributes(config.AttributeTaskID.String(req.TaskId)) span.SetAttributes(constants.AttributeTaskID.String(req.TaskId))
logger.Infof("obtain seeds request: %#v", req)
defer func() { defer func() {
if r := recover(); r != nil { if r := recover(); r != nil {
err = dferrors.Newf(base.Code_UnknownError, "obtain task(%s) seeds encounter an panic: %v", req.TaskId, r) err = dferrors.Newf(base.Code_UnknownError, "obtain task(%s) seeds encounter an panic: %v", req.TaskId, r)
span.RecordError(err) span.RecordError(err)
logger.WithTaskID(req.TaskId).Errorf("%v", err) logger.WithTaskID(req.TaskId).Errorf("%v", err)
} }
logger.Infof("seeds task %s result success: %t", req.TaskId, err == nil)
}() }()
// register task // register seed task
pieceChan, err := css.taskMgr.Register(ctx, types.NewSeedTask(req.TaskId, req.Url, req.UrlMeta)) pieceChan, err := css.service.RegisterSeedTask(ctx, clientAddr, task.NewSeedTask(req.TaskId, req.Url, req.UrlMeta))
if err != nil { if err != nil {
if cdnerrors.IsResourcesLacked(err) { if supervisor.IsResourcesLacked(err) {
err = dferrors.Newf(base.Code_ResourceLacked, "resources lacked for task(%s): %v", req.TaskId, err) err = dferrors.Newf(base.Code_ResourceLacked, "resources lacked for task(%s): %v", req.TaskId, err)
span.RecordError(err) span.RecordError(err)
return err return err
@ -83,90 +90,113 @@ func (css *server) ObtainSeeds(ctx context.Context, req *cdnsystem.SeedRequest,
span.RecordError(err) span.RecordError(err)
return err return err
} }
peerID := idgen.CDNPeerID(css.cfg.AdvertiseIP) peerID := idgen.CDNPeerID(css.config.AdvertiseIP)
hostID := idgen.CDNHostID(hostutils.FQDNHostname, int32(css.config.ListenPort))
for piece := range pieceChan { for piece := range pieceChan {
psc <- &cdnsystem.PieceSeed{ pieceSeed := &cdnsystem.PieceSeed{
PeerId: peerID, PeerId: peerID,
HostUuid: idgen.CDNHostID(hostutils.FQDNHostname, int32(css.cfg.ListenPort)), HostUuid: hostID,
PieceInfo: &base.PieceInfo{ PieceInfo: &base.PieceInfo{
PieceNum: int32(piece.PieceNum), PieceNum: int32(piece.PieceNum),
RangeStart: piece.PieceRange.StartIndex, RangeStart: piece.PieceRange.StartIndex,
RangeSize: piece.PieceLen, RangeSize: piece.PieceLen,
PieceMd5: piece.PieceMd5, PieceMd5: piece.PieceMd5,
PieceOffset: piece.OriginRange.StartIndex, PieceOffset: piece.OriginRange.StartIndex,
PieceStyle: base.PieceStyle(piece.PieceStyle), PieceStyle: piece.PieceStyle,
}, },
Done: false, Done: false,
ContentLength: source.UnknownSourceFileLen,
TotalPieceCount: task.UnknownTotalPieceCount,
} }
psc <- pieceSeed
jsonPiece, err := json.Marshal(pieceSeed)
if err != nil {
logger.Errorf("failed to json marshal seed piece: %v", err)
}
logger.Debugf("send piece seed: %s to client: %s", jsonPiece, clientAddr)
} }
task, err := css.taskMgr.Get(req.TaskId) seedTask, err := css.service.GetSeedTask(req.TaskId)
if err != nil { if err != nil {
err = dferrors.Newf(base.Code_CDNError, "failed to get task(%s): %v", req.TaskId, err)
if task.IsTaskNotFound(err) {
err = dferrors.Newf(base.Code_CDNTaskNotFound, "failed to get task(%s): %v", req.TaskId, err)
span.RecordError(err)
return err
}
err = dferrors.Newf(base.Code_CDNError, "failed to get task(%s): %v", req.TaskId, err) err = dferrors.Newf(base.Code_CDNError, "failed to get task(%s): %v", req.TaskId, err)
span.RecordError(err) span.RecordError(err)
return err return err
} }
if !task.IsSuccess() { if !seedTask.IsSuccess() {
err = dferrors.Newf(base.Code_CDNTaskDownloadFail, "task(%s) status error , status: %s", req.TaskId, task.CdnStatus) err = dferrors.Newf(base.Code_CDNTaskDownloadFail, "task(%s) status error , status: %s", req.TaskId, seedTask.CdnStatus)
span.RecordError(err) span.RecordError(err)
return err return err
} }
psc <- &cdnsystem.PieceSeed{ pieceSeed := &cdnsystem.PieceSeed{
PeerId: peerID, PeerId: peerID,
HostUuid: idgen.CDNHostID(hostutils.FQDNHostname, int32(css.cfg.ListenPort)), HostUuid: hostID,
Done: true, Done: true,
ContentLength: task.SourceFileLength, ContentLength: seedTask.SourceFileLength,
TotalPieceCount: task.PieceTotal, TotalPieceCount: seedTask.TotalPieceCount,
} }
psc <- pieceSeed
jsonPiece, err := json.Marshal(pieceSeed)
if err != nil {
logger.Errorf("failed to json marshal seed piece: %v", err)
}
logger.Debugf("send piece seed: %s to client: %s", jsonPiece, clientAddr)
return nil return nil
} }
func (css *server) GetPieceTasks(ctx context.Context, req *base.PieceTaskRequest) (piecePacket *base.PiecePacket, err error) { func (css *Server) GetPieceTasks(ctx context.Context, req *base.PieceTaskRequest) (piecePacket *base.PiecePacket, err error) {
var span trace.Span var span trace.Span
ctx, span = tracer.Start(ctx, config.SpanGetPieceTasks, trace.WithSpanKind(trace.SpanKindServer)) _, span = tracer.Start(ctx, constants.SpanGetPieceTasks, trace.WithSpanKind(trace.SpanKindServer))
defer span.End() defer span.End()
span.SetAttributes(config.AttributeGetPieceTasksRequest.String(req.String())) span.SetAttributes(constants.AttributeGetPieceTasksRequest.String(req.String()))
span.SetAttributes(config.AttributeTaskID.String(req.TaskId)) span.SetAttributes(constants.AttributeTaskID.String(req.TaskId))
logger.Infof("get piece tasks: %#v", req)
defer func() { defer func() {
if r := recover(); r != nil { if r := recover(); r != nil {
err = dferrors.Newf(base.Code_UnknownError, "get task(%s) piece tasks encounter an panic: %v", req.TaskId, r) err = dferrors.Newf(base.Code_UnknownError, "get task(%s) piece tasks encounter an panic: %v", req.TaskId, r)
span.RecordError(err) span.RecordError(err)
logger.WithTaskID(req.TaskId).Errorf("%v", err) logger.WithTaskID(req.TaskId).Errorf("get piece tasks failed: %v", err)
} }
logger.WithTaskID(req.TaskId).Infof("get piece tasks result success: %t", err == nil)
}() }()
logger.Infof("get piece tasks: %#v", req) logger.Infof("get piece tasks: %#v", req)
task, err := css.taskMgr.Get(req.TaskId) seedTask, err := css.service.GetSeedTask(req.TaskId)
if err != nil { if err != nil {
if cdnerrors.IsDataNotFound(err) { if task.IsTaskNotFound(err) {
err = dferrors.Newf(base.Code_CDNTaskNotFound, "failed to get task(%s) from cdn: %v", req.TaskId, err) err = dferrors.Newf(base.Code_CDNTaskNotFound, "failed to get task(%s): %v", req.TaskId, err)
span.RecordError(err) span.RecordError(err)
return nil, err return nil, err
} }
err = dferrors.Newf(base.Code_CDNError, "failed to get task(%s) from cdn: %v", req.TaskId, err) err = dferrors.Newf(base.Code_CDNError, "failed to get task(%s): %v", req.TaskId, err)
span.RecordError(err) span.RecordError(err)
return nil, err return nil, err
} }
if task.IsError() { if seedTask.IsError() {
err = dferrors.Newf(base.Code_CDNTaskDownloadFail, "fail to download task(%s), cdnStatus: %s", task.TaskID, task.CdnStatus) err = dferrors.Newf(base.Code_CDNTaskDownloadFail, "task(%s) status is FAIL, cdnStatus: %s", seedTask.ID, seedTask.CdnStatus)
span.RecordError(err) span.RecordError(err)
return nil, err return nil, err
} }
pieces, err := css.taskMgr.GetPieces(ctx, req.TaskId) pieces, err := css.service.GetSeedPieces(req.TaskId)
if err != nil { if err != nil {
err = dferrors.Newf(base.Code_CDNError, "failed to get pieces of task(%s) from cdn: %v", task.TaskID, err) err = dferrors.Newf(base.Code_CDNError, "failed to get pieces of task(%s) from cdn: %v", seedTask.ID, err)
span.RecordError(err) span.RecordError(err)
return nil, err return nil, err
} }
pieceInfos := make([]*base.PieceInfo, 0) pieceInfos := make([]*base.PieceInfo, 0, len(pieces))
var count uint32 = 0 var count uint32 = 0
for _, piece := range pieces { for _, piece := range pieces {
if piece.PieceNum >= req.StartNum && (count < req.Limit || req.Limit == 0) { if piece.PieceNum >= req.StartNum && (count < req.Limit || req.Limit <= 0) {
p := &base.PieceInfo{ p := &base.PieceInfo{
PieceNum: int32(piece.PieceNum), PieceNum: int32(piece.PieceNum),
RangeStart: piece.PieceRange.StartIndex, RangeStart: piece.PieceRange.StartIndex,
RangeSize: piece.PieceLen, RangeSize: piece.PieceLen,
PieceMd5: piece.PieceMd5, PieceMd5: piece.PieceMd5,
PieceOffset: piece.OriginRange.StartIndex, PieceOffset: piece.OriginRange.StartIndex,
PieceStyle: base.PieceStyle(piece.PieceStyle), PieceStyle: piece.PieceStyle,
} }
pieceInfos = append(pieceInfos, p) pieceInfos = append(pieceInfos, p)
count++ count++
@ -175,12 +205,43 @@ func (css *server) GetPieceTasks(ctx context.Context, req *base.PieceTaskRequest
pp := &base.PiecePacket{ pp := &base.PiecePacket{
TaskId: req.TaskId, TaskId: req.TaskId,
DstPid: req.DstPid, DstPid: req.DstPid,
DstAddr: fmt.Sprintf("%s:%d", css.cfg.AdvertiseIP, css.cfg.DownloadPort), DstAddr: fmt.Sprintf("%s:%d", css.config.AdvertiseIP, css.config.DownloadPort),
PieceInfos: pieceInfos, PieceInfos: pieceInfos,
TotalPiece: task.PieceTotal, TotalPiece: seedTask.TotalPieceCount,
ContentLength: task.SourceFileLength, ContentLength: seedTask.SourceFileLength,
PieceMd5Sign: task.PieceMd5Sign, PieceMd5Sign: seedTask.PieceMd5Sign,
} }
span.SetAttributes(config.AttributePiecePacketResult.String(pp.String())) span.SetAttributes(constants.AttributePiecePacketResult.String(pp.String()))
return pp, nil return pp, nil
} }
func (css *Server) ListenAndServe() error {
// Generate GRPC listener
lis, _, err := rpc.ListenWithPortRange(css.config.AdvertiseIP, css.config.ListenPort, css.config.ListenPort)
if err != nil {
return err
}
//Started GRPC server
logger.Infof("====starting grpc server at %s://%s====", lis.Addr().Network(), lis.Addr().String())
return css.Server.Serve(lis)
}
const (
gracefulStopTimeout = 10 * time.Second
)
func (css *Server) Shutdown() error {
defer logger.Infof("====stopped rpc server====")
stopped := make(chan struct{})
go func() {
css.Server.GracefulStop()
close(stopped)
}()
select {
case <-time.After(gracefulStopTimeout):
css.Server.Stop()
case <-stopped:
}
return nil
}

View File

@ -1,122 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rpcserver
import (
"context"
"reflect"
"testing"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/rpc/cdnsystem"
)
func TestCdnSeedServer_GetPieceTasks(t *testing.T) {
type fields struct {
taskMgr supervisor.SeedTaskMgr
cfg *config.Config
}
type args struct {
ctx context.Context
req *base.PieceTaskRequest
}
tests := []struct {
name string
fields fields
args args
wantPiecePacket *base.PiecePacket
wantErr bool
}{
// TODO: Add test cases.
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
css := &server{
taskMgr: tt.fields.taskMgr,
cfg: tt.fields.cfg,
}
gotPiecePacket, err := css.GetPieceTasks(tt.args.ctx, tt.args.req)
if (err != nil) != tt.wantErr {
t.Errorf("GetPieceTasks() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(gotPiecePacket, tt.wantPiecePacket) {
t.Errorf("GetPieceTasks() gotPiecePacket = %v, want %v", gotPiecePacket, tt.wantPiecePacket)
}
})
}
}
func TestCdnSeedServer_ObtainSeeds(t *testing.T) {
type fields struct {
taskMgr supervisor.SeedTaskMgr
cfg *config.Config
}
type args struct {
ctx context.Context
req *cdnsystem.SeedRequest
psc chan<- *cdnsystem.PieceSeed
}
tests := []struct {
name string
fields fields
args args
wantErr bool
}{
// TODO: Add test cases.
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
css := &server{
taskMgr: tt.fields.taskMgr,
cfg: tt.fields.cfg,
}
if err := css.ObtainSeeds(tt.args.ctx, tt.args.req, tt.args.psc); (err != nil) != tt.wantErr {
t.Errorf("ObtainSeeds() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func TestNewCdnSeedServer(t *testing.T) {
type args struct {
cfg *config.Config
taskMgr supervisor.SeedTaskMgr
}
tests := []struct {
name string
args args
want *server
wantErr bool
}{
// TODO: Add test cases.
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := New(tt.args.cfg, tt.args.taskMgr)
if (err != nil) != tt.wantErr {
t.Errorf("NewCdnSeedServer() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("NewCdnSeedServer() got = %v, want %v", got, tt.want)
}
})
}
}

View File

@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
//go:generate mockgen -destination ./mock_driver.go -package storedriver d7y.io/dragonfly/v2/cdn/storedriver Driver //go:generate mockgen -destination ./mock_driver.go -package storedriver d7y.io/dragonfly/v2/cdn/storedriver Driver
package storedriver package storedriver
@ -21,16 +22,53 @@ import (
"fmt" "fmt"
"io" "io"
"path/filepath" "path/filepath"
"strings"
"time" "time"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors" "github.com/pkg/errors"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors"
"d7y.io/dragonfly/v2/cdn/plugins" "d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/pkg/unit" "d7y.io/dragonfly/v2/pkg/unit"
"d7y.io/dragonfly/v2/pkg/util/fileutils"
"d7y.io/dragonfly/v2/pkg/util/stringutils" "d7y.io/dragonfly/v2/pkg/util/stringutils"
) )
// DriverBuilder is a function that creates a new storage driver plugin instant with the giving Config.
type DriverBuilder func(cfg *Config) (Driver, error)
// Register defines an interface to register a driver with specified name.
// All drivers should call this function to register itself to the driverFactory.
func Register(name string, builder DriverBuilder) error {
name = strings.ToLower(name)
// plugin builder
var f = func(conf interface{}) (plugins.Plugin, error) {
cfg := &Config{}
if err := mapstructure.Decode(conf, cfg); err != nil {
return nil, fmt.Errorf("parse config: %v", err)
}
// prepare the base dir
if !filepath.IsAbs(cfg.BaseDir) {
return nil, fmt.Errorf("not absolute path: %s", cfg.BaseDir)
}
if err := fileutils.MkdirAll(cfg.BaseDir); err != nil {
return nil, fmt.Errorf("create baseDir %s: %v", cfg.BaseDir, err)
}
return newDriverPlugin(name, builder, cfg)
}
return plugins.RegisterPluginBuilder(plugins.StorageDriverPlugin, name, f)
}
// Get a store from manager with specified name.
func Get(name string) (Driver, bool) {
v, ok := plugins.GetPlugin(plugins.StorageDriverPlugin, strings.ToLower(name))
if !ok {
return nil, false
}
return v.(*driverPlugin).instance, true
}
// Driver defines an interface to manage the data stored in the driver. // Driver defines an interface to manage the data stored in the driver.
// //
// NOTE: // NOTE:
@ -44,7 +82,7 @@ type Driver interface {
// Otherwise, just return the data which starts from raw.offset and the length is raw.length. // Otherwise, just return the data which starts from raw.offset and the length is raw.length.
Get(raw *Raw) (io.ReadCloser, error) Get(raw *Raw) (io.ReadCloser, error)
// Get data from the storage based on raw information. // GetBytes data from the storage based on raw information.
// The data should be returned in bytes. // The data should be returned in bytes.
// If the length<=0, the storage driver should return all data from the raw.offset. // If the length<=0, the storage driver should return all data from the raw.offset.
// Otherwise, just return the data which starts from raw.offset and the length is raw.length. // Otherwise, just return the data which starts from raw.offset and the length is raw.length.
@ -68,33 +106,33 @@ type Driver interface {
// If not, return the ErrFileNotExist. // If not, return the ErrFileNotExist.
Stat(raw *Raw) (*StorageInfo, error) Stat(raw *Raw) (*StorageInfo, error)
// GetFreeSpace returns the available disk space in B. // GetFreeSpace returns the free disk space in B.
GetFreeSpace() (unit.Bytes, error) GetFreeSpace() (unit.Bytes, error)
// GetTotalAndFreeSpace // GetTotalAndFreeSpace returns the total and free disk space in B.
GetTotalAndFreeSpace() (unit.Bytes, unit.Bytes, error) GetTotalAndFreeSpace() (unit.Bytes, unit.Bytes, error)
// GetTotalSpace // GetTotalSpace returns the total disk space in B.
GetTotalSpace() (unit.Bytes, error) GetTotalSpace() (unit.Bytes, error)
// Walk walks the file tree rooted at root which determined by raw.Bucket and raw.Key, // Walk walks the file tree rooted at root which determined by raw.Bucket and raw.Key,
// calling walkFn for each file or directory in the tree, including root. // calling walkFn for each file or directory in the tree, including root.
Walk(raw *Raw) error Walk(raw *Raw) error
// CreateBaseDir // CreateBaseDir create base dir
CreateBaseDir() error CreateBaseDir() error
// GetPath // GetPath get path of raw
GetPath(raw *Raw) string GetPath(raw *Raw) string
// MoveFile // MoveFile rename src to dst
MoveFile(src string, dst string) error MoveFile(src string, dst string) error
// Exits // Exits check if raw exists
Exits(raw *Raw) bool Exits(raw *Raw) bool
// GetHomePath // GetBaseDir returns base dir
GetHomePath() string GetBaseDir() string
} }
type Config struct { type Config struct {
@ -163,12 +201,10 @@ func (s *driverPlugin) Name() string {
return s.name return s.name
} }
// GetTotalSpace
func (s *driverPlugin) GetTotalSpace() (unit.Bytes, error) { func (s *driverPlugin) GetTotalSpace() (unit.Bytes, error) {
return s.instance.GetTotalSpace() return s.instance.GetTotalSpace()
} }
// CreateBaseDir
func (s *driverPlugin) CreateBaseDir() error { func (s *driverPlugin) CreateBaseDir() error {
return s.instance.CreateBaseDir() return s.instance.CreateBaseDir()
} }
@ -225,7 +261,7 @@ func (s *driverPlugin) PutBytes(raw *Raw, data []byte) error {
func (s *driverPlugin) Remove(raw *Raw) error { func (s *driverPlugin) Remove(raw *Raw) error {
if raw == nil || (stringutils.IsBlank(raw.Key) && if raw == nil || (stringutils.IsBlank(raw.Key) &&
stringutils.IsBlank(raw.Bucket)) { stringutils.IsBlank(raw.Bucket)) {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "cannot set both key and bucket empty at the same time") return errors.New("both key and bucket are empty")
} }
return s.instance.Remove(raw) return s.instance.Remove(raw)
} }
@ -259,13 +295,13 @@ func (s *driverPlugin) GetFreeSpace() (unit.Bytes, error) {
return s.instance.GetFreeSpace() return s.instance.GetFreeSpace()
} }
func (s *driverPlugin) GetHomePath() string { func (s *driverPlugin) GetBaseDir() string {
return s.instance.GetHomePath() return s.instance.GetBaseDir()
} }
func checkEmptyKey(raw *Raw) error { func checkEmptyKey(raw *Raw) error {
if raw == nil || stringutils.IsBlank(raw.Key) { if raw == nil || stringutils.IsBlank(raw.Key) {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "raw key is empty") return errors.New("raw key is empty")
} }
return nil return nil
} }

View File

@ -22,7 +22,6 @@ import (
"os" "os"
"path/filepath" "path/filepath"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors"
"d7y.io/dragonfly/v2/cdn/storedriver" "d7y.io/dragonfly/v2/cdn/storedriver"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/synclock" "d7y.io/dragonfly/v2/pkg/synclock"
@ -71,7 +70,7 @@ func (ds *driver) GetTotalSpace() (unit.Bytes, error) {
return fileutils.GetTotalSpace(path) return fileutils.GetTotalSpace(path)
} }
func (ds *driver) GetHomePath() string { func (ds *driver) GetBaseDir() string {
return ds.BaseDir return ds.BaseDir
} }
@ -370,9 +369,6 @@ func (ds *driver) statPath(bucket, key string) (string, os.FileInfo, error) {
filePath := filepath.Join(ds.BaseDir, bucket, key) filePath := filepath.Join(ds.BaseDir, bucket, key)
f, err := os.Stat(filePath) f, err := os.Stat(filePath)
if err != nil { if err != nil {
if os.IsNotExist(err) {
return "", nil, cdnerrors.ErrFileNotExist{File: "filePath"}
}
return "", nil, err return "", nil, err
} }
return filePath, f, nil return filePath, f, nil

View File

@ -27,7 +27,6 @@ import (
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
"d7y.io/dragonfly/v2/cdn/errors"
"d7y.io/dragonfly/v2/cdn/plugins" "d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/cdn/storedriver" "d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/pkg/unit" "d7y.io/dragonfly/v2/pkg/unit"
@ -70,13 +69,13 @@ func (s *LocalDriverTestSuite) TearDownSuite() {
func (s *LocalDriverTestSuite) TestGetPutBytes() { func (s *LocalDriverTestSuite) TestGetPutBytes() {
var cases = []struct { var cases = []struct {
name string name string
putRaw *storedriver.Raw putRaw *storedriver.Raw
getRaw *storedriver.Raw getRaw *storedriver.Raw
data []byte data []byte
getErrCheck func(error) bool wantGetErr bool
putErrCheck func(error) bool wantPutErr bool
expected string expected string
}{ }{
{ {
name: "get put full", name: "get put full",
@ -88,10 +87,10 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
Bucket: "GetPut", Bucket: "GetPut",
Key: "foo1", Key: "foo1",
}, },
data: []byte("hello foo"), data: []byte("hello foo"),
putErrCheck: isNil, wantPutErr: false,
getErrCheck: isNil, wantGetErr: false,
expected: "hello foo", expected: "hello foo",
}, { }, {
name: "get specific length", name: "get specific length",
putRaw: &storedriver.Raw{ putRaw: &storedriver.Raw{
@ -104,10 +103,10 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
Offset: 0, Offset: 0,
Length: 5, Length: 5,
}, },
putErrCheck: isNil, wantPutErr: false,
getErrCheck: isNil, wantGetErr: false,
data: []byte("hello foo"), data: []byte("hello foo"),
expected: "hello", expected: "hello",
}, { }, {
name: "get full length", name: "get full length",
putRaw: &storedriver.Raw{ putRaw: &storedriver.Raw{
@ -120,10 +119,10 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
Offset: 0, Offset: 0,
Length: 0, Length: 0,
}, },
putErrCheck: isNil, wantPutErr: false,
getErrCheck: isNil, wantGetErr: false,
data: []byte("hello foo"), data: []byte("hello foo"),
expected: "hello foo", expected: "hello foo",
}, { }, {
name: "get invalid length", name: "get invalid length",
putRaw: &storedriver.Raw{ putRaw: &storedriver.Raw{
@ -136,10 +135,10 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
Offset: 0, Offset: 0,
Length: -1, Length: -1,
}, },
putErrCheck: isNil, wantPutErr: false,
getErrCheck: errors.IsInvalidValue, wantGetErr: true,
data: []byte("hello foo"), data: []byte("hello foo"),
expected: "", expected: "",
}, { }, {
name: "put specific length", name: "put specific length",
putRaw: &storedriver.Raw{ putRaw: &storedriver.Raw{
@ -151,10 +150,10 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
Bucket: "GetPut", Bucket: "GetPut",
Key: "foo5", Key: "foo5",
}, },
putErrCheck: isNil, wantPutErr: false,
getErrCheck: isNil, wantGetErr: false,
data: []byte("hello foo"), data: []byte("hello foo"),
expected: "hello", expected: "hello",
}, { }, {
name: "get invalid offset", name: "get invalid offset",
putRaw: &storedriver.Raw{ putRaw: &storedriver.Raw{
@ -166,10 +165,10 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
Key: "foo6", Key: "foo6",
Offset: -1, Offset: -1,
}, },
data: []byte("hello foo"), data: []byte("hello foo"),
putErrCheck: isNil, wantPutErr: false,
getErrCheck: errors.IsInvalidValue, wantGetErr: true,
expected: "", expected: "",
}, { }, {
name: "put/get data from specific offset", name: "put/get data from specific offset",
putRaw: &storedriver.Raw{ putRaw: &storedriver.Raw{
@ -182,10 +181,10 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
Key: "foo7", Key: "foo7",
Offset: 3, Offset: 3,
}, },
data: []byte("hello foo"), data: []byte("hello foo"),
putErrCheck: isNil, wantPutErr: false,
getErrCheck: isNil, wantGetErr: false,
expected: "hello foo", expected: "hello foo",
}, },
} }
@ -193,10 +192,10 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
s.Run(v.name, func() { s.Run(v.name, func() {
// put // put
err := s.PutBytes(v.putRaw, v.data) err := s.PutBytes(v.putRaw, v.data)
s.True(v.putErrCheck(err)) s.True(v.wantPutErr == (err != nil))
// get // get
result, err := s.GetBytes(v.getRaw) result, err := s.GetBytes(v.getRaw)
s.True(v.getErrCheck(err)) s.True(v.wantGetErr == (err != nil))
s.Equal(v.expected, string(result)) s.Equal(v.expected, string(result))
// stat // stat
s.checkStat(v.getRaw) s.checkStat(v.getRaw)
@ -208,12 +207,12 @@ func (s *LocalDriverTestSuite) TestGetPutBytes() {
func (s *LocalDriverTestSuite) TestGetPut() { func (s *LocalDriverTestSuite) TestGetPut() {
var cases = []struct { var cases = []struct {
name string name string
putRaw *storedriver.Raw putRaw *storedriver.Raw
getRaw *storedriver.Raw getRaw *storedriver.Raw
data io.Reader data io.Reader
getErrCheck func(error) bool wantGetErr bool
expected string expected string
}{ }{
{ {
putRaw: &storedriver.Raw{ putRaw: &storedriver.Raw{
@ -223,9 +222,9 @@ func (s *LocalDriverTestSuite) TestGetPut() {
getRaw: &storedriver.Raw{ getRaw: &storedriver.Raw{
Key: "foo0.meta", Key: "foo0.meta",
}, },
data: strings.NewReader("hello meta file"), data: strings.NewReader("hello meta file"),
getErrCheck: isNil, wantGetErr: false,
expected: "hello meta file", expected: "hello meta file",
}, { }, {
putRaw: &storedriver.Raw{ putRaw: &storedriver.Raw{
Key: "foo1.meta", Key: "foo1.meta",
@ -233,9 +232,9 @@ func (s *LocalDriverTestSuite) TestGetPut() {
getRaw: &storedriver.Raw{ getRaw: &storedriver.Raw{
Key: "foo1.meta", Key: "foo1.meta",
}, },
data: strings.NewReader("hello meta file"), data: strings.NewReader("hello meta file"),
getErrCheck: isNil, wantGetErr: false,
expected: "hello meta file", expected: "hello meta file",
}, { }, {
putRaw: &storedriver.Raw{ putRaw: &storedriver.Raw{
Key: "foo2.meta", Key: "foo2.meta",
@ -244,9 +243,9 @@ func (s *LocalDriverTestSuite) TestGetPut() {
getRaw: &storedriver.Raw{ getRaw: &storedriver.Raw{
Key: "foo2.meta", Key: "foo2.meta",
}, },
data: strings.NewReader("hello meta file"), data: strings.NewReader("hello meta file"),
getErrCheck: isNil, wantGetErr: false,
expected: "hello ", expected: "hello ",
}, { }, {
putRaw: &storedriver.Raw{ putRaw: &storedriver.Raw{
Key: "foo3.meta", Key: "foo3.meta",
@ -256,9 +255,9 @@ func (s *LocalDriverTestSuite) TestGetPut() {
Offset: 2, Offset: 2,
Length: 5, Length: 5,
}, },
data: strings.NewReader("hello meta file"), data: strings.NewReader("hello meta file"),
getErrCheck: isNil, wantGetErr: false,
expected: "llo m", expected: "llo m",
}, { }, {
putRaw: &storedriver.Raw{ putRaw: &storedriver.Raw{
Key: "foo4.meta", Key: "foo4.meta",
@ -268,9 +267,9 @@ func (s *LocalDriverTestSuite) TestGetPut() {
Offset: 2, Offset: 2,
Length: -1, Length: -1,
}, },
getErrCheck: errors.IsInvalidValue, wantGetErr: true,
data: strings.NewReader("hello meta file"), data: strings.NewReader("hello meta file"),
expected: "", expected: "",
}, { }, {
putRaw: &storedriver.Raw{ putRaw: &storedriver.Raw{
Key: "foo5.meta", Key: "foo5.meta",
@ -280,9 +279,9 @@ func (s *LocalDriverTestSuite) TestGetPut() {
Offset: 30, Offset: 30,
Length: 5, Length: 5,
}, },
getErrCheck: errors.IsInvalidValue, wantGetErr: true,
data: strings.NewReader("hello meta file"), data: strings.NewReader("hello meta file"),
expected: "", expected: "",
}, },
} }
@ -293,7 +292,7 @@ func (s *LocalDriverTestSuite) TestGetPut() {
s.Nil(err) s.Nil(err)
// get // get
r, err := s.Get(v.getRaw) r, err := s.Get(v.getRaw)
s.True(v.getErrCheck(err)) s.True(v.wantGetErr == (err != nil))
if err == nil { if err == nil {
result, err := io.ReadAll(r) result, err := io.ReadAll(r)
s.Nil(err) s.Nil(err)
@ -507,8 +506,8 @@ func (s *LocalDriverTestSuite) TestLocalDriverExitsAndRemove() {
s.False(s.Exits(raw)) s.False(s.Exits(raw))
} }
func (s *LocalDriverTestSuite) TestLocalDriverGetHomePath() { func (s *LocalDriverTestSuite) TestLocalDriverGetBaseDir() {
s.Equal(filepath.Join(s.workHome, "repo"), s.GetHomePath()) s.Equal(filepath.Join(s.workHome, "repo"), s.GetBaseDir())
} }
func (s *LocalDriverTestSuite) TestLocalDriverGetPath() { func (s *LocalDriverTestSuite) TestLocalDriverGetPath() {
@ -522,7 +521,7 @@ func (s *LocalDriverTestSuite) TestLocalDriverGetPath() {
func (s *LocalDriverTestSuite) TestLocalDriverGetTotalAndFreeSpace() { func (s *LocalDriverTestSuite) TestLocalDriverGetTotalAndFreeSpace() {
fs := syscall.Statfs_t{} fs := syscall.Statfs_t{}
s.Nil(syscall.Statfs(s.GetHomePath(), &fs)) s.Nil(syscall.Statfs(s.GetBaseDir(), &fs))
total := unit.Bytes(fs.Blocks * uint64(fs.Bsize)) total := unit.Bytes(fs.Blocks * uint64(fs.Bsize))
free := unit.Bytes(fs.Bavail * uint64(fs.Bsize)) free := unit.Bytes(fs.Bavail * uint64(fs.Bsize))
got, got1, err := s.GetTotalAndFreeSpace() got, got1, err := s.GetTotalAndFreeSpace()
@ -559,7 +558,7 @@ func (s *LocalDriverTestSuite) checkStat(raw *storedriver.Raw) {
info, err := s.Stat(raw) info, err := s.Stat(raw)
s.Equal(isNil(err), true) s.Equal(isNil(err), true)
pathTemp := filepath.Join(s.Driver.GetHomePath(), raw.Bucket, raw.Key) pathTemp := filepath.Join(s.Driver.GetBaseDir(), raw.Bucket, raw.Key)
f, _ := os.Stat(pathTemp) f, _ := os.Stat(pathTemp)
s.EqualValues(info, &storedriver.StorageInfo{ s.EqualValues(info, &storedriver.StorageInfo{
@ -575,7 +574,7 @@ func (s *LocalDriverTestSuite) checkRemove(raw *storedriver.Raw) {
s.Equal(isNil(err), true) s.Equal(isNil(err), true)
_, err = s.Stat(raw) _, err = s.Stat(raw)
s.Equal(errors.IsFileNotExist(err), true) s.Equal(os.IsNotExist(err), true)
} }
func isNil(err error) bool { func isNil(err error) bool {

View File

@ -78,6 +78,20 @@ func (mr *MockDriverMockRecorder) Get(arg0 interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDriver)(nil).Get), arg0) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDriver)(nil).Get), arg0)
} }
// GetBaseDir mocks base method.
func (m *MockDriver) GetBaseDir() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetBaseDir")
ret0, _ := ret[0].(string)
return ret0
}
// GetBaseDir indicates an expected call of GetBaseDir.
func (mr *MockDriverMockRecorder) GetBaseDir() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBaseDir", reflect.TypeOf((*MockDriver)(nil).GetBaseDir))
}
// GetBytes mocks base method. // GetBytes mocks base method.
func (m *MockDriver) GetBytes(arg0 *Raw) ([]byte, error) { func (m *MockDriver) GetBytes(arg0 *Raw) ([]byte, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
@ -108,20 +122,6 @@ func (mr *MockDriverMockRecorder) GetFreeSpace() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFreeSpace", reflect.TypeOf((*MockDriver)(nil).GetFreeSpace)) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFreeSpace", reflect.TypeOf((*MockDriver)(nil).GetFreeSpace))
} }
// GetHomePath mocks base method.
func (m *MockDriver) GetHomePath() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetHomePath")
ret0, _ := ret[0].(string)
return ret0
}
// GetHomePath indicates an expected call of GetHomePath.
func (mr *MockDriverMockRecorder) GetHomePath() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHomePath", reflect.TypeOf((*MockDriver)(nil).GetHomePath))
}
// GetPath mocks base method. // GetPath mocks base method.
func (m *MockDriver) GetPath(arg0 *Raw) string { func (m *MockDriver) GetPath(arg0 *Raw) string {
m.ctrl.T.Helper() m.ctrl.T.Helper()

View File

@ -1,63 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package storedriver
import (
"fmt"
"path/filepath"
"strings"
"github.com/mitchellh/mapstructure"
"d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/pkg/util/fileutils"
)
// DriverBuilder is a function that creates a new storage driver plugin instant with the giving Config.
type DriverBuilder func(cfg *Config) (Driver, error)
// Register defines an interface to register a driver with specified name.
// All drivers should call this function to register itself to the driverFactory.
func Register(name string, builder DriverBuilder) error {
name = strings.ToLower(name)
// plugin builder
var f = func(conf interface{}) (plugins.Plugin, error) {
cfg := &Config{}
if err := mapstructure.Decode(conf, cfg); err != nil {
return nil, fmt.Errorf("parse config: %v", err)
}
// prepare the base dir
if !filepath.IsAbs(cfg.BaseDir) {
return nil, fmt.Errorf("not absolute path: %s", cfg.BaseDir)
}
if err := fileutils.MkdirAll(cfg.BaseDir); err != nil {
return nil, fmt.Errorf("create baseDir %s: %v", cfg.BaseDir, err)
}
return newDriverPlugin(name, builder, cfg)
}
return plugins.RegisterPluginBuilder(plugins.StorageDriverPlugin, name, f)
}
// Get a store from manager with specified name.
func Get(name string) (Driver, bool) {
v, ok := plugins.GetPlugin(plugins.StorageDriverPlugin, strings.ToLower(name))
if !ok {
return nil, false
}
return v.(*driverPlugin).instance, true
}

View File

@ -93,7 +93,7 @@ func (m mockDriver) Exits(_ *Raw) bool {
panic("implement me") panic("implement me")
} }
func (m mockDriver) GetHomePath() string { func (m mockDriver) GetBaseDir() string {
panic("implement me") panic("implement me")
} }

View File

@ -18,25 +18,23 @@ package storedriver
import ( import (
"github.com/pkg/errors" "github.com/pkg/errors"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors"
) )
// CheckGetRaw check before get Raw // CheckGetRaw check before get Raw
func CheckGetRaw(raw *Raw, fileLength int64) error { func CheckGetRaw(raw *Raw, fileLength int64) error {
// if raw.Length < 0 ,read All data // if raw.Length < 0 ,read All data
if raw.Offset < 0 { if raw.Offset < 0 {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "the offset: %d is a negative integer", raw.Offset) return errors.Errorf("the offset: %d is a negative integer", raw.Offset)
} }
if raw.Length < 0 { if raw.Length < 0 {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "the length: %d is a negative integer", raw.Length) return errors.Errorf("the length: %d is a negative integer", raw.Length)
} }
if fileLength < raw.Offset { if fileLength < raw.Offset {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "the offset: %d is lager than the file length: %d", raw.Offset, fileLength) return errors.Errorf("the offset: %d is lager than the file length: %d", raw.Offset, fileLength)
} }
if fileLength < (raw.Offset + raw.Length) { if fileLength < (raw.Offset + raw.Length) {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "the offset: %d and length: %d is lager than the file length: %d", raw.Offset, raw.Length, fileLength) return errors.Errorf("the offset: %d and length: %d is lager than the file length: %d", raw.Offset, raw.Length, fileLength)
} }
return nil return nil
} }
@ -44,10 +42,10 @@ func CheckGetRaw(raw *Raw, fileLength int64) error {
// CheckPutRaw check before put Raw // CheckPutRaw check before put Raw
func CheckPutRaw(raw *Raw) error { func CheckPutRaw(raw *Raw) error {
if raw.Offset < 0 { if raw.Offset < 0 {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "the offset: %d is a negative integer", raw.Offset) return errors.Errorf("the offset: %d is a negative integer", raw.Offset)
} }
if raw.Length < 0 { if raw.Length < 0 {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "the length: %d is a negative integer", raw.Length) return errors.Errorf("the length: %d is a negative integer", raw.Length)
} }
return nil return nil
} }
@ -55,7 +53,7 @@ func CheckPutRaw(raw *Raw) error {
// CheckTrunc check before trunc file content // CheckTrunc check before trunc file content
func CheckTrunc(raw *Raw) error { func CheckTrunc(raw *Raw) error {
if raw.Trunc && raw.TruncSize < 0 { if raw.Trunc && raw.TruncSize < 0 {
return errors.Wrapf(cdnerrors.ErrInvalidValue, "the truncSize: %d is a negative integer", raw.Length) return errors.Errorf("the truncSize: %d is a negative integer", raw.Length)
} }
return nil return nil
} }

View File

@ -27,81 +27,68 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
"d7y.io/dragonfly/v2/cdn/config" "d7y.io/dragonfly/v2/cdn/constants"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage" "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/types" "d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/util/digestutils" "d7y.io/dragonfly/v2/pkg/util/digestutils"
"d7y.io/dragonfly/v2/pkg/util/stringutils"
) )
// cacheDetector detect task cache // cacheDetector detect task cache
type cacheDetector struct { type cacheDetector struct {
cacheDataManager *cacheDataManager metadataManager *metadataManager
storageManager storage.Manager
} }
// cacheResult cache result of detect // cacheResult cache result of detect
type cacheResult struct { type cacheResult struct {
breakPoint int64 // break-point of task file BreakPoint int64 `json:"break_point"` // break-point of task file
pieceMetaRecords []*storage.PieceMetaRecord // piece meta data records of task PieceMetaRecords []*storage.PieceMetaRecord `json:"piece_meta_records"` // piece metadata records of task
fileMetadata *storage.FileMetadata // file meta data of task FileMetadata *storage.FileMetadata `json:"file_metadata"` // file meta data of task
}
func (s *cacheResult) String() string {
return fmt.Sprintf("{breakNum: %d, pieceMetaRecords: %#v, fileMetadata: %#v}", s.breakPoint, s.pieceMetaRecords, s.fileMetadata)
} }
// newCacheDetector create a new cache detector // newCacheDetector create a new cache detector
func newCacheDetector(cacheDataManager *cacheDataManager) *cacheDetector { func newCacheDetector(metadataManager *metadataManager, storageManager storage.Manager) *cacheDetector {
return &cacheDetector{ return &cacheDetector{
cacheDataManager: cacheDataManager, metadataManager: metadataManager,
storageManager: storageManager,
} }
} }
func (cd *cacheDetector) detectCache(ctx context.Context, task *types.SeedTask, fileDigest hash.Hash) (result *cacheResult, err error) { func (cd *cacheDetector) detectCache(ctx context.Context, seedTask *task.SeedTask, fileDigest hash.Hash) (result *cacheResult, err error) {
//err := cd.cacheStore.CreateUploadLink(ctx, task.TaskId)
//if err != nil {
// return nil, errors.Wrapf(err, "failed to create upload symbolic link")
//}
var span trace.Span var span trace.Span
ctx, span = tracer.Start(ctx, config.SpanDetectCache) ctx, span = tracer.Start(ctx, constants.SpanDetectCache)
defer span.End() defer span.End()
defer func() { result, err = cd.doDetect(ctx, seedTask, fileDigest)
span.SetAttributes(config.AttributeDetectCacheResult.String(result.String()))
}()
result, err = cd.doDetect(ctx, task, fileDigest)
if err != nil { if err != nil {
task.Log().Infof("failed to detect cache, reset cache: %v", err) metadata, err := cd.resetCache(seedTask)
metadata, err := cd.resetCache(task) if err != nil {
if err == nil { return nil, errors.Wrapf(err, "reset cache")
result = &cacheResult{
fileMetadata: metadata,
}
return result, nil
} }
return result, err return &cacheResult{
FileMetadata: metadata,
}, nil
} }
if err := cd.cacheDataManager.updateAccessTime(task.TaskID, getCurrentTimeMillisFunc()); err != nil { if err := cd.metadataManager.updateAccessTime(seedTask.ID, getCurrentTimeMillisFunc()); err != nil {
task.Log().Warnf("failed to update task access time ") seedTask.Log().Warnf("failed to update task access time ")
} }
return result, nil return result, nil
} }
// doDetect the actual detect action which detects file metadata and pieces metadata of specific task // doDetect do the actual detect action which detects file metadata and pieces metadata of specific task
func (cd *cacheDetector) doDetect(ctx context.Context, task *types.SeedTask, fileDigest hash.Hash) (result *cacheResult, err error) { func (cd *cacheDetector) doDetect(ctx context.Context, seedTask *task.SeedTask, fileDigest hash.Hash) (*cacheResult, error) {
span := trace.SpanFromContext(ctx) if _, err := cd.storageManager.StatDownloadFile(seedTask.ID); err != nil {
fileMetadata, err := cd.cacheDataManager.readFileMetadata(task.TaskID) return nil, err
}
fileMetadata, err := cd.metadataManager.readFileMetadata(seedTask.ID)
if err != nil { if err != nil {
span.RecordError(err) return nil, errors.Wrapf(err, "read file metadata")
return nil, errors.Wrapf(err, "read file meta data of task %s", task.TaskID)
} }
span.SetAttributes() if ok, cause := checkMetadata(seedTask, fileMetadata); !ok {
if err := checkSameFile(task, fileMetadata); err != nil { return nil, errors.Errorf("fileMetadata is inconsistent with task: %s", cause)
return nil, errors.Wrapf(err, "check same file")
} }
checkExpiredRequest, err := source.NewRequestWithContext(ctx, task.URL, task.Header) checkExpiredRequest, err := source.NewRequestWithContext(ctx, seedTask.RawURL, seedTask.Header)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "create request") return nil, errors.Wrapf(err, "create request")
} }
@ -111,21 +98,21 @@ func (cd *cacheDetector) doDetect(ctx context.Context, task *types.SeedTask, fil
}) })
if err != nil { if err != nil {
// If the check fails, the resource is regarded as not expired to prevent the source from being knocked down // If the check fails, the resource is regarded as not expired to prevent the source from being knocked down
task.Log().Warnf("failed to check whether the source is expired. To prevent the source from being suspended, "+ seedTask.Log().Warnf("failed to check whether the source is expired. To prevent the source from being suspended, "+
"assume that the source is not expired: %v", err) "assume that the source is not expired: %v", err)
} }
task.Log().Debugf("task resource expired result: %t", expired) seedTask.Log().Debugf("task resource expired result: %t", expired)
if expired { if expired {
return nil, errors.Errorf("resource %s has expired", task.TaskURL) return nil, errors.Errorf("resource %s has expired", seedTask.TaskURL)
} }
// not expired // not expired
if fileMetadata.Finish { if fileMetadata.Finish {
// quickly detect the cache situation through the metadata // quickly detect the cache situation through the metadata
return cd.detectByReadMetaFile(task.TaskID, fileMetadata) return cd.detectByReadMetaFile(seedTask.ID, fileMetadata)
} }
// check if the resource supports range request. if so, // check if the resource supports range request. if so,
// detect the cache situation by reading piece meta and data file // detect the cache situation by reading piece meta and data file
checkSupportRangeRequest, err := source.NewRequestWithContext(ctx, task.URL, task.Header) checkSupportRangeRequest, err := source.NewRequestWithContext(ctx, seedTask.RawURL, seedTask.Header)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "create check support range request") return nil, errors.Wrapf(err, "create check support range request")
} }
@ -135,61 +122,58 @@ func (cd *cacheDetector) doDetect(ctx context.Context, task *types.SeedTask, fil
return nil, errors.Wrap(err, "check if support range") return nil, errors.Wrap(err, "check if support range")
} }
if !supportRange { if !supportRange {
return nil, errors.Errorf("resource %s is not support range request", task.URL) return nil, errors.Errorf("resource %s is not support range request", seedTask.TaskURL)
} }
return cd.detectByReadFile(task.TaskID, fileMetadata, fileDigest) return cd.detectByReadFile(seedTask.ID, fileMetadata, fileDigest)
} }
// parseByReadMetaFile detect cache by read meta and pieceMeta files of task // detectByReadMetaFile detect cache by read metadata and pieceMeta files of specific task
func (cd *cacheDetector) detectByReadMetaFile(taskID string, fileMetadata *storage.FileMetadata) (*cacheResult, error) { func (cd *cacheDetector) detectByReadMetaFile(taskID string, fileMetadata *storage.FileMetadata) (*cacheResult, error) {
if !fileMetadata.Success { if !fileMetadata.Success {
return nil, fmt.Errorf("success flag of taskID %s is false", taskID) return nil, errors.New("metadata success flag is false")
} }
pieceMetaRecords, err := cd.cacheDataManager.readAndCheckPieceMetaRecords(taskID, fileMetadata.PieceMd5Sign) md5Sign, pieceMetaRecords, err := cd.metadataManager.getPieceMd5Sign(taskID)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "check piece meta integrity") return nil, errors.Wrap(err, "get pieces md5 sign")
} }
if fileMetadata.TotalPieceCount > 0 && len(pieceMetaRecords) != int(fileMetadata.TotalPieceCount) { if fileMetadata.TotalPieceCount > 0 && len(pieceMetaRecords) != int(fileMetadata.TotalPieceCount) {
err := cdnerrors.ErrInconsistentValues{Expected: fileMetadata.TotalPieceCount, Actual: len(pieceMetaRecords)} return nil, errors.Errorf("total piece count is inconsistent, expected is %d, but got %d", fileMetadata.TotalPieceCount, len(pieceMetaRecords))
return nil, errors.Wrapf(err, "compare file piece count")
} }
storageInfo, err := cd.cacheDataManager.statDownloadFile(taskID) if fileMetadata.PieceMd5Sign != "" && md5Sign != fileMetadata.PieceMd5Sign {
return nil, errors.Errorf("piece md5 sign is inconsistent, expected is %s, but got %s", fileMetadata.PieceMd5Sign, md5Sign)
}
storageInfo, err := cd.storageManager.StatDownloadFile(taskID)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "get cdn file length") return nil, errors.Wrap(err, "stat download file info")
} }
// check file data integrity by file size // check file data integrity by file size
if fileMetadata.CdnFileLength != storageInfo.Size { if fileMetadata.CdnFileLength != storageInfo.Size {
err := cdnerrors.ErrInconsistentValues{ return nil, errors.Errorf("file size is inconsistent, expected is %d, but got %d", fileMetadata.CdnFileLength, storageInfo.Size)
Expected: fileMetadata.CdnFileLength,
Actual: storageInfo.Size,
}
return nil, errors.Wrapf(err, "compare file cdn file length")
} }
// TODO For hybrid storage mode, synchronize disk data to memory
return &cacheResult{ return &cacheResult{
breakPoint: -1, BreakPoint: -1,
pieceMetaRecords: pieceMetaRecords, PieceMetaRecords: pieceMetaRecords,
fileMetadata: fileMetadata, FileMetadata: fileMetadata,
}, nil }, nil
} }
// parseByReadFile detect cache by read pieceMeta and data files of task // parseByReadFile detect cache by read pieceMeta and data files of task
func (cd *cacheDetector) detectByReadFile(taskID string, metadata *storage.FileMetadata, fileDigest hash.Hash) (*cacheResult, error) { func (cd *cacheDetector) detectByReadFile(taskID string, metadata *storage.FileMetadata, fileDigest hash.Hash) (*cacheResult, error) {
reader, err := cd.cacheDataManager.readDownloadFile(taskID) reader, err := cd.storageManager.ReadDownloadFile(taskID)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "read download data file") return nil, errors.Wrapf(err, "read download data file")
} }
defer reader.Close() defer reader.Close()
tempRecords, err := cd.cacheDataManager.readPieceMetaRecords(taskID) tempRecords, err := cd.metadataManager.readPieceMetaRecords(taskID)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "read piece meta records") return nil, errors.Wrapf(err, "read piece meta records")
} }
// sort piece meta records by pieceNum // sort piece meta records by pieceNum
sort.Slice(tempRecords, func(i, j int) bool { sort.Slice(tempRecords, func(i, j int) bool {
return tempRecords[i].PieceNum < tempRecords[j].PieceNum return tempRecords[i].PieceNum < tempRecords[j].PieceNum
}) })
var breakPoint int64 = 0
var breakPoint uint64 = 0
pieceMetaRecords := make([]*storage.PieceMetaRecord, 0, len(tempRecords)) pieceMetaRecords := make([]*storage.PieceMetaRecord, 0, len(tempRecords))
for index := range tempRecords { for index := range tempRecords {
if uint32(index) != tempRecords[index].PieceNum { if uint32(index) != tempRecords[index].PieceNum {
@ -197,14 +181,14 @@ func (cd *cacheDetector) detectByReadFile(taskID string, metadata *storage.FileM
} }
// read content TODO concurrent by multi-goroutine // read content TODO concurrent by multi-goroutine
if err := checkPieceContent(reader, tempRecords[index], fileDigest); err != nil { if err := checkPieceContent(reader, tempRecords[index], fileDigest); err != nil {
logger.WithTaskID(taskID).Errorf("read content of pieceNum %d failed: %v", tempRecords[index].PieceNum, err) logger.WithTaskID(taskID).Errorf("check content of pieceNum %d failed: %v", tempRecords[index].PieceNum, err)
break break
} }
breakPoint = tempRecords[index].OriginRange.EndIndex + 1 breakPoint = int64(tempRecords[index].OriginRange.EndIndex + 1)
pieceMetaRecords = append(pieceMetaRecords, tempRecords[index]) pieceMetaRecords = append(pieceMetaRecords, tempRecords[index])
} }
if len(tempRecords) != len(pieceMetaRecords) { if len(tempRecords) != len(pieceMetaRecords) {
if err := cd.cacheDataManager.writePieceMetaRecords(taskID, pieceMetaRecords); err != nil { if err := cd.metadataManager.writePieceMetaRecords(taskID, pieceMetaRecords); err != nil {
return nil, errors.Wrapf(err, "write piece meta records failed") return nil, errors.Wrapf(err, "write piece meta records failed")
} }
} }
@ -217,54 +201,66 @@ func (cd *cacheDetector) detectByReadFile(taskID string, metadata *storage.FileM
// fileMd5: fileMd5, // fileMd5: fileMd5,
// }, nil // }, nil
//} //}
// TODO 整理数据文件 truncate breakpoint之后的数据内容 // TODO 整理数据文件 truncate breakpoint 之后的数据内容
return &cacheResult{ return &cacheResult{
breakPoint: int64(breakPoint), BreakPoint: breakPoint,
pieceMetaRecords: pieceMetaRecords, PieceMetaRecords: pieceMetaRecords,
fileMetadata: metadata, FileMetadata: metadata,
}, nil }, nil
} }
// resetCache // resetCache file
func (cd *cacheDetector) resetCache(task *types.SeedTask) (*storage.FileMetadata, error) { func (cd *cacheDetector) resetCache(seedTask *task.SeedTask) (*storage.FileMetadata, error) {
err := cd.cacheDataManager.resetRepo(task) err := cd.storageManager.ResetRepo(seedTask)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// initialize meta data file // initialize meta data file
return cd.cacheDataManager.writeFileMetadataByTask(task) return cd.metadataManager.writeFileMetadataByTask(seedTask)
} }
/* /*
helper functions helper functions
*/ */
// checkSameFile check whether meta file is modified
func checkSameFile(task *types.SeedTask, metadata *storage.FileMetadata) error { // checkMetadata check whether meta file is modified
if task == nil || metadata == nil { func checkMetadata(seedTask *task.SeedTask, metadata *storage.FileMetadata) (bool, string) {
return errors.Errorf("task or metadata is nil, task: %v, metadata: %v", task, metadata) if seedTask == nil || metadata == nil {
return false, fmt.Sprintf("task or metadata is nil, task: %v, metadata: %v", seedTask, metadata)
} }
if metadata.PieceSize != task.PieceSize { if metadata.TaskID != seedTask.ID {
return errors.Errorf("meta piece size(%d) is not equals with task piece size(%d)", metadata.PieceSize, return false, fmt.Sprintf("metadata TaskID(%s) is not equals with task ID(%s)", metadata.TaskID, seedTask.ID)
task.PieceSize)
} }
if metadata.TaskID != task.TaskID { if metadata.TaskURL != seedTask.TaskURL {
return errors.Errorf("meta task TaskId(%s) is not equals with task TaskId(%s)", metadata.TaskID, task.TaskID) return false, fmt.Sprintf("metadata taskURL(%s) is not equals with task taskURL(%s)", metadata.TaskURL, seedTask.TaskURL)
} }
if metadata.TaskURL != task.TaskURL { if metadata.PieceSize != seedTask.PieceSize {
return errors.Errorf("meta task taskUrl(%s) is not equals with task taskUrl(%s)", metadata.TaskURL, task.URL) return false, fmt.Sprintf("metadata piece size(%d) is not equals with task piece size(%d)", metadata.PieceSize, seedTask.PieceSize)
} }
if !stringutils.IsBlank(metadata.SourceRealDigest) && !stringutils.IsBlank(task.RequestDigest) &&
metadata.SourceRealDigest != task.RequestDigest { if seedTask.Range != metadata.Range {
return errors.Errorf("meta task source digest(%s) is not equals with task request digest(%s)", return false, fmt.Sprintf("metadata range(%s) is not equals with task range(%s)", metadata.Range, seedTask.Range)
metadata.SourceRealDigest, task.RequestDigest)
} }
return nil
if seedTask.Digest != metadata.Digest {
return false, fmt.Sprintf("meta digest(%s) is not equals with task request digest(%s)",
metadata.SourceRealDigest, seedTask.Digest)
}
if seedTask.Tag != metadata.Tag {
return false, fmt.Sprintf("metadata tag(%s) is not equals with task tag(%s)", metadata.Range, seedTask.Range)
}
if seedTask.Filter != metadata.Filter {
return false, fmt.Sprintf("metadata filter(%s) is not equals with task filter(%s)", metadata.Filter, seedTask.Filter)
}
return true, ""
} }
//checkPieceContent read piece content from reader and check data integrity by pieceMetaRecord // checkPieceContent read piece content from reader and check data integrity by pieceMetaRecord
func checkPieceContent(reader io.Reader, pieceRecord *storage.PieceMetaRecord, fileDigest hash.Hash) error { func checkPieceContent(reader io.Reader, pieceRecord *storage.PieceMetaRecord, fileDigest hash.Hash) error {
// TODO Analyze the original data for the slice format to calculate fileMd5 // TODO Analyze the original data for the slice format to calculate fileMd5
pieceMd5 := md5.New() pieceMd5 := md5.New()
@ -275,11 +271,7 @@ func checkPieceContent(reader io.Reader, pieceRecord *storage.PieceMetaRecord, f
realPieceMd5 := digestutils.ToHashString(pieceMd5) realPieceMd5 := digestutils.ToHashString(pieceMd5)
// check piece content // check piece content
if realPieceMd5 != pieceRecord.Md5 { if realPieceMd5 != pieceRecord.Md5 {
err := cdnerrors.ErrInconsistentValues{ return errors.Errorf("piece md5 sign is inconsistent, expected is %s, but got %s", pieceRecord.Md5, realPieceMd5)
Expected: pieceRecord.Md5,
Actual: realPieceMd5,
}
return errors.Wrap(err, "compare piece md5")
} }
return nil return nil
} }

View File

@ -33,7 +33,7 @@ import (
"d7y.io/dragonfly/v2/cdn/storedriver" "d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage" "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
storageMock "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/mock" storageMock "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/mock"
"d7y.io/dragonfly/v2/cdn/types" "d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/source/httpprotocol" "d7y.io/dragonfly/v2/pkg/source/httpprotocol"
sourceMock "d7y.io/dragonfly/v2/pkg/source/mock" sourceMock "d7y.io/dragonfly/v2/pkg/source/mock"
@ -56,8 +56,8 @@ func (suite *CacheDetectorTestSuite) SetupSuite() {
source.UnRegister("http") source.UnRegister("http")
suite.Require().Nil(source.Register("http", sourceClient, httpprotocol.Adapter)) suite.Require().Nil(source.Register("http", sourceClient, httpprotocol.Adapter))
storageManager := storageMock.NewMockManager(ctrl) storageManager := storageMock.NewMockManager(ctrl)
cacheDataManager := newCacheDataManager(storageManager) cacheDataManager := newMetadataManager(storageManager)
suite.detector = newCacheDetector(cacheDataManager) suite.detector = newCacheDetector(cacheDataManager, storageManager)
storageManager.EXPECT().ReadFileMetadata(fullExpiredCache.taskID).Return(fullExpiredCache.fileMeta, nil).AnyTimes() storageManager.EXPECT().ReadFileMetadata(fullExpiredCache.taskID).Return(fullExpiredCache.fileMeta, nil).AnyTimes()
storageManager.EXPECT().ReadFileMetadata(fullNoExpiredCache.taskID).Return(fullNoExpiredCache.fileMeta, nil).AnyTimes() storageManager.EXPECT().ReadFileMetadata(fullNoExpiredCache.taskID).Return(fullNoExpiredCache.fileMeta, nil).AnyTimes()
storageManager.EXPECT().ReadFileMetadata(partialNotSupportRangeCache.taskID).Return(partialNotSupportRangeCache.fileMeta, nil).AnyTimes() storageManager.EXPECT().ReadFileMetadata(partialNotSupportRangeCache.taskID).Return(partialNotSupportRangeCache.fileMeta, nil).AnyTimes()
@ -258,7 +258,7 @@ func newPartialFileMeta(taskID string, URL string) *storage.FileMetadata {
func (suite *CacheDetectorTestSuite) TestDetectCache() { func (suite *CacheDetectorTestSuite) TestDetectCache() {
type args struct { type args struct {
task *types.SeedTask task *task.SeedTask
} }
tests := []struct { tests := []struct {
name string name string
@ -269,9 +269,9 @@ func (suite *CacheDetectorTestSuite) TestDetectCache() {
{ {
name: "no cache", name: "no cache",
args: args{ args: args{
task: &types.SeedTask{ task: &task.SeedTask{
TaskID: noCacheTask, ID: noCacheTask,
URL: noExpiredAndSupportURL, RawURL: noExpiredAndSupportURL,
TaskURL: noExpiredAndSupportURL, TaskURL: noExpiredAndSupportURL,
}, },
}, },
@ -281,27 +281,27 @@ func (suite *CacheDetectorTestSuite) TestDetectCache() {
{ {
name: "partial cache and support range", name: "partial cache and support range",
args: args{ args: args{
task: &types.SeedTask{ task: &task.SeedTask{
TaskID: partialAndSupportCacheTask, ID: partialAndSupportCacheTask,
URL: noExpiredAndSupportURL, RawURL: noExpiredAndSupportURL,
TaskURL: noExpiredAndSupportURL, TaskURL: noExpiredAndSupportURL,
SourceFileLength: 9789, SourceFileLength: 9789,
PieceSize: 2000, PieceSize: 2000,
}, },
}, },
want: &cacheResult{ want: &cacheResult{
breakPoint: 4000, BreakPoint: 4000,
pieceMetaRecords: partialPieceMetaRecords, PieceMetaRecords: partialPieceMetaRecords,
fileMetadata: newPartialFileMeta(partialAndSupportCacheTask, noExpiredAndSupportURL), FileMetadata: newPartialFileMeta(partialAndSupportCacheTask, noExpiredAndSupportURL),
}, },
wantErr: false, wantErr: false,
}, },
{ {
name: "partial cache and not support range", name: "partial cache and not support range",
args: args{ args: args{
task: &types.SeedTask{ task: &task.SeedTask{
TaskID: partialAndNotSupportCacheTask, ID: partialAndNotSupportCacheTask,
URL: noExpiredAndNotSupportURL, RawURL: noExpiredAndNotSupportURL,
TaskURL: noExpiredAndNotSupportURL, TaskURL: noExpiredAndNotSupportURL,
SourceFileLength: 9789, SourceFileLength: 9789,
PieceSize: 2000, PieceSize: 2000,
@ -313,27 +313,27 @@ func (suite *CacheDetectorTestSuite) TestDetectCache() {
{ {
name: "full cache and not expire", name: "full cache and not expire",
args: args{ args: args{
task: &types.SeedTask{ task: &task.SeedTask{
TaskID: fullCacheNotExpiredTask, ID: fullCacheNotExpiredTask,
URL: noExpiredAndNotSupportURL, RawURL: noExpiredAndNotSupportURL,
TaskURL: noExpiredAndNotSupportURL, TaskURL: noExpiredAndNotSupportURL,
SourceFileLength: 9789, SourceFileLength: 9789,
PieceSize: 2000, PieceSize: 2000,
}, },
}, },
want: &cacheResult{ want: &cacheResult{
breakPoint: -1, BreakPoint: -1,
pieceMetaRecords: fullPieceMetaRecords, PieceMetaRecords: fullPieceMetaRecords,
fileMetadata: newCompletedFileMeta(fullCacheNotExpiredTask, noExpiredAndNotSupportURL, true), FileMetadata: newCompletedFileMeta(fullCacheNotExpiredTask, noExpiredAndNotSupportURL, true),
}, },
wantErr: false, wantErr: false,
}, },
{ {
name: "full cache and expired", name: "full cache and expired",
args: args{ args: args{
task: &types.SeedTask{ task: &task.SeedTask{
TaskID: fullCacheExpiredTask, ID: fullCacheExpiredTask,
URL: expiredAndSupportURL, RawURL: expiredAndSupportURL,
TaskURL: expiredAndNotSupportURL, TaskURL: expiredAndNotSupportURL,
}, },
}, },
@ -369,9 +369,9 @@ func (suite *CacheDetectorTestSuite) TestParseByReadFile() {
metadata: partialSupportRangeCache.fileMeta, metadata: partialSupportRangeCache.fileMeta,
}, },
want: &cacheResult{ want: &cacheResult{
breakPoint: 4000, BreakPoint: 4000,
pieceMetaRecords: partialSupportRangeCache.pieces, PieceMetaRecords: partialSupportRangeCache.pieces,
fileMetadata: partialSupportRangeCache.fileMeta, FileMetadata: partialSupportRangeCache.fileMeta,
}, },
wantErr: false, wantErr: false,
}, },
@ -403,9 +403,9 @@ func (suite *CacheDetectorTestSuite) TestParseByReadMetaFile() {
fileMetadata: fullNoExpiredCache.fileMeta, fileMetadata: fullNoExpiredCache.fileMeta,
}, },
want: &cacheResult{ want: &cacheResult{
breakPoint: -1, BreakPoint: -1,
pieceMetaRecords: fullNoExpiredCache.pieces, PieceMetaRecords: fullNoExpiredCache.pieces,
fileMetadata: fullNoExpiredCache.fileMeta, FileMetadata: fullNoExpiredCache.fileMeta,
}, },
wantErr: false, wantErr: false,
}, },

View File

@ -21,24 +21,26 @@ import (
"context" "context"
"crypto/md5" "crypto/md5"
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"sync" "sync"
"github.com/pkg/errors"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
"d7y.io/dragonfly/v2/cdn/config" "d7y.io/dragonfly/v2/cdn/constants"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage" "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/types" "d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog" "d7y.io/dragonfly/v2/pkg/ratelimiter/limitreader"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/util/digestutils" "d7y.io/dragonfly/v2/pkg/util/digestutils"
"d7y.io/dragonfly/v2/pkg/util/rangeutils" "d7y.io/dragonfly/v2/pkg/util/rangeutils"
) )
type piece struct { type piece struct {
taskID string taskID string
pieceNum int32 pieceNum uint32
pieceSize int32 pieceSize uint32
pieceContent *bytes.Buffer pieceContent *bytes.Buffer
} }
@ -46,63 +48,66 @@ type downloadMetadata struct {
backSourceLength int64 // back to source download file length backSourceLength int64 // back to source download file length
realCdnFileLength int64 // the actual length of the stored file realCdnFileLength int64 // the actual length of the stored file
realSourceFileLength int64 // actually read the length of the source realSourceFileLength int64 // actually read the length of the source
pieceTotalCount int32 // piece total count totalPieceCount int32 // total number of pieces
pieceMd5Sign string pieceMd5Sign string
sourceRealDigest string
} }
type cacheWriter struct { type cacheWriter struct {
cdnReporter *reporter cdnReporter *reporter
cacheDataManager *cacheDataManager cacheStore storage.Manager
metadataManager *metadataManager
} }
func newCacheWriter(cdnReporter *reporter, cacheDataManager *cacheDataManager) *cacheWriter { func newCacheWriter(cdnReporter *reporter, metadataManager *metadataManager, cacheStore storage.Manager) *cacheWriter {
return &cacheWriter{ return &cacheWriter{
cdnReporter: cdnReporter, cdnReporter: cdnReporter,
cacheDataManager: cacheDataManager, cacheStore: cacheStore,
metadataManager: metadataManager,
} }
} }
// startWriter writes the stream data from the reader to the underlying storage. // startWriter writes the stream data from the reader to the underlying storage.
func (cw *cacheWriter) startWriter(ctx context.Context, reader io.Reader, task *types.SeedTask, detectResult *cacheResult) (*downloadMetadata, error) { func (cw *cacheWriter) startWriter(ctx context.Context, reader *limitreader.LimitReader, seedTask *task.SeedTask, breakPoint int64) (*downloadMetadata,
error) {
var writeSpan trace.Span var writeSpan trace.Span
ctx, writeSpan = tracer.Start(ctx, config.SpanWriteData) ctx, writeSpan = tracer.Start(ctx, constants.SpanWriteData)
defer writeSpan.End() defer writeSpan.End()
if detectResult == nil {
detectResult = &cacheResult{}
}
// currentSourceFileLength is used to calculate the source file Length dynamically // currentSourceFileLength is used to calculate the source file Length dynamically
currentSourceFileLength := detectResult.breakPoint currentSourceFileLength := breakPoint
// the pieceNum currently have been processed routineCount := calculateRoutineCount(seedTask.SourceFileLength-currentSourceFileLength, seedTask.PieceSize)
curPieceNum := len(detectResult.pieceMetaRecords) writeSpan.SetAttributes(constants.AttributeWriteGoroutineCount.Int(routineCount))
routineCount := calculateRoutineCount(task.SourceFileLength-currentSourceFileLength, task.PieceSize)
writeSpan.SetAttributes(config.AttributeWriteGoroutineCount.Int(routineCount))
// start writer pool // start writer pool
backSourceLength, totalPieceCount, err := cw.doWrite(ctx, reader, task, routineCount, curPieceNum) backSourceLength, totalPieceCount, err := cw.doWrite(ctx, reader, seedTask, routineCount, breakPoint)
if err != nil { if err != nil {
return &downloadMetadata{backSourceLength: backSourceLength}, fmt.Errorf("write data: %v", err) return &downloadMetadata{backSourceLength: backSourceLength}, errors.Wrap(err, "do write data action")
} }
storageInfo, err := cw.cacheDataManager.statDownloadFile(task.TaskID) storageInfo, err := cw.cacheStore.StatDownloadFile(seedTask.ID)
if err != nil { if err != nil {
return &downloadMetadata{backSourceLength: backSourceLength}, fmt.Errorf("stat cdn download file: %v", err) return &downloadMetadata{backSourceLength: backSourceLength}, errors.Wrap(err, "stat cdn download file")
} }
storageInfoBytes, _ := json.Marshal(storageInfo) storageInfoBytes, _ := json.Marshal(storageInfo)
writeSpan.SetAttributes(config.AttributeDownloadFileInfo.String(string(storageInfoBytes))) writeSpan.SetAttributes(constants.AttributeDownloadFileInfo.String(string(storageInfoBytes)))
// TODO Try getting it from the ProgressManager first // TODO Try getting it from the ProgressManager first
pieceMd5Sign, _, err := cw.cacheDataManager.getPieceMd5Sign(task.TaskID) pieceMd5Sign, _, err := cw.metadataManager.getPieceMd5Sign(seedTask.ID)
if err != nil { if err != nil {
return &downloadMetadata{backSourceLength: backSourceLength}, fmt.Errorf("get piece md5 sign: %v", err) return &downloadMetadata{backSourceLength: backSourceLength}, errors.Wrap(err, "get piece md5 sign")
} }
return &downloadMetadata{ return &downloadMetadata{
backSourceLength: backSourceLength, backSourceLength: backSourceLength,
realCdnFileLength: storageInfo.Size, realCdnFileLength: storageInfo.Size,
realSourceFileLength: currentSourceFileLength + backSourceLength, realSourceFileLength: currentSourceFileLength + backSourceLength,
pieceTotalCount: int32(totalPieceCount), totalPieceCount: totalPieceCount,
pieceMd5Sign: pieceMd5Sign, pieceMd5Sign: pieceMd5Sign,
sourceRealDigest: reader.Digest(),
}, nil }, nil
} }
func (cw *cacheWriter) doWrite(ctx context.Context, reader io.Reader, task *types.SeedTask, routineCount int, curPieceNum int) (n int64, totalPiece int, // doWrite do actual write data to storage
func (cw *cacheWriter) doWrite(ctx context.Context, reader io.Reader, seedTask *task.SeedTask, routineCount int, breakPoint int64) (n int64, totalPiece int32,
err error) { err error) {
// the pieceNum currently have been processed
curPieceNum := int32(breakPoint / int64(seedTask.PieceSize))
var bufPool = &sync.Pool{ var bufPool = &sync.Pool{
New: func() interface{} { New: func() interface{} {
return new(bytes.Buffer) return new(bytes.Buffer)
@ -111,98 +116,107 @@ func (cw *cacheWriter) doWrite(ctx context.Context, reader io.Reader, task *type
var backSourceLength int64 var backSourceLength int64
buf := make([]byte, 256*1024) buf := make([]byte, 256*1024)
jobCh := make(chan *piece) jobCh := make(chan *piece)
var wg = &sync.WaitGroup{} var g, writeCtx = errgroup.WithContext(ctx)
cw.writerPool(ctx, wg, routineCount, jobCh, bufPool) cw.writerPool(writeCtx, g, routineCount, jobCh, bufPool)
loop:
for { for {
var bb = bufPool.Get().(*bytes.Buffer) select {
bb.Reset() case <-writeCtx.Done():
limitReader := io.LimitReader(reader, int64(task.PieceSize)) break loop
n, err = io.CopyBuffer(bb, limitReader, buf) default:
if err != nil { var bb = bufPool.Get().(*bytes.Buffer)
close(jobCh) bb.Reset()
return backSourceLength, 0, fmt.Errorf("read source taskID %s pieceNum %d piece: %v", task.TaskID, curPieceNum, err) limitReader := io.LimitReader(reader, int64(seedTask.PieceSize))
} n, err = io.CopyBuffer(bb, limitReader, buf)
if n == 0 { if err != nil {
break close(jobCh)
} return backSourceLength, 0, errors.Errorf("read taskID %s pieceNum %d piece from source failed: %v", seedTask.ID, curPieceNum, err)
backSourceLength += n }
if n == 0 {
break loop
}
backSourceLength += n
jobCh <- &piece{ jobCh <- &piece{
taskID: task.TaskID, taskID: seedTask.ID,
pieceNum: int32(curPieceNum), pieceNum: uint32(curPieceNum),
pieceSize: task.PieceSize, pieceSize: uint32(seedTask.PieceSize),
pieceContent: bb, pieceContent: bb,
} }
curPieceNum++ curPieceNum++
if n < int64(task.PieceSize) { if n < int64(seedTask.PieceSize) {
break break loop
}
} }
} }
close(jobCh) close(jobCh)
wg.Wait() if err := g.Wait(); err != nil {
return backSourceLength, 0, errors.Wrapf(err, "write pool")
}
return backSourceLength, curPieceNum, nil return backSourceLength, curPieceNum, nil
} }
func (cw *cacheWriter) writerPool(ctx context.Context, wg *sync.WaitGroup, routineCount int, pieceCh chan *piece, bufPool *sync.Pool) { func (cw *cacheWriter) writerPool(ctx context.Context, g *errgroup.Group, routineCount int, pieceCh chan *piece, bufPool *sync.Pool) {
wg.Add(routineCount)
for i := 0; i < routineCount; i++ { for i := 0; i < routineCount; i++ {
go func() { g.Go(func() error {
defer wg.Done()
for p := range pieceCh { for p := range pieceCh {
// TODO Subsequent compression and other features are implemented through waitToWriteContent and pieceStyle select {
waitToWriteContent := p.pieceContent case <-ctx.Done():
originPieceLen := waitToWriteContent.Len() // the length of the original data that has not been processed return ctx.Err()
pieceLen := originPieceLen // the real length written to the storage medium after processing default:
pieceStyle := types.PlainUnspecified // TODO Subsequent compression and other features are implemented through waitToWriteContent and pieceStyle
pieceMd5 := md5.New() waitToWriteContent := p.pieceContent
err := cw.cacheDataManager.writeDownloadFile( originPieceLen := waitToWriteContent.Len() // the length of the original data that has not been processed
p.taskID, int64(p.pieceNum)*int64(p.pieceSize), int64(waitToWriteContent.Len()), pieceLen := originPieceLen // the real length written to the storage driver after processed
io.TeeReader(io.LimitReader(p.pieceContent, int64(waitToWriteContent.Len())), pieceMd5)) pieceStyle := int32(base.PieceStyle_PLAIN.Number())
// Recycle Buffer pieceMd5 := md5.New()
bufPool.Put(waitToWriteContent) err := cw.cacheStore.WriteDownloadFile(
if err != nil { p.taskID, int64(p.pieceNum)*int64(p.pieceSize), int64(waitToWriteContent.Len()),
logger.Errorf("write taskID %s pieceNum %d file: %v", p.taskID, p.pieceNum, err) io.TeeReader(io.LimitReader(p.pieceContent, int64(waitToWriteContent.Len())), pieceMd5))
continue if err != nil {
} return errors.Errorf("write taskID %s pieceNum %d to download file failed: %v", p.taskID, p.pieceNum, err)
start := uint64(p.pieceNum) * uint64(p.pieceSize) }
end := start + uint64(pieceLen) - 1 // Recycle Buffer
pieceRecord := &storage.PieceMetaRecord{ bufPool.Put(waitToWriteContent)
PieceNum: uint32(p.pieceNum), start := uint64(p.pieceNum) * uint64(p.pieceSize)
PieceLen: uint32(pieceLen), end := start + uint64(pieceLen) - 1
Md5: digestutils.ToHashString(pieceMd5), pieceRecord := &storage.PieceMetaRecord{
Range: &rangeutils.Range{ PieceNum: p.pieceNum,
StartIndex: start, PieceLen: uint32(pieceLen),
EndIndex: end, Md5: digestutils.ToHashString(pieceMd5),
}, Range: &rangeutils.Range{
OriginRange: &rangeutils.Range{ StartIndex: start,
StartIndex: start, EndIndex: end,
EndIndex: end, },
}, OriginRange: &rangeutils.Range{
PieceStyle: pieceStyle, StartIndex: start,
} EndIndex: end,
// write piece meta to storage },
if err = cw.cacheDataManager.appendPieceMetadata(p.taskID, pieceRecord); err != nil { PieceStyle: pieceStyle,
logger.Errorf("write piece meta file: %v", err) }
continue // write piece meta to storage
} if err = cw.metadataManager.appendPieceMetadata(p.taskID, pieceRecord); err != nil {
return errors.Errorf("write piece meta to piece meta file failed: %v", err)
if cw.cdnReporter != nil { }
// report piece info
if err = cw.cdnReporter.reportPieceMetaRecord(ctx, p.taskID, pieceRecord, DownloaderReport); err != nil { if err = cw.cdnReporter.reportPieceMetaRecord(ctx, p.taskID, pieceRecord, DownloaderReport); err != nil {
// NOTE: should we do this job again? // NOTE: should we do this job again?
logger.Errorf("report piece status, pieceNum %d pieceMetaRecord %s: %v", p.pieceNum, pieceRecord, err) return errors.Errorf("report piece status, pieceNum %d pieceMetaRecord %s: %v", p.pieceNum, pieceRecord, err)
} }
} }
} }
}() return nil
})
} }
} }
/* /*
helper functions helper functions
max goroutine count is CDNWriterRoutineLimit
*/ */
// calculateRoutineCount max goroutine count is CDNWriterRoutineLimit
func calculateRoutineCount(remainingFileLength int64, pieceSize int32) int { func calculateRoutineCount(remainingFileLength int64, pieceSize int32) int {
routineSize := config.CDNWriterRoutineLimit routineSize := constants.CDNWriterRoutineLimit
if remainingFileLength < 0 || pieceSize <= 0 { if remainingFileLength < 0 || pieceSize <= 0 {
return routineSize return routineSize
} }

View File

@ -20,22 +20,24 @@ import (
"bufio" "bufio"
"context" "context"
"fmt" "fmt"
"io"
"os" "os"
"strings" "strings"
"testing" "testing"
"time" "time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
"d7y.io/dragonfly/v2/cdn/config" "d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/constants"
"d7y.io/dragonfly/v2/cdn/plugins" "d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/cdn/storedriver" "d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/storedriver/local" "d7y.io/dragonfly/v2/cdn/storedriver/local"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage" "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/disk" "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/disk"
"d7y.io/dragonfly/v2/cdn/supervisor/progress" progressMock "d7y.io/dragonfly/v2/cdn/supervisor/mocks/progress"
"d7y.io/dragonfly/v2/cdn/types" "d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/pkg/ratelimiter/limitreader"
"d7y.io/dragonfly/v2/pkg/unit" "d7y.io/dragonfly/v2/pkg/unit"
) )
@ -63,12 +65,12 @@ func NewPlugins(workHome string) map[plugins.PluginType][]*plugins.PluginPropert
{ {
Name: disk.StorageMode, Name: disk.StorageMode,
Enable: true, Enable: true,
Config: &storage.Config{ Config: &config.StorageConfig{
GCInitialDelay: 0 * time.Second, GCInitialDelay: 0 * time.Second,
GCInterval: 15 * time.Second, GCInterval: 15 * time.Second,
DriverConfigs: map[string]*storage.DriverConfig{ DriverConfigs: map[string]*config.DriverConfig{
local.DiskDriverName: { local.DiskDriverName: {
GCConfig: &storage.GCConfig{ GCConfig: &config.GCConfig{
YoungGCThreshold: 100 * unit.GB, YoungGCThreshold: 100 * unit.GB,
FullGCThreshold: 5 * unit.GB, FullGCThreshold: 5 * unit.GB,
CleanRatio: 1, CleanRatio: 1,
@ -85,14 +87,17 @@ func (suite *CacheWriterTestSuite) SetupSuite() {
suite.workHome, _ = os.MkdirTemp("/tmp", "cdn-CacheWriterDetectorTestSuite-") suite.workHome, _ = os.MkdirTemp("/tmp", "cdn-CacheWriterDetectorTestSuite-")
suite.T().Log("workHome:", suite.workHome) suite.T().Log("workHome:", suite.workHome)
suite.Nil(plugins.Initialize(NewPlugins(suite.workHome))) suite.Nil(plugins.Initialize(NewPlugins(suite.workHome)))
storeMgr, ok := storage.Get(config.DefaultStorageMode) ctrl := gomock.NewController(suite.T())
progressManager := progressMock.NewMockManager(ctrl)
progressManager.EXPECT().PublishPiece(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
progressManager.EXPECT().PublishTask(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
storeManager, ok := storage.Get(constants.DefaultStorageMode)
if !ok { if !ok {
suite.Failf("failed to get storage mode %s", config.DefaultStorageMode) suite.Failf("failed to get storage mode %s", constants.DefaultStorageMode)
} }
cacheDataManager := newCacheDataManager(storeMgr) metadataManager := newMetadataManager(storeManager)
progressMgr, _ := progress.NewManager() cdnReporter := newReporter(progressManager)
cdnReporter := newReporter(progressMgr) suite.writer = newCacheWriter(cdnReporter, metadataManager, storeManager)
suite.writer = newCacheWriter(cdnReporter, cacheDataManager)
} }
func (suite *CacheWriterTestSuite) TearDownSuite() { func (suite *CacheWriterTestSuite) TearDownSuite() {
@ -108,9 +113,9 @@ func (suite *CacheWriterTestSuite) TestStartWriter() {
suite.Nil(err) suite.Nil(err)
contentLen := int64(len(content)) contentLen := int64(len(content))
type args struct { type args struct {
reader io.Reader reader *limitreader.LimitReader
task *types.SeedTask task *task.SeedTask
detectResult *cacheResult breakPoint int64
} }
tests := []struct { tests := []struct {
@ -122,9 +127,9 @@ func (suite *CacheWriterTestSuite) TestStartWriter() {
{ {
name: "write with nil detectResult", name: "write with nil detectResult",
args: args{ args: args{
reader: bufio.NewReader(strings.NewReader(string(content))), reader: limitreader.NewLimitReader(bufio.NewReader(strings.NewReader(string(content))), 100),
task: &types.SeedTask{ task: &task.SeedTask{
TaskID: "5806501c3bb92f0b645918c5a4b15495a63259e3e0363008f97e186509e9e", ID: "5806501c3bb92f0b645918c5a4b15495a63259e3e0363008f97e186509e9e",
PieceSize: 50, PieceSize: 50,
}, },
}, },
@ -132,67 +137,56 @@ func (suite *CacheWriterTestSuite) TestStartWriter() {
backSourceLength: contentLen, backSourceLength: contentLen,
realCdnFileLength: contentLen, realCdnFileLength: contentLen,
realSourceFileLength: contentLen, realSourceFileLength: contentLen,
pieceTotalCount: int32((contentLen + 49) / 50), totalPieceCount: int32((contentLen + 49) / 50),
pieceMd5Sign: "3f4585787609b0d7d4c9fc800db61655a74494f83507c8acd2818d0461d9cdc5", pieceMd5Sign: "3f4585787609b0d7d4c9fc800db61655a74494f83507c8acd2818d0461d9cdc5",
}, },
}, { }, {
name: "write with non nil detectResult", name: "write with non nil detectResult",
args: args{ args: args{
reader: bufio.NewReader(strings.NewReader(string(content))), reader: limitreader.NewLimitReader(bufio.NewReader(strings.NewReader(string(content))), 100),
task: &types.SeedTask{ task: &task.SeedTask{
TaskID: "5816501c3bb92f0b645918c5a4b15495a63259e3e0363008f97e186509e9e", ID: "5816501c3bb92f0b645918c5a4b15495a63259e3e0363008f97e186509e9e",
PieceSize: 50, PieceSize: 50,
}, },
detectResult: &cacheResult{
breakPoint: 0,
pieceMetaRecords: nil,
fileMetadata: nil,
},
}, },
result: &downloadMetadata{ result: &downloadMetadata{
backSourceLength: contentLen, backSourceLength: contentLen,
realCdnFileLength: contentLen, realCdnFileLength: contentLen,
realSourceFileLength: contentLen, realSourceFileLength: contentLen,
pieceTotalCount: int32((contentLen + 49) / 50), totalPieceCount: int32((contentLen + 49) / 50),
pieceMd5Sign: "3f4585787609b0d7d4c9fc800db61655a74494f83507c8acd2818d0461d9cdc5", pieceMd5Sign: "3f4585787609b0d7d4c9fc800db61655a74494f83507c8acd2818d0461d9cdc5",
}, },
}, { }, {
name: "write with task length", name: "write with task length",
args: args{ args: args{
reader: bufio.NewReader(strings.NewReader(string(content))), reader: limitreader.NewLimitReader(bufio.NewReader(strings.NewReader(string(content))), 100),
task: &types.SeedTask{ task: &task.SeedTask{
TaskID: "5826501c3bb92f0b645918c5a4b15495a63259e3e0363008f97e186509e93", ID: "5826501c3bb92f0b645918c5a4b15495a63259e3e0363008f97e186509e93",
PieceSize: 50, PieceSize: 50,
SourceFileLength: contentLen, SourceFileLength: contentLen,
}, },
detectResult: &cacheResult{
breakPoint: 0,
pieceMetaRecords: nil,
fileMetadata: nil,
},
}, },
result: &downloadMetadata{ result: &downloadMetadata{
backSourceLength: contentLen, backSourceLength: contentLen,
realCdnFileLength: contentLen, realCdnFileLength: contentLen,
realSourceFileLength: contentLen, realSourceFileLength: contentLen,
pieceTotalCount: int32((contentLen + 49) / 50), totalPieceCount: int32((contentLen + 49) / 50),
pieceMd5Sign: "3f4585787609b0d7d4c9fc800db61655a74494f83507c8acd2818d0461d9cdc5", pieceMd5Sign: "3f4585787609b0d7d4c9fc800db61655a74494f83507c8acd2818d0461d9cdc5",
}, },
}, },
} }
for _, tt := range tests { for _, tt := range tests {
suite.Run(tt.name, func() { suite.Run(tt.name, func() {
suite.writer.cdnReporter.progress.InitSeedProgress(context.Background(), tt.args.task.TaskID) downloadMetadata, err := suite.writer.startWriter(context.Background(), tt.args.reader, tt.args.task, tt.args.breakPoint)
downloadMetadata, err := suite.writer.startWriter(context.Background(), tt.args.reader, tt.args.task, tt.args.detectResult)
suite.Equal(tt.wantErr, err != nil) suite.Equal(tt.wantErr, err != nil)
suite.Equal(tt.result, downloadMetadata) suite.Equal(tt.result, downloadMetadata)
suite.checkFileSize(suite.writer.cacheDataManager, tt.args.task.TaskID, contentLen) suite.checkFileSize(suite.writer.cacheStore, tt.args.task.ID, contentLen)
}) })
} }
} }
func (suite *CacheWriterTestSuite) checkFileSize(cacheDataMgr *cacheDataManager, taskID string, expectedSize int64) { func (suite *CacheWriterTestSuite) checkFileSize(cacheStore storage.Manager, taskID string, expectedSize int64) {
storageInfo, err := cacheDataMgr.statDownloadFile(taskID) storageInfo, err := cacheStore.StatDownloadFile(taskID)
suite.Nil(err) suite.Nil(err)
suite.Equal(expectedSize, storageInfo.Size) suite.Equal(expectedSize, storageInfo.Size)
} }

View File

@ -23,24 +23,24 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"d7y.io/dragonfly/v2/cdn/types" "d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/util/rangeutils" "d7y.io/dragonfly/v2/pkg/util/rangeutils"
"d7y.io/dragonfly/v2/pkg/util/stringutils" "d7y.io/dragonfly/v2/pkg/util/stringutils"
) )
func (cm *Manager) download(ctx context.Context, task *types.SeedTask, breakPoint int64) (io.ReadCloser, error) { func (cm *manager) download(ctx context.Context, seedTask *task.SeedTask, breakPoint int64) (io.ReadCloser, error) {
var err error var err error
breakRange := task.Range breakRange := seedTask.Range
if breakPoint > 0 { if breakPoint > 0 {
// todo replace task.SourceFileLength with totalSourceFileLength to get BreakRange // todo replace task.SourceFileLength with totalSourceFileLength to get BreakRange
breakRange, err = getBreakRange(breakPoint, task.Range, task.SourceFileLength) breakRange, err = getBreakRange(breakPoint, seedTask.Range, seedTask.SourceFileLength)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "calculate the breakRange") return nil, errors.Wrapf(err, "calculate the breakRange")
} }
} }
task.Log().Infof("start downloading URL %s at range %s with header %s", task.URL, breakRange, task.Header) seedTask.Log().Infof("start downloading URL %s at range %s with header %s", seedTask.RawURL, breakRange, seedTask.Header)
downloadRequest, err := source.NewRequestWithContext(ctx, task.URL, task.Header) downloadRequest, err := source.NewRequestWithContext(ctx, seedTask.RawURL, seedTask.Header)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "create download request") return nil, errors.Wrap(err, "create download request")
} }
@ -50,7 +50,7 @@ func (cm *Manager) download(ctx context.Context, task *types.SeedTask, breakPoin
body, expireInfo, err := source.DownloadWithExpireInfo(downloadRequest) body, expireInfo, err := source.DownloadWithExpireInfo(downloadRequest)
// update Expire info // update Expire info
if err == nil { if err == nil {
cm.updateExpireInfo(task.TaskID, map[string]string{ cm.updateExpireInfo(seedTask.ID, map[string]string{
source.LastModified: expireInfo.LastModified, source.LastModified: expireInfo.LastModified,
source.ETag: expireInfo.ETag, source.ETag: expireInfo.ETag,
}) })

View File

@ -19,6 +19,7 @@ package cdn
import ( import (
"context" "context"
"crypto/md5" "crypto/md5"
"encoding/json"
"fmt" "fmt"
"time" "time"
@ -27,11 +28,12 @@ import (
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
"d7y.io/dragonfly/v2/cdn/config" "d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/supervisor" "d7y.io/dragonfly/v2/cdn/constants"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage" "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
_ "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/disk" // nolint _ "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/disk" // nolint
_ "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/hybrid" // nolint _ "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/hybrid" // nolint
"d7y.io/dragonfly/v2/cdn/types" "d7y.io/dragonfly/v2/cdn/supervisor/progress"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/ratelimiter/limitreader" "d7y.io/dragonfly/v2/pkg/ratelimiter/limitreader"
"d7y.io/dragonfly/v2/pkg/ratelimiter/ratelimiter" "d7y.io/dragonfly/v2/pkg/ratelimiter/ratelimiter"
@ -42,128 +44,150 @@ import (
"d7y.io/dragonfly/v2/pkg/util/timeutils" "d7y.io/dragonfly/v2/pkg/util/timeutils"
) )
// Ensure that Manager implements the CDNMgr interface // Manager as an interface defines all operations against CDN and
var _ supervisor.CDNMgr = (*Manager)(nil) // operates on the underlying files stored on the local disk, etc.
type Manager interface {
var tracer trace.Tracer // TriggerCDN will trigger the download resource from sourceURL.
TriggerCDN(context.Context, *task.SeedTask) (*task.SeedTask, error)
func init() { // Delete the cdn meta with specified taskID.
tracer = otel.Tracer("cdn-server") // The file on the disk will be deleted when the force is true.
Delete(taskID string) error
// TryFreeSpace checks if the free space of the storage is larger than the fileLength.
TryFreeSpace(fileLength int64) (bool, error)
} }
// Manager is an implementation of the interface of CDNMgr. // Ensure that Manager implements the CDNManager interface
type Manager struct { var _ Manager = (*manager)(nil)
cfg *config.Config
cacheStore storage.Manager var tracer = otel.Tracer("cdn-server")
limiter *ratelimiter.RateLimiter
cdnLocker *synclock.LockerPool // Manager is an implementation of the interface of Manager.
cacheDataManager *cacheDataManager type manager struct {
progressMgr supervisor.SeedProgressMgr cfg *config.Config
cdnReporter *reporter cacheStore storage.Manager
detector *cacheDetector limiter *ratelimiter.RateLimiter
writer *cacheWriter cdnLocker *synclock.LockerPool
metadataManager *metadataManager
progressManager progress.Manager
taskManager task.Manager
cdnReporter *reporter
detector *cacheDetector
writer *cacheWriter
} }
// NewManager returns a new Manager. // NewManager returns a new Manager.
func NewManager(cfg *config.Config, cacheStore storage.Manager, progressMgr supervisor.SeedProgressMgr) (supervisor.CDNMgr, error) { func NewManager(cfg *config.Config, cacheStore storage.Manager, progressManager progress.Manager,
return newManager(cfg, cacheStore, progressMgr) taskManager task.Manager) (Manager, error) {
return newManager(cfg, cacheStore, progressManager, taskManager)
} }
func newManager(cfg *config.Config, cacheStore storage.Manager, progressMgr supervisor.SeedProgressMgr) (*Manager, error) { func newManager(cfg *config.Config, cacheStore storage.Manager, progressManager progress.Manager, taskManager task.Manager) (Manager, error) {
rateLimiter := ratelimiter.NewRateLimiter(ratelimiter.TransRate(int64(cfg.MaxBandwidth-cfg.SystemReservedBandwidth)), 2) rateLimiter := ratelimiter.NewRateLimiter(ratelimiter.TransRate(int64(cfg.MaxBandwidth-cfg.SystemReservedBandwidth)), 2)
cacheDataManager := newCacheDataManager(cacheStore) metadataManager := newMetadataManager(cacheStore)
cdnReporter := newReporter(progressMgr) cdnReporter := newReporter(progressManager)
return &Manager{ return &manager{
cfg: cfg, cfg: cfg,
cacheStore: cacheStore, cacheStore: cacheStore,
limiter: rateLimiter, limiter: rateLimiter,
cdnLocker: synclock.NewLockerPool(), metadataManager: metadataManager,
cacheDataManager: cacheDataManager, cdnReporter: cdnReporter,
cdnReporter: cdnReporter, progressManager: progressManager,
progressMgr: progressMgr, taskManager: taskManager,
detector: newCacheDetector(cacheDataManager), detector: newCacheDetector(metadataManager, cacheStore),
writer: newCacheWriter(cdnReporter, cacheDataManager), writer: newCacheWriter(cdnReporter, metadataManager, cacheStore),
cdnLocker: synclock.NewLockerPool(),
}, nil }, nil
} }
func (cm *Manager) TriggerCDN(ctx context.Context, task *types.SeedTask) (seedTask *types.SeedTask, err error) { func (cm *manager) TriggerCDN(ctx context.Context, seedTask *task.SeedTask) (*task.SeedTask, error) {
updateTaskInfo, err := cm.doTrigger(ctx, seedTask)
if err != nil {
seedTask.Log().Errorf("failed to trigger cdn: %v", err)
// todo source not reach error SOURCE_ERROR
updateTaskInfo = getUpdateTaskInfoWithStatusOnly(seedTask, task.StatusFailed)
}
err = cm.progressManager.PublishTask(ctx, seedTask.ID, updateTaskInfo)
return updateTaskInfo, err
}
func (cm *manager) doTrigger(ctx context.Context, seedTask *task.SeedTask) (*task.SeedTask, error) {
var span trace.Span var span trace.Span
ctx, span = tracer.Start(ctx, config.SpanTriggerCDN) ctx, span = tracer.Start(ctx, constants.SpanTriggerCDN)
defer span.End() defer span.End()
tempTask := *task cm.cdnLocker.Lock(seedTask.ID, false)
seedTask = &tempTask defer cm.cdnLocker.UnLock(seedTask.ID, false)
// obtain taskId write lock
cm.cdnLocker.Lock(task.TaskID, false)
defer cm.cdnLocker.UnLock(task.TaskID, false)
var fileDigest = md5.New() var fileDigest = md5.New()
var digestType = digestutils.Md5Hash.String() var digestType = digestutils.Md5Hash.String()
if !stringutils.IsBlank(task.RequestDigest) { if !stringutils.IsBlank(seedTask.Digest) {
requestDigest := digestutils.Parse(task.RequestDigest) requestDigest := digestutils.Parse(seedTask.Digest)
digestType = requestDigest[0] digestType = requestDigest[0]
fileDigest = digestutils.CreateHash(digestType) fileDigest = digestutils.CreateHash(digestType)
} }
// first: detect Cache // first: detect Cache
detectResult, err := cm.detector.detectCache(ctx, task, fileDigest) detectResult, err := cm.detector.detectCache(ctx, seedTask, fileDigest)
if err != nil { if err != nil {
seedTask.UpdateStatus(types.TaskInfoCdnStatusFailed) return nil, errors.Wrap(err, "detect task cache")
return seedTask, errors.Wrapf(err, "failed to detect cache")
} }
span.SetAttributes(config.AttributeCacheResult.String(detectResult.String())) jsonResult, err := json.Marshal(detectResult)
task.Log().Debugf("detects cache result: %#v", detectResult)
// second: report detect result
err = cm.cdnReporter.reportCache(ctx, task.TaskID, detectResult)
if err != nil { if err != nil {
task.Log().Errorf("failed to report cache, reset detectResult: %v", err) return nil, errors.Wrapf(err, "json marshal detectResult: %#v", detectResult)
}
seedTask.Log().Debugf("detects cache result: %s", jsonResult)
// second: report detect result
err = cm.cdnReporter.reportDetectResult(ctx, seedTask.ID, detectResult)
if err != nil {
seedTask.Log().Errorf("failed to report detect cache result: %v", err)
return nil, errors.Wrapf(err, "report detect cache result")
} }
// full cache // full cache
if detectResult.breakPoint == -1 { if detectResult.BreakPoint == -1 {
task.Log().Infof("cache full hit on local") seedTask.Log().Infof("cache full hit on local")
seedTask.UpdateTaskInfo(types.TaskInfoCdnStatusSuccess, detectResult.fileMetadata.SourceRealDigest, detectResult.fileMetadata.PieceMd5Sign, return getUpdateTaskInfo(seedTask, task.StatusSuccess, detectResult.FileMetadata.SourceRealDigest, detectResult.FileMetadata.PieceMd5Sign,
detectResult.fileMetadata.SourceFileLen, detectResult.fileMetadata.CdnFileLength) detectResult.FileMetadata.SourceFileLen, detectResult.FileMetadata.CdnFileLength, detectResult.FileMetadata.TotalPieceCount), nil
return seedTask, nil
} }
server.StatSeedStart(task.TaskID, task.URL)
start := time.Now() start := time.Now()
// third: start to download the source file // third: start to download the source file
var downloadSpan trace.Span var downloadSpan trace.Span
ctx, downloadSpan = tracer.Start(ctx, config.SpanDownloadSource) ctx, downloadSpan = tracer.Start(ctx, constants.SpanDownloadSource)
downloadSpan.End() downloadSpan.End()
body, err := cm.download(ctx, task, detectResult.breakPoint) server.StatSeedStart(seedTask.ID, seedTask.RawURL)
respBody, err := cm.download(ctx, seedTask, detectResult.BreakPoint)
// download fail // download fail
if err != nil { if err != nil {
downloadSpan.RecordError(err) downloadSpan.RecordError(err)
server.StatSeedFinish(task.TaskID, task.URL, false, err, start, time.Now(), 0, 0) server.StatSeedFinish(seedTask.ID, seedTask.RawURL, false, err, start, time.Now(), 0, 0)
seedTask.UpdateStatus(types.TaskInfoCdnStatusSourceError) return nil, errors.Wrap(err, "download task file data")
return seedTask, err
} }
defer body.Close() defer respBody.Close()
reader := limitreader.NewLimitReaderWithLimiterAndDigest(body, cm.limiter, fileDigest, digestutils.Algorithms[digestType]) reader := limitreader.NewLimitReaderWithLimiterAndDigest(respBody, cm.limiter, fileDigest, digestutils.Algorithms[digestType])
// forth: write to storage // forth: write to storage
downloadMetadata, err := cm.writer.startWriter(ctx, reader, task, detectResult) downloadMetadata, err := cm.writer.startWriter(ctx, reader, seedTask, detectResult.BreakPoint)
if err != nil { if err != nil {
server.StatSeedFinish(task.TaskID, task.URL, false, err, start, time.Now(), downloadMetadata.backSourceLength, server.StatSeedFinish(seedTask.ID, seedTask.RawURL, false, err, start, time.Now(), downloadMetadata.backSourceLength,
downloadMetadata.realSourceFileLength) downloadMetadata.realSourceFileLength)
task.Log().Errorf("failed to write for task: %v", err) return nil, errors.Wrap(err, "write task file data")
seedTask.UpdateStatus(types.TaskInfoCdnStatusFailed)
return seedTask, err
} }
server.StatSeedFinish(task.TaskID, task.URL, true, nil, start, time.Now(), downloadMetadata.backSourceLength, server.StatSeedFinish(seedTask.ID, seedTask.RawURL, true, nil, start, time.Now(), downloadMetadata.backSourceLength,
downloadMetadata.realSourceFileLength) downloadMetadata.realSourceFileLength)
sourceDigest := reader.Digest()
// fifth: handle CDN result // fifth: handle CDN result
success, err := cm.handleCDNResult(task, sourceDigest, downloadMetadata) err = cm.handleCDNResult(seedTask, downloadMetadata)
if err != nil || !success { if err != nil {
seedTask.UpdateStatus(types.TaskInfoCdnStatusFailed) return nil, err
return seedTask, err
} }
seedTask.UpdateTaskInfo(types.TaskInfoCdnStatusSuccess, sourceDigest, downloadMetadata.pieceMd5Sign, return getUpdateTaskInfo(seedTask, task.StatusSuccess, downloadMetadata.sourceRealDigest, downloadMetadata.pieceMd5Sign,
downloadMetadata.realSourceFileLength, downloadMetadata.realCdnFileLength) downloadMetadata.realSourceFileLength, downloadMetadata.realCdnFileLength, downloadMetadata.totalPieceCount), nil
return seedTask, nil
} }
func (cm *Manager) Delete(taskID string) error { func (cm *manager) Delete(taskID string) error {
cm.cdnLocker.Lock(taskID, false)
defer cm.cdnLocker.UnLock(taskID, false)
err := cm.cacheStore.DeleteTask(taskID) err := cm.cacheStore.DeleteTask(taskID)
if err != nil { if err != nil {
return errors.Wrap(err, "failed to delete task files") return errors.Wrap(err, "failed to delete task files")
@ -171,67 +195,76 @@ func (cm *Manager) Delete(taskID string) error {
return nil return nil
} }
func (cm *Manager) TryFreeSpace(fileLength int64) (bool, error) { func (cm *manager) TryFreeSpace(fileLength int64) (bool, error) {
return cm.cacheStore.TryFreeSpace(fileLength) return cm.cacheStore.TryFreeSpace(fileLength)
} }
func (cm *Manager) handleCDNResult(task *types.SeedTask, sourceDigest string, downloadMetadata *downloadMetadata) (bool, error) { // TODO Different error representations are returned to the caller
task.Log().Debugf("handle cdn result, downloadMetadata: %#v", downloadMetadata) func (cm *manager) handleCDNResult(seedTask *task.SeedTask, downloadMetadata *downloadMetadata) error {
var isSuccess = true seedTask.Log().Debugf("handle cdn result, downloadMetadata: %#v", downloadMetadata)
var errorMsg string var success = true
var errMsg string
// check md5 // check md5
if !stringutils.IsBlank(task.RequestDigest) && task.RequestDigest != sourceDigest { if !stringutils.IsBlank(seedTask.Digest) && seedTask.Digest != downloadMetadata.sourceRealDigest {
errorMsg = fmt.Sprintf("file digest not match expected: %s real: %s", task.RequestDigest, sourceDigest) errMsg = fmt.Sprintf("file digest not match expected: %s real: %s", seedTask.Digest, downloadMetadata.sourceRealDigest)
isSuccess = false success = false
} }
// check source length // check source length
if isSuccess && task.SourceFileLength >= 0 && task.SourceFileLength != downloadMetadata.realSourceFileLength { if success && seedTask.SourceFileLength >= 0 && seedTask.SourceFileLength != downloadMetadata.realSourceFileLength {
errorMsg = fmt.Sprintf("file length not match expected: %d real: %d", task.SourceFileLength, downloadMetadata.realSourceFileLength) errMsg = fmt.Sprintf("file length not match expected: %d real: %d", seedTask.SourceFileLength, downloadMetadata.realSourceFileLength)
isSuccess = false success = false
} }
if isSuccess && task.PieceTotal > 0 && downloadMetadata.pieceTotalCount != task.PieceTotal { if success && seedTask.TotalPieceCount > 0 && downloadMetadata.totalPieceCount != seedTask.TotalPieceCount {
errorMsg = fmt.Sprintf("task total piece count not match expected: %d real: %d", task.PieceTotal, downloadMetadata.pieceTotalCount) errMsg = fmt.Sprintf("task total piece count not match expected: %d real: %d", seedTask.TotalPieceCount, downloadMetadata.totalPieceCount)
isSuccess = false success = false
} }
sourceFileLen := task.SourceFileLength sourceFileLen := seedTask.SourceFileLength
if isSuccess && task.SourceFileLength <= 0 { if success && seedTask.SourceFileLength <= 0 {
sourceFileLen = downloadMetadata.realSourceFileLength sourceFileLen = downloadMetadata.realSourceFileLength
} }
cdnFileLength := downloadMetadata.realCdnFileLength if err := cm.metadataManager.updateStatusAndResult(seedTask.ID, &storage.FileMetadata{
pieceMd5Sign := downloadMetadata.pieceMd5Sign
// if validate fail
if !isSuccess {
cdnFileLength = 0
}
if err := cm.cacheDataManager.updateStatusAndResult(task.TaskID, &storage.FileMetadata{
Finish: true, Finish: true,
Success: isSuccess, Success: success,
SourceRealDigest: sourceDigest,
PieceMd5Sign: pieceMd5Sign,
CdnFileLength: cdnFileLength,
SourceFileLen: sourceFileLen, SourceFileLen: sourceFileLen,
TotalPieceCount: downloadMetadata.pieceTotalCount, CdnFileLength: downloadMetadata.realCdnFileLength,
SourceRealDigest: downloadMetadata.sourceRealDigest,
TotalPieceCount: downloadMetadata.totalPieceCount,
PieceMd5Sign: downloadMetadata.pieceMd5Sign,
}); err != nil { }); err != nil {
return false, errors.Wrap(err, "failed to update task status and result") return errors.Wrapf(err, "update metadata")
} }
if !success {
if !isSuccess { return errors.New(errMsg)
return false, errors.New(errorMsg)
} }
return nil
task.Log().Infof("success to get task, downloadMetadata: %#v realDigest: %s", downloadMetadata, sourceDigest)
return true, nil
} }
func (cm *Manager) updateExpireInfo(taskID string, expireInfo map[string]string) { func (cm *manager) updateExpireInfo(taskID string, expireInfo map[string]string) {
if err := cm.cacheDataManager.updateExpireInfo(taskID, expireInfo); err != nil { if err := cm.metadataManager.updateExpireInfo(taskID, expireInfo); err != nil {
logger.WithTaskID(taskID).Errorf("failed to update expireInfo(%s): %v", expireInfo, err) logger.WithTaskID(taskID).Errorf("failed to update expireInfo(%s): %v", expireInfo, err)
} }
logger.WithTaskID(taskID).Infof("success to update expireInfo(%s)", expireInfo) logger.WithTaskID(taskID).Debugf("success to update metadata expireInfo(%s)", expireInfo)
} }
/* /*
helper functions helper functions
*/ */
var getCurrentTimeMillisFunc = timeutils.CurrentTimeMillis var getCurrentTimeMillisFunc = timeutils.CurrentTimeMillis
func getUpdateTaskInfoWithStatusOnly(seedTask *task.SeedTask, cdnStatus string) *task.SeedTask {
cloneTask := seedTask.Clone()
cloneTask.CdnStatus = cdnStatus
return cloneTask
}
func getUpdateTaskInfo(seedTask *task.SeedTask, cdnStatus, realMD5, pieceMd5Sign string, sourceFileLength, cdnFileLength int64,
totalPieceCount int32) *task.SeedTask {
cloneTask := seedTask.Clone()
cloneTask.SourceFileLength = sourceFileLength
cloneTask.CdnFileLength = cdnFileLength
cloneTask.CdnStatus = cdnStatus
cloneTask.TotalPieceCount = totalPieceCount
cloneTask.SourceRealDigest = realMD5
cloneTask.PieceMd5Sign = pieceMd5Sign
return cloneTask
}

View File

@ -28,10 +28,13 @@ import (
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
"d7y.io/dragonfly/v2/cdn/config" "d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/constants"
"d7y.io/dragonfly/v2/cdn/plugins" "d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage" "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/supervisor/mock" _ "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage/disk"
"d7y.io/dragonfly/v2/cdn/types" progressMock "d7y.io/dragonfly/v2/cdn/supervisor/mocks/progress"
taskMock "d7y.io/dragonfly/v2/cdn/supervisor/mocks/task"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/internal/idgen" "d7y.io/dragonfly/v2/internal/idgen"
"d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source"
@ -48,7 +51,7 @@ func TestCDNManagerSuite(t *testing.T) {
type CDNManagerTestSuite struct { type CDNManagerTestSuite struct {
workHome string workHome string
cm *Manager cm Manager
suite.Suite suite.Suite
} }
@ -56,21 +59,24 @@ func (suite *CDNManagerTestSuite) SetupSuite() {
suite.workHome, _ = os.MkdirTemp("/tmp", "cdn-ManagerTestSuite-") suite.workHome, _ = os.MkdirTemp("/tmp", "cdn-ManagerTestSuite-")
fmt.Printf("workHome: %s", suite.workHome) fmt.Printf("workHome: %s", suite.workHome)
suite.Nil(plugins.Initialize(NewPlugins(suite.workHome))) suite.Nil(plugins.Initialize(NewPlugins(suite.workHome)))
storeMgr, ok := storage.Get(config.DefaultStorageMode) storeMgr, ok := storage.Get(constants.DefaultStorageMode)
if !ok { if !ok {
suite.Failf("failed to get storage mode %s", config.DefaultStorageMode) suite.Failf("failed to get storage mode %s", constants.DefaultStorageMode)
} }
ctrl := gomock.NewController(suite.T()) ctrl := gomock.NewController(suite.T())
progressMgr := mock.NewMockSeedProgressMgr(ctrl) taskManager := taskMock.NewMockManager(ctrl)
progressMgr.EXPECT().PublishPiece(gomock.Any(), md5TaskID, gomock.Any()).Return(nil).Times(98 * 2) progressManager := progressMock.NewMockManager(ctrl)
progressMgr.EXPECT().PublishPiece(gomock.Any(), sha256TaskID, gomock.Any()).Return(nil).Times(98 * 2) progressManager.EXPECT().PublishPiece(gomock.Any(), md5TaskID, gomock.Any()).Return(nil).Times(98 * 2)
suite.cm, _ = newManager(config.New(), storeMgr, progressMgr) progressManager.EXPECT().PublishPiece(gomock.Any(), sha256TaskID, gomock.Any()).Return(nil).Times(98 * 2)
progressManager.EXPECT().PublishTask(gomock.Any(), md5TaskID, gomock.Any()).Return(nil).Times(2)
progressManager.EXPECT().PublishTask(gomock.Any(), sha256TaskID, gomock.Any()).Return(nil).Times(2)
suite.cm, _ = newManager(config.New(), storeMgr, progressManager, taskManager)
} }
var ( var (
dragonflyURL = "http://dragonfly.io.com?a=a&b=b&c=c" dragonflyRawURL = "http://dragonfly.io.com?a=a&b=b&c=c"
md5TaskID = idgen.TaskID(dragonflyURL, &base.UrlMeta{Digest: "md5:f1e2488bba4d1267948d9e2f7008571c", Tag: "dragonfly", Filter: "a&b"}) md5TaskID = idgen.TaskID(dragonflyRawURL, &base.UrlMeta{Digest: "md5:f1e2488bba4d1267948d9e2f7008571c", Tag: "dragonfly", Filter: "a&b"})
sha256TaskID = idgen.TaskID(dragonflyURL, &base.UrlMeta{Digest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5", Tag: "dragonfly", Filter: "a&b"}) sha256TaskID = idgen.TaskID(dragonflyRawURL, &base.UrlMeta{Digest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5", Tag: "dragonfly", Filter: "a&b"})
) )
func (suite *CDNManagerTestSuite) TearDownSuite() { func (suite *CDNManagerTestSuite) TearDownSuite() {
@ -133,67 +139,67 @@ func (suite *CDNManagerTestSuite) TestTriggerCDN() {
tests := []struct { tests := []struct {
name string name string
sourceTask *types.SeedTask sourceTask *task.SeedTask
targetTask *types.SeedTask targetTask *task.SeedTask
}{ }{
{ {
name: "trigger_md5", name: "trigger_md5",
sourceTask: &types.SeedTask{ sourceTask: &task.SeedTask{
TaskID: md5TaskID, ID: md5TaskID,
URL: dragonflyURL, RawURL: dragonflyRawURL,
TaskURL: urlutils.FilterURLParam(dragonflyURL, []string{"a", "b"}), TaskURL: urlutils.FilterURLParam(dragonflyRawURL, []string{"a", "b"}),
SourceFileLength: 9789, SourceFileLength: 9789,
CdnFileLength: 0, CdnFileLength: 0,
PieceSize: 100, PieceSize: 100,
Header: map[string]string{"md5": "f1e2488bba4d1267948d9e2f7008571c"}, Header: map[string]string{"md5": "f1e2488bba4d1267948d9e2f7008571c"},
CdnStatus: types.TaskInfoCdnStatusRunning, CdnStatus: task.StatusRunning,
PieceTotal: 0, TotalPieceCount: 98,
RequestDigest: "md5:f1e2488bba4d1267948d9e2f7008571c", Digest: "md5:f1e2488bba4d1267948d9e2f7008571c",
SourceRealDigest: "", SourceRealDigest: "",
PieceMd5Sign: "", PieceMd5Sign: "",
}, },
targetTask: &types.SeedTask{ targetTask: &task.SeedTask{
TaskID: md5TaskID, ID: md5TaskID,
URL: dragonflyURL, RawURL: dragonflyRawURL,
TaskURL: urlutils.FilterURLParam(dragonflyURL, []string{"a", "b"}), TaskURL: urlutils.FilterURLParam(dragonflyRawURL, []string{"a", "b"}),
SourceFileLength: 9789, SourceFileLength: 9789,
CdnFileLength: 9789, CdnFileLength: 9789,
PieceSize: 100, PieceSize: 100,
Header: map[string]string{"md5": "f1e2488bba4d1267948d9e2f7008571c"}, Header: map[string]string{"md5": "f1e2488bba4d1267948d9e2f7008571c"},
CdnStatus: types.TaskInfoCdnStatusSuccess, CdnStatus: task.StatusSuccess,
PieceTotal: 0, TotalPieceCount: 98,
RequestDigest: "md5:f1e2488bba4d1267948d9e2f7008571c", Digest: "md5:f1e2488bba4d1267948d9e2f7008571c",
SourceRealDigest: "md5:f1e2488bba4d1267948d9e2f7008571c", SourceRealDigest: "md5:f1e2488bba4d1267948d9e2f7008571c",
PieceMd5Sign: "bb138842f338fff90af737e4a6b2c6f8e2a7031ca9d5900bc9b646f6406d890f", PieceMd5Sign: "bb138842f338fff90af737e4a6b2c6f8e2a7031ca9d5900bc9b646f6406d890f",
}, },
}, },
{ {
name: "trigger_sha256", name: "trigger_sha256",
sourceTask: &types.SeedTask{ sourceTask: &task.SeedTask{
TaskID: sha256TaskID, ID: sha256TaskID,
URL: dragonflyURL, RawURL: dragonflyRawURL,
TaskURL: urlutils.FilterURLParam(dragonflyURL, []string{"a", "b"}), TaskURL: urlutils.FilterURLParam(dragonflyRawURL, []string{"a", "b"}),
SourceFileLength: 9789, SourceFileLength: 9789,
CdnFileLength: 0, CdnFileLength: 0,
PieceSize: 100, PieceSize: 100,
Header: map[string]string{"sha256": "b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5"}, Header: map[string]string{"sha256": "b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5"},
CdnStatus: types.TaskInfoCdnStatusRunning, CdnStatus: task.StatusRunning,
PieceTotal: 0, TotalPieceCount: 98,
RequestDigest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5", Digest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5",
SourceRealDigest: "", SourceRealDigest: "",
PieceMd5Sign: "", PieceMd5Sign: "",
}, },
targetTask: &types.SeedTask{ targetTask: &task.SeedTask{
TaskID: sha256TaskID, ID: sha256TaskID,
URL: dragonflyURL, RawURL: dragonflyRawURL,
TaskURL: urlutils.FilterURLParam(dragonflyURL, []string{"a", "b"}), TaskURL: urlutils.FilterURLParam(dragonflyRawURL, []string{"a", "b"}),
SourceFileLength: 9789, SourceFileLength: 9789,
CdnFileLength: 9789, CdnFileLength: 9789,
PieceSize: 100, PieceSize: 100,
Header: map[string]string{"sha256": "b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5"}, Header: map[string]string{"sha256": "b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5"},
CdnStatus: types.TaskInfoCdnStatusSuccess, CdnStatus: task.StatusSuccess,
PieceTotal: 0, TotalPieceCount: 98,
RequestDigest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5", Digest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5",
SourceRealDigest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5", SourceRealDigest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5",
PieceMd5Sign: "bb138842f338fff90af737e4a6b2c6f8e2a7031ca9d5900bc9b646f6406d890f", PieceMd5Sign: "bb138842f338fff90af737e4a6b2c6f8e2a7031ca9d5900bc9b646f6406d890f",
}, },
@ -204,10 +210,10 @@ func (suite *CDNManagerTestSuite) TestTriggerCDN() {
suite.Run(tt.name, func() { suite.Run(tt.name, func() {
gotSeedTask, err := suite.cm.TriggerCDN(context.Background(), tt.sourceTask) gotSeedTask, err := suite.cm.TriggerCDN(context.Background(), tt.sourceTask)
suite.Nil(err) suite.Nil(err)
suite.Equal(tt.targetTask, gotSeedTask) suite.True(task.IsEqual(*tt.targetTask, *gotSeedTask))
cacheSeedTask, err := suite.cm.TriggerCDN(context.Background(), gotSeedTask) cacheSeedTask, err := suite.cm.TriggerCDN(context.Background(), gotSeedTask)
suite.Nil(err) suite.Nil(err)
suite.Equal(tt.targetTask, cacheSeedTask) suite.True(task.IsEqual(*tt.targetTask, *cacheSeedTask))
}) })
} }

View File

@ -17,15 +17,12 @@
package cdn package cdn
import ( import (
"fmt"
"io"
"sort" "sort"
"github.com/pkg/errors" "github.com/pkg/errors"
"d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage" "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/types" "d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/synclock" "d7y.io/dragonfly/v2/pkg/synclock"
"d7y.io/dragonfly/v2/pkg/util/digestutils" "d7y.io/dragonfly/v2/pkg/util/digestutils"
@ -33,42 +30,46 @@ import (
"d7y.io/dragonfly/v2/pkg/util/timeutils" "d7y.io/dragonfly/v2/pkg/util/timeutils"
) )
// cacheDataManager manages the meta file and piece meta file of each TaskId. // metadataManager manages the meta file and piece meta file of each TaskID.
type cacheDataManager struct { type metadataManager struct {
storage storage.Manager storage storage.Manager
cacheLocker *synclock.LockerPool cacheLocker *synclock.LockerPool
} }
func newCacheDataManager(storeMgr storage.Manager) *cacheDataManager { func newMetadataManager(storageManager storage.Manager) *metadataManager {
return &cacheDataManager{ return &metadataManager{
storeMgr, storageManager,
synclock.NewLockerPool(), synclock.NewLockerPool(),
} }
} }
// writeFileMetadataByTask stores the metadata of task by task to storage. // writeFileMetadataByTask stores metadata of task
func (mm *cacheDataManager) writeFileMetadataByTask(task *types.SeedTask) (*storage.FileMetadata, error) { func (mm *metadataManager) writeFileMetadataByTask(seedTask *task.SeedTask) (*storage.FileMetadata, error) {
mm.cacheLocker.Lock(task.TaskID, false) mm.cacheLocker.Lock(seedTask.ID, false)
defer mm.cacheLocker.UnLock(task.TaskID, false) defer mm.cacheLocker.UnLock(seedTask.ID, false)
metadata := &storage.FileMetadata{ metadata := &storage.FileMetadata{
TaskID: task.TaskID, TaskID: seedTask.ID,
TaskURL: task.TaskURL, TaskURL: seedTask.TaskURL,
PieceSize: task.PieceSize, PieceSize: seedTask.PieceSize,
SourceFileLen: task.SourceFileLength, SourceFileLen: seedTask.SourceFileLength,
AccessTime: getCurrentTimeMillisFunc(), AccessTime: getCurrentTimeMillisFunc(),
CdnFileLength: task.CdnFileLength, CdnFileLength: seedTask.CdnFileLength,
TotalPieceCount: task.PieceTotal, Digest: seedTask.Digest,
Tag: seedTask.Tag,
TotalPieceCount: seedTask.TotalPieceCount,
Range: seedTask.Range,
Filter: seedTask.Filter,
} }
if err := mm.storage.WriteFileMetadata(task.TaskID, metadata); err != nil { if err := mm.storage.WriteFileMetadata(seedTask.ID, metadata); err != nil {
return nil, errors.Wrapf(err, "write task %s metadata file", task.TaskID) return nil, errors.Wrapf(err, "write task metadata file")
} }
return metadata, nil return metadata, nil
} }
// updateAccessTime update access and interval // updateAccessTime update access and interval
func (mm *cacheDataManager) updateAccessTime(taskID string, accessTime int64) error { func (mm *metadataManager) updateAccessTime(taskID string, accessTime int64) error {
mm.cacheLocker.Lock(taskID, false) mm.cacheLocker.Lock(taskID, false)
defer mm.cacheLocker.UnLock(taskID, false) defer mm.cacheLocker.UnLock(taskID, false)
@ -89,7 +90,7 @@ func (mm *cacheDataManager) updateAccessTime(taskID string, accessTime int64) er
return mm.storage.WriteFileMetadata(taskID, originMetadata) return mm.storage.WriteFileMetadata(taskID, originMetadata)
} }
func (mm *cacheDataManager) updateExpireInfo(taskID string, expireInfo map[string]string) error { func (mm *metadataManager) updateExpireInfo(taskID string, expireInfo map[string]string) error {
mm.cacheLocker.Lock(taskID, false) mm.cacheLocker.Lock(taskID, false)
defer mm.cacheLocker.UnLock(taskID, false) defer mm.cacheLocker.UnLock(taskID, false)
@ -103,7 +104,7 @@ func (mm *cacheDataManager) updateExpireInfo(taskID string, expireInfo map[strin
return mm.storage.WriteFileMetadata(taskID, originMetadata) return mm.storage.WriteFileMetadata(taskID, originMetadata)
} }
func (mm *cacheDataManager) updateStatusAndResult(taskID string, metadata *storage.FileMetadata) error { func (mm *metadataManager) updateStatusAndResult(taskID string, metadata *storage.FileMetadata) error {
mm.cacheLocker.Lock(taskID, false) mm.cacheLocker.Lock(taskID, false)
defer mm.cacheLocker.UnLock(taskID, false) defer mm.cacheLocker.UnLock(taskID, false)
@ -130,8 +131,12 @@ func (mm *cacheDataManager) updateStatusAndResult(taskID string, metadata *stora
return mm.storage.WriteFileMetadata(taskID, originMetadata) return mm.storage.WriteFileMetadata(taskID, originMetadata)
} }
func (mm *metadataManager) readFileMetadata(taskID string) (*storage.FileMetadata, error) {
return mm.storage.ReadFileMetadata(taskID)
}
// appendPieceMetadata append piece meta info to storage // appendPieceMetadata append piece meta info to storage
func (mm *cacheDataManager) appendPieceMetadata(taskID string, record *storage.PieceMetaRecord) error { func (mm *metadataManager) appendPieceMetadata(taskID string, record *storage.PieceMetaRecord) error {
mm.cacheLocker.Lock(taskID, false) mm.cacheLocker.Lock(taskID, false)
defer mm.cacheLocker.UnLock(taskID, false) defer mm.cacheLocker.UnLock(taskID, false)
// write to the storage // write to the storage
@ -139,36 +144,31 @@ func (mm *cacheDataManager) appendPieceMetadata(taskID string, record *storage.P
} }
// appendPieceMetadata append piece meta info to storage // appendPieceMetadata append piece meta info to storage
func (mm *cacheDataManager) writePieceMetaRecords(taskID string, records []*storage.PieceMetaRecord) error { func (mm *metadataManager) writePieceMetaRecords(taskID string, records []*storage.PieceMetaRecord) error {
mm.cacheLocker.Lock(taskID, false) mm.cacheLocker.Lock(taskID, false)
defer mm.cacheLocker.UnLock(taskID, false) defer mm.cacheLocker.UnLock(taskID, false)
// write to the storage // write to the storage
return mm.storage.WritePieceMetaRecords(taskID, records) return mm.storage.WritePieceMetaRecords(taskID, records)
} }
// readAndCheckPieceMetaRecords reads pieceMetaRecords from storage and check data integrity by the md5 file of the TaskId // readPieceMetaRecords reads pieceMetaRecords from storage and without check data integrity
func (mm *cacheDataManager) readAndCheckPieceMetaRecords(taskID, pieceMd5Sign string) ([]*storage.PieceMetaRecord, error) { func (mm *metadataManager) readPieceMetaRecords(taskID string) ([]*storage.PieceMetaRecord, error) {
mm.cacheLocker.Lock(taskID, true) mm.cacheLocker.Lock(taskID, true)
defer mm.cacheLocker.UnLock(taskID, true) defer mm.cacheLocker.UnLock(taskID, true)
md5Sign, pieceMetaRecords, err := mm.getPieceMd5Sign(taskID) pieceMetaRecords, err := mm.storage.ReadPieceMetaRecords(taskID)
if err != nil { if err != nil {
return nil, err return nil, errors.Wrapf(err, "read piece meta file")
}
if md5Sign != pieceMd5Sign {
return nil, fmt.Errorf("check piece meta data integrity fail, expectMd5Sign: %s, actualMd5Sign: %s",
pieceMd5Sign, md5Sign)
} }
// sort piece meta records by pieceNum
sort.Slice(pieceMetaRecords, func(i, j int) bool {
return pieceMetaRecords[i].PieceNum < pieceMetaRecords[j].PieceNum
})
return pieceMetaRecords, nil return pieceMetaRecords, nil
} }
// readPieceMetaRecords reads pieceMetaRecords from storage and without check data integrity func (mm *metadataManager) getPieceMd5Sign(taskID string) (string, []*storage.PieceMetaRecord, error) {
func (mm *cacheDataManager) readPieceMetaRecords(taskID string) ([]*storage.PieceMetaRecord, error) {
mm.cacheLocker.Lock(taskID, true) mm.cacheLocker.Lock(taskID, true)
defer mm.cacheLocker.UnLock(taskID, true) defer mm.cacheLocker.UnLock(taskID, true)
return mm.storage.ReadPieceMetaRecords(taskID)
}
func (mm *cacheDataManager) getPieceMd5Sign(taskID string) (string, []*storage.PieceMetaRecord, error) {
pieceMetaRecords, err := mm.storage.ReadPieceMetaRecords(taskID) pieceMetaRecords, err := mm.storage.ReadPieceMetaRecords(taskID)
if err != nil { if err != nil {
return "", nil, errors.Wrapf(err, "read piece meta file") return "", nil, errors.Wrapf(err, "read piece meta file")
@ -182,29 +182,3 @@ func (mm *cacheDataManager) getPieceMd5Sign(taskID string) (string, []*storage.P
} }
return digestutils.Sha256(pieceMd5...), pieceMetaRecords, nil return digestutils.Sha256(pieceMd5...), pieceMetaRecords, nil
} }
func (mm *cacheDataManager) readFileMetadata(taskID string) (*storage.FileMetadata, error) {
fileMeta, err := mm.storage.ReadFileMetadata(taskID)
if err != nil {
return nil, errors.Wrapf(err, "read file metadata of task %s from storage", taskID)
}
return fileMeta, nil
}
func (mm *cacheDataManager) statDownloadFile(taskID string) (*storedriver.StorageInfo, error) {
return mm.storage.StatDownloadFile(taskID)
}
func (mm *cacheDataManager) readDownloadFile(taskID string) (io.ReadCloser, error) {
return mm.storage.ReadDownloadFile(taskID)
}
func (mm *cacheDataManager) resetRepo(task *types.SeedTask) error {
mm.cacheLocker.Lock(task.TaskID, false)
defer mm.cacheLocker.UnLock(task.TaskID, false)
return mm.storage.ResetRepo(task)
}
func (mm *cacheDataManager) writeDownloadFile(taskID string, offset int64, len int64, data io.Reader) error {
return mm.storage.WriteDownloadFile(taskID, offset, len, data)
}

View File

@ -22,14 +22,16 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"go.uber.org/zap" "go.uber.org/zap"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage" "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/types" "d7y.io/dragonfly/v2/cdn/supervisor/progress"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc/base"
) )
type reporter struct { type reporter struct {
progress supervisor.SeedProgressMgr progressManager progress.Manager
taskManager task.Manager
} }
const ( const (
@ -37,17 +39,17 @@ const (
DownloaderReport = "download" DownloaderReport = "download"
) )
func newReporter(publisher supervisor.SeedProgressMgr) *reporter { func newReporter(publisher progress.Manager) *reporter {
return &reporter{ return &reporter{
progress: publisher, progressManager: publisher,
} }
} }
// report cache result // reportDetectResult report detect cache result
func (re *reporter) reportCache(ctx context.Context, taskID string, detectResult *cacheResult) error { func (re *reporter) reportDetectResult(ctx context.Context, taskID string, detectResult *cacheResult) error {
// report cache pieces status // report cache pieces status
if detectResult != nil && detectResult.pieceMetaRecords != nil { if detectResult != nil && detectResult.PieceMetaRecords != nil {
for _, record := range detectResult.pieceMetaRecords { for _, record := range detectResult.PieceMetaRecords {
if err := re.reportPieceMetaRecord(ctx, taskID, record, CacheReport); err != nil { if err := re.reportPieceMetaRecord(ctx, taskID, record, CacheReport); err != nil {
return errors.Wrapf(err, "publish pieceMetaRecord: %v, seedPiece: %v", record, return errors.Wrapf(err, "publish pieceMetaRecord: %v, seedPiece: %v", record,
convertPieceMeta2SeedPiece(record)) convertPieceMeta2SeedPiece(record))
@ -57,24 +59,23 @@ func (re *reporter) reportCache(ctx context.Context, taskID string, detectResult
return nil return nil
} }
// reportPieceMetaRecord // reportPieceMetaRecord report piece meta record
func (re *reporter) reportPieceMetaRecord(ctx context.Context, taskID string, record *storage.PieceMetaRecord, func (re *reporter) reportPieceMetaRecord(ctx context.Context, taskID string, record *storage.PieceMetaRecord, from string) error {
from string) error { // report cache piece status
// report cache pieces status
logger.DownloaderLogger.Info(taskID, logger.DownloaderLogger.Info(taskID,
zap.Uint32("pieceNum", record.PieceNum), zap.Uint32("pieceNum", record.PieceNum),
zap.String("md5", record.Md5), zap.String("md5", record.Md5),
zap.String("from", from)) zap.String("from", from))
return re.progress.PublishPiece(ctx, taskID, convertPieceMeta2SeedPiece(record)) return re.progressManager.PublishPiece(ctx, taskID, convertPieceMeta2SeedPiece(record))
} }
/* /*
helper functions helper functions
*/ */
func convertPieceMeta2SeedPiece(record *storage.PieceMetaRecord) *types.SeedPiece { func convertPieceMeta2SeedPiece(record *storage.PieceMetaRecord) *task.PieceInfo {
return &types.SeedPiece{ return &task.PieceInfo{
PieceStyle: record.PieceStyle, PieceStyle: base.PieceStyle(record.PieceStyle),
PieceNum: uint32(record.PieceNum), PieceNum: record.PieceNum,
PieceMd5: record.Md5, PieceMd5: record.Md5,
PieceRange: record.Range, PieceRange: record.Range,
OriginRange: record.OriginRange, OriginRange: record.OriginRange,

View File

@ -29,13 +29,12 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"go.uber.org/atomic" "go.uber.org/atomic"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors" "d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/gc"
"d7y.io/dragonfly/v2/cdn/storedriver" "d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/storedriver/local" "d7y.io/dragonfly/v2/cdn/storedriver/local"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage" "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/supervisor/gc" "d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/cdn/types"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/synclock" "d7y.io/dragonfly/v2/pkg/synclock"
"d7y.io/dragonfly/v2/pkg/unit" "d7y.io/dragonfly/v2/pkg/unit"
@ -45,8 +44,8 @@ import (
const StorageMode = storage.DiskStorageMode const StorageMode = storage.DiskStorageMode
var ( var (
_ gc.Executor = (*diskStorageMgr)(nil) _ gc.Executor = (*diskStorageManager)(nil)
_ storage.Manager = (*diskStorageMgr)(nil) _ storage.Manager = (*diskStorageManager)(nil)
) )
func init() { func init() {
@ -55,7 +54,7 @@ func init() {
} }
} }
func newStorageManager(cfg *storage.Config) (storage.Manager, error) { func newStorageManager(cfg *config.StorageConfig) (storage.Manager, error) {
if len(cfg.DriverConfigs) != 1 { if len(cfg.DriverConfigs) != 1 {
return nil, fmt.Errorf("disk storage manager should have only one disk driver, cfg's driver number is wrong config: %v", cfg) return nil, fmt.Errorf("disk storage manager should have only one disk driver, cfg's driver number is wrong config: %v", cfg)
} }
@ -64,22 +63,22 @@ func newStorageManager(cfg *storage.Config) (storage.Manager, error) {
return nil, fmt.Errorf("can not find disk driver for disk storage manager, config is %#v", cfg) return nil, fmt.Errorf("can not find disk driver for disk storage manager, config is %#v", cfg)
} }
storageMgr := &diskStorageMgr{ storageManager := &diskStorageManager{
cfg: cfg, cfg: cfg,
diskDriver: diskDriver, diskDriver: diskDriver,
} }
gc.Register("diskStorage", cfg.GCInitialDelay, cfg.GCInterval, storageMgr) gc.Register("diskStorage", cfg.GCInitialDelay, cfg.GCInterval, storageManager)
return storageMgr, nil return storageManager, nil
} }
type diskStorageMgr struct { type diskStorageManager struct {
cfg *storage.Config cfg *config.StorageConfig
diskDriver storedriver.Driver diskDriver storedriver.Driver
cleaner *storage.Cleaner cleaner storage.Cleaner
taskMgr supervisor.SeedTaskMgr taskManager task.Manager
} }
func (s *diskStorageMgr) getDefaultGcConfig() *storage.GCConfig { func (s *diskStorageManager) getDefaultGcConfig() *config.GCConfig {
totalSpace, err := s.diskDriver.GetTotalSpace() totalSpace, err := s.diskDriver.GetTotalSpace()
if err != nil { if err != nil {
logger.GcLogger.With("type", "disk").Errorf("get total space of disk: %v", err) logger.GcLogger.With("type", "disk").Errorf("get total space of disk: %v", err)
@ -88,7 +87,7 @@ func (s *diskStorageMgr) getDefaultGcConfig() *storage.GCConfig {
if totalSpace > 0 && totalSpace/4 < yongGcThreshold { if totalSpace > 0 && totalSpace/4 < yongGcThreshold {
yongGcThreshold = totalSpace / 4 yongGcThreshold = totalSpace / 4
} }
return &storage.GCConfig{ return &config.GCConfig{
YoungGCThreshold: yongGcThreshold, YoungGCThreshold: yongGcThreshold,
FullGCThreshold: 25 * unit.GB, FullGCThreshold: 25 * unit.GB,
IntervalThreshold: 2 * time.Hour, IntervalThreshold: 2 * time.Hour,
@ -96,21 +95,21 @@ func (s *diskStorageMgr) getDefaultGcConfig() *storage.GCConfig {
} }
} }
func (s *diskStorageMgr) Initialize(taskMgr supervisor.SeedTaskMgr) { func (s *diskStorageManager) Initialize(taskManager task.Manager) {
s.taskMgr = taskMgr s.taskManager = taskManager
diskGcConfig := s.cfg.DriverConfigs[local.DiskDriverName].GCConfig diskGcConfig := s.cfg.DriverConfigs[local.DiskDriverName].GCConfig
if diskGcConfig == nil { if diskGcConfig == nil {
diskGcConfig = s.getDefaultGcConfig() diskGcConfig = s.getDefaultGcConfig()
logger.GcLogger.With("type", "disk").Warnf("disk gc config is nil, use default gcConfig: %v", diskGcConfig) logger.GcLogger.With("type", "disk").Warnf("disk gc config is nil, use default gcConfig: %v", diskGcConfig)
} }
s.cleaner, _ = storage.NewStorageCleaner(diskGcConfig, s.diskDriver, s, taskMgr) s.cleaner, _ = storage.NewStorageCleaner(diskGcConfig, s.diskDriver, s, taskManager)
} }
func (s *diskStorageMgr) AppendPieceMetadata(taskID string, pieceRecord *storage.PieceMetaRecord) error { func (s *diskStorageManager) AppendPieceMetadata(taskID string, pieceRecord *storage.PieceMetaRecord) error {
return s.diskDriver.PutBytes(storage.GetAppendPieceMetadataRaw(taskID), []byte(pieceRecord.String()+"\n")) return s.diskDriver.PutBytes(storage.GetAppendPieceMetadataRaw(taskID), []byte(pieceRecord.String()+"\n"))
} }
func (s *diskStorageMgr) ReadPieceMetaRecords(taskID string) ([]*storage.PieceMetaRecord, error) { func (s *diskStorageManager) ReadPieceMetaRecords(taskID string) ([]*storage.PieceMetaRecord, error) {
readBytes, err := s.diskDriver.GetBytes(storage.GetPieceMetadataRaw(taskID)) readBytes, err := s.diskDriver.GetBytes(storage.GetPieceMetadataRaw(taskID))
if err != nil { if err != nil {
return nil, err return nil, err
@ -127,7 +126,7 @@ func (s *diskStorageMgr) ReadPieceMetaRecords(taskID string) ([]*storage.PieceMe
return result, nil return result, nil
} }
func (s *diskStorageMgr) GC() error { func (s *diskStorageManager) GC() error {
logger.GcLogger.With("type", "disk").Info("start the disk storage gc job") logger.GcLogger.With("type", "disk").Info("start the disk storage gc job")
gcTaskIDs, err := s.cleaner.GC("disk", false) gcTaskIDs, err := s.cleaner.GC("disk", false)
if err != nil { if err != nil {
@ -137,7 +136,7 @@ func (s *diskStorageMgr) GC() error {
for _, taskID := range gcTaskIDs { for _, taskID := range gcTaskIDs {
synclock.Lock(taskID, false) synclock.Lock(taskID, false)
// try to ensure the taskID is not using again // try to ensure the taskID is not using again
if _, exist := s.taskMgr.Exist(taskID); exist { if _, exist := s.taskManager.Exist(taskID); exist {
synclock.UnLock(taskID, false) synclock.UnLock(taskID, false)
continue continue
} }
@ -153,14 +152,14 @@ func (s *diskStorageMgr) GC() error {
return nil return nil
} }
func (s *diskStorageMgr) WriteDownloadFile(taskID string, offset int64, len int64, data io.Reader) error { func (s *diskStorageManager) WriteDownloadFile(taskID string, offset int64, len int64, data io.Reader) error {
raw := storage.GetDownloadRaw(taskID) raw := storage.GetDownloadRaw(taskID)
raw.Offset = offset raw.Offset = offset
raw.Length = len raw.Length = len
return s.diskDriver.Put(raw, data) return s.diskDriver.Put(raw, data)
} }
func (s *diskStorageMgr) ReadFileMetadata(taskID string) (*storage.FileMetadata, error) { func (s *diskStorageManager) ReadFileMetadata(taskID string) (*storage.FileMetadata, error) {
bytes, err := s.diskDriver.GetBytes(storage.GetTaskMetadataRaw(taskID)) bytes, err := s.diskDriver.GetBytes(storage.GetTaskMetadataRaw(taskID))
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "get metadata bytes") return nil, errors.Wrapf(err, "get metadata bytes")
@ -173,7 +172,7 @@ func (s *diskStorageMgr) ReadFileMetadata(taskID string) (*storage.FileMetadata,
return metadata, nil return metadata, nil
} }
func (s *diskStorageMgr) WriteFileMetadata(taskID string, metadata *storage.FileMetadata) error { func (s *diskStorageManager) WriteFileMetadata(taskID string, metadata *storage.FileMetadata) error {
data, err := json.Marshal(metadata) data, err := json.Marshal(metadata)
if err != nil { if err != nil {
return errors.Wrapf(err, "marshal metadata") return errors.Wrapf(err, "marshal metadata")
@ -181,7 +180,7 @@ func (s *diskStorageMgr) WriteFileMetadata(taskID string, metadata *storage.File
return s.diskDriver.PutBytes(storage.GetTaskMetadataRaw(taskID), data) return s.diskDriver.PutBytes(storage.GetTaskMetadataRaw(taskID), data)
} }
func (s *diskStorageMgr) WritePieceMetaRecords(taskID string, records []*storage.PieceMetaRecord) error { func (s *diskStorageManager) WritePieceMetaRecords(taskID string, records []*storage.PieceMetaRecord) error {
recordStrs := make([]string, 0, len(records)) recordStrs := make([]string, 0, len(records))
for i := range records { for i := range records {
recordStrs = append(recordStrs, records[i].String()) recordStrs = append(recordStrs, records[i].String())
@ -192,19 +191,19 @@ func (s *diskStorageMgr) WritePieceMetaRecords(taskID string, records []*storage
return s.diskDriver.PutBytes(pieceRaw, []byte(strings.Join(recordStrs, "\n"))) return s.diskDriver.PutBytes(pieceRaw, []byte(strings.Join(recordStrs, "\n")))
} }
func (s *diskStorageMgr) ReadPieceMetaBytes(taskID string) ([]byte, error) { func (s *diskStorageManager) ReadPieceMetaBytes(taskID string) ([]byte, error) {
return s.diskDriver.GetBytes(storage.GetPieceMetadataRaw(taskID)) return s.diskDriver.GetBytes(storage.GetPieceMetadataRaw(taskID))
} }
func (s *diskStorageMgr) ReadDownloadFile(taskID string) (io.ReadCloser, error) { func (s *diskStorageManager) ReadDownloadFile(taskID string) (io.ReadCloser, error) {
return s.diskDriver.Get(storage.GetDownloadRaw(taskID)) return s.diskDriver.Get(storage.GetDownloadRaw(taskID))
} }
func (s *diskStorageMgr) StatDownloadFile(taskID string) (*storedriver.StorageInfo, error) { func (s *diskStorageManager) StatDownloadFile(taskID string) (*storedriver.StorageInfo, error) {
return s.diskDriver.Stat(storage.GetDownloadRaw(taskID)) return s.diskDriver.Stat(storage.GetDownloadRaw(taskID))
} }
func (s *diskStorageMgr) CreateUploadLink(taskID string) error { func (s *diskStorageManager) CreateUploadLink(taskID string) error {
// create a soft link from the upload file to the download file // create a soft link from the upload file to the download file
if err := fileutils.SymbolicLink(s.diskDriver.GetPath(storage.GetDownloadRaw(taskID)), if err := fileutils.SymbolicLink(s.diskDriver.GetPath(storage.GetDownloadRaw(taskID)),
s.diskDriver.GetPath(storage.GetUploadRaw(taskID))); err != nil { s.diskDriver.GetPath(storage.GetUploadRaw(taskID))); err != nil {
@ -213,31 +212,31 @@ func (s *diskStorageMgr) CreateUploadLink(taskID string) error {
return nil return nil
} }
func (s *diskStorageMgr) DeleteTask(taskID string) error { func (s *diskStorageManager) DeleteTask(taskID string) error {
if err := s.diskDriver.Remove(storage.GetTaskMetadataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { if err := s.diskDriver.Remove(storage.GetTaskMetadataRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err return err
} }
if err := s.diskDriver.Remove(storage.GetPieceMetadataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { if err := s.diskDriver.Remove(storage.GetPieceMetadataRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err return err
} }
if err := s.diskDriver.Remove(storage.GetDownloadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { if err := s.diskDriver.Remove(storage.GetDownloadRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err return err
} }
if err := s.diskDriver.Remove(storage.GetUploadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { if err := s.diskDriver.Remove(storage.GetUploadRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err return err
} }
// try to clean the parent bucket // try to clean the parent bucket
if err := s.diskDriver.Remove(storage.GetParentRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { if err := s.diskDriver.Remove(storage.GetParentRaw(taskID)); err != nil && !os.IsNotExist(err) {
logrus.Warnf("taskID: %s failed remove parent bucket: %v", taskID, err) logrus.Warnf("taskID: %s failed remove parent bucket: %v", taskID, err)
} }
return nil return nil
} }
func (s *diskStorageMgr) ResetRepo(task *types.SeedTask) error { func (s *diskStorageManager) ResetRepo(task *task.SeedTask) error {
return s.DeleteTask(task.TaskID) return s.DeleteTask(task.ID)
} }
func (s *diskStorageMgr) TryFreeSpace(fileLength int64) (bool, error) { func (s *diskStorageManager) TryFreeSpace(fileLength int64) (bool, error) {
freeSpace, err := s.diskDriver.GetFreeSpace() freeSpace, err := s.diskDriver.GetFreeSpace()
if err != nil { if err != nil {
return false, err return false, err
@ -251,7 +250,7 @@ func (s *diskStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
WalkFn: func(filePath string, info os.FileInfo, err error) error { WalkFn: func(filePath string, info os.FileInfo, err error) error {
if fileutils.IsRegular(filePath) { if fileutils.IsRegular(filePath) {
taskID := strings.Split(path.Base(filePath), ".")[0] taskID := strings.Split(path.Base(filePath), ".")[0]
task, exist := s.taskMgr.Exist(taskID) task, exist := s.taskManager.Exist(taskID)
if exist { if exist {
var totalLen int64 = 0 var totalLen int64 = 0
if task.CdnFileLength > 0 { if task.CdnFileLength > 0 {

View File

@ -25,7 +25,7 @@ import (
"d7y.io/dragonfly/v2/cdn/storedriver" "d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage" "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/supervisor/mock" taskMock "d7y.io/dragonfly/v2/cdn/supervisor/mocks/task"
"d7y.io/dragonfly/v2/pkg/unit" "d7y.io/dragonfly/v2/pkg/unit"
) )
@ -34,20 +34,20 @@ func TestDiskStorageMgrSuite(t *testing.T) {
} }
type DiskStorageMgrSuite struct { type DiskStorageMgrSuite struct {
m *diskStorageMgr m *diskStorageManager
suite.Suite suite.Suite
} }
func (suite *DiskStorageMgrSuite) TestTryFreeSpace() { func (suite *DiskStorageMgrSuite) TestTryFreeSpace() {
ctrl := gomock.NewController(suite.T()) ctrl := gomock.NewController(suite.T())
diskDriver := storedriver.NewMockDriver(ctrl) diskDriver := storedriver.NewMockDriver(ctrl)
taskMgr := mock.NewMockSeedTaskMgr(ctrl) taskManager := taskMock.NewMockManager(ctrl)
suite.m = &diskStorageMgr{ suite.m = &diskStorageManager{
diskDriver: diskDriver, diskDriver: diskDriver,
taskMgr: taskMgr, taskManager: taskManager,
} }
diskDriver.EXPECT().GetTotalSpace().Return(100*unit.GB, nil) diskDriver.EXPECT().GetTotalSpace().Return(100*unit.GB, nil)
cleaner, _ := storage.NewStorageCleaner(suite.m.getDefaultGcConfig(), diskDriver, suite.m, taskMgr) cleaner, _ := storage.NewStorageCleaner(suite.m.getDefaultGcConfig(), diskDriver, suite.m, taskManager)
suite.m.cleaner = cleaner suite.m.cleaner = cleaner
tests := []struct { tests := []struct {

View File

@ -29,13 +29,12 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"go.uber.org/atomic" "go.uber.org/atomic"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors" "d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/gc"
"d7y.io/dragonfly/v2/cdn/storedriver" "d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/storedriver/local" "d7y.io/dragonfly/v2/cdn/storedriver/local"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage" "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
"d7y.io/dragonfly/v2/cdn/supervisor/gc" "d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/cdn/types"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/synclock" "d7y.io/dragonfly/v2/pkg/synclock"
"d7y.io/dragonfly/v2/pkg/unit" "d7y.io/dragonfly/v2/pkg/unit"
@ -46,8 +45,8 @@ const StorageMode = storage.HybridStorageMode
const secureLevel = 500 * unit.MB const secureLevel = 500 * unit.MB
var _ storage.Manager = (*hybridStorageMgr)(nil) var _ storage.Manager = (*hybridStorageManager)(nil)
var _ gc.Executor = (*hybridStorageMgr)(nil) var _ gc.Executor = (*hybridStorageManager)(nil)
func init() { func init() {
if err := storage.Register(StorageMode, newStorageManager); err != nil { if err := storage.Register(StorageMode, newStorageManager); err != nil {
@ -56,7 +55,7 @@ func init() {
} }
// NewStorageManager performs initialization for storage manager and return a storage Manager. // NewStorageManager performs initialization for storage manager and return a storage Manager.
func newStorageManager(cfg *storage.Config) (storage.Manager, error) { func newStorageManager(cfg *config.StorageConfig) (storage.Manager, error) {
if len(cfg.DriverConfigs) != 2 { if len(cfg.DriverConfigs) != 2 {
return nil, fmt.Errorf("disk storage manager should have two driver, cfg's driver number is wrong : %v", cfg) return nil, fmt.Errorf("disk storage manager should have two driver, cfg's driver number is wrong : %v", cfg)
} }
@ -68,36 +67,36 @@ func newStorageManager(cfg *storage.Config) (storage.Manager, error) {
if !ok { if !ok {
return nil, fmt.Errorf("can not find memory driver for hybrid storage manager, config %v", cfg) return nil, fmt.Errorf("can not find memory driver for hybrid storage manager, config %v", cfg)
} }
storageMgr := &hybridStorageMgr{ storageManager := &hybridStorageManager{
cfg: cfg, cfg: cfg,
memoryDriver: memoryDriver, memoryDriver: memoryDriver,
diskDriver: diskDriver, diskDriver: diskDriver,
hasShm: true, hasShm: true,
shmSwitch: newShmSwitch(), shmSwitch: newShmSwitch(),
} }
gc.Register("hybridStorage", cfg.GCInitialDelay, cfg.GCInterval, storageMgr) gc.Register("hybridStorage", cfg.GCInitialDelay, cfg.GCInterval, storageManager)
return storageMgr, nil return storageManager, nil
} }
func (h *hybridStorageMgr) Initialize(taskMgr supervisor.SeedTaskMgr) { func (h *hybridStorageManager) Initialize(taskManager task.Manager) {
h.taskMgr = taskMgr h.taskManager = taskManager
diskGcConfig := h.cfg.DriverConfigs[local.DiskDriverName].GCConfig diskGcConfig := h.cfg.DriverConfigs[local.DiskDriverName].GCConfig
if diskGcConfig == nil { if diskGcConfig == nil {
diskGcConfig = h.getDiskDefaultGcConfig() diskGcConfig = h.getDiskDefaultGcConfig()
logger.GcLogger.With("type", "hybrid").Warnf("disk gc config is nil, use default gcConfig: %v", diskGcConfig) logger.GcLogger.With("type", "hybrid").Warnf("disk gc config is nil, use default gcConfig: %v", diskGcConfig)
} }
h.diskDriverCleaner, _ = storage.NewStorageCleaner(diskGcConfig, h.diskDriver, h, taskMgr) h.diskDriverCleaner, _ = storage.NewStorageCleaner(diskGcConfig, h.diskDriver, h, taskManager)
memoryGcConfig := h.cfg.DriverConfigs[local.MemoryDriverName].GCConfig memoryGcConfig := h.cfg.DriverConfigs[local.MemoryDriverName].GCConfig
if memoryGcConfig == nil { if memoryGcConfig == nil {
memoryGcConfig = h.getMemoryDefaultGcConfig() memoryGcConfig = h.getMemoryDefaultGcConfig()
logger.GcLogger.With("type", "hybrid").Warnf("memory gc config is nil, use default gcConfig: %v", diskGcConfig) logger.GcLogger.With("type", "hybrid").Warnf("memory gc config is nil, use default gcConfig: %v", diskGcConfig)
} }
h.memoryDriverCleaner, _ = storage.NewStorageCleaner(memoryGcConfig, h.memoryDriver, h, taskMgr) h.memoryDriverCleaner, _ = storage.NewStorageCleaner(memoryGcConfig, h.memoryDriver, h, taskManager)
logger.GcLogger.With("type", "hybrid").Info("success initialize hybrid cleaners") logger.GcLogger.With("type", "hybrid").Info("success initialize hybrid cleaners")
} }
func (h *hybridStorageMgr) getDiskDefaultGcConfig() *storage.GCConfig { func (h *hybridStorageManager) getDiskDefaultGcConfig() *config.GCConfig {
totalSpace, err := h.diskDriver.GetTotalSpace() totalSpace, err := h.diskDriver.GetTotalSpace()
if err != nil { if err != nil {
logger.GcLogger.With("type", "hybrid").Errorf("failed to get total space of disk: %v", err) logger.GcLogger.With("type", "hybrid").Errorf("failed to get total space of disk: %v", err)
@ -106,7 +105,7 @@ func (h *hybridStorageMgr) getDiskDefaultGcConfig() *storage.GCConfig {
if totalSpace > 0 && totalSpace/4 < yongGcThreshold { if totalSpace > 0 && totalSpace/4 < yongGcThreshold {
yongGcThreshold = totalSpace / 4 yongGcThreshold = totalSpace / 4
} }
return &storage.GCConfig{ return &config.GCConfig{
YoungGCThreshold: yongGcThreshold, YoungGCThreshold: yongGcThreshold,
FullGCThreshold: 25 * unit.GB, FullGCThreshold: 25 * unit.GB,
IntervalThreshold: 2 * time.Hour, IntervalThreshold: 2 * time.Hour,
@ -114,7 +113,7 @@ func (h *hybridStorageMgr) getDiskDefaultGcConfig() *storage.GCConfig {
} }
} }
func (h *hybridStorageMgr) getMemoryDefaultGcConfig() *storage.GCConfig { func (h *hybridStorageManager) getMemoryDefaultGcConfig() *config.GCConfig {
// determine whether the shared cache can be used // determine whether the shared cache can be used
diff := unit.Bytes(0) diff := unit.Bytes(0)
totalSpace, err := h.memoryDriver.GetTotalSpace() totalSpace, err := h.memoryDriver.GetTotalSpace()
@ -127,7 +126,7 @@ func (h *hybridStorageMgr) getMemoryDefaultGcConfig() *storage.GCConfig {
if diff >= totalSpace { if diff >= totalSpace {
h.hasShm = false h.hasShm = false
} }
return &storage.GCConfig{ return &config.GCConfig{
YoungGCThreshold: 10*unit.GB + diff, YoungGCThreshold: 10*unit.GB + diff,
FullGCThreshold: 2*unit.GB + diff, FullGCThreshold: 2*unit.GB + diff,
CleanRatio: 3, CleanRatio: 3,
@ -135,90 +134,34 @@ func (h *hybridStorageMgr) getMemoryDefaultGcConfig() *storage.GCConfig {
} }
} }
type hybridStorageMgr struct { type hybridStorageManager struct {
cfg *storage.Config cfg *config.StorageConfig
memoryDriver storedriver.Driver memoryDriver storedriver.Driver
diskDriver storedriver.Driver diskDriver storedriver.Driver
diskDriverCleaner *storage.Cleaner diskDriverCleaner storage.Cleaner
memoryDriverCleaner *storage.Cleaner memoryDriverCleaner storage.Cleaner
taskMgr supervisor.SeedTaskMgr taskManager task.Manager
shmSwitch *shmSwitch shmSwitch *shmSwitch
hasShm bool // whether enable shm
hasShm bool
} }
func (h *hybridStorageMgr) GC() error { func (h *hybridStorageManager) WriteDownloadFile(taskID string, offset int64, len int64, data io.Reader) error {
logger.GcLogger.With("type", "hybrid").Info("start the hybrid storage gc job")
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
gcTaskIDs, err := h.diskDriverCleaner.GC("hybrid", false)
if err != nil {
logger.GcLogger.With("type", "hybrid").Error("gc disk: failed to get gcTaskIds")
}
realGCCount := h.gcTasks(gcTaskIDs, true)
logger.GcLogger.With("type", "hybrid").Infof("at most %d tasks can be cleaned up from disk, actual gc %d tasks", len(gcTaskIDs), realGCCount)
}()
if h.hasShm {
wg.Add(1)
go func() {
defer wg.Done()
gcTaskIDs, err := h.memoryDriverCleaner.GC("hybrid", false)
logger.GcLogger.With("type", "hybrid").Infof("at most %d tasks can be cleaned up from memory", len(gcTaskIDs))
if err != nil {
logger.GcLogger.With("type", "hybrid").Error("gc memory: failed to get gcTaskIds")
}
h.gcTasks(gcTaskIDs, false)
}()
}
wg.Wait()
return nil
}
func (h *hybridStorageMgr) gcTasks(gcTaskIDs []string, isDisk bool) int {
var realGCCount int
for _, taskID := range gcTaskIDs {
synclock.Lock(taskID, false)
// try to ensure the taskID is not using again
if _, exist := h.taskMgr.Exist(taskID); exist {
synclock.UnLock(taskID, false)
continue
}
realGCCount++
if isDisk {
if err := h.deleteDiskFiles(taskID); err != nil {
logger.GcLogger.With("type", "hybrid").Errorf("gc disk: failed to delete disk files with taskID(%s): %v", taskID, err)
synclock.UnLock(taskID, false)
continue
}
} else {
if err := h.deleteMemoryFiles(taskID); err != nil {
logger.GcLogger.With("type", "hybrid").Errorf("gc memory: failed to delete memory files with taskID(%s): %v", taskID, err)
synclock.UnLock(taskID, false)
continue
}
}
synclock.UnLock(taskID, false)
}
return realGCCount
}
func (h *hybridStorageMgr) WriteDownloadFile(taskID string, offset int64, len int64, data io.Reader) error {
raw := storage.GetDownloadRaw(taskID) raw := storage.GetDownloadRaw(taskID)
raw.Offset = offset raw.Offset = offset
raw.Length = len raw.Length = len
return h.diskDriver.Put(raw, data) return h.diskDriver.Put(raw, data)
} }
func (h *hybridStorageMgr) DeleteTask(taskID string) error { func (h *hybridStorageManager) DeleteTask(taskID string) error {
return h.deleteTaskFiles(taskID, true, true) return h.deleteTaskFiles(taskID, true)
} }
func (h *hybridStorageMgr) ReadDownloadFile(taskID string) (io.ReadCloser, error) { func (h *hybridStorageManager) ReadDownloadFile(taskID string) (io.ReadCloser, error) {
return h.diskDriver.Get(storage.GetDownloadRaw(taskID)) return h.diskDriver.Get(storage.GetDownloadRaw(taskID))
} }
func (h *hybridStorageMgr) ReadPieceMetaRecords(taskID string) ([]*storage.PieceMetaRecord, error) { func (h *hybridStorageManager) ReadPieceMetaRecords(taskID string) ([]*storage.PieceMetaRecord, error) {
readBytes, err := h.diskDriver.GetBytes(storage.GetPieceMetadataRaw(taskID)) readBytes, err := h.diskDriver.GetBytes(storage.GetPieceMetadataRaw(taskID))
if err != nil { if err != nil {
return nil, err return nil, err
@ -235,7 +178,7 @@ func (h *hybridStorageMgr) ReadPieceMetaRecords(taskID string) ([]*storage.Piece
return result, nil return result, nil
} }
func (h *hybridStorageMgr) ReadFileMetadata(taskID string) (*storage.FileMetadata, error) { func (h *hybridStorageManager) ReadFileMetadata(taskID string) (*storage.FileMetadata, error) {
readBytes, err := h.diskDriver.GetBytes(storage.GetTaskMetadataRaw(taskID)) readBytes, err := h.diskDriver.GetBytes(storage.GetTaskMetadataRaw(taskID))
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "get metadata bytes") return nil, errors.Wrapf(err, "get metadata bytes")
@ -248,11 +191,11 @@ func (h *hybridStorageMgr) ReadFileMetadata(taskID string) (*storage.FileMetadat
return metadata, nil return metadata, nil
} }
func (h *hybridStorageMgr) AppendPieceMetadata(taskID string, record *storage.PieceMetaRecord) error { func (h *hybridStorageManager) AppendPieceMetadata(taskID string, record *storage.PieceMetaRecord) error {
return h.diskDriver.PutBytes(storage.GetAppendPieceMetadataRaw(taskID), []byte(record.String()+"\n")) return h.diskDriver.PutBytes(storage.GetAppendPieceMetadataRaw(taskID), []byte(record.String()+"\n"))
} }
func (h *hybridStorageMgr) WriteFileMetadata(taskID string, metadata *storage.FileMetadata) error { func (h *hybridStorageManager) WriteFileMetadata(taskID string, metadata *storage.FileMetadata) error {
data, err := json.Marshal(metadata) data, err := json.Marshal(metadata)
if err != nil { if err != nil {
return errors.Wrapf(err, "marshal metadata") return errors.Wrapf(err, "marshal metadata")
@ -260,7 +203,7 @@ func (h *hybridStorageMgr) WriteFileMetadata(taskID string, metadata *storage.Fi
return h.diskDriver.PutBytes(storage.GetTaskMetadataRaw(taskID), data) return h.diskDriver.PutBytes(storage.GetTaskMetadataRaw(taskID), data)
} }
func (h *hybridStorageMgr) WritePieceMetaRecords(taskID string, records []*storage.PieceMetaRecord) error { func (h *hybridStorageManager) WritePieceMetaRecords(taskID string, records []*storage.PieceMetaRecord) error {
recordStrings := make([]string, 0, len(records)) recordStrings := make([]string, 0, len(records))
for i := range records { for i := range records {
recordStrings = append(recordStrings, records[i].String()) recordStrings = append(recordStrings, records[i].String())
@ -268,36 +211,37 @@ func (h *hybridStorageMgr) WritePieceMetaRecords(taskID string, records []*stora
return h.diskDriver.PutBytes(storage.GetPieceMetadataRaw(taskID), []byte(strings.Join(recordStrings, "\n"))) return h.diskDriver.PutBytes(storage.GetPieceMetadataRaw(taskID), []byte(strings.Join(recordStrings, "\n")))
} }
func (h *hybridStorageMgr) CreateUploadLink(taskID string) error { func (h *hybridStorageManager) ResetRepo(seedTask *task.SeedTask) error {
if err := h.deleteTaskFiles(seedTask.ID, true); err != nil {
return errors.Errorf("delete task %s files: %v", seedTask.ID, err)
}
// 判断是否有足够空间存放
if shmPath, err := h.tryShmSpace(seedTask.RawURL, seedTask.ID, seedTask.SourceFileLength); err != nil {
if _, err := os.Create(h.diskDriver.GetPath(storage.GetDownloadRaw(seedTask.ID))); err != nil {
return err
}
} else {
if err := fileutils.SymbolicLink(shmPath, h.diskDriver.GetPath(storage.GetDownloadRaw(seedTask.ID))); err != nil {
return err
}
}
// create a soft link from the upload file to the download file // create a soft link from the upload file to the download file
if err := fileutils.SymbolicLink(h.diskDriver.GetPath(storage.GetDownloadRaw(taskID)), if err := fileutils.SymbolicLink(h.diskDriver.GetPath(storage.GetDownloadRaw(seedTask.ID)),
h.diskDriver.GetPath(storage.GetUploadRaw(taskID))); err != nil { h.diskDriver.GetPath(storage.GetUploadRaw(seedTask.ID))); err != nil {
return err return err
} }
return nil return nil
} }
func (h *hybridStorageMgr) ResetRepo(task *types.SeedTask) error { func (h *hybridStorageManager) GetDownloadPath(rawFunc *storedriver.Raw) string {
if err := h.deleteTaskFiles(task.TaskID, false, true); err != nil {
task.Log().Errorf("reset repo: failed to delete task files: %v", err)
}
// 判断是否有足够空间存放
shmPath, err := h.tryShmSpace(task.URL, task.TaskID, task.SourceFileLength)
if err == nil {
return fileutils.SymbolicLink(shmPath, h.diskDriver.GetPath(storage.GetDownloadRaw(task.TaskID)))
}
return nil
}
func (h *hybridStorageMgr) GetDownloadPath(rawFunc *storedriver.Raw) string {
return h.diskDriver.GetPath(rawFunc) return h.diskDriver.GetPath(rawFunc)
} }
func (h *hybridStorageMgr) StatDownloadFile(taskID string) (*storedriver.StorageInfo, error) { func (h *hybridStorageManager) StatDownloadFile(taskID string) (*storedriver.StorageInfo, error) {
return h.diskDriver.Stat(storage.GetDownloadRaw(taskID)) return h.diskDriver.Stat(storage.GetDownloadRaw(taskID))
} }
func (h *hybridStorageMgr) TryFreeSpace(fileLength int64) (bool, error) { func (h *hybridStorageManager) TryFreeSpace(fileLength int64) (bool, error) {
diskFreeSpace, err := h.diskDriver.GetFreeSpace() diskFreeSpace, err := h.diskDriver.GetFreeSpace()
if err != nil { if err != nil {
return false, err return false, err
@ -311,13 +255,13 @@ func (h *hybridStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
WalkFn: func(filePath string, info os.FileInfo, err error) error { WalkFn: func(filePath string, info os.FileInfo, err error) error {
if fileutils.IsRegular(filePath) { if fileutils.IsRegular(filePath) {
taskID := strings.Split(path.Base(filePath), ".")[0] taskID := strings.Split(path.Base(filePath), ".")[0]
task, exist := h.taskMgr.Exist(taskID) seedTask, exist := h.taskManager.Exist(taskID)
if exist { if exist {
var totalLen int64 = 0 var totalLen int64 = 0
if task.CdnFileLength > 0 { if seedTask.CdnFileLength > 0 {
totalLen = task.CdnFileLength totalLen = seedTask.CdnFileLength
} else { } else {
totalLen = task.SourceFileLength totalLen = seedTask.SourceFileLength
} }
if totalLen > 0 { if totalLen > 0 {
remainder.Add(totalLen - info.Size()) remainder.Add(totalLen - info.Size())
@ -331,7 +275,7 @@ func (h *hybridStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
return false, err return false, err
} }
enoughSpace := diskFreeSpace.ToNumber()-remainder.Load() > fileLength enoughSpace := diskFreeSpace.ToNumber()-remainder.Load() > (fileLength + int64(5*unit.GB))
if !enoughSpace { if !enoughSpace {
if _, err := h.diskDriverCleaner.GC("hybrid", true); err != nil { if _, err := h.diskDriverCleaner.GC("hybrid", true); err != nil {
return false, err return false, err
@ -345,7 +289,7 @@ func (h *hybridStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
if err != nil { if err != nil {
return false, err return false, err
} }
enoughSpace = diskFreeSpace.ToNumber()-remainder.Load() > fileLength enoughSpace = diskFreeSpace.ToNumber()-remainder.Load() > (fileLength + int64(5*unit.GB))
} }
if !enoughSpace { if !enoughSpace {
return false, nil return false, nil
@ -354,28 +298,26 @@ func (h *hybridStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
return true, nil return true, nil
} }
func (h *hybridStorageMgr) deleteDiskFiles(taskID string) error { func (h *hybridStorageManager) deleteDiskFiles(taskID string) error {
return h.deleteTaskFiles(taskID, true, true) return h.deleteTaskFiles(taskID, true)
} }
func (h *hybridStorageMgr) deleteMemoryFiles(taskID string) error { func (h *hybridStorageManager) deleteMemoryFiles(taskID string) error {
return h.deleteTaskFiles(taskID, true, false) return h.deleteTaskFiles(taskID, false)
} }
func (h *hybridStorageMgr) deleteTaskFiles(taskID string, deleteUploadPath bool, deleteHardLink bool) error { func (h *hybridStorageManager) deleteTaskFiles(taskID string, deleteHardLink bool) error {
// delete task file data // delete task file data
if err := h.diskDriver.Remove(storage.GetDownloadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { if err := h.diskDriver.Remove(storage.GetDownloadRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err return err
} }
// delete memory file // delete memory file
if err := h.memoryDriver.Remove(storage.GetDownloadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { if err := h.memoryDriver.Remove(storage.GetDownloadRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err return err
} }
// delete upload file
if deleteUploadPath { if err := h.diskDriver.Remove(storage.GetUploadRaw(taskID)); err != nil && !os.IsNotExist(err) {
if err := h.diskDriver.Remove(storage.GetUploadRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { return err
return err
}
} }
exists := h.diskDriver.Exits(getHardLinkRaw(taskID)) exists := h.diskDriver.Exits(getHardLinkRaw(taskID))
if !deleteHardLink && exists { if !deleteHardLink && exists {
@ -383,40 +325,40 @@ func (h *hybridStorageMgr) deleteTaskFiles(taskID string, deleteUploadPath bool,
return err return err
} }
} else { } else {
if err := h.diskDriver.Remove(getHardLinkRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { if err := h.diskDriver.Remove(getHardLinkRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err return err
} }
// deleteTaskFiles delete files associated with taskID // deleteTaskFiles delete files associated with taskID
if err := h.diskDriver.Remove(storage.GetTaskMetadataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { if err := h.diskDriver.Remove(storage.GetTaskMetadataRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err return err
} }
// delete piece meta data // delete piece meta data
if err := h.diskDriver.Remove(storage.GetPieceMetadataRaw(taskID)); err != nil && !cdnerrors.IsFileNotExist(err) { if err := h.diskDriver.Remove(storage.GetPieceMetadataRaw(taskID)); err != nil && !os.IsNotExist(err) {
return err return err
} }
} }
// try to clean the parent bucket // try to clean the parent bucket
if err := h.diskDriver.Remove(storage.GetParentRaw(taskID)); err != nil && if err := h.diskDriver.Remove(storage.GetParentRaw(taskID)); err != nil &&
!cdnerrors.IsFileNotExist(err) { !os.IsNotExist(err) {
logger.WithTaskID(taskID).Warnf("failed to remove parent bucket: %v", err) logger.WithTaskID(taskID).Warnf("failed to remove parent bucket: %v", err)
} }
return nil return nil
} }
func (h *hybridStorageMgr) tryShmSpace(url, taskID string, fileLength int64) (string, error) { func (h *hybridStorageManager) tryShmSpace(url, taskID string, fileLength int64) (string, error) {
if h.shmSwitch.check(url, fileLength) && h.hasShm { if h.shmSwitch.check(url, fileLength) && h.hasShm {
remainder := atomic.NewInt64(0) remainder := atomic.NewInt64(0)
if err := h.memoryDriver.Walk(&storedriver.Raw{ if err := h.memoryDriver.Walk(&storedriver.Raw{
WalkFn: func(filePath string, info os.FileInfo, err error) error { WalkFn: func(filePath string, info os.FileInfo, err error) error {
if fileutils.IsRegular(filePath) { if fileutils.IsRegular(filePath) {
taskID := strings.Split(path.Base(filePath), ".")[0] taskID := strings.Split(path.Base(filePath), ".")[0]
task, exist := h.taskMgr.Exist(taskID) seedTask, exist := h.taskManager.Exist(taskID)
if exist { if exist {
var totalLen int64 = 0 var totalLen int64 = 0
if task.CdnFileLength > 0 { if seedTask.CdnFileLength > 0 {
totalLen = task.CdnFileLength totalLen = seedTask.CdnFileLength
} else { } else {
totalLen = task.SourceFileLength totalLen = seedTask.SourceFileLength
} }
if totalLen > 0 { if totalLen > 0 {
remainder.Add(totalLen - info.Size()) remainder.Add(totalLen - info.Size())
@ -451,7 +393,63 @@ func (h *hybridStorageMgr) tryShmSpace(url, taskID string, fileLength int64) (st
return "", fmt.Errorf("shared memory is not allowed") return "", fmt.Errorf("shared memory is not allowed")
} }
func (h *hybridStorageMgr) getMemoryUsableSpace() unit.Bytes { func (h *hybridStorageManager) GC() error {
logger.GcLogger.With("type", "hybrid").Info("start the hybrid storage gc job")
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
gcTaskIDs, err := h.diskDriverCleaner.GC("hybrid", false)
if err != nil {
logger.GcLogger.With("type", "hybrid").Error("gc disk: failed to get gcTaskIds")
}
realGCCount := h.gcTasks(gcTaskIDs, true)
logger.GcLogger.With("type", "hybrid").Infof("at most %d tasks can be cleaned up from disk, actual gc %d tasks", len(gcTaskIDs), realGCCount)
}()
if h.hasShm {
wg.Add(1)
go func() {
defer wg.Done()
gcTaskIDs, err := h.memoryDriverCleaner.GC("hybrid", false)
logger.GcLogger.With("type", "hybrid").Infof("at most %d tasks can be cleaned up from memory", len(gcTaskIDs))
if err != nil {
logger.GcLogger.With("type", "hybrid").Error("gc memory: failed to get gcTaskIds")
}
h.gcTasks(gcTaskIDs, false)
}()
}
wg.Wait()
return nil
}
func (h *hybridStorageManager) gcTasks(gcTaskIDs []string, isDisk bool) int {
var realGCCount int
for _, taskID := range gcTaskIDs {
// try to ensure the taskID is not using again
if _, exist := h.taskManager.Exist(taskID); exist {
continue
}
realGCCount++
synclock.Lock(taskID, false)
if isDisk {
if err := h.deleteDiskFiles(taskID); err != nil {
logger.GcLogger.With("type", "hybrid").Errorf("gc disk: failed to delete disk files with taskID(%s): %v", taskID, err)
synclock.UnLock(taskID, false)
continue
}
} else {
if err := h.deleteMemoryFiles(taskID); err != nil {
logger.GcLogger.With("type", "hybrid").Errorf("gc memory: failed to delete memory files with taskID(%s): %v", taskID, err)
synclock.UnLock(taskID, false)
continue
}
}
synclock.UnLock(taskID, false)
}
return realGCCount
}
func (h *hybridStorageManager) getMemoryUsableSpace() unit.Bytes {
totalSize, freeSize, err := h.memoryDriver.GetTotalAndFreeSpace() totalSize, freeSize, err := h.memoryDriver.GetTotalAndFreeSpace()
if err != nil { if err != nil {
logger.GcLogger.With("type", "hybrid").Errorf("failed to get total and free space of memory: %v", err) logger.GcLogger.With("type", "hybrid").Errorf("failed to get total and free space of memory: %v", err)

View File

@ -9,9 +9,8 @@ import (
reflect "reflect" reflect "reflect"
storedriver "d7y.io/dragonfly/v2/cdn/storedriver" storedriver "d7y.io/dragonfly/v2/cdn/storedriver"
supervisor "d7y.io/dragonfly/v2/cdn/supervisor"
storage "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage" storage "d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage"
types "d7y.io/dragonfly/v2/cdn/types" task "d7y.io/dragonfly/v2/cdn/supervisor/task"
gomock "github.com/golang/mock/gomock" gomock "github.com/golang/mock/gomock"
) )
@ -67,7 +66,7 @@ func (mr *MockManagerMockRecorder) DeleteTask(arg0 interface{}) *gomock.Call {
} }
// Initialize mocks base method. // Initialize mocks base method.
func (m *MockManager) Initialize(arg0 supervisor.SeedTaskMgr) { func (m *MockManager) Initialize(arg0 task.Manager) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
m.ctrl.Call(m, "Initialize", arg0) m.ctrl.Call(m, "Initialize", arg0)
} }
@ -124,7 +123,7 @@ func (mr *MockManagerMockRecorder) ReadPieceMetaRecords(arg0 interface{}) *gomoc
} }
// ResetRepo mocks base method. // ResetRepo mocks base method.
func (m *MockManager) ResetRepo(arg0 *types.SeedTask) error { func (m *MockManager) ResetRepo(arg0 *task.SeedTask) error {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ResetRepo", arg0) ret := m.ctrl.Call(m, "ResetRepo", arg0)
ret0, _ := ret[0].(error) ret0, _ := ret[0].(error)

View File

@ -28,6 +28,8 @@ const (
// which is a relative path. // which is a relative path.
DownloadHome = "download" DownloadHome = "download"
// UploadHome is the parent directory where the upload files are stored
// which is a relative path
UploadHome = "upload" UploadHome = "upload"
) )

View File

@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
//go:generate mockgen -destination ./mock/mock_storage_mgr.go -package mock d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage Manager //go:generate mockgen -destination ./mock/mock_storage_manager.go -package mock d7y.io/dragonfly/v2/cdn/supervisor/cdn/storage Manager
package storage package storage
@ -29,21 +29,21 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"gopkg.in/yaml.v3" "gopkg.in/yaml.v3"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/plugins" "d7y.io/dragonfly/v2/cdn/plugins"
"d7y.io/dragonfly/v2/cdn/storedriver" "d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/supervisor" "d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/pkg/unit" "d7y.io/dragonfly/v2/pkg/unit"
"d7y.io/dragonfly/v2/pkg/util/rangeutils" "d7y.io/dragonfly/v2/pkg/util/rangeutils"
) )
type Manager interface { type Manager interface {
Initialize(taskMgr supervisor.SeedTaskMgr) Initialize(taskManager task.Manager)
// ResetRepo reset the storage of task // ResetRepo reset the storage of task
ResetRepo(*types.SeedTask) error ResetRepo(*task.SeedTask) error
// StatDownloadFile stat download file info // StatDownloadFile stat download file info, if task file is not exist on storage, return errTaskNotPersisted
StatDownloadFile(taskID string) (*storedriver.StorageInfo, error) StatDownloadFile(taskID string) (*storedriver.StorageInfo, error)
// WriteDownloadFile write data to download file // WriteDownloadFile write data to download file
@ -83,23 +83,32 @@ type FileMetadata struct {
AccessTime int64 `json:"accessTime"` AccessTime int64 `json:"accessTime"`
Interval int64 `json:"interval"` Interval int64 `json:"interval"`
CdnFileLength int64 `json:"cdnFileLength"` CdnFileLength int64 `json:"cdnFileLength"`
Digest string `json:"digest"`
SourceRealDigest string `json:"sourceRealDigest"` SourceRealDigest string `json:"sourceRealDigest"`
PieceMd5Sign string `json:"pieceMd5Sign"` Tag string `json:"tag"`
ExpireInfo map[string]string `json:"expireInfo"` ExpireInfo map[string]string `json:"expireInfo"`
Finish bool `json:"finish"` Finish bool `json:"finish"`
Success bool `json:"success"` Success bool `json:"success"`
TotalPieceCount int32 `json:"totalPieceCount"` TotalPieceCount int32 `json:"totalPieceCount"`
//PieceMetadataSign string `json:"pieceMetadataSign"` PieceMd5Sign string `json:"pieceMd5Sign"`
Range string `json:"range"`
Filter string `json:"filter"`
} }
// PieceMetaRecord meta data of piece // PieceMetaRecord meta data of piece
type PieceMetaRecord struct { type PieceMetaRecord struct {
PieceNum uint32 `json:"pieceNum"` // piece Num start from 0 // piece Num start from 0
PieceLen uint32 `json:"pieceLen"` // 存储到存储介质的真实长度 PieceNum uint32 `json:"pieceNum"`
Md5 string `json:"md5"` // for transported piece content不是origin source 的 md5是真是存储到存储介质后的md5为了读取数据文件时方便校验完整性 // 存储到存储介质的真实长度
Range *rangeutils.Range `json:"range"` // 下载存储到磁盘的range不是origin source的range.提供给客户端发送下载请求,for transported piece content PieceLen uint32 `json:"pieceLen"`
OriginRange *rangeutils.Range `json:"originRange"` // piece's real offset in the file // for transported piece content不是origin source 的 md5是真是存储到存储介质后的md5为了读取数据文件时方便校验完整性
PieceStyle types.PieceFormat `json:"pieceStyle"` // 1: PlainUnspecified Md5 string `json:"md5"`
// 下载存储到磁盘的range不是origin source的range.提供给客户端发送下载请求,for transported piece content
Range *rangeutils.Range `json:"range"`
// piece's real offset in the file
OriginRange *rangeutils.Range `json:"originRange"`
// 0: PlainUnspecified
PieceStyle int32 `json:"pieceStyle"`
} }
const fieldSeparator = ":" const fieldSeparator = ":"
@ -116,11 +125,11 @@ func ParsePieceMetaRecord(value string) (record *PieceMetaRecord, err error) {
} }
}() }()
fields := strings.Split(value, fieldSeparator) fields := strings.Split(value, fieldSeparator)
pieceNum, err := strconv.ParseInt(fields[0], 10, 32) pieceNum, err := strconv.ParseUint(fields[0], 10, 32)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "invalid pieceNum: %s", fields[0]) return nil, errors.Wrapf(err, "invalid pieceNum: %s", fields[0])
} }
pieceLen, err := strconv.ParseInt(fields[1], 10, 32) pieceLen, err := strconv.ParseUint(fields[1], 10, 32)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "invalid pieceLen: %s", fields[1]) return nil, errors.Wrapf(err, "invalid pieceLen: %s", fields[1])
} }
@ -143,7 +152,7 @@ func ParsePieceMetaRecord(value string) (record *PieceMetaRecord, err error) {
Md5: md5, Md5: md5,
Range: pieceRange, Range: pieceRange,
OriginRange: originRange, OriginRange: originRange,
PieceStyle: types.PieceFormat(pieceStyle), PieceStyle: int32(pieceStyle),
}, nil }, nil
} }
@ -162,7 +171,7 @@ func (m *managerPlugin) Name() string {
return m.name return m.name
} }
func (m *managerPlugin) ResetRepo(task *types.SeedTask) error { func (m *managerPlugin) ResetRepo(task *task.SeedTask) error {
return m.instance.ResetRepo(task) return m.instance.ResetRepo(task)
} }
@ -203,7 +212,7 @@ func (m *managerPlugin) DeleteTask(taskID string) error {
} }
// ManagerBuilder is a function that creates a new storage manager plugin instant with the giving conf. // ManagerBuilder is a function that creates a new storage manager plugin instant with the giving conf.
type ManagerBuilder func(cfg *Config) (Manager, error) type ManagerBuilder func(cfg *config.StorageConfig) (Manager, error)
// Register defines an interface to register a storage manager with specified name. // Register defines an interface to register a storage manager with specified name.
// All storage managers should call this function to register itself to the storage manager factory. // All storage managers should call this function to register itself to the storage manager factory.
@ -211,7 +220,7 @@ func Register(name string, builder ManagerBuilder) error {
name = strings.ToLower(name) name = strings.ToLower(name)
// plugin builder // plugin builder
var f = func(conf interface{}) (plugins.Plugin, error) { var f = func(conf interface{}) (plugins.Plugin, error) {
cfg := &Config{} cfg := &config.StorageConfig{}
decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.ComposeDecodeHookFunc(func(from, to reflect.Type, v interface{}) (interface{}, error) { DecodeHook: mapstructure.ComposeDecodeHookFunc(func(from, to reflect.Type, v interface{}) (interface{}, error) {
switch to { switch to {
@ -241,7 +250,7 @@ func Register(name string, builder ManagerBuilder) error {
return plugins.RegisterPluginBuilder(plugins.StorageManagerPlugin, name, f) return plugins.RegisterPluginBuilder(plugins.StorageManagerPlugin, name, f)
} }
func newManagerPlugin(name string, builder ManagerBuilder, cfg *Config) (plugins.Plugin, error) { func newManagerPlugin(name string, builder ManagerBuilder, cfg *config.StorageConfig) (plugins.Plugin, error) {
if name == "" || builder == nil { if name == "" || builder == nil {
return nil, fmt.Errorf("storage manager plugin's name and builder cannot be nil") return nil, fmt.Errorf("storage manager plugin's name and builder cannot be nil")
} }
@ -266,24 +275,6 @@ func Get(name string) (Manager, bool) {
return v.(*managerPlugin).instance, true return v.(*managerPlugin).instance, true
} }
type Config struct {
GCInitialDelay time.Duration `yaml:"gcInitialDelay"`
GCInterval time.Duration `yaml:"gcInterval"`
DriverConfigs map[string]*DriverConfig `yaml:"driverConfigs"`
}
type DriverConfig struct {
GCConfig *GCConfig `yaml:"gcConfig"`
}
// GCConfig gc config
type GCConfig struct {
YoungGCThreshold unit.Bytes `yaml:"youngGCThreshold"`
FullGCThreshold unit.Bytes `yaml:"fullGCThreshold"`
CleanRatio int `yaml:"cleanRatio"`
IntervalThreshold time.Duration `yaml:"intervalThreshold"`
}
const ( const (
HybridStorageMode = "hybrid" HybridStorageMode = "hybrid"
DiskStorageMode = "disk" DiskStorageMode = "disk"

View File

@ -25,33 +25,37 @@ import (
"github.com/emirpasic/gods/maps/treemap" "github.com/emirpasic/gods/maps/treemap"
godsutils "github.com/emirpasic/gods/utils" godsutils "github.com/emirpasic/gods/utils"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors" "d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/storedriver" "d7y.io/dragonfly/v2/cdn/storedriver"
"d7y.io/dragonfly/v2/cdn/supervisor" "d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/util/timeutils" "d7y.io/dragonfly/v2/pkg/util/timeutils"
) )
type Cleaner struct { type Cleaner interface {
cfg *GCConfig GC(storagePattern string, force bool) ([]string, error)
driver storedriver.Driver
taskMgr supervisor.SeedTaskMgr
storageMgr Manager
} }
func NewStorageCleaner(cfg *GCConfig, driver storedriver.Driver, storageMgr Manager, taskMgr supervisor.SeedTaskMgr) (*Cleaner, error) { type cleaner struct {
return &Cleaner{ cfg *config.GCConfig
cfg: cfg, driver storedriver.Driver
driver: driver, taskManager task.Manager
taskMgr: taskMgr, storageManager Manager
storageMgr: storageMgr, }
func NewStorageCleaner(cfg *config.GCConfig, driver storedriver.Driver, storageManager Manager, taskManager task.Manager) (Cleaner, error) {
return &cleaner{
cfg: cfg,
driver: driver,
taskManager: taskManager,
storageManager: storageManager,
}, nil }, nil
} }
func (cleaner *Cleaner) GC(storagePattern string, force bool) ([]string, error) { func (cleaner *cleaner) GC(storagePattern string, force bool) ([]string, error) {
freeSpace, err := cleaner.driver.GetFreeSpace() freeSpace, err := cleaner.driver.GetFreeSpace()
if err != nil { if err != nil {
if cdnerrors.IsFileNotExist(err) { if os.IsNotExist(err) {
err = cleaner.driver.CreateBaseDir() err = cleaner.driver.CreateBaseDir()
if err != nil { if err != nil {
return nil, err return nil, err
@ -74,7 +78,7 @@ func (cleaner *Cleaner) GC(storagePattern string, force bool) ([]string, error)
} }
} }
logger.GcLogger.With("type", storagePattern).Debugf("start to exec gc with fullGC: %t", fullGC) logger.GcLogger.With("type", storagePattern).Debugf("storage is insufficient, start to exec gc with fullGC: %t", fullGC)
gapTasks := treemap.NewWith(godsutils.Int64Comparator) gapTasks := treemap.NewWith(godsutils.Int64Comparator)
intervalTasks := treemap.NewWith(godsutils.Int64Comparator) intervalTasks := treemap.NewWith(godsutils.Int64Comparator)
@ -100,8 +104,8 @@ func (cleaner *Cleaner) GC(storagePattern string, force bool) ([]string, error)
} }
walkTaskIds[taskID] = true walkTaskIds[taskID] = true
// we should return directly when we success to get info which means it is being used // we should return directly when success to get info which means it is being used
if _, exist := cleaner.taskMgr.Exist(taskID); exist { if _, exist := cleaner.taskManager.Exist(taskID); exist {
return nil return nil
} }
@ -111,13 +115,13 @@ func (cleaner *Cleaner) GC(storagePattern string, force bool) ([]string, error)
return nil return nil
} }
metadata, err := cleaner.storageMgr.ReadFileMetadata(taskID) metadata, err := cleaner.storageManager.ReadFileMetadata(taskID)
if err != nil || metadata == nil { if err != nil || metadata == nil {
logger.GcLogger.With("type", storagePattern).Debugf("taskID: %s, failed to get metadata: %v", taskID, err) logger.GcLogger.With("type", storagePattern).Debugf("taskID: %s, failed to get metadata: %v", taskID, err)
gcTaskIDs = append(gcTaskIDs, taskID) gcTaskIDs = append(gcTaskIDs, taskID)
return nil return nil
} }
// put taskId into gapTasks or intervalTasks which will sort by some rules // put taskID into gapTasks or intervalTasks which will sort by some rules
if err := cleaner.sortInert(gapTasks, intervalTasks, metadata); err != nil { if err := cleaner.sortInert(gapTasks, intervalTasks, metadata); err != nil {
logger.GcLogger.With("type", storagePattern).Errorf("failed to parse inert metadata(%#v): %v", metadata, err) logger.GcLogger.With("type", storagePattern).Errorf("failed to parse inert metadata(%#v): %v", metadata, err)
} }
@ -138,12 +142,12 @@ func (cleaner *Cleaner) GC(storagePattern string, force bool) ([]string, error)
return gcTaskIDs, nil return gcTaskIDs, nil
} }
func (cleaner *Cleaner) sortInert(gapTasks, intervalTasks *treemap.Map, metadata *FileMetadata) error { func (cleaner *cleaner) sortInert(gapTasks, intervalTasks *treemap.Map, metadata *FileMetadata) error {
gap := timeutils.CurrentTimeMillis() - metadata.AccessTime gap := timeutils.CurrentTimeMillis() - metadata.AccessTime
if metadata.Interval > 0 && if metadata.Interval > 0 &&
gap <= metadata.Interval+(int64(cleaner.cfg.IntervalThreshold.Seconds())*int64(time.Millisecond)) { gap <= metadata.Interval+(int64(cleaner.cfg.IntervalThreshold.Seconds())*int64(time.Millisecond)) {
info, err := cleaner.storageMgr.StatDownloadFile(metadata.TaskID) info, err := cleaner.storageManager.StatDownloadFile(metadata.TaskID)
if err != nil { if err != nil {
return err return err
} }
@ -168,7 +172,7 @@ func (cleaner *Cleaner) sortInert(gapTasks, intervalTasks *treemap.Map, metadata
return nil return nil
} }
func (cleaner *Cleaner) getGCTasks(gapTasks, intervalTasks *treemap.Map) []string { func (cleaner *cleaner) getGCTasks(gapTasks, intervalTasks *treemap.Map) []string {
var gcTasks = make([]string, 0) var gcTasks = make([]string, 0)
for _, v := range gapTasks.Values() { for _, v := range gapTasks.Values() {

View File

@ -1,40 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//go:generate mockgen -destination ./mock/mock_cdn_mgr.go -package mock d7y.io/dragonfly/v2/cdn/supervisor CDNMgr
package supervisor
import (
"context"
"d7y.io/dragonfly/v2/cdn/types"
)
// CDNMgr as an interface defines all operations against CDN and
// operates on the underlying files stored on the local disk, etc.
type CDNMgr interface {
// TriggerCDN will trigger CDN to download the resource from sourceUrl.
TriggerCDN(context.Context, *types.SeedTask) (*types.SeedTask, error)
// Delete the cdn meta with specified taskID.
// The file on the disk will be deleted when the force is true.
Delete(string) error
// TryFreeSpace checks if the free space of the storage is larger than the fileLength.
TryFreeSpace(fileLength int64) (bool, error)
}

View File

@ -1,80 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: d7y.io/dragonfly/v2/cdn/supervisor (interfaces: CDNMgr)
// Package mock is a generated GoMock package.
package mock
import (
context "context"
reflect "reflect"
types "d7y.io/dragonfly/v2/cdn/types"
gomock "github.com/golang/mock/gomock"
)
// MockCDNMgr is a mock of CDNMgr interface.
type MockCDNMgr struct {
ctrl *gomock.Controller
recorder *MockCDNMgrMockRecorder
}
// MockCDNMgrMockRecorder is the mock recorder for MockCDNMgr.
type MockCDNMgrMockRecorder struct {
mock *MockCDNMgr
}
// NewMockCDNMgr creates a new mock instance.
func NewMockCDNMgr(ctrl *gomock.Controller) *MockCDNMgr {
mock := &MockCDNMgr{ctrl: ctrl}
mock.recorder = &MockCDNMgrMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockCDNMgr) EXPECT() *MockCDNMgrMockRecorder {
return m.recorder
}
// Delete mocks base method.
func (m *MockCDNMgr) Delete(arg0 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockCDNMgrMockRecorder) Delete(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockCDNMgr)(nil).Delete), arg0)
}
// TriggerCDN mocks base method.
func (m *MockCDNMgr) TriggerCDN(arg0 context.Context, arg1 *types.SeedTask) (*types.SeedTask, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TriggerCDN", arg0, arg1)
ret0, _ := ret[0].(*types.SeedTask)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// TriggerCDN indicates an expected call of TriggerCDN.
func (mr *MockCDNMgrMockRecorder) TriggerCDN(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TriggerCDN", reflect.TypeOf((*MockCDNMgr)(nil).TriggerCDN), arg0, arg1)
}
// TryFreeSpace mocks base method.
func (m *MockCDNMgr) TryFreeSpace(arg0 int64) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "TryFreeSpace", arg0)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// TryFreeSpace indicates an expected call of TryFreeSpace.
func (mr *MockCDNMgrMockRecorder) TryFreeSpace(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TryFreeSpace", reflect.TypeOf((*MockCDNMgr)(nil).TryFreeSpace), arg0)
}

View File

@ -1,63 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: d7y.io/dragonfly/v2/cdn/supervisor/mgr (interfaces: GCMgr)
// Package mock is a generated GoMock package.
package mock
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockGCMgr is a mock of GCMgr interface.
type MockGCMgr struct {
ctrl *gomock.Controller
recorder *MockGCMgrMockRecorder
}
// MockGCMgrMockRecorder is the mock recorder for MockGCMgr.
type MockGCMgrMockRecorder struct {
mock *MockGCMgr
}
// NewMockGCMgr creates a new mock instance.
func NewMockGCMgr(ctrl *gomock.Controller) *MockGCMgr {
mock := &MockGCMgr{ctrl: ctrl}
mock.recorder = &MockGCMgrMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockGCMgr) EXPECT() *MockGCMgrMockRecorder {
return m.recorder
}
// GCTask mocks base method.
func (m *MockGCMgr) GCTask(arg0 string, arg1 bool) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GCTask", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// GCTask indicates an expected call of GCTask.
func (mr *MockGCMgrMockRecorder) GCTask(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GCTask", reflect.TypeOf((*MockGCMgr)(nil).GCTask), arg0, arg1)
}
// StartGC mocks base method.
func (m *MockGCMgr) StartGC(arg0 context.Context) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StartGC", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// StartGC indicates an expected call of StartGC.
func (mr *MockGCMgrMockRecorder) StartGC(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartGC", reflect.TypeOf((*MockGCMgr)(nil).StartGC), arg0)
}

View File

@ -1,133 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: d7y.io/dragonfly/v2/cdn/supervisor (interfaces: SeedProgressMgr)
// Package mock is a generated GoMock package.
package mock
import (
context "context"
reflect "reflect"
supervisor "d7y.io/dragonfly/v2/cdn/supervisor"
types "d7y.io/dragonfly/v2/cdn/types"
gomock "github.com/golang/mock/gomock"
)
// MockSeedProgressMgr is a mock of SeedProgressMgr interface.
type MockSeedProgressMgr struct {
ctrl *gomock.Controller
recorder *MockSeedProgressMgrMockRecorder
}
// MockSeedProgressMgrMockRecorder is the mock recorder for MockSeedProgressMgr.
type MockSeedProgressMgrMockRecorder struct {
mock *MockSeedProgressMgr
}
// NewMockSeedProgressMgr creates a new mock instance.
func NewMockSeedProgressMgr(ctrl *gomock.Controller) *MockSeedProgressMgr {
mock := &MockSeedProgressMgr{ctrl: ctrl}
mock.recorder = &MockSeedProgressMgrMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSeedProgressMgr) EXPECT() *MockSeedProgressMgrMockRecorder {
return m.recorder
}
// Clear mocks base method.
func (m *MockSeedProgressMgr) Clear(arg0 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Clear", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Clear indicates an expected call of Clear.
func (mr *MockSeedProgressMgrMockRecorder) Clear(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clear", reflect.TypeOf((*MockSeedProgressMgr)(nil).Clear), arg0)
}
// GetPieces mocks base method.
func (m *MockSeedProgressMgr) GetPieces(arg0 context.Context, arg1 string) ([]*types.SeedPiece, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPieces", arg0, arg1)
ret0, _ := ret[0].([]*types.SeedPiece)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPieces indicates an expected call of GetPieces.
func (mr *MockSeedProgressMgrMockRecorder) GetPieces(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieces", reflect.TypeOf((*MockSeedProgressMgr)(nil).GetPieces), arg0, arg1)
}
// InitSeedProgress mocks base method.
func (m *MockSeedProgressMgr) InitSeedProgress(arg0 context.Context, arg1 string) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "InitSeedProgress", arg0, arg1)
}
// NOTE(review): gomock-generated mock plumbing (ctrl.Call / RecordCallWithMethodType
// pattern) — prefer regenerating with mockgen over hand-editing these methods.

// InitSeedProgress indicates an expected call of InitSeedProgress.
func (mr *MockSeedProgressMgrMockRecorder) InitSeedProgress(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitSeedProgress", reflect.TypeOf((*MockSeedProgressMgr)(nil).InitSeedProgress), arg0, arg1)
}

// PublishPiece mocks base method.
func (m *MockSeedProgressMgr) PublishPiece(arg0 context.Context, arg1 string, arg2 *types.SeedPiece) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PublishPiece", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// PublishPiece indicates an expected call of PublishPiece.
func (mr *MockSeedProgressMgrMockRecorder) PublishPiece(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishPiece", reflect.TypeOf((*MockSeedProgressMgr)(nil).PublishPiece), arg0, arg1, arg2)
}

// PublishTask mocks base method.
func (m *MockSeedProgressMgr) PublishTask(arg0 context.Context, arg1 string, arg2 *types.SeedTask) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PublishTask", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// PublishTask indicates an expected call of PublishTask.
func (mr *MockSeedProgressMgrMockRecorder) PublishTask(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishTask", reflect.TypeOf((*MockSeedProgressMgr)(nil).PublishTask), arg0, arg1, arg2)
}

// SetTaskMgr mocks base method.
func (m *MockSeedProgressMgr) SetTaskMgr(arg0 supervisor.SeedTaskMgr) {
	m.ctrl.T.Helper()
	m.ctrl.Call(m, "SetTaskMgr", arg0)
}

// SetTaskMgr indicates an expected call of SetTaskMgr.
func (mr *MockSeedProgressMgrMockRecorder) SetTaskMgr(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTaskMgr", reflect.TypeOf((*MockSeedProgressMgr)(nil).SetTaskMgr), arg0)
}

// WatchSeedProgress mocks base method.
func (m *MockSeedProgressMgr) WatchSeedProgress(arg0 context.Context, arg1 string) (<-chan *types.SeedPiece, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "WatchSeedProgress", arg0, arg1)
	ret0, _ := ret[0].(<-chan *types.SeedPiece)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// WatchSeedProgress indicates an expected call of WatchSeedProgress.
func (mr *MockSeedProgressMgrMockRecorder) WatchSeedProgress(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchSeedProgress", reflect.TypeOf((*MockSeedProgressMgr)(nil).WatchSeedProgress), arg0, arg1)
}

View File

@ -1,110 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: d7y.io/dragonfly/v2/cdn/supervisor (interfaces: SeedTaskMgr)

// NOTE(review): generated file kept verbatim — regenerate with mockgen if the
// SeedTaskMgr interface changes rather than editing by hand.

// Package mock is a generated GoMock package.
package mock

import (
	context "context"
	reflect "reflect"

	types "d7y.io/dragonfly/v2/cdn/types"
	gomock "github.com/golang/mock/gomock"
)

// MockSeedTaskMgr is a mock of SeedTaskMgr interface.
type MockSeedTaskMgr struct {
	ctrl     *gomock.Controller
	recorder *MockSeedTaskMgrMockRecorder
}

// MockSeedTaskMgrMockRecorder is the mock recorder for MockSeedTaskMgr.
type MockSeedTaskMgrMockRecorder struct {
	mock *MockSeedTaskMgr
}

// NewMockSeedTaskMgr creates a new mock instance.
func NewMockSeedTaskMgr(ctrl *gomock.Controller) *MockSeedTaskMgr {
	mock := &MockSeedTaskMgr{ctrl: ctrl}
	mock.recorder = &MockSeedTaskMgrMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockSeedTaskMgr) EXPECT() *MockSeedTaskMgrMockRecorder {
	return m.recorder
}

// Delete mocks base method.
func (m *MockSeedTaskMgr) Delete(arg0 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Delete", arg0)
	ret0, _ := ret[0].(error)
	return ret0
}

// Delete indicates an expected call of Delete.
func (mr *MockSeedTaskMgrMockRecorder) Delete(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockSeedTaskMgr)(nil).Delete), arg0)
}

// Exist mocks base method.
func (m *MockSeedTaskMgr) Exist(arg0 string) (*types.SeedTask, bool) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Exist", arg0)
	ret0, _ := ret[0].(*types.SeedTask)
	ret1, _ := ret[1].(bool)
	return ret0, ret1
}

// Exist indicates an expected call of Exist.
func (mr *MockSeedTaskMgrMockRecorder) Exist(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exist", reflect.TypeOf((*MockSeedTaskMgr)(nil).Exist), arg0)
}

// Get mocks base method.
func (m *MockSeedTaskMgr) Get(arg0 string) (*types.SeedTask, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Get", arg0)
	ret0, _ := ret[0].(*types.SeedTask)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Get indicates an expected call of Get.
func (mr *MockSeedTaskMgrMockRecorder) Get(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSeedTaskMgr)(nil).Get), arg0)
}

// GetPieces mocks base method.
func (m *MockSeedTaskMgr) GetPieces(arg0 context.Context, arg1 string) ([]*types.SeedPiece, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetPieces", arg0, arg1)
	ret0, _ := ret[0].([]*types.SeedPiece)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetPieces indicates an expected call of GetPieces.
func (mr *MockSeedTaskMgrMockRecorder) GetPieces(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPieces", reflect.TypeOf((*MockSeedTaskMgr)(nil).GetPieces), arg0, arg1)
}

// Register mocks base method.
func (m *MockSeedTaskMgr) Register(arg0 context.Context, arg1 *types.SeedTask) (<-chan *types.SeedPiece, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Register", arg0, arg1)
	ret0, _ := ret[0].(<-chan *types.SeedPiece)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Register indicates an expected call of Register.
func (mr *MockSeedTaskMgrMockRecorder) Register(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockSeedTaskMgr)(nil).Register), arg0, arg1)
}

View File

@ -0,0 +1,80 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: d7y.io/dragonfly/v2/cdn/supervisor/cdn (interfaces: Manager)

// NOTE(review): generated file kept verbatim — regenerate with mockgen if the
// cdn.Manager interface changes rather than editing by hand.

// Package cdn is a generated GoMock package.
package cdn

import (
	context "context"
	reflect "reflect"

	task "d7y.io/dragonfly/v2/cdn/supervisor/task"
	gomock "github.com/golang/mock/gomock"
)

// MockManager is a mock of Manager interface.
type MockManager struct {
	ctrl     *gomock.Controller
	recorder *MockManagerMockRecorder
}

// MockManagerMockRecorder is the mock recorder for MockManager.
type MockManagerMockRecorder struct {
	mock *MockManager
}

// NewMockManager creates a new mock instance.
func NewMockManager(ctrl *gomock.Controller) *MockManager {
	mock := &MockManager{ctrl: ctrl}
	mock.recorder = &MockManagerMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManager) EXPECT() *MockManagerMockRecorder {
	return m.recorder
}

// Delete mocks base method.
func (m *MockManager) Delete(arg0 string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Delete", arg0)
	ret0, _ := ret[0].(error)
	return ret0
}

// Delete indicates an expected call of Delete.
func (mr *MockManagerMockRecorder) Delete(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockManager)(nil).Delete), arg0)
}

// TriggerCDN mocks base method.
func (m *MockManager) TriggerCDN(arg0 context.Context, arg1 *task.SeedTask) (*task.SeedTask, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "TriggerCDN", arg0, arg1)
	ret0, _ := ret[0].(*task.SeedTask)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// TriggerCDN indicates an expected call of TriggerCDN.
func (mr *MockManagerMockRecorder) TriggerCDN(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TriggerCDN", reflect.TypeOf((*MockManager)(nil).TriggerCDN), arg0, arg1)
}

// TryFreeSpace mocks base method.
func (m *MockManager) TryFreeSpace(arg0 int64) (bool, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "TryFreeSpace", arg0)
	ret0, _ := ret[0].(bool)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// TryFreeSpace indicates an expected call of TryFreeSpace.
func (mr *MockManagerMockRecorder) TryFreeSpace(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TryFreeSpace", reflect.TypeOf((*MockManager)(nil).TryFreeSpace), arg0)
}

View File

@ -0,0 +1,79 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: d7y.io/dragonfly/v2/cdn/supervisor/progress (interfaces: Manager)

// NOTE(review): generated file kept verbatim — regenerate with mockgen if the
// progress.Manager interface changes rather than editing by hand.

// Package progress is a generated GoMock package.
package progress

import (
	context "context"
	reflect "reflect"

	task "d7y.io/dragonfly/v2/cdn/supervisor/task"
	gomock "github.com/golang/mock/gomock"
)

// MockManager is a mock of Manager interface.
type MockManager struct {
	ctrl     *gomock.Controller
	recorder *MockManagerMockRecorder
}

// MockManagerMockRecorder is the mock recorder for MockManager.
type MockManagerMockRecorder struct {
	mock *MockManager
}

// NewMockManager creates a new mock instance.
func NewMockManager(ctrl *gomock.Controller) *MockManager {
	mock := &MockManager{ctrl: ctrl}
	mock.recorder = &MockManagerMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManager) EXPECT() *MockManagerMockRecorder {
	return m.recorder
}

// PublishPiece mocks base method.
func (m *MockManager) PublishPiece(arg0 context.Context, arg1 string, arg2 *task.PieceInfo) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PublishPiece", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// PublishPiece indicates an expected call of PublishPiece.
func (mr *MockManagerMockRecorder) PublishPiece(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishPiece", reflect.TypeOf((*MockManager)(nil).PublishPiece), arg0, arg1, arg2)
}

// PublishTask mocks base method.
func (m *MockManager) PublishTask(arg0 context.Context, arg1 string, arg2 *task.SeedTask) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PublishTask", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// PublishTask indicates an expected call of PublishTask.
func (mr *MockManagerMockRecorder) PublishTask(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishTask", reflect.TypeOf((*MockManager)(nil).PublishTask), arg0, arg1, arg2)
}

// WatchSeedProgress mocks base method.
func (m *MockManager) WatchSeedProgress(arg0 context.Context, arg1, arg2 string) (<-chan *task.PieceInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "WatchSeedProgress", arg0, arg1, arg2)
	ret0, _ := ret[0].(<-chan *task.PieceInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// WatchSeedProgress indicates an expected call of WatchSeedProgress.
func (mr *MockManagerMockRecorder) WatchSeedProgress(arg0, arg1, arg2 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchSeedProgress", reflect.TypeOf((*MockManager)(nil).WatchSeedProgress), arg0, arg1, arg2)
}

View File

@ -0,0 +1,135 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: d7y.io/dragonfly/v2/cdn/supervisor/task (interfaces: Manager)

// NOTE(review): generated file kept verbatim — regenerate with mockgen if the
// task.Manager interface changes rather than editing by hand.

// Package task is a generated GoMock package.
package task

import (
	reflect "reflect"

	task "d7y.io/dragonfly/v2/cdn/supervisor/task"
	gomock "github.com/golang/mock/gomock"
)

// MockManager is a mock of Manager interface.
type MockManager struct {
	ctrl     *gomock.Controller
	recorder *MockManagerMockRecorder
}

// MockManagerMockRecorder is the mock recorder for MockManager.
type MockManagerMockRecorder struct {
	mock *MockManager
}

// NewMockManager creates a new mock instance.
func NewMockManager(ctrl *gomock.Controller) *MockManager {
	mock := &MockManager{ctrl: ctrl}
	mock.recorder = &MockManagerMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockManager) EXPECT() *MockManagerMockRecorder {
	return m.recorder
}

// AddOrUpdate mocks base method.
func (m *MockManager) AddOrUpdate(arg0 *task.SeedTask) (*task.SeedTask, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "AddOrUpdate", arg0)
	ret0, _ := ret[0].(*task.SeedTask)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// AddOrUpdate indicates an expected call of AddOrUpdate.
func (mr *MockManagerMockRecorder) AddOrUpdate(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddOrUpdate", reflect.TypeOf((*MockManager)(nil).AddOrUpdate), arg0)
}

// Delete mocks base method.
func (m *MockManager) Delete(arg0 string) {
	m.ctrl.T.Helper()
	m.ctrl.Call(m, "Delete", arg0)
}

// Delete indicates an expected call of Delete.
func (mr *MockManagerMockRecorder) Delete(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockManager)(nil).Delete), arg0)
}

// Exist mocks base method.
func (m *MockManager) Exist(arg0 string) (*task.SeedTask, bool) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Exist", arg0)
	ret0, _ := ret[0].(*task.SeedTask)
	ret1, _ := ret[1].(bool)
	return ret0, ret1
}

// Exist indicates an expected call of Exist.
func (mr *MockManagerMockRecorder) Exist(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exist", reflect.TypeOf((*MockManager)(nil).Exist), arg0)
}

// Get mocks base method.
func (m *MockManager) Get(arg0 string) (*task.SeedTask, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Get", arg0)
	ret0, _ := ret[0].(*task.SeedTask)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Get indicates an expected call of Get.
func (mr *MockManagerMockRecorder) Get(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockManager)(nil).Get), arg0)
}

// GetProgress mocks base method.
func (m *MockManager) GetProgress(arg0 string) (map[uint32]*task.PieceInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetProgress", arg0)
	ret0, _ := ret[0].(map[uint32]*task.PieceInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetProgress indicates an expected call of GetProgress.
func (mr *MockManagerMockRecorder) GetProgress(arg0 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProgress", reflect.TypeOf((*MockManager)(nil).GetProgress), arg0)
}

// Update mocks base method.
func (m *MockManager) Update(arg0 string, arg1 *task.SeedTask) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Update", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// Update indicates an expected call of Update.
func (mr *MockManagerMockRecorder) Update(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockManager)(nil).Update), arg0, arg1)
}

// UpdateProgress mocks base method.
func (m *MockManager) UpdateProgress(arg0 string, arg1 *task.PieceInfo) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "UpdateProgress", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// UpdateProgress indicates an expected call of UpdateProgress.
func (mr *MockManagerMockRecorder) UpdateProgress(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateProgress", reflect.TypeOf((*MockManager)(nil).UpdateProgress), arg0, arg1)
}

View File

@ -14,231 +14,131 @@
* limitations under the License. * limitations under the License.
*/ */
//go:generate mockgen -destination ../mocks/progress/mock_progress_manager.go -package progress d7y.io/dragonfly/v2/cdn/supervisor/progress Manager
package progress package progress
import ( import (
"container/list"
"context" "context"
"encoding/json" "encoding/json"
"fmt"
"sort" "sort"
"strconv"
"sync"
"time"
"github.com/pkg/errors" "github.com/pkg/errors"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
"d7y.io/dragonfly/v2/cdn/config" "d7y.io/dragonfly/v2/cdn/constants"
"d7y.io/dragonfly/v2/cdn/supervisor" "d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/internal/dferrors"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/synclock" "d7y.io/dragonfly/v2/pkg/synclock"
"d7y.io/dragonfly/v2/pkg/syncmap"
) )
var _ supervisor.SeedProgressMgr = (*Manager)(nil) // Manager as an interface defines all operations about seed progress
type Manager interface {
type Manager struct { // WatchSeedProgress watch task seed progress
seedSubscribers *syncmap.SyncMap WatchSeedProgress(ctx context.Context, clientAddr string, taskID string) (<-chan *task.PieceInfo, error)
taskPieceMetaRecords *syncmap.SyncMap
taskMgr supervisor.SeedTaskMgr // PublishPiece publish piece seed
mu *synclock.LockerPool PublishPiece(ctx context.Context, taskID string, piece *task.PieceInfo) error
timeout time.Duration
buffer int // PublishTask publish task seed
PublishTask(ctx context.Context, taskID string, task *task.SeedTask) error
} }
func (pm *Manager) SetTaskMgr(taskMgr supervisor.SeedTaskMgr) { var _ Manager = (*manager)(nil)
pm.taskMgr = taskMgr
type manager struct {
mu *synclock.LockerPool
taskManager task.Manager
seedTaskSubjects map[string]*publisher
} }
func NewManager() (supervisor.SeedProgressMgr, error) { func NewManager(taskManager task.Manager) (Manager, error) {
return &Manager{ return newManager(taskManager)
seedSubscribers: syncmap.NewSyncMap(), }
taskPieceMetaRecords: syncmap.NewSyncMap(),
mu: synclock.NewLockerPool(), func newManager(taskManager task.Manager) (*manager, error) {
timeout: 3 * time.Second, return &manager{
buffer: 4, mu: synclock.NewLockerPool(),
taskManager: taskManager,
seedTaskSubjects: make(map[string]*publisher),
}, nil }, nil
} }
func (pm *Manager) InitSeedProgress(ctx context.Context, taskID string) { func (pm *manager) WatchSeedProgress(ctx context.Context, clientAddr string, taskID string) (<-chan *task.PieceInfo, error) {
span := trace.SpanFromContext(ctx)
span.AddEvent(config.EventInitSeedProgress)
pm.mu.Lock(taskID, true)
if _, ok := pm.seedSubscribers.Load(taskID); ok {
logger.WithTaskID(taskID).Debugf("the task seedSubscribers already exist")
if _, ok := pm.taskPieceMetaRecords.Load(taskID); ok {
logger.WithTaskID(taskID).Debugf("the task taskPieceMetaRecords already exist")
pm.mu.UnLock(taskID, true)
return
}
}
pm.mu.UnLock(taskID, true)
pm.mu.Lock(taskID, false) pm.mu.Lock(taskID, false)
defer pm.mu.UnLock(taskID, false) defer pm.mu.UnLock(taskID, false)
if _, loaded := pm.seedSubscribers.LoadOrStore(taskID, list.New()); loaded {
logger.WithTaskID(taskID).Info("the task seedSubscribers already exist")
}
if _, loaded := pm.taskPieceMetaRecords.LoadOrStore(taskID, syncmap.NewSyncMap()); loaded {
logger.WithTaskID(taskID).Info("the task taskPieceMetaRecords already exist")
}
}
func (pm *Manager) WatchSeedProgress(ctx context.Context, taskID string) (<-chan *types.SeedPiece, error) {
span := trace.SpanFromContext(ctx) span := trace.SpanFromContext(ctx)
span.AddEvent(config.EventWatchSeedProgress) span.AddEvent(constants.EventWatchSeedProgress)
logger.Debugf("watch seed progress begin for taskID: %s", taskID) seedTask, err := pm.taskManager.Get(taskID)
pm.mu.Lock(taskID, true)
defer pm.mu.UnLock(taskID, true)
chanList, err := pm.seedSubscribers.GetAsList(taskID)
if err != nil { if err != nil {
return nil, fmt.Errorf("get seed subscribers: %v", err) return nil, err
} }
pieceMetadataRecords, err := pm.getPieceMetaRecordsByTaskID(taskID) if seedTask.IsDone() {
if err != nil { pieceChan := make(chan *task.PieceInfo)
return nil, fmt.Errorf("get piece meta records by taskID: %v", err) go func(pieceChan chan *task.PieceInfo) {
} defer func() {
ch := make(chan *types.SeedPiece, pm.buffer) logger.Debugf("subscriber %s starts watching task %s seed progress", clientAddr, taskID)
ele := chanList.PushBack(ch) close(pieceChan)
go func(seedCh chan *types.SeedPiece, ele *list.Element) { }()
for _, pieceMetaRecord := range pieceMetadataRecords { pieceNums := make([]uint32, 0, len(seedTask.Pieces))
logger.Debugf("seed piece meta record %#v", pieceMetaRecord) for pieceNum := range seedTask.Pieces {
select { pieceNums = append(pieceNums, pieceNum)
case seedCh <- pieceMetaRecord:
case <-time.After(pm.timeout):
} }
} sort.Slice(pieceNums, func(i, j int) bool {
if task, err := pm.taskMgr.Get(taskID); err == nil && task.IsDone() { return pieceNums[i] < pieceNums[j]
chanList.Remove(ele) })
close(seedCh) for _, pieceNum := range pieceNums {
} logger.Debugf("notifies subscriber %s about %d piece info of taskID %s", clientAddr, pieceNum, taskID)
}(ch, ele) pieceChan <- seedTask.Pieces[pieceNum]
return ch, nil
}
// PublishPiece publish seedPiece
func (pm *Manager) PublishPiece(ctx context.Context, taskID string, record *types.SeedPiece) error {
span := trace.SpanFromContext(ctx)
recordBytes, _ := json.Marshal(record)
span.AddEvent(config.EventPublishPiece, trace.WithAttributes(config.AttributeSeedPiece.String(string(recordBytes))))
logger.Debugf("seed piece meta record %#v", record)
pm.mu.Lock(taskID, false)
defer pm.mu.UnLock(taskID, false)
// update task access time
if pm.taskMgr != nil {
if _, err := pm.taskMgr.Get(taskID); err != nil {
return err
}
}
err := pm.setPieceMetaRecord(taskID, record)
if err != nil {
return fmt.Errorf("set piece meta record: %v", err)
}
chanList, err := pm.seedSubscribers.GetAsList(taskID)
if err != nil {
return fmt.Errorf("get seed subscribers: %v", err)
}
var wg sync.WaitGroup
for e := chanList.Front(); e != nil; e = e.Next() {
wg.Add(1)
sub := e.Value.(chan *types.SeedPiece)
go func(sub chan *types.SeedPiece, record *types.SeedPiece) {
defer wg.Done()
select {
case sub <- record:
case <-time.After(pm.timeout):
} }
}(pieceChan)
}(sub, record) return pieceChan, nil
} }
wg.Wait() var progressPublisher, ok = pm.seedTaskSubjects[taskID]
return nil if !ok {
progressPublisher = newProgressPublisher(taskID)
pm.seedTaskSubjects[taskID] = progressPublisher
}
observer := newProgressSubscriber(ctx, clientAddr, seedTask.ID, seedTask.Pieces)
progressPublisher.AddSubscriber(observer)
return observer.Receiver(), nil
} }
func (pm *Manager) PublishTask(ctx context.Context, taskID string, task *types.SeedTask) error { func (pm *manager) PublishPiece(ctx context.Context, taskID string, record *task.PieceInfo) (err error) {
pm.mu.Lock(taskID, false)
defer pm.mu.UnLock(taskID, false)
span := trace.SpanFromContext(ctx) span := trace.SpanFromContext(ctx)
taskBytes, _ := json.Marshal(task) jsonRecord, err := json.Marshal(record)
span.AddEvent(config.EventPublishTask, trace.WithAttributes(config.AttributeSeedTask.String(string(taskBytes)))) if err != nil {
logger.Debugf("publish task record %#v", task) return errors.Wrapf(err, "json marshal piece record: %#v", record)
}
span.AddEvent(constants.EventPublishPiece, trace.WithAttributes(constants.AttributeSeedPiece.String(string(jsonRecord))))
logger.Debugf("publish task %s seed piece record: %s", taskID, jsonRecord)
var progressPublisher, ok = pm.seedTaskSubjects[taskID]
if ok {
progressPublisher.NotifySubscribers(record)
}
return pm.taskManager.UpdateProgress(taskID, record)
}
func (pm *manager) PublishTask(ctx context.Context, taskID string, seedTask *task.SeedTask) error {
jsonTask, err := json.Marshal(seedTask)
if err != nil {
return errors.Wrapf(err, "json marshal seedTask: %#v", seedTask)
}
logger.Debugf("publish task %s seed piece record: %s", taskID, jsonTask)
pm.mu.Lock(taskID, false) pm.mu.Lock(taskID, false)
defer pm.mu.UnLock(taskID, false) defer pm.mu.UnLock(taskID, false)
chanList, err := pm.seedSubscribers.GetAsList(taskID) span := trace.SpanFromContext(ctx)
if err != nil { recordBytes, _ := json.Marshal(seedTask)
return fmt.Errorf("get seed subscribers: %v", err) span.AddEvent(constants.EventPublishTask, trace.WithAttributes(constants.AttributeSeedTask.String(string(recordBytes))))
} if err := pm.taskManager.Update(taskID, seedTask); err != nil {
// unwatch
for e := chanList.Front(); e != nil; e = e.Next() {
chanList.Remove(e)
sub, ok := e.Value.(chan *types.SeedPiece)
if !ok {
logger.Warnf("failed to convert chan seedPiece, e.Value: %v", e.Value)
continue
}
close(sub)
}
return nil
}
func (pm *Manager) Clear(taskID string) error {
pm.mu.Lock(taskID, false)
defer pm.mu.UnLock(taskID, false)
chanList, err := pm.seedSubscribers.GetAsList(taskID)
if err != nil && errors.Cause(err) != dferrors.ErrDataNotFound {
return errors.Wrap(err, "get seed subscribers")
}
if chanList != nil {
for e := chanList.Front(); e != nil; e = e.Next() {
chanList.Remove(e)
sub, ok := e.Value.(chan *types.SeedPiece)
if !ok {
logger.Warnf("failed to convert chan seedPiece, e.Value: %v", e.Value)
continue
}
close(sub)
}
chanList = nil
}
err = pm.seedSubscribers.Remove(taskID)
if err != nil && dferrors.ErrDataNotFound != errors.Cause(err) {
return errors.Wrap(err, "clear seed subscribes")
}
err = pm.taskPieceMetaRecords.Remove(taskID)
if err != nil && dferrors.ErrDataNotFound != errors.Cause(err) {
return errors.Wrap(err, "clear piece meta records")
}
return nil
}
func (pm *Manager) GetPieces(ctx context.Context, taskID string) (records []*types.SeedPiece, err error) {
pm.mu.Lock(taskID, true)
defer pm.mu.UnLock(taskID, true)
return pm.getPieceMetaRecordsByTaskID(taskID)
}
// setPieceMetaRecord
func (pm *Manager) setPieceMetaRecord(taskID string, record *types.SeedPiece) error {
pieceRecords, err := pm.taskPieceMetaRecords.GetAsMap(taskID)
if err != nil {
return err return err
} }
return pieceRecords.Add(strconv.Itoa(int(record.PieceNum)), record) if progressPublisher, ok := pm.seedTaskSubjects[taskID]; ok {
} progressPublisher.RemoveAllSubscribers()
delete(pm.seedTaskSubjects, taskID)
// getPieceMetaRecordsByTaskID
func (pm *Manager) getPieceMetaRecordsByTaskID(taskID string) (records []*types.SeedPiece, err error) {
pieceRecords, err := pm.taskPieceMetaRecords.GetAsMap(taskID)
if err != nil {
return nil, errors.Wrap(err, "failed to get piece meta records")
} }
pieceNums := pieceRecords.ListKeyAsIntSlice() return nil
sort.Ints(pieceNums)
for i := 0; i < len(pieceNums); i++ {
v, _ := pieceRecords.Get(strconv.Itoa(pieceNums[i]))
if value, ok := v.(*types.SeedPiece); ok {
records = append(records, value)
}
}
return records, nil
} }

View File

@ -0,0 +1,207 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package progress
import (
"context"
"sync"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/suite"
"d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/source/httpprotocol"
sourcemock "d7y.io/dragonfly/v2/pkg/source/mock"
"d7y.io/dragonfly/v2/pkg/util/rangeutils"
)
// TestProgressManagerSuite wires ProgressManagerTestSuite into the standard
// "go test" runner.
func TestProgressManagerSuite(t *testing.T) {
	suite.Run(t, &ProgressManagerTestSuite{})
}
// ProgressManagerTestSuite exercises the concrete progress manager
// implementation through its unexported constructor.
type ProgressManagerTestSuite struct {
	// manager is the progress manager under test, built in SetupSuite.
	manager *manager
	suite.Suite
}
var (
	// testTaskID identifies the single seed task shared by the suite.
	testTaskID = "testTaskID"
	// testTask is registered once with the task manager in SetupSuite.
	testTask = task.NewSeedTask(testTaskID, "https://www.drgonfly.com", nil)
	// taskPieces is the expected piece sequence: four consecutive 100-byte
	// pieces covering bytes 0-399, keyed and ordered by piece number.
	taskPieces = map[uint32]*task.PieceInfo{
		0: {
			PieceNum: 0,
			PieceMd5: "md50",
			PieceRange: &rangeutils.Range{
				StartIndex: 0,
				EndIndex:   99,
			},
			OriginRange: &rangeutils.Range{
				StartIndex: 0,
				EndIndex:   99,
			},
			PieceLen:   100,
			PieceStyle: 0,
		},
		1: {
			PieceNum: 1,
			PieceMd5: "md51",
			PieceRange: &rangeutils.Range{
				StartIndex: 100,
				EndIndex:   199,
			},
			OriginRange: &rangeutils.Range{
				StartIndex: 100,
				EndIndex:   199,
			},
			PieceLen:   100,
			PieceStyle: 0,
		},
		2: {
			PieceNum: 2,
			PieceMd5: "md52",
			PieceRange: &rangeutils.Range{
				StartIndex: 200,
				EndIndex:   299,
			},
			OriginRange: &rangeutils.Range{
				StartIndex: 200,
				EndIndex:   299,
			},
			PieceLen:   100,
			PieceStyle: 0,
		},
		3: {
			PieceNum: 3,
			PieceMd5: "md53",
			PieceRange: &rangeutils.Range{
				StartIndex: 300,
				EndIndex:   399,
			},
			OriginRange: &rangeutils.Range{
				StartIndex: 300,
				EndIndex:   399,
			},
			PieceLen:   100,
			PieceStyle: 0,
		},
	}
)
// SetupSuite builds the manager under test: it mocks the https source client
// so resolving the task's content length does not hit the network, registers
// testTask with a fresh task manager, and wraps that in a progress manager.
func (suite *ProgressManagerTestSuite) SetupSuite() {
	ctl := gomock.NewController(suite.T())
	sourceClient := sourcemock.NewMockResourceClient(ctl)
	// Swap the real https resolver for the mock for the whole suite.
	source.UnRegister("https")
	suite.Nil(source.Register("https", sourceClient, httpprotocol.Adapter))
	// The mocked origin reports a fixed content length exactly once.
	sourceClient.EXPECT().GetContentLength(source.RequestEq(testTask.RawURL)).Return(int64(1024*1024*500+1000), nil).Times(1)
	taskManager, err := task.NewManager(config.New())
	suite.Nil(err)
	seedTask, err := taskManager.AddOrUpdate(testTask)
	suite.Nil(err)
	suite.Equal(int64(1024*1024*500+1000), seedTask.SourceFileLength)
	manager, err := newManager(taskManager)
	suite.Nil(err)
	suite.manager = manager
}
// TestWatchSeedProgress verifies that watchers subscribed at every stage of
// seeding — before any piece, mid-publication, and after completion — all
// receive the full piece sequence in ascending piece-number order.
func (suite *ProgressManagerTestSuite) TestWatchSeedProgress() {
	// Watching a task that was never registered must fail.
	got, err := suite.manager.WatchSeedProgress(context.Background(), "clientAddr", "notExistTask")
	suite.NotNil(err)
	suite.Nil(got)

	wg := &sync.WaitGroup{}

	// Subscribe before any piece is published.
	suite.watchAndVerify(wg, "clientAddr1")
	suite.Nil(suite.manager.PublishPiece(context.Background(), testTaskID, taskPieces[0]))

	// Subscribe with one piece already published: historical pieces must be
	// replayed before live ones.
	suite.watchAndVerify(wg, "clientAddr2")
	suite.Nil(suite.manager.PublishPiece(context.Background(), testTaskID, taskPieces[1]))

	// Subscribe with two pieces published.
	suite.watchAndVerify(wg, "clientAddr3")
	suite.Nil(suite.manager.PublishPiece(context.Background(), testTaskID, taskPieces[2]))

	// Subscribe with three pieces published.
	suite.watchAndVerify(wg, "clientAddr4")
	suite.Nil(suite.manager.PublishPiece(context.Background(), testTaskID, taskPieces[3]))

	// Mark the task done and publish it, which should close live watchers
	// after they drain their backlog.
	testTask.CdnStatus = task.StatusSuccess
	suite.Nil(suite.manager.PublishTask(context.Background(), testTaskID, testTask))

	// Subscribe after completion: the full history is replayed and the
	// channel is closed immediately afterwards.
	suite.watchAndVerify(wg, "clientAddr5")

	wg.Wait()
}

// watchAndVerify starts watching testTaskID as clientAddr and spawns a
// wg-tracked goroutine asserting that the watcher receives every piece of
// taskPieces in ascending piece-number order before its channel closes.
func (suite *ProgressManagerTestSuite) watchAndVerify(wg *sync.WaitGroup, clientAddr string) {
	pieceChan, err := suite.manager.WatchSeedProgress(context.Background(), clientAddr, testTaskID)
	suite.Nil(err)
	suite.NotNil(pieceChan)
	wg.Add(1)
	go func() {
		defer wg.Done()
		var pieceCount uint32
		for info := range pieceChan {
			suite.Equal(taskPieces[pieceCount], info)
			pieceCount++
		}
		suite.Equal(len(taskPieces), int(pieceCount))
	}()
}

View File

@ -0,0 +1,167 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package progress
import (
"container/list"
"context"
"sort"
"sync"
"go.uber.org/atomic"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
logger "d7y.io/dragonfly/v2/internal/dflog"
)
// subscriber is a single watcher of one task's seed progress. Pieces delivered
// via Notify are staged in pieces and forwarded, ordered by piece number, to
// pieceChan by a dedicated readLoop goroutine.
type subscriber struct {
	ctx       context.Context // watch lifetime; cancellation stops readLoop
	scheduler string          // remote client address, used in log messages
	taskID    string          // task whose progress is being watched
	done      chan struct{}   // closed by Close to request final flush and exit
	once      sync.Once       // ensures Close runs its body at most once
	pieces    map[uint32]*task.PieceInfo // pending pieces keyed by piece number, guarded by cond.L
	pieceChan chan *task.PieceInfo       // delivery channel exposed via Receiver; closed by readLoop
	cond      *sync.Cond   // signalled by Notify and Close to wake readLoop
	closed    *atomic.Bool // set by Close so the wait loop can stop blocking
}
// newProgressSubscriber builds a subscriber seeded with a private copy of the
// already-known pieces and starts its delivery goroutine. The returned
// subscriber's Receiver channel yields pieces in ascending piece-number order.
func newProgressSubscriber(ctx context.Context, clientAddr, taskID string, taskPieces map[uint32]*task.PieceInfo) *subscriber {
	// Copy the map so later mutations by the caller cannot race with readLoop.
	staged := make(map[uint32]*task.PieceInfo, len(taskPieces))
	for pieceNum, piece := range taskPieces {
		staged[pieceNum] = piece
	}
	newSub := &subscriber{
		ctx:       ctx,
		scheduler: clientAddr,
		taskID:    taskID,
		pieces:    staged,
		done:      make(chan struct{}),
		pieceChan: make(chan *task.PieceInfo, 100),
		cond:      sync.NewCond(&sync.Mutex{}),
		closed:    atomic.NewBool(false),
	}
	go newSub.readLoop()
	return newSub
}
// readLoop is the delivery goroutine: it moves buffered pieces from
// sub.pieces onto sub.pieceChan until the context is cancelled, or until
// the subscriber is closed and fully drained; it then closes pieceChan.
func (sub *subscriber) readLoop() {
	logger.Debugf("subscriber %s starts watching task %s seed progress", sub.scheduler, sub.taskID)
	defer func() {
		// closing pieceChan lets receivers' range loops terminate
		close(sub.pieceChan)
		logger.Debugf("subscriber %s stopped watch task %s seed progress", sub.scheduler, sub.taskID)
	}()
	for {
		select {
		case <-sub.ctx.Done():
			// caller's context cancelled: exit immediately, pending pieces are dropped
			return
		case <-sub.done:
			// Close was called: keep flushing until the buffer is empty, then exit.
			// NOTE(review): len(sub.pieces) is read here without holding cond.L,
			// while Notify writes the map under the lock — confirm this is safe.
			if len(sub.pieces) == 0 {
				return
			}
			logger.Debugf("sub has been closed, there are still has %d pieces waiting to be sent", len(sub.pieces))
			sub.cond.L.Lock()
			sub.sendPieces()
			sub.cond.L.Unlock()
		default:
			// normal path: sleep on the condition variable until new pieces
			// arrive (Notify) or the subscriber is closed (Close)
			sub.cond.L.Lock()
			for len(sub.pieces) == 0 && !sub.closed.Load() {
				sub.cond.Wait()
			}
			sub.sendPieces()
			sub.cond.L.Unlock()
		}
	}
}
// sendPieces flushes every buffered piece to pieceChan in ascending piece
// number order and removes each one from the pending map. The caller must
// hold sub.cond.L.
func (sub *subscriber) sendPieces() {
	ordered := make([]uint32, 0, len(sub.pieces))
	for num := range sub.pieces {
		ordered = append(ordered, num)
	}
	sort.Slice(ordered, func(i, j int) bool { return ordered[i] < ordered[j] })
	for _, num := range ordered {
		logger.Debugf("subscriber %s send %d piece info of taskID %s", sub.scheduler, num, sub.taskID)
		sub.pieceChan <- sub.pieces[num]
		delete(sub.pieces, num)
	}
}
// Notify buffers seedPiece for delivery and wakes the readLoop goroutine.
// A piece with an already-buffered piece number replaces the old entry.
func (sub *subscriber) Notify(seedPiece *task.PieceInfo) {
	logger.Debugf("notifies subscriber %s about %d piece info of taskID %s", sub.scheduler, seedPiece.PieceNum, sub.taskID)
	sub.cond.L.Lock()
	defer sub.cond.L.Unlock()
	sub.pieces[seedPiece.PieceNum] = seedPiece
	sub.cond.Signal()
}
// Receiver returns the read side of the piece delivery channel; readLoop
// closes it when the subscriber stops.
func (sub *subscriber) Receiver() <-chan *task.PieceInfo {
	return sub.pieceChan
}
// Close stops the subscriber. It is safe to call multiple times; only the
// first call has any effect.
func (sub *subscriber) Close() {
	sub.once.Do(func() {
		logger.Debugf("close subscriber %s from taskID %s", sub.scheduler, sub.taskID)
		// Order matters: mark closed first so a readLoop woken by Signal
		// re-checks its wait condition and falls through; close(done) then
		// steers the select into the drain-and-exit branch.
		sub.closed.CAS(false, true)
		sub.cond.Signal()
		close(sub.done)
	})
}
// publisher fans seed progress of a single task out to a set of subscribers.
// NOTE(review): the subscriber list itself is not guarded by a lock in this
// file — confirm callers serialize Add/Remove/Notify operations.
type publisher struct {
	taskID      string
	subscribers *list.List // elements hold *subscriber values
}
// newProgressPublisher creates a publisher for taskID with an empty
// subscriber list.
func newProgressPublisher(taskID string) *publisher {
	pub := &publisher{taskID: taskID}
	pub.subscribers = list.New()
	return pub
}
// AddSubscriber appends sub to the publisher's subscriber list.
func (pub *publisher) AddSubscriber(sub *subscriber) {
	pub.subscribers.PushBack(sub)
	// FIX: the message says "publisher %s" so log pub.taskID; the original
	// passed sub.taskID, which may differ from the publisher's task.
	logger.Debugf("subscriber %s has been added into subscribers of publisher %s, list size is %d", sub.scheduler, pub.taskID, pub.subscribers.Len())
}
// RemoveSubscriber closes sub and unlinks it from the subscriber list.
// sub is closed even when it is not present in the list.
func (pub *publisher) RemoveSubscriber(sub *subscriber) {
	sub.Close()
	for e := pub.subscribers.Front(); e != nil; e = e.Next() {
		if e.Value == sub {
			pub.subscribers.Remove(e)
			// FIX: log pub.taskID as the publisher identity (the original
			// logged sub.taskID, which may differ).
			logger.Debugf("subscriber %s has been removed from subscribers of publisher %s, list size is %d", sub.scheduler, pub.taskID, pub.subscribers.Len())
			return
		}
	}
}
// NotifySubscribers fans seedPiece out to every registered subscriber.
func (pub *publisher) NotifySubscribers(seedPiece *task.PieceInfo) {
	for element := pub.subscribers.Front(); element != nil; element = element.Next() {
		element.Value.(*subscriber).Notify(seedPiece)
	}
}
// RemoveAllSubscribers closes every subscriber and empties the list.
// FIX: the original called RemoveSubscriber per element, which re-scans the
// list for each removal (O(n^2)); a single pass plus Init() is O(n).
func (pub *publisher) RemoveAllSubscribers() {
	for e := pub.subscribers.Front(); e != nil; e = e.Next() {
		e.Value.(*subscriber).Close()
	}
	pub.subscribers.Init()
	logger.Debugf("all subscribers of publisher %s have been removed", pub.taskID)
}

View File

@ -0,0 +1,152 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package progress
import (
"context"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/pkg/util/rangeutils"
)
// Test_publisher_NotifySubscribers verifies that every subscriber receives
// all notified pieces, and that pieces a subscriber was seeded with at
// construction time are delivered in addition to the notified ones.
func Test_publisher_NotifySubscribers(t *testing.T) {
	assert := assert.New(t)
	publisher := newProgressPublisher("testTask")
	notifyPieces := []*task.PieceInfo{
		{
			PieceNum: 0,
			PieceMd5: "pieceMd51",
			PieceRange: &rangeutils.Range{
				StartIndex: 0,
				EndIndex:   99,
			},
			OriginRange: &rangeutils.Range{
				StartIndex: 0,
				EndIndex:   99,
			},
			PieceLen:   100,
			PieceStyle: 0,
		}, {
			PieceNum: 1,
			PieceMd5: "pieceMd52",
			PieceRange: &rangeutils.Range{
				StartIndex: 100,
				EndIndex:   199,
			},
			OriginRange: &rangeutils.Range{
				StartIndex: 100,
				EndIndex:   199,
			},
			PieceLen:   100,
			PieceStyle: 0,
		},
	}
	wg := sync.WaitGroup{}
	// sub1 and sub2 start with no pre-seeded pieces.
	sub1 := newProgressSubscriber(context.Background(), "client1", "testTask", nil)
	publisher.AddSubscriber(sub1)
	sub2 := newProgressSubscriber(context.Background(), "client2", "testTask", nil)
	publisher.AddSubscriber(sub2)
	additionPieceInfo1 := &task.PieceInfo{
		PieceNum:    100,
		PieceMd5:    "xxxxx",
		PieceRange:  &rangeutils.Range{},
		OriginRange: &rangeutils.Range{},
		PieceLen:    0,
		PieceStyle:  0,
	}
	// FIX: taskID was misspelled "taskTask" in the original; all subscribers
	// here watch the same task as the publisher, "testTask".
	sub3 := newProgressSubscriber(context.Background(), "client3", "testTask", map[uint32]*task.PieceInfo{
		100: additionPieceInfo1,
	})
	additionPieceInfo2 := &task.PieceInfo{
		PieceNum:    200,
		PieceMd5:    "xxxxx",
		PieceRange:  &rangeutils.Range{},
		OriginRange: &rangeutils.Range{},
		PieceLen:    0,
		PieceStyle:  0,
	}
	publisher.AddSubscriber(sub3)
	sub4 := newProgressSubscriber(context.Background(), "client4", "testTask", map[uint32]*task.PieceInfo{
		100: additionPieceInfo1,
		200: additionPieceInfo2,
	})
	publisher.AddSubscriber(sub4)
	chan1 := sub1.Receiver()
	chan2 := sub2.Receiver()
	chan3 := sub3.Receiver()
	chan4 := sub4.Receiver()
	// sub1: expects exactly the 2 notified pieces.
	wg.Add(1)
	go func(pieceChan <-chan *task.PieceInfo) {
		defer wg.Done()
		var pieceCount = 0
		for info := range pieceChan {
			pieceCount++
			assert.EqualValues(notifyPieces[info.PieceNum], info)
		}
		assert.Equal(2, pieceCount)
	}(chan1)
	// sub2: expects exactly the 2 notified pieces.
	wg.Add(1)
	go func(pieceChan <-chan *task.PieceInfo) {
		defer wg.Done()
		var pieceCount = 0
		for info := range pieceChan {
			pieceCount++
			assert.EqualValues(notifyPieces[info.PieceNum], info)
		}
		assert.Equal(2, pieceCount)
	}(chan2)
	// sub3: 1 seeded piece + 2 notified pieces.
	wg.Add(1)
	go func(pieceChan <-chan *task.PieceInfo) {
		defer wg.Done()
		var pieceCount = 0
		for info := range pieceChan {
			pieceCount++
			if info.PieceNum == 100 {
				assert.EqualValues(additionPieceInfo1, info)
			} else {
				assert.EqualValues(notifyPieces[info.PieceNum], info)
			}
		}
		// FIX: the original counted pieces for sub3 but never asserted the
		// total; 1 seeded + 2 notified = 3.
		assert.Equal(3, pieceCount)
	}(chan3)
	// sub4: 2 seeded pieces + 2 notified pieces.
	wg.Add(1)
	go func(pieceChan <-chan *task.PieceInfo) {
		defer wg.Done()
		var pieceCount = 0
		for info := range pieceChan {
			pieceCount++
			if info.PieceNum == 100 {
				assert.EqualValues(additionPieceInfo1, info)
			} else if info.PieceNum == 200 {
				assert.EqualValues(additionPieceInfo2, info)
			} else {
				assert.EqualValues(notifyPieces[info.PieceNum], info)
			}
		}
		assert.Equal(4, pieceCount)
	}(chan4)
	for i := range notifyPieces {
		publisher.NotifySubscribers(notifyPieces[i])
	}
	// Removing all subscribers closes them; each readLoop drains its buffer
	// and closes its channel, terminating the range loops above.
	publisher.RemoveAllSubscribers()
	wg.Wait()
}

View File

@ -1,48 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//go:generate mockgen -destination ./mock/mock_progress_mgr.go -package mock d7y.io/dragonfly/v2/cdn/supervisor SeedProgressMgr
package supervisor
import (
"context"
"d7y.io/dragonfly/v2/cdn/types"
)
// SeedProgressMgr as an interface defines all operations about seed progress.
type SeedProgressMgr interface {
	// InitSeedProgress initializes the seed progress state for taskID.
	InitSeedProgress(ctx context.Context, taskID string)
	// WatchSeedProgress returns a channel that streams the task's seed pieces.
	WatchSeedProgress(ctx context.Context, taskID string) (<-chan *types.SeedPiece, error)
	// PublishPiece publishes a downloaded piece to watchers of the task.
	PublishPiece(ctx context.Context, taskID string, piece *types.SeedPiece) error
	// PublishTask publishes the task's final seed result to watchers.
	PublishTask(ctx context.Context, taskID string, task *types.SeedTask) error
	// GetPieces gets the pieces already published for taskID.
	GetPieces(ctx context.Context, taskID string) (records []*types.SeedPiece, err error)
	// Clear removes the in-memory progress meta info of the task.
	Clear(taskID string) error
	// SetTaskMgr injects the task manager dependency.
	SetTaskMgr(taskMgr SeedTaskMgr)
}

138
cdn/supervisor/service.go Normal file
View File

@ -0,0 +1,138 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package supervisor
import (
"context"
"encoding/json"
"sort"
"github.com/pkg/errors"
"d7y.io/dragonfly/v2/cdn/supervisor/cdn"
"d7y.io/dragonfly/v2/cdn/supervisor/progress"
"d7y.io/dragonfly/v2/cdn/supervisor/task"
"d7y.io/dragonfly/v2/pkg/synclock"
)
var (
	// errResourcesLacked represents a lack of resources, for example, the
	// disk does not have enough space. Use IsResourcesLacked to test for it.
	errResourcesLacked = errors.New("resources lacked")
)
// IsResourcesLacked reports whether err was caused by insufficient
// resources (errResourcesLacked), matching through any error wrapping.
func IsResourcesLacked(err error) bool {
	return errors.Is(err, errResourcesLacked)
}
// CDNService is the facade used to register seed tasks and query their
// pieces and state.
type CDNService interface {
	// RegisterSeedTask registers seed task
	RegisterSeedTask(ctx context.Context, clientAddr string, registerTask *task.SeedTask) (<-chan *task.PieceInfo, error)
	// GetSeedPieces returns pieces associated with taskID, which are sorted by pieceNum
	GetSeedPieces(taskID string) (pieces []*task.PieceInfo, err error)
	// GetSeedTask returns seed task associated with taskID
	GetSeedTask(taskID string) (seedTask *task.SeedTask, err error)
}
// cdnService implements CDNService by composing the task, cdn and progress
// managers.
type cdnService struct {
	taskManager     task.Manager
	cdnManager      cdn.Manager
	progressManager progress.Manager
}
// NewCDNService wires the task, cdn and progress managers into a
// CDNService. The returned error is always nil in the current
// implementation.
func NewCDNService(taskManager task.Manager, cdnManager cdn.Manager, progressManager progress.Manager) (CDNService, error) {
	service := &cdnService{
		taskManager:     taskManager,
		cdnManager:      cdnManager,
		progressManager: progressManager,
	}
	return service, nil
}
// RegisterSeedTask stores (or refreshes) registerTask, triggers the CDN
// download when needed, and returns the channel on which the client at
// clientAddr receives the task's seed pieces.
func (service *cdnService) RegisterSeedTask(ctx context.Context, clientAddr string, registerTask *task.SeedTask) (<-chan *task.PieceInfo, error) {
	_, err := service.taskManager.AddOrUpdate(registerTask)
	if err != nil {
		return nil, err
	}
	if err = service.triggerCdnSyncAction(ctx, registerTask.ID); err != nil {
		return nil, err
	}
	return service.progressManager.WatchSeedProgress(ctx, clientAddr, registerTask.ID)
}
// triggerCdnSyncAction triggers the CDN to start downloading the seed task
// identified by taskID. It returns errResourcesLacked when the disk cannot
// hold the source file, and nil when the task is already running/finished
// or the download goroutine was started.
func (service *cdnService) triggerCdnSyncAction(ctx context.Context, taskID string) error {
	seedTask, err := service.taskManager.Get(taskID)
	if err != nil {
		return err
	}
	synclock.Lock(taskID, true)
	if seedTask.SourceFileLength > 0 {
		if ok, err := service.cdnManager.TryFreeSpace(seedTask.SourceFileLength); err != nil {
			// best-effort probe: a failed space check does not abort the trigger
			seedTask.Log().Errorf("failed to try free space: %v", err)
		} else if !ok {
			// FIX: the original returned here while still holding the read
			// lock, leaking it; release before returning.
			synclock.UnLock(taskID, true)
			return errResourcesLacked
		}
	}
	if !seedTask.IsFrozen() {
		// FIX: log format was missing the separator ("%sno need trigger again")
		seedTask.Log().Infof("seedTask status is %s, no need trigger again", seedTask.CdnStatus)
		synclock.UnLock(seedTask.ID, true)
		return nil
	}
	synclock.UnLock(seedTask.ID, true)

	synclock.Lock(seedTask.ID, false)
	defer synclock.UnLock(seedTask.ID, false)
	// reconfirm under the write lock: another goroutine may have triggered
	// the task between the two lock acquisitions
	if !seedTask.IsFrozen() {
		seedTask.Log().Infof("reconfirm seedTask status is not frozen, no need trigger again, current status: %s", seedTask.CdnStatus)
		return nil
	}
	seedTask.StartTrigger()
	// the actual download runs asynchronously; its outcome is only logged here
	go func() {
		updateTaskInfo, err := service.cdnManager.TriggerCDN(context.Background(), seedTask)
		if err != nil {
			seedTask.Log().Errorf("failed to trigger cdn: %v", err)
		}
		jsonTaskInfo, err := json.Marshal(updateTaskInfo)
		if err != nil {
			seedTask.Log().Errorf("failed to json marshal updateTaskInfo: %#v: %v", updateTaskInfo, err)
			return
		}
		seedTask.Log().Infof("trigger cdn result: %s", jsonTaskInfo)
	}()
	return nil
}
// GetSeedPieces returns every known piece of taskID, sorted by piece number
// in ascending order.
func (service *cdnService) GetSeedPieces(taskID string) ([]*task.PieceInfo, error) {
	pieceMap, err := service.taskManager.GetProgress(taskID)
	if err != nil {
		return nil, err
	}
	pieces := make([]*task.PieceInfo, 0, len(pieceMap))
	for _, piece := range pieceMap {
		pieces = append(pieces, piece)
	}
	sort.Slice(pieces, func(i, j int) bool { return pieces[i].PieceNum < pieces[j].PieceNum })
	return pieces, nil
}
// GetSeedTask returns the seed task associated with taskID by delegating to
// the task manager.
func (service *cdnService) GetSeedTask(taskID string) (*task.SeedTask, error) {
	return service.taskManager.Get(taskID)
}

View File

@ -14,206 +14,128 @@
* limitations under the License. * limitations under the License.
*/ */
//go:generate mockgen -destination ../mocks/task/mock_task_manager.go -package task d7y.io/dragonfly/v2/cdn/supervisor/task Manager
package task package task
import ( import (
"context" "sync"
"encoding/json"
"time" "time"
"github.com/pkg/errors" "github.com/pkg/errors"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"d7y.io/dragonfly/v2/cdn/config" "d7y.io/dragonfly/v2/cdn/config"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors" "d7y.io/dragonfly/v2/cdn/gc"
"d7y.io/dragonfly/v2/cdn/supervisor"
"d7y.io/dragonfly/v2/cdn/supervisor/gc"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/internal/dferrors"
logger "d7y.io/dragonfly/v2/internal/dflog" logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/internal/util" "d7y.io/dragonfly/v2/internal/util"
"d7y.io/dragonfly/v2/pkg/source" "d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/synclock" "d7y.io/dragonfly/v2/pkg/synclock"
"d7y.io/dragonfly/v2/pkg/syncmap"
"d7y.io/dragonfly/v2/pkg/unit" "d7y.io/dragonfly/v2/pkg/unit"
"d7y.io/dragonfly/v2/pkg/util/stringutils" "d7y.io/dragonfly/v2/pkg/util/stringutils"
) )
// Ensure that Manager implements the SeedTaskMgr and gcExecutor interfaces // Manager as an interface defines all operations against SeedTask.
var _ supervisor.SeedTaskMgr = (*Manager)(nil) // A SeedTask will store some meta info about the taskFile, pieces and something else.
var _ gc.Executor = (*Manager)(nil) // A seedTask corresponds to three files on the disk, which are identified by taskId, the data file meta file piece file
type Manager interface {
// AddOrUpdate update existing task info for the key if present.
// Otherwise, it stores and returns the given value.
// The isUpdate result is true if the value was updated, false if added.
AddOrUpdate(registerTask *SeedTask) (seedTask *SeedTask, err error)
// Get returns the task info with specified taskID, or nil if no
// value is present.
// The ok result indicates whether value was found in the taskManager.
Get(taskID string) (seedTask *SeedTask, err error)
// Update the task info with specified taskID and updateTask
Update(taskID string, updateTask *SeedTask) (err error)
// UpdateProgress update the downloaded pieces belonging to the task
UpdateProgress(taskID string, piece *PieceInfo) (err error)
// GetProgress returns the downloaded pieces belonging to the task
GetProgress(taskID string) (map[uint32]*PieceInfo, error)
// Exist check task existence with specified taskID.
// returns the task info with specified taskID, or nil if no value is present.
// The ok result indicates whether value was found in the taskManager.
Exist(taskID string) (seedTask *SeedTask, ok bool)
// Delete a task with specified taskID.
Delete(taskID string)
}
// Ensure that manager implements the Manager and gc.Executor interfaces
var (
_ Manager = (*manager)(nil)
_ gc.Executor = (*manager)(nil)
)
var ( var (
errTaskNotFound = errors.New("task is not found")
errURLUnreachable = errors.New("url is unreachable") errURLUnreachable = errors.New("url is unreachable")
errTaskIDConflict = errors.New("taskID is conflict") errTaskIDConflict = errors.New("taskID is conflict")
) )
var tracer trace.Tracer func IsTaskNotFound(err error) bool {
return errors.Is(err, errTaskNotFound)
func init() {
tracer = otel.Tracer("cdn-task-manager")
} }
// Manager is an implementation of the interface of TaskMgr. // manager is an implementation of the interface of Manager.
type Manager struct { type manager struct {
cfg *config.Config config *config.Config
taskStore *syncmap.SyncMap taskStore sync.Map
accessTimeMap *syncmap.SyncMap accessTimeMap sync.Map
taskURLUnReachableStore *syncmap.SyncMap taskURLUnreachableStore sync.Map
cdnMgr supervisor.CDNMgr
progressMgr supervisor.SeedProgressMgr
} }
// NewManager returns a new Manager Object. // NewManager returns a new Manager Object.
func NewManager(cfg *config.Config, cdnMgr supervisor.CDNMgr, progressMgr supervisor.SeedProgressMgr) (*Manager, error) { func NewManager(config *config.Config) (Manager, error) {
taskMgr := &Manager{
cfg: cfg, manager := &manager{
taskStore: syncmap.NewSyncMap(), config: config,
accessTimeMap: syncmap.NewSyncMap(),
taskURLUnReachableStore: syncmap.NewSyncMap(),
cdnMgr: cdnMgr,
progressMgr: progressMgr,
} }
progressMgr.SetTaskMgr(taskMgr)
gc.Register("task", cfg.GCInitialDelay, cfg.GCMetaInterval, taskMgr) gc.Register("task", config.GCInitialDelay, config.GCMetaInterval, manager)
return taskMgr, nil return manager, nil
} }
func (tm *Manager) Register(ctx context.Context, registerTask *types.SeedTask) (pieceChan <-chan *types.SeedPiece, err error) { func (tm *manager) AddOrUpdate(registerTask *SeedTask) (seedTask *SeedTask, err error) {
var span trace.Span
ctx, span = tracer.Start(ctx, config.SpanTaskRegister)
defer span.End()
task, err := tm.AddOrUpdate(registerTask)
if err != nil {
span.RecordError(err)
logger.WithTaskID(registerTask.TaskID).Infof("failed to add or update task with req: %#v: %v", registerTask, err)
return nil, err
}
taskBytes, _ := json.Marshal(task)
span.SetAttributes(config.AttributeTaskInfo.String(string(taskBytes)))
task.Log().Debugf("success get task info: %#v", task)
// update accessTime for taskId
if err := tm.accessTimeMap.Add(task.TaskID, time.Now()); err != nil {
task.Log().Warnf("failed to update accessTime: %v", err)
}
// trigger CDN
if err := tm.triggerCdnSyncAction(ctx, task); err != nil {
return nil, errors.Wrapf(err, "trigger cdn")
}
task.Log().Infof("successfully trigger cdn sync action")
// watch seed progress
return tm.progressMgr.WatchSeedProgress(ctx, task.TaskID)
}
// triggerCdnSyncAction
func (tm *Manager) triggerCdnSyncAction(ctx context.Context, task *types.SeedTask) error {
var span trace.Span
ctx, span = tracer.Start(ctx, config.SpanTriggerCDNSyncAction)
defer span.End()
synclock.Lock(task.TaskID, true)
if !task.IsFrozen() {
span.SetAttributes(config.AttributeTaskStatus.String(task.CdnStatus))
task.Log().Infof("seedTask is running or has been downloaded successfully, status: %s", task.CdnStatus)
synclock.UnLock(task.TaskID, true)
return nil
}
synclock.UnLock(task.TaskID, true)
synclock.Lock(task.TaskID, false)
defer synclock.UnLock(task.TaskID, false)
// reconfirm
span.SetAttributes(config.AttributeTaskStatus.String(task.CdnStatus))
if !task.IsFrozen() {
task.Log().Infof("reconfirm find seedTask is running or has been downloaded successfully, status: %s", task.CdnStatus)
return nil
}
if task.IsWait() {
tm.progressMgr.InitSeedProgress(ctx, task.TaskID)
task.Log().Infof("successfully init seed progress for task")
}
updatedTask, err := tm.updateTask(task.TaskID, &types.SeedTask{
CdnStatus: types.TaskInfoCdnStatusRunning,
})
if err != nil {
return errors.Wrapf(err, "update task")
}
// triggerCDN goroutine
go func() {
updateTaskInfo, err := tm.cdnMgr.TriggerCDN(context.Background(), task)
if err != nil {
task.Log().Errorf("trigger cdn get error: %v", err)
}
updatedTask, err = tm.updateTask(task.TaskID, updateTaskInfo)
go func() {
if err := tm.progressMgr.PublishTask(ctx, task.TaskID, updatedTask); err != nil {
task.Log().Errorf("failed to publish task: %v", err)
}
}()
if err != nil {
task.Log().Errorf("failed to update task: %v", err)
}
task.Log().Infof("successfully update task cdn updatedTask: %#v", updatedTask)
}()
return nil
}
func (tm *Manager) getTask(taskID string) (*types.SeedTask, error) {
if stringutils.IsBlank(taskID) {
return nil, errors.Wrap(cdnerrors.ErrInvalidValue, "taskID is empty")
}
v, err := tm.taskStore.Get(taskID)
if err != nil {
if errors.Cause(err) == dferrors.ErrDataNotFound {
return nil, errors.Wrapf(cdnerrors.ErrDataNotFound, "task not found")
}
return nil, err
}
// type assertion
if info, ok := v.(*types.SeedTask); ok {
return info, nil
}
return nil, errors.Wrapf(cdnerrors.ErrConvertFailed, "origin object: %#v", v)
}
func (tm *Manager) AddOrUpdate(registerTask *types.SeedTask) (seedTask *types.SeedTask, err error) {
defer func() { defer func() {
if err != nil { if err != nil {
tm.accessTimeMap.Store(registerTask.TaskID, time.Now()) tm.accessTimeMap.Store(registerTask.ID, time.Now())
} }
}() }()
synclock.Lock(registerTask.TaskID, true) synclock.Lock(registerTask.ID, true)
if unreachableTime, ok := tm.getTaskUnreachableTime(registerTask.TaskID); ok { if unreachableTime, ok := tm.getTaskUnreachableTime(registerTask.ID); ok {
if time.Since(unreachableTime) < tm.cfg.FailAccessInterval { if time.Since(unreachableTime) < tm.config.FailAccessInterval {
synclock.UnLock(registerTask.TaskID, true) synclock.UnLock(registerTask.ID, true)
// TODO 校验Header // TODO 校验Header
return nil, errURLUnreachable return nil, errURLUnreachable
} }
logger.Debugf("delete taskID: %s from unreachable url list", registerTask.TaskID) logger.Debugf("delete taskID: %s from unreachable url list", registerTask.ID)
tm.taskURLUnReachableStore.Delete(registerTask.TaskID) tm.taskURLUnreachableStore.Delete(registerTask.ID)
} }
actual, loaded := tm.taskStore.LoadOrStore(registerTask.TaskID, registerTask) actual, loaded := tm.taskStore.LoadOrStore(registerTask.ID, registerTask)
seedTask = actual.(*types.SeedTask) seedTask = actual.(*SeedTask)
if loaded && !IsSame(seedTask, registerTask) { if loaded && !IsSame(seedTask, registerTask) {
synclock.UnLock(registerTask.TaskID, true) synclock.UnLock(registerTask.ID, true)
return nil, errors.Wrapf(errTaskIDConflict, "register task %#v is conflict with exist task %#v", registerTask, seedTask) return nil, errors.Wrapf(errTaskIDConflict, "register task %#v is conflict with exist task %#v", registerTask, seedTask)
} }
if seedTask.SourceFileLength != source.UnknownSourceFileLen { if seedTask.SourceFileLength != source.UnknownSourceFileLen {
synclock.UnLock(registerTask.TaskID, true) synclock.UnLock(registerTask.ID, true)
return seedTask, nil return seedTask, nil
} }
synclock.UnLock(registerTask.TaskID, true) synclock.UnLock(registerTask.ID, true)
synclock.Lock(registerTask.TaskID, false) synclock.Lock(registerTask.ID, false)
defer synclock.UnLock(registerTask.TaskID, false) defer synclock.UnLock(registerTask.ID, false)
if seedTask.SourceFileLength != source.UnknownSourceFileLen { if seedTask.SourceFileLength != source.UnknownSourceFileLen {
return seedTask, nil return seedTask, nil
} }
// get sourceContentLength with req.Header // get sourceContentLength with req.Header
contentLengthRequest, err := source.NewRequestWithHeader(registerTask.URL, registerTask.Header) contentLengthRequest, err := source.NewRequestWithHeader(registerTask.RawURL, registerTask.Header)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -223,9 +145,9 @@ func (tm *Manager) AddOrUpdate(registerTask *types.SeedTask) (seedTask *types.Se
} }
sourceFileLength, err := source.GetContentLength(contentLengthRequest) sourceFileLength, err := source.GetContentLength(contentLengthRequest)
if err != nil { if err != nil {
registerTask.Log().Errorf("get url (%s) content length failed: %v", registerTask.URL, err) registerTask.Log().Errorf("get url (%s) content length failed: %v", registerTask.RawURL, err)
if source.IsResourceNotReachableError(err) { if source.IsResourceNotReachableError(err) {
tm.taskURLUnReachableStore.Store(registerTask, time.Now()) tm.taskURLUnreachableStore.Store(registerTask, time.Now())
} }
return seedTask, err return seedTask, err
} }
@ -246,34 +168,62 @@ func (tm *Manager) AddOrUpdate(registerTask *types.SeedTask) (seedTask *types.Se
return seedTask, nil return seedTask, nil
} }
func (tm Manager) Get(taskID string) (*types.SeedTask, error) { func (tm *manager) Get(taskID string) (*SeedTask, error) {
task, err := tm.getTask(taskID) synclock.Lock(taskID, true)
// update accessTime for taskID defer synclock.UnLock(taskID, true)
if err := tm.accessTimeMap.Add(taskID, time.Now()); err != nil { // only update access when get task success
logger.WithTaskID(taskID).Warnf("failed to update accessTime: %v", err) if task, ok := tm.getTask(taskID); ok {
tm.accessTimeMap.Store(taskID, time.Now())
return task, nil
} }
return task, err return nil, errTaskNotFound
} }
func (tm Manager) Exist(taskID string) (*types.SeedTask, bool) { func (tm *manager) Update(taskID string, taskInfo *SeedTask) error {
task, err := tm.getTask(taskID) synclock.Lock(taskID, false)
return task, err == nil defer synclock.UnLock(taskID, false)
}
func (tm Manager) Delete(taskID string) error { if err := tm.updateTask(taskID, taskInfo); err != nil {
tm.accessTimeMap.Delete(taskID)
tm.taskURLUnReachableStore.Delete(taskID)
tm.taskStore.Delete(taskID)
if err := tm.progressMgr.Clear(taskID); err != nil {
return err return err
} }
// only update access when update task success
tm.accessTimeMap.Store(taskID, time.Now())
return nil return nil
} }
func (tm *Manager) GetPieces(ctx context.Context, taskID string) (pieces []*types.SeedPiece, err error) { func (tm *manager) UpdateProgress(taskID string, info *PieceInfo) error {
synclock.Lock(taskID, true) synclock.Lock(taskID, false)
defer synclock.UnLock(taskID, true) defer synclock.UnLock(taskID, false)
return tm.progressMgr.GetPieces(ctx, taskID)
seedTask, ok := tm.getTask(taskID)
if !ok {
return errTaskNotFound
}
seedTask.Pieces[info.PieceNum] = info
// only update access when update task success
tm.accessTimeMap.Store(taskID, time.Now())
return nil
}
func (tm *manager) GetProgress(taskID string) (map[uint32]*PieceInfo, error) {
synclock.Lock(taskID, false)
defer synclock.UnLock(taskID, false)
seedTask, ok := tm.getTask(taskID)
if !ok {
return nil, errTaskNotFound
}
tm.accessTimeMap.Store(taskID, time.Now())
return seedTask.Pieces, nil
}
func (tm *manager) Exist(taskID string) (*SeedTask, bool) {
return tm.getTask(taskID)
}
func (tm *manager) Delete(taskID string) {
synclock.Lock(taskID, false)
defer synclock.UnLock(taskID, false)
tm.deleteTask(taskID)
} }
const ( const (
@ -282,38 +232,34 @@ const (
gcTasksTimeout = 2.0 * time.Second gcTasksTimeout = 2.0 * time.Second
) )
func (tm *Manager) GC() error { func (tm *manager) GC() error {
logger.Debugf("start the task meta gc job") logger.Info("start the task meta gc job")
var removedTaskCount int
startTime := time.Now() startTime := time.Now()
// get all taskIDs and the corresponding accessTime
taskAccessMap := tm.accessTimeMap
// range all tasks and determine whether they are expired totalTaskNums := 0
taskIDs := taskAccessMap.ListKeyAsStringSlice() removedTaskCount := 0
totalTaskNums := len(taskIDs) tm.accessTimeMap.Range(func(key, value interface{}) bool {
for _, taskID := range taskIDs { totalTaskNums++
atime, err := taskAccessMap.GetAsTime(taskID) taskID := key.(string)
if err != nil { synclock.Lock(taskID, false)
logger.GcLogger.With("type", "meta").Errorf("gc tasks: failed to get access time taskID(%s): %v", taskID, err) defer synclock.UnLock(taskID, false)
continue atime := value.(time.Time)
} if time.Since(atime) < tm.config.TaskExpireTime {
if time.Since(atime) < tm.cfg.TaskExpireTime { return true
continue
} }
// gc task memory data // gc task memory data
logger.GcLogger.With("type", "meta").Infof("gc task: start to deal with task: %s", taskID) logger.GcLogger.With("type", "meta").Infof("gc task: start to deal with task: %s", taskID)
if err := tm.Delete(taskID); err != nil { tm.deleteTask(taskID)
logger.GcLogger.With("type", "meta").Infof("gc task: failed to delete task: %s", taskID)
continue
}
removedTaskCount++ removedTaskCount++
} return true
})
// slow GC detected, report it with a log warning // slow GC detected, report it with a log warning
if timeDuring := time.Since(startTime); timeDuring > gcTasksTimeout { if timeDuring := time.Since(startTime); timeDuring > gcTasksTimeout {
logger.GcLogger.With("type", "meta").Warnf("gc tasks: %d cost: %.3f", removedTaskCount, timeDuring.Seconds()) logger.GcLogger.With("type", "meta").Warnf("gc tasks: %d cost: %.3f", removedTaskCount, timeDuring.Seconds())
} }
logger.GcLogger.With("type", "meta").Infof("gc tasks: successfully full gc task count(%d), remainder count(%d)", removedTaskCount, totalTaskNums-removedTaskCount) logger.GcLogger.With("type", "meta").Infof("%d tasks were successfully cleared, leaving %d tasks remaining", removedTaskCount,
totalTaskNums-removedTaskCount)
return nil return nil
} }

View File

@ -17,89 +17,115 @@
package task package task
import ( import (
"context" "net/url"
"os"
"testing" "testing"
"github.com/golang/mock/gomock" "github.com/golang/mock/gomock"
"github.com/stretchr/testify/suite" "github.com/jarcoal/httpmock"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"d7y.io/dragonfly/v2/cdn/config" "d7y.io/dragonfly/v2/cdn/config"
"d7y.io/dragonfly/v2/cdn/supervisor/mock" "d7y.io/dragonfly/v2/internal/util"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/internal/idgen"
"d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/source/httpprotocol"
sourcemock "d7y.io/dragonfly/v2/pkg/source/mock"
) )
func TestTaskManagerSuite(t *testing.T) { func TestMain(m *testing.M) {
suite.Run(t, new(TaskManagerTestSuite)) os.Exit(m.Run())
} }
type TaskManagerTestSuite struct { func TestIsTaskNotFound(t *testing.T) {
tm *Manager
suite.Suite
}
func (suite *TaskManagerTestSuite) TestRegister() {
dragonflyURL := "http://dragonfly.io.com?a=a&b=b&c=c"
ctrl := gomock.NewController(suite.T())
cdnMgr := mock.NewMockCDNMgr(ctrl)
progressMgr := mock.NewMockSeedProgressMgr(ctrl)
progressMgr.EXPECT().SetTaskMgr(gomock.Any()).Times(1)
tm, err := NewManager(config.New(), cdnMgr, progressMgr)
suite.Nil(err)
suite.NotNil(tm)
type args struct { type args struct {
ctx context.Context err error
req *types.TaskRegisterRequest
} }
tests := []struct { tests := []struct {
name string name string
args args args args
wantPieceChan <-chan *types.SeedPiece want bool
wantErr bool
}{ }{
{ {
name: "register_md5", name: "wrap task not found error",
args: args{ args: args{
ctx: context.Background(), err: errors.Wrap(errTaskNotFound, "wrap error"),
req: &types.TaskRegisterRequest{
URL: dragonflyURL,
TaskID: idgen.TaskID(dragonflyURL, &base.UrlMeta{Filter: "a&b", Tag: "dragonfly", Digest: "md5:f1e2488bba4d1267948d9e2f7008571c"}),
Digest: "md5:f1e2488bba4d1267948d9e2f7008571c",
Filter: []string{"a", "b"},
Header: nil,
},
}, },
wantPieceChan: nil, want: true,
wantErr: false, }, {
}, name: "wrap task two layers",
{
name: "register_sha256",
args: args{ args: args{
ctx: context.Background(), err: errors.Wrap(errors.Wrap(errTaskNotFound, "wrap error"), "wrap error again"),
req: &types.TaskRegisterRequest{
URL: dragonflyURL,
TaskID: idgen.TaskID(dragonflyURL, &base.UrlMeta{Filter: "a&b", Tag: "dragonfly", Digest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5"}),
Digest: "sha256:b9907b9a5ba2b0223868c201b9addfe2ec1da1b90325d57c34f192966b0a68c5",
Filter: []string{"a", "b"},
Header: nil,
},
}, },
wantPieceChan: nil, want: true,
wantErr: false, }, {
name: "native err",
args: args{
err: errTaskNotFound,
},
want: true,
}, },
} }
for _, tt := range tests { for _, tt := range tests {
suite.Run(tt.name, func() { t.Run(tt.name, func(t *testing.T) {
//gotPieceChan, err := tm.Register(tt.args.ctx, tt.args.req) if got := IsTaskNotFound(tt.args.err); got != tt.want {
// t.Errorf("IsTaskNotFound() = %v, want %v", got, tt.want)
//if (err != nil) != tt.wantErr { }
// suite.T().Errorf("Register() error = %v, wantErr %v", err, tt.wantErr)
// return
//}
//if !reflect.DeepEqual(gotPieceChan, tt.wantPieceChan) {
// suite.T().Errorf("Register() gotPieceChan = %v, want %v", gotPieceChan, tt.wantPieceChan)
//}
}) })
} }
} }
// Test_manager_Exist verifies that a task added via AddOrUpdate becomes
// visible through Exist and that the source file length and computed
// piece size are propagated onto the stored task.
func Test_manager_Exist(t *testing.T) {
	httpmock.Activate()
	// Restore the default HTTP transport when this test ends so the mock
	// does not leak into other tests in the package.
	defer httpmock.DeactivateAndReset()
	tm, err := NewManager(config.New())
	require := require.New(t)
	require.Nil(err)
	ctl := gomock.NewController(t)
	// Assert all mock expectations were met even on older gomock versions
	// that do not auto-finish via t.Cleanup.
	defer ctl.Finish()
	sourceClient := sourcemock.NewMockResourceClient(ctl)
	testURL, err := url.Parse("https://dragonfly.com")
	require.Nil(err)
	// Swap the real https resource client for the mock so no network
	// traffic happens during the test.
	source.UnRegister("https")
	require.Nil(source.Register("https", sourceClient, httpprotocol.Adapter))
	// Content length just above 500 MiB drives the piece-size computation.
	sourceClient.EXPECT().GetContentLength(source.RequestEq(testURL.String())).Return(int64(1024*1024*500+1000), nil).Times(1)
	seedTask := NewSeedTask("taskID", testURL.String(), nil)
	addedTask, err := tm.AddOrUpdate(seedTask)
	require.Nil(err)
	existTask, ok := tm.Exist("taskID")
	require.True(ok)
	require.EqualValues(addedTask, existTask)
	require.EqualValues(1024*1024*500+1000, existTask.SourceFileLength)
	require.EqualValues(1024*1024*7, existTask.PieceSize)
}
// Test_manager_AddOrUpdate verifies that AddOrUpdate registers a new seed
// task, derives the piece size and source file length from the resolved
// content length, and that the task then becomes visible via Exist.
func Test_manager_AddOrUpdate(t *testing.T) {
	tm, err := NewManager(config.New())
	require := require.New(t)
	require.Nil(err)
	ctl := gomock.NewController(t)
	sourceClient := sourcemock.NewMockResourceClient(ctl)
	testURL, err := url.Parse("https://dragonfly.com")
	require.Nil(err)
	// Swap the real https resource client for the mock so no network
	// traffic happens during the test.
	source.UnRegister("https")
	require.Nil(source.Register("https", sourceClient, httpprotocol.Adapter))
	// Content length just above 500 MiB drives the piece-size computation.
	sourceClient.EXPECT().GetContentLength(source.RequestEq(testURL.String())).Return(int64(1024*1024*500+1000), nil).Times(1)
	registerTask := NewSeedTask("dragonfly", testURL.String(), &base.UrlMeta{
		Digest: "sha256:xxxxx",
		Tag:    "dragonfly",
		Range:  "0-3",
		Filter: "",
		Header: map[string]string{"key1": "value1"},
	})
	// The task must not exist before registration.
	existTask, ok := tm.Exist("dragonfly")
	require.Nil(existTask)
	require.False(ok)
	seedTask, err := tm.AddOrUpdate(registerTask)
	require.Nil(err)
	existTask, ok = tm.Exist("dragonfly")
	require.NotNil(existTask)
	require.True(ok)
	require.EqualValues(registerTask, seedTask)
	// Piece size and source length must be derived from the mocked content length.
	require.Equal(util.ComputePieceSize(int64(1024*1024*500+1000)), uint32(seedTask.PieceSize))
	require.Equal(int64(1024*1024*500+1000), seedTask.SourceFileLength)
	require.EqualValues(map[string]string{"key1": "value1"}, seedTask.Header)
}

View File

@ -21,57 +21,38 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
cdnerrors "d7y.io/dragonfly/v2/cdn/errors"
"d7y.io/dragonfly/v2/cdn/types"
"d7y.io/dragonfly/v2/pkg/util/stringutils" "d7y.io/dragonfly/v2/pkg/util/stringutils"
) )
// getTaskUnreachableTime get unreachable time of task and convert it to time.Time type // updateTask updates task
func (tm *Manager) getTaskUnreachableTime(taskID string) (time.Time, bool) { func (tm *manager) updateTask(taskID string, updateTaskInfo *SeedTask) error {
unreachableTime, ok := tm.taskURLUnReachableStore.Load(taskID)
if !ok {
return time.Time{}, false
}
return unreachableTime.(time.Time), true
}
// updateTask
func (tm *Manager) updateTask(taskID string, updateTaskInfo *types.SeedTask) (*types.SeedTask, error) {
if stringutils.IsBlank(taskID) {
return nil, errors.Wrap(cdnerrors.ErrInvalidValue, "taskID is empty")
}
if updateTaskInfo == nil { if updateTaskInfo == nil {
return nil, errors.Wrap(cdnerrors.ErrInvalidValue, "updateTaskInfo is nil") return errors.New("updateTaskInfo is nil")
} }
if stringutils.IsBlank(updateTaskInfo.CdnStatus) { if stringutils.IsBlank(updateTaskInfo.CdnStatus) {
return nil, errors.Wrap(cdnerrors.ErrInvalidValue, "status of task is empty") return errors.New("status of updateTaskInfo is empty")
} }
// get origin task // get origin task
task, err := tm.getTask(taskID) task, ok := tm.getTask(taskID)
if err != nil { if !ok {
return nil, err return errTaskNotFound
} }
if !updateTaskInfo.IsSuccess() { if !updateTaskInfo.IsSuccess() {
// when the origin CDNStatus equals success, do not update it to unsuccessful
if task.IsSuccess() { if task.IsSuccess() {
return task, nil task.Log().Warnf("origin task status is success, but update task status is %s, return origin task", task.CdnStatus)
return nil
} }
// only update the task CdnStatus when the new task CDNStatus and
// the origin CDNStatus both not equals success
task.CdnStatus = updateTaskInfo.CdnStatus task.CdnStatus = updateTaskInfo.CdnStatus
return task, nil return nil
} }
// only update the task info when the new CDNStatus equals success // only update the task info when the updateTaskInfo CDNStatus equals success
// and the origin CDNStatus not equals success. // and the origin CDNStatus not equals success.
if updateTaskInfo.CdnFileLength != 0 { if updateTaskInfo.CdnFileLength > 0 {
task.CdnFileLength = updateTaskInfo.CdnFileLength task.CdnFileLength = updateTaskInfo.CdnFileLength
} }
if !stringutils.IsBlank(updateTaskInfo.SourceRealDigest) { if !stringutils.IsBlank(updateTaskInfo.SourceRealDigest) {
task.SourceRealDigest = updateTaskInfo.SourceRealDigest task.SourceRealDigest = updateTaskInfo.SourceRealDigest
} }
@ -79,36 +60,75 @@ func (tm *Manager) updateTask(taskID string, updateTaskInfo *types.SeedTask) (*t
if !stringutils.IsBlank(updateTaskInfo.PieceMd5Sign) { if !stringutils.IsBlank(updateTaskInfo.PieceMd5Sign) {
task.PieceMd5Sign = updateTaskInfo.PieceMd5Sign task.PieceMd5Sign = updateTaskInfo.PieceMd5Sign
} }
var pieceTotal int32 if updateTaskInfo.SourceFileLength >= 0 {
if updateTaskInfo.SourceFileLength > 0 { task.TotalPieceCount = updateTaskInfo.TotalPieceCount
pieceTotal = int32((updateTaskInfo.SourceFileLength + int64(task.PieceSize-1)) / int64(task.PieceSize))
task.SourceFileLength = updateTaskInfo.SourceFileLength task.SourceFileLength = updateTaskInfo.SourceFileLength
} }
if pieceTotal != 0 {
task.PieceTotal = pieceTotal
}
task.CdnStatus = updateTaskInfo.CdnStatus task.CdnStatus = updateTaskInfo.CdnStatus
return task, nil return nil
} }
// IsSame check whether the two task provided are the same // getTask get task from taskStore and convert it to *SeedTask type
func IsSame(task1, task2 *types.SeedTask) bool { func (tm *manager) getTask(taskID string) (*SeedTask, bool) {
task, ok := tm.taskStore.Load(taskID)
if !ok {
return nil, false
}
return task.(*SeedTask), true
}
func (tm *manager) deleteTask(taskID string) {
tm.accessTimeMap.Delete(taskID)
tm.taskURLUnreachableStore.Delete(taskID)
tm.taskStore.Delete(taskID)
}
// getTaskAccessTime get access time of task and convert it to time.Time type
func (tm *manager) getTaskAccessTime(taskID string) (time.Time, bool) {
access, ok := tm.accessTimeMap.Load(taskID)
if !ok {
return time.Time{}, false
}
return access.(time.Time), true
}
// getTaskUnreachableTime get unreachable time of task and convert it to time.Time type
func (tm *manager) getTaskUnreachableTime(taskID string) (time.Time, bool) {
unreachableTime, ok := tm.taskURLUnreachableStore.Load(taskID)
if !ok {
return time.Time{}, false
}
return unreachableTime.(time.Time), true
}
// IsSame check if task1 is same with task2
func IsSame(task1, task2 *SeedTask) bool {
if task1 == task2 { if task1 == task2 {
return true return true
} }
if task1.ID != task2.ID {
return false
}
if task1.TaskURL != task2.TaskURL { if task1.TaskURL != task2.TaskURL {
return false return false
} }
if !stringutils.IsBlank(task1.RequestDigest) && !stringutils.IsBlank(task2.RequestDigest) { if task1.Range != task2.Range {
if task1.RequestDigest != task2.RequestDigest { return false
return false
}
} }
if !stringutils.IsBlank(task1.RequestDigest) && !stringutils.IsBlank(task2.SourceRealDigest) { if task1.Tag != task2.Tag {
return task1.SourceRealDigest == task2.RequestDigest return false
} }
if task1.Digest != task2.Digest {
return false
}
if task1.Filter != task2.Filter {
return false
}
return true return true
} }

215
cdn/supervisor/task/task.go Normal file
View File

@ -0,0 +1,215 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package task
import (
"strings"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/util/net/urlutils"
"d7y.io/dragonfly/v2/pkg/util/rangeutils"
)
// SeedTask is the CDN-side record of a single seeding job: it carries the
// task identity, the source and CDN file metadata, the download status,
// and the pieces produced so far.
type SeedTask struct {
	// ID of the task
	ID string `json:"ID,omitempty"`
	// RawURL is the resource's URL which user uses dfget to download. The location of URL can be anywhere, LAN or WAN.
	// For image distribution, this is image layer's URL in image registry.
	// The resource url is provided by dfget command line parameter.
	RawURL string `json:"rawURL,omitempty"`
	// TaskURL is generated from rawURL. rawURL may contain some queries or parameter, dfget will filter some queries via
	// --filter parameter of dfget. The usage of it is that different rawURL may generate the same taskID.
	TaskURL string `json:"taskURL,omitempty"`
	// SourceFileLength is the length of the source file in bytes.
	SourceFileLength int64 `json:"sourceFileLength,omitempty"`
	// CdnFileLength is the length of the file stored on CDN
	CdnFileLength int64 `json:"cdnFileLength,omitempty"`
	// PieceSize is the size of pieces in bytes
	PieceSize int32 `json:"pieceSize,omitempty"`
	// CdnStatus is the status of the created task related to CDN functionality.
	//
	// Enum: [WAITING RUNNING FAILED SUCCESS SOURCE_ERROR]
	CdnStatus string `json:"cdnStatus,omitempty"`
	// TotalPieceCount is the total number of pieces
	TotalPieceCount int32 `json:"totalPieceCount,omitempty"`
	// SourceRealDigest when CDN finishes downloading file/image from the source location,
	// the md5 sum of the source file will be calculated as the value of the SourceRealDigest.
	// And it will be used to compare with RequestDigest value to check whether file is complete.
	SourceRealDigest string `json:"sourceRealDigest,omitempty"`
	// PieceMd5Sign is the SHA256 signature computed over all pieces' md5 signatures.
	PieceMd5Sign string `json:"pieceMd5Sign,omitempty"`
	// Digest checks integrity of url content, for example md5:xxx or sha256:yyy
	Digest string `json:"digest,omitempty"`
	// Tag identifies different task for same url, conflict with digest
	Tag string `json:"tag,omitempty"`
	// Range content range for url
	Range string `json:"range,omitempty"`
	// Filter url used to generate task id
	Filter string `json:"filter,omitempty"`
	// Header other url header infos
	Header map[string]string `json:"header,omitempty"`
	// Pieces pieces of task, keyed by piece number; excluded from JSON output.
	Pieces map[uint32]*PieceInfo `json:"-"`
	// logger is the per-task structured logger, created lazily by Log().
	logger *logger.SugaredLoggerOnWith
}
// PieceInfo describes a single piece of a seed task: its index, md5,
// the byte range inside the CDN file and inside the origin file, its
// length, and its storage style.
type PieceInfo struct {
	// PieceNum is the zero-based index of the piece within the task.
	PieceNum uint32 `json:"piece_num"`
	// PieceMd5 is the md5 checksum of the piece content.
	PieceMd5 string `json:"piece_md5"`
	// PieceRange is the byte range of the piece in the CDN file.
	PieceRange *rangeutils.Range `json:"piece_range"`
	// OriginRange is the byte range of the piece in the origin (source) file.
	OriginRange *rangeutils.Range `json:"origin_range"`
	// PieceLen is the length of the piece in bytes.
	PieceLen uint32 `json:"piece_len"`
	// PieceStyle indicates how the piece bytes are stored/encoded.
	PieceStyle base.PieceStyle `json:"piece_style"`
}

const (
	// UnknownTotalPieceCount marks a task whose total piece count has not
	// been determined yet.
	UnknownTotalPieceCount = -1
)
// NewSeedTask builds a SeedTask in the WAITING state for the given task ID
// and raw URL. A nil urlMeta is treated the same as an empty meta. The
// task URL is derived from rawURL by stripping the query parameters named
// in urlMeta.Filter; the source file length starts out unknown.
func NewSeedTask(taskID string, rawURL string, urlMeta *base.UrlMeta) *SeedTask {
	meta := urlMeta
	if meta == nil {
		meta = &base.UrlMeta{}
	}
	// Fields whose zero value matches the desired initial state
	// (CdnFileLength, PieceSize, SourceRealDigest, PieceMd5Sign) are
	// intentionally omitted from the literal.
	task := &SeedTask{
		ID:               taskID,
		RawURL:           rawURL,
		TaskURL:          urlutils.FilterURLParam(rawURL, strings.Split(meta.Filter, "&")),
		SourceFileLength: source.UnknownSourceFileLen,
		CdnStatus:        StatusWaiting,
		TotalPieceCount:  UnknownTotalPieceCount,
		Digest:           meta.Digest,
		Tag:              meta.Tag,
		Range:            meta.Range,
		Filter:           meta.Filter,
		Header:           meta.Header,
		Pieces:           make(map[uint32]*PieceInfo),
		logger:           logger.WithTaskID(taskID),
	}
	return task
}
// Clone returns a deep copy of task: the Header and Pieces maps are
// duplicated so that mutating the clone cannot affect the original.
func (task *SeedTask) Clone() *SeedTask {
	cloneTask := new(SeedTask)
	*cloneTask = *task
	// The struct copy above aliases the original maps; allocate fresh maps
	// before copying entries. The original code wrote the entries back into
	// the shared map, so "clone" and source silently shared (and raced on)
	// the same Header and Pieces.
	if task.Header != nil {
		cloneTask.Header = make(map[string]string, len(task.Header))
		for key, value := range task.Header {
			cloneTask.Header[key] = value
		}
	}
	if len(task.Pieces) > 0 {
		cloneTask.Pieces = make(map[uint32]*PieceInfo, len(task.Pieces))
		for pieceNum, piece := range task.Pieces {
			// Note: *PieceInfo values are still shared pointers; only the
			// map structure is detached here, matching the original intent.
			cloneTask.Pieces[pieceNum] = piece
		}
	}
	return cloneTask
}
// IsSuccess reports whether the task's CdnStatus is SUCCESS.
func (task *SeedTask) IsSuccess() bool {
	return task.CdnStatus == StatusSuccess
}
// IsFrozen reports whether the task is in a non-running, restartable
// state: failed, still waiting, or aborted by a source error.
func (task *SeedTask) IsFrozen() bool {
	switch task.CdnStatus {
	case StatusFailed, StatusWaiting, StatusSourceError:
		return true
	default:
		return false
	}
}
// IsWait reports whether the task is still in the WAITING state.
func (task *SeedTask) IsWait() bool {
	return task.CdnStatus == StatusWaiting
}
// IsError reports whether the task ended in an error state, either a
// plain failure or a source error.
func (task *SeedTask) IsError() bool {
	switch task.CdnStatus {
	case StatusFailed, StatusSourceError:
		return true
	default:
		return false
	}
}
// IsDone reports whether the task has reached a terminal state
// (FAILED, SUCCESS, or SOURCE_ERROR).
func (task *SeedTask) IsDone() bool {
	return task.CdnStatus == StatusFailed || task.CdnStatus == StatusSuccess || task.CdnStatus == StatusSourceError
}
// UpdateStatus overwrites the task's CdnStatus with cdnStatus.
// NOTE(review): no validation or synchronization is performed here —
// confirm callers serialize access to the task.
func (task *SeedTask) UpdateStatus(cdnStatus string) {
	task.CdnStatus = cdnStatus
}
// UpdateTaskInfo unconditionally overwrites the task's status, digest,
// piece signature, and length fields with the supplied values. Unlike the
// manager-level update path, no success/failure precedence checks are
// applied here.
func (task *SeedTask) UpdateTaskInfo(cdnStatus, realDigest, pieceMd5Sign string, sourceFileLength, cdnFileLength int64) {
	task.CdnStatus = cdnStatus
	task.PieceMd5Sign = pieceMd5Sign
	task.SourceRealDigest = realDigest
	task.SourceFileLength = sourceFileLength
	task.CdnFileLength = cdnFileLength
}
// Log returns the task-scoped logger, creating it on first use from the
// task ID.
// NOTE(review): the lazy initialization is not synchronized — concurrent
// first calls could race; confirm callers serialize access to the task.
func (task *SeedTask) Log() *logger.SugaredLoggerOnWith {
	if task.logger == nil {
		task.logger = logger.WithTaskID(task.ID)
	}
	return task.logger
}
// StartTrigger marks the task as RUNNING and discards any previously
// recorded pieces by resetting the Pieces map.
func (task *SeedTask) StartTrigger() {
	task.CdnStatus = StatusRunning
	task.Pieces = make(map[uint32]*PieceInfo)
}
// CdnStatus enum values for a seed task's lifecycle.
const (
	// StatusWaiting captures enum value "WAITING"
	StatusWaiting string = "WAITING"
	// StatusRunning captures enum value "RUNNING"
	StatusRunning string = "RUNNING"
	// StatusFailed captures enum value "FAILED"
	StatusFailed string = "FAILED"
	// StatusSuccess captures enum value "SUCCESS"
	StatusSuccess string = "SUCCESS"
	// StatusSourceError captures enum value "SOURCE_ERROR"
	StatusSourceError string = "SOURCE_ERROR"
)
// IsEqual reports whether task1 and task2 are semantically equal, ignoring
// the Pieces map and unexported fields (i.e. the lazily created logger).
func IsEqual(task1, task2 SeedTask) bool {
	return cmp.Equal(task1, task2, cmpopts.IgnoreFields(SeedTask{}, "Pieces"), cmpopts.IgnoreUnexported(SeedTask{}))
}

View File

@ -1,45 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//go:generate mockgen -destination ./mock/mock_task_mgr.go -package mock d7y.io/dragonfly/v2/cdn/supervisor SeedTaskMgr
package supervisor
import (
"context"
"d7y.io/dragonfly/v2/cdn/types"
)
// SeedTaskMgr as an interface defines all operations against SeedTask.
// A SeedTask will store some meta info about the taskFile, pieces and something else.
// A seedTask corresponds to three files on the disk, which are identified by taskId, the data file meta file piece file
type SeedTaskMgr interface {
// Register register seed task
Register(context.Context, *types.SeedTask) (pieceCh <-chan *types.SeedPiece, err error)
// Get get task Info with specified taskId.
Get(string) (*types.SeedTask, error)
// Exist check task existence with specified taskId.
Exist(string) (*types.SeedTask, bool)
// Delete delete a task.
Delete(string) error
// GetPieces
GetPieces(context.Context, string) (pieces []*types.SeedPiece, err error)
}

View File

@ -1,34 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package types
import "d7y.io/dragonfly/v2/pkg/util/rangeutils"
type SeedPiece struct {
PieceStyle PieceFormat `json:"piece_style"` // 0: PlainUnspecified
PieceNum uint32 `json:"piece_num"`
PieceMd5 string `json:"piece_md_5"`
PieceRange *rangeutils.Range `json:"piece_range"`
OriginRange *rangeutils.Range `json:"origin_range"`
PieceLen uint32 `json:"piece_len"`
}
type PieceFormat int8
const (
PlainUnspecified PieceFormat = 1
)

View File

@ -1,127 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package types
import (
"strings"
logger "d7y.io/dragonfly/v2/internal/dflog"
"d7y.io/dragonfly/v2/pkg/rpc/base"
"d7y.io/dragonfly/v2/pkg/source"
"d7y.io/dragonfly/v2/pkg/util/net/urlutils"
)
type SeedTask struct {
TaskID string `json:"taskId,omitempty"`
URL string `json:"url,omitempty"`
TaskURL string `json:"taskUrl,omitempty"`
SourceFileLength int64 `json:"sourceFileLength,omitempty"`
CdnFileLength int64 `json:"cdnFileLength,omitempty"`
PieceSize int32 `json:"pieceSize,omitempty"`
Header map[string]string `json:"header,omitempty"`
CdnStatus string `json:"cdnStatus,omitempty"`
PieceTotal int32 `json:"pieceTotal,omitempty"`
RequestDigest string `json:"requestDigest,omitempty"`
SourceRealDigest string `json:"sourceRealDigest,omitempty"`
Range string `json:"range,omitempty"`
PieceMd5Sign string `json:"pieceMd5Sign,omitempty"`
logger *logger.SugaredLoggerOnWith
}
const (
IllegalSourceFileLen = -100
)
func NewSeedTask(taskID string, rawURL string, urlMeta *base.UrlMeta) *SeedTask {
if urlMeta == nil {
urlMeta = &base.UrlMeta{}
}
return &SeedTask{
TaskID: taskID,
Header: urlMeta.Header,
RequestDigest: urlMeta.Digest,
URL: rawURL,
TaskURL: urlutils.FilterURLParam(rawURL, strings.Split(urlMeta.Filter, "&")),
SourceFileLength: source.UnknownSourceFileLen,
CdnFileLength: 0,
PieceSize: 0,
Range: urlMeta.Range,
CdnStatus: TaskInfoCdnStatusWaiting,
logger: logger.WithTaskID(taskID),
}
}
// IsSuccess determines that whether the CDNStatus is success.
func (task *SeedTask) IsSuccess() bool {
return task.CdnStatus == TaskInfoCdnStatusSuccess
}
// IsFrozen if task status is frozen
func (task *SeedTask) IsFrozen() bool {
return task.CdnStatus == TaskInfoCdnStatusFailed || task.CdnStatus == TaskInfoCdnStatusWaiting || task.CdnStatus == TaskInfoCdnStatusSourceError
}
// IsWait if task status is wait
func (task *SeedTask) IsWait() bool {
return task.CdnStatus == TaskInfoCdnStatusWaiting
}
// IsError if task status if fail
func (task *SeedTask) IsError() bool {
return task.CdnStatus == TaskInfoCdnStatusFailed || task.CdnStatus == TaskInfoCdnStatusSourceError
}
func (task *SeedTask) IsDone() bool {
return task.CdnStatus == TaskInfoCdnStatusFailed || task.CdnStatus == TaskInfoCdnStatusSuccess || task.CdnStatus == TaskInfoCdnStatusSourceError
}
func (task *SeedTask) UpdateStatus(cdnStatus string) {
task.CdnStatus = cdnStatus
}
func (task *SeedTask) UpdateTaskInfo(cdnStatus, realDigest, pieceMd5Sign string, sourceFileLength, cdnFileLength int64) {
task.CdnStatus = cdnStatus
task.PieceMd5Sign = pieceMd5Sign
task.SourceRealDigest = realDigest
task.SourceFileLength = sourceFileLength
task.CdnFileLength = cdnFileLength
}
func (task *SeedTask) Log() *logger.SugaredLoggerOnWith {
if task.logger == nil {
task.logger = logger.WithTaskID(task.TaskID)
}
return task.logger
}
const (
// TaskInfoCdnStatusWaiting captures enum value "WAITING"
TaskInfoCdnStatusWaiting string = "WAITING"
// TaskInfoCdnStatusRunning captures enum value "RUNNING"
TaskInfoCdnStatusRunning string = "RUNNING"
// TaskInfoCdnStatusFailed captures enum value "FAILED"
TaskInfoCdnStatusFailed string = "FAILED"
// TaskInfoCdnStatusSuccess captures enum value "SUCCESS"
TaskInfoCdnStatusSuccess string = "SUCCESS"
// TaskInfoCdnStatusSourceError captures enum value "SOURCE_ERROR"
TaskInfoCdnStatusSourceError string = "SOURCE_ERROR"
)

View File

@ -1,26 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package types
// TaskRegisterRequest
type TaskRegisterRequest struct {
URL string `json:"rawURL,omitempty"`
TaskID string `json:"taskId,omitempty"`
Digest string `json:"digest,omitempty"`
Filter []string `json:"filter,omitempty"`
Header map[string]string `json:"header,omitempty"`
}

View File

@ -102,6 +102,6 @@ func runCdnSystem() error {
return err return err
} }
dependency.SetupQuitSignalHandler(func() { svr.Stop() }) dependency.SetupQuitSignalHandler(func() { logger.Fatalf("stop server failed: %v", svr.Stop()) })
return svr.Serve() return svr.Serve()
} }

View File

@ -59,6 +59,12 @@ func TestComputePieceSize(t *testing.T) {
length: 3100 * 1024 * 1024, length: 3100 * 1024 * 1024,
}, },
want: DefaultPieceSizeLimit, want: DefaultPieceSizeLimit,
}, {
name: "500M+ length",
args: args{
length: 552562021,
},
want: DefaultPieceSize + 3*1024*1024,
}, },
} }
for _, tt := range tests { for _, tt := range tests {

View File

@ -1,230 +0,0 @@
/*
* Copyright 2020 The Dragonfly Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package syncmap
import (
"container/list"
"strconv"
"sync"
"time"
"github.com/pkg/errors"
"go.uber.org/atomic"
"d7y.io/dragonfly/v2/internal/dferrors"
"d7y.io/dragonfly/v2/pkg/util/stringutils"
)
// SyncMap is a thread-safe map providing generic support
type SyncMap struct {
*sync.Map
}
// NewSyncMap returns a new SyncMap.
func NewSyncMap() *SyncMap {
return &SyncMap{&sync.Map{}}
}
// Add adds a key-value pair into the *sync.Map.
// The ErrEmptyValue error will be returned if the key is empty.
func (mmap *SyncMap) Add(key string, value interface{}) error {
if stringutils.IsBlank(key) {
return errors.Wrap(dferrors.ErrEmptyValue, "key")
}
mmap.Store(key, value)
return nil
}
// Get returns result as interface{} according to the key.
// The ErrEmptyValue error will be returned if the key is empty.
// And the ErrDataNotFound error will be returned if the key cannot be found.
func (mmap *SyncMap) Get(key string) (interface{}, error) {
if stringutils.IsBlank(key) {
return nil, errors.Wrap(dferrors.ErrEmptyValue, "key")
}
if v, ok := mmap.Load(key); ok {
return v, nil
}
return nil, errors.Wrapf(dferrors.ErrDataNotFound, "get key %s from map", key)
}
// GetAsMap returns result as SyncMap.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsMap(key string) (*SyncMap, error) {
v, err := mmap.Get(key)
if err != nil {
return nil, errors.Wrapf(err, "get key %s from map", key)
}
if value, ok := v.(*SyncMap); ok {
return value, nil
}
return nil, errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
}
// GetAsList returns result as list
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsList(key string) (*list.List, error) {
v, err := mmap.Get(key)
if err != nil {
return list.New(), errors.Wrapf(err, "get key %s from map", key)
}
if value, ok := v.(*list.List); ok {
return value, nil
}
return nil, errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
}
// GetAsInt returns result as int.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsInt(key string) (int, error) {
v, err := mmap.Get(key)
if err != nil {
return 0, errors.Wrapf(err, "get key %s from map", key)
}
if value, ok := v.(int); ok {
return value, nil
}
return 0, errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
}
// GetAsInt64 returns result as int64.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsInt64(key string) (int64, error) {
v, err := mmap.Get(key)
if err != nil {
return 0, errors.Wrapf(err, "get key %s from map", key)
}
if value, ok := v.(int64); ok {
return value, nil
}
return 0, errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
}
// GetAsString returns result as string.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsString(key string) (string, error) {
v, err := mmap.Get(key)
if err != nil {
return "", errors.Wrapf(err, "get key %s from map", key)
}
if value, ok := v.(string); ok {
return value, nil
}
return "", errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
}
// GetAsBool returns result as bool.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsBool(key string) (bool, error) {
v, err := mmap.Get(key)
if err != nil {
return false, errors.Wrapf(err, "get key %s from map", key)
}
if value, ok := v.(bool); ok {
return value, nil
}
return false, errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
}
// GetAsAtomicInt returns result as *AtomicInt.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsAtomicInt(key string) (*atomic.Int32, error) {
v, err := mmap.Get(key)
if err != nil {
return nil, errors.Wrapf(err, "get key %s from map", key)
}
if value, ok := v.(*atomic.Int32); ok {
return value, nil
}
return nil, errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
}
// GetAsTime returns result as Time.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsTime(key string) (time.Time, error) {
v, err := mmap.Get(key)
if err != nil {
return time.Now(), errors.Wrapf(err, "get key %s from map", key)
}
if value, ok := v.(time.Time); ok {
return value, nil
}
return time.Now(), errors.Wrapf(dferrors.ErrConvertFailed, "get key %s from map with value %s", key, v)
}
// Remove deletes the key-value pair from the mmap.
// The ErrEmptyValue error will be returned if the key is empty.
// And the ErrDataNotFound error will be returned if the key cannot be found.
func (mmap *SyncMap) Remove(key string) error {
if stringutils.IsBlank(key) {
return errors.Wrap(dferrors.ErrEmptyValue, "key")
}
if _, ok := mmap.Load(key); !ok {
return errors.Wrapf(dferrors.ErrDataNotFound, "get key %s from map", key)
}
mmap.Delete(key)
return nil
}
// ListKeyAsStringSlice returns the list of keys as a string slice.
func (mmap *SyncMap) ListKeyAsStringSlice() (result []string) {
if mmap == nil {
return []string{}
}
rangeFunc := func(key, value interface{}) bool {
if v, ok := key.(string); ok {
result = append(result, v)
return true
}
return true
}
mmap.Range(rangeFunc)
return
}
// ListKeyAsIntSlice returns the list of keys as an int slice.
func (mmap *SyncMap) ListKeyAsIntSlice() (result []int) {
if mmap == nil {
return []int{}
}
rangeFunc := func(key, value interface{}) bool {
if v, ok := key.(string); ok {
if value, err := strconv.Atoi(v); err == nil {
result = append(result, value)
return true
}
}
return true
}
mmap.Range(rangeFunc)
return
}

View File

@ -14,14 +14,11 @@
* limitations under the License. * limitations under the License.
*/ */
package maputils package gc
// DeepCopy copies the src to dst and return a non-nil dst map. type Config struct {
func DeepCopy(src map[string]string) map[string]string { }
dst := make(map[string]string)
func (config Config) applyDefaults() Config {
for k, v := range src { return config
dst[k] = v
}
return dst
} }

View File

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
// Package timeutils provides utilities supplementing the standard 'time' package. // Package structutils provides utilities supplementing the standard 'time' package.
package structutils package structutils
import ( import (

View File

@ -42,7 +42,7 @@ type TaskManager interface {
Get(string) (*Task, bool) Get(string) (*Task, bool)
// Delete task // Delete task
Delete(string) Delete(string)
// Get or add task // GetOrAdd returns the existing task or adds the given task
GetOrAdd(*Task) (*Task, bool) GetOrAdd(*Task) (*Task, bool)
} }