Fix golang lint (#249)

Signed-off-by: Gaius <gaius.qi@gmail.com>
Gaius committed 2021-05-24 16:57:36 +08:00
parent 59f227abf2
commit f9b4d77bb2
GPG Key ID: 8B4E5D1290FA2FFB (no known key found for this signature in database)
68 changed files with 404 additions and 345 deletions
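Most of the additions below are mechanical renames that bring identifiers in line with Go's initialism convention (the rule golint/stylecheck enforce): TaskId becomes TaskID, Url becomes URL, PeerId becomes PeerID, and so on. A minimal sketch of the pattern, using made-up names rather than the repository's own types:

```go
// Illustration of the naming rule applied throughout this commit; the type
// and field names here are made up for the example, not taken from the repo.
package main

import "fmt"

// Linters such as golint/stylecheck (ST1003) expect initialisms to keep a
// consistent case, so TaskId, TaskUrl and PeerId become TaskID, TaskURL, PeerID.
type seedInfo struct {
	TaskID  string
	TaskURL string
	PeerID  string
}

func main() {
	s := seedInfo{TaskID: "task-1", TaskURL: "https://example.com/blobs/1", PeerID: "peer-1"}
	fmt.Printf("%+v\n", s)
}
```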


@@ -25,7 +25,7 @@ jobs:
         uses: actions/checkout@v2
       - name: Golangci lint
-        uses: golangci/golangci-lint-action@v2
+        uses: golangci/golangci-lint-action@v2.5.2
         with:
           version: latest

.gitignore vendored

@@ -31,3 +31,32 @@ mysql
 !.vscode/launch.json
 !.vscode/extensions.json
 *.code-workspace
+
+### macOS ###
+
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk


@@ -4,7 +4,7 @@ run:
 linters-settings:
   gocyclo:
-    min-complexity: 20
+    min-complexity: 40
 linters:
   disable-all: true
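For context on the gocyclo setting above: the linter scores each function starting at 1 and adds a point for every if, for, case, && and ||, and min-complexity is the threshold at which it starts to complain, so raising it from 20 to 40 relaxes the check considerably. A small illustration, not code from this repository:

```go
package main

import "fmt"

// classify has a cyclomatic complexity of 5:
// 1 (base) + 1 (if) + 1 (else if) + 1 (else if) + 1 (&&).
// With min-complexity: 40, gocyclo only reports far more branchy functions.
func classify(n int) string {
	if n < 0 {
		return "negative"
	} else if n == 0 {
		return "zero"
	} else if n%2 == 0 && n > 100 {
		return "large even"
	}
	return "other"
}

func main() {
	fmt.Println(classify(-3), classify(0), classify(202), classify(7))
}
```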


@@ -48,11 +48,11 @@ func newCacheDataManager(storeMgr storage.Manager) *cacheDataManager {
 // writeFileMetaDataByTask stores the metadata of task by task to storage.
 func (mm *cacheDataManager) writeFileMetaDataByTask(ctx context.Context, task *types.SeedTask) (*storage.FileMetaData, error) {
-    mm.cacheLocker.Lock(task.TaskId, false)
-    defer mm.cacheLocker.UnLock(task.TaskId, false)
+    mm.cacheLocker.Lock(task.TaskID, false)
+    defer mm.cacheLocker.UnLock(task.TaskID, false)
     metaData := &storage.FileMetaData{
-        TaskId:          task.TaskId,
-        TaskURL:         task.TaskUrl,
+        TaskID:          task.TaskID,
+        TaskURL:         task.TaskURL,
         PieceSize:       task.PieceSize,
         SourceFileLen:   task.SourceFileLength,
         AccessTime:      getCurrentTimeMillisFunc(),
@@ -60,7 +60,7 @@ func (mm *cacheDataManager) writeFileMetaDataByTask(ctx context.Context, task *t
         TotalPieceCount: task.PieceTotal,
     }
-    if err := mm.storage.WriteFileMetaData(ctx, task.TaskId, metaData); err != nil {
+    if err := mm.storage.WriteFileMetaData(ctx, task.TaskID, metaData); err != nil {
         return nil, errors.Wrapf(err, "failed to write file metadata to storage")
     }
@@ -197,8 +197,8 @@ func (mm *cacheDataManager) readDownloadFile(ctx context.Context, taskID string)
 }
 func (mm *cacheDataManager) resetRepo(ctx context.Context, task *types.SeedTask) error {
-    mm.cacheLocker.Lock(task.TaskId, false)
-    defer mm.cacheLocker.UnLock(task.TaskId, false)
+    mm.cacheLocker.Lock(task.TaskID, false)
+    defer mm.cacheLocker.UnLock(task.TaskID, false)
     return mm.storage.ResetRepo(ctx, task)
 }


@@ -65,7 +65,7 @@ func (cd *cacheDetector) detectCache(ctx context.Context, task *types.SeedTask)
     //}
     result, err := cd.doDetect(ctx, task)
     if err != nil {
-        logger.WithTaskID(task.TaskId).Infof("failed to detect cache, reset cache: %v", err)
+        logger.WithTaskID(task.TaskID).Infof("failed to detect cache, reset cache: %v", err)
         metaData, err := cd.resetCache(ctx, task)
         if err == nil {
             result = &cacheResult{
@@ -75,46 +75,46 @@ func (cd *cacheDetector) detectCache(ctx context.Context, task *types.SeedTask)
         }
         return result, err
     }
-    if err := cd.cacheDataManager.updateAccessTime(ctx, task.TaskId, getCurrentTimeMillisFunc()); err != nil {
-        logger.WithTaskID(task.TaskId).Warnf("failed to update task access time ")
+    if err := cd.cacheDataManager.updateAccessTime(ctx, task.TaskID, getCurrentTimeMillisFunc()); err != nil {
+        logger.WithTaskID(task.TaskID).Warnf("failed to update task access time ")
     }
     return result, nil
 }
 // detectCache the actual detect action which detects file metaData and pieces metaData of specific task
 func (cd *cacheDetector) doDetect(ctx context.Context, task *types.SeedTask) (result *cacheResult, err error) {
-    fileMetaData, err := cd.cacheDataManager.readFileMetaData(ctx, task.TaskId)
+    fileMetaData, err := cd.cacheDataManager.readFileMetaData(ctx, task.TaskID)
     if err != nil {
         return nil, errors.Wrapf(err, "failed to read file meta data")
     }
     if err := checkSameFile(task, fileMetaData); err != nil {
         return nil, errors.Wrapf(err, "task does not match meta information of task file")
     }
-    expired, err := cd.resourceClient.IsExpired(task.Url, task.Header, fileMetaData.ExpireInfo)
+    expired, err := cd.resourceClient.IsExpired(task.URL, task.Header, fileMetaData.ExpireInfo)
     if err != nil {
         // if the expiration check fails, treat the resource as not expired to avoid hammering the source
-        logger.WithTaskID(task.TaskId).Errorf("failed to check if the task expired: %v", err)
+        logger.WithTaskID(task.TaskID).Errorf("failed to check if the task expired: %v", err)
     }
-    logger.WithTaskID(task.TaskId).Debugf("task expired result: %t", expired)
+    logger.WithTaskID(task.TaskID).Debugf("task expired result: %t", expired)
     if expired {
-        return nil, errors.Wrapf(cdnerrors.ErrResourceExpired, "url:%s, expireInfo:%+v", task.Url,
+        return nil, errors.Wrapf(cdnerrors.ErrResourceExpired, "url:%s, expireInfo:%+v", task.URL,
             fileMetaData.ExpireInfo)
     }
     // not expired
     if fileMetaData.Finish {
         // quickly detect the cache situation through the meta data
-        return cd.parseByReadMetaFile(ctx, task.TaskId, fileMetaData)
+        return cd.parseByReadMetaFile(ctx, task.TaskID, fileMetaData)
     }
     // check if the resource supports range request. if so,
     // detect the cache situation by reading piece meta and data file
-    supportRange, err := cd.resourceClient.IsSupportRange(task.Url, task.Header)
+    supportRange, err := cd.resourceClient.IsSupportRange(task.URL, task.Header)
     if err != nil {
-        return nil, errors.Wrapf(err, "failed to check if url(%s) supports range request", task.Url)
+        return nil, errors.Wrapf(err, "failed to check if url(%s) supports range request", task.URL)
     }
     if !supportRange {
-        return nil, errors.Wrapf(cdnerrors.ErrResourceNotSupportRangeRequest, "url:%s", task.Url)
+        return nil, errors.Wrapf(cdnerrors.ErrResourceNotSupportRangeRequest, "url:%s", task.URL)
     }
-    return cd.parseByReadFile(ctx, task.TaskId, fileMetaData)
+    return cd.parseByReadFile(ctx, task.TaskID, fileMetaData)
 }
 // parseByReadMetaFile detect cache by read meta and pieceMeta files of task


@@ -42,12 +42,12 @@ func checkSameFile(task *types.SeedTask, metaData *storage.FileMetaData) error {
             task.PieceSize)
     }
-    if metaData.TaskId != task.TaskId {
-        return errors.Errorf("meta task TaskId(%s) is not equals with task TaskId(%s)", metaData.TaskId, task.TaskId)
+    if metaData.TaskID != task.TaskID {
+        return errors.Errorf("meta task TaskId(%s) is not equals with task TaskId(%s)", metaData.TaskID, task.TaskID)
     }
-    if metaData.TaskURL != task.TaskUrl {
-        return errors.Errorf("meta task taskUrl(%s) is not equals with task taskUrl(%s)", metaData.TaskURL, task.Url)
+    if metaData.TaskURL != task.TaskURL {
+        return errors.Errorf("meta task taskUrl(%s) is not equals with task taskUrl(%s)", metaData.TaskURL, task.URL)
     }
     if !stringutils.IsBlank(metaData.SourceRealMd5) && !stringutils.IsBlank(task.RequestMd5) &&
         metaData.SourceRealMd5 != task.RequestMd5 {


@@ -28,7 +28,7 @@ import (
 )
 type protocolContent struct {
-    TaskId       string
+    TaskID       string
     pieceNum     int32
     pieceSize    int32
     pieceContent *bytes.Buffer
@@ -80,13 +80,13 @@ func (cw *cacheWriter) startWriter(ctx context.Context, reader io.Reader, task *
         if int(pieceContLeft) <= n {
             bb.Write(buf[:pieceContLeft])
             pc := &protocolContent{
-                TaskId:       task.TaskId,
+                TaskID:       task.TaskID,
                 pieceNum:     curPieceNum,
                 pieceSize:    task.PieceSize,
                 pieceContent: bb,
             }
             jobCh <- pc
-            logger.WithTaskID(task.TaskId).Debugf("send protocolContent to jobCh, pieceNum: %d", curPieceNum)
+            logger.WithTaskID(task.TaskID).Debugf("send protocolContent to jobCh, pieceNum: %d", curPieceNum)
             curPieceNum++
             // write the data left to a new buffer
@@ -105,16 +105,16 @@ func (cw *cacheWriter) startWriter(ctx context.Context, reader io.Reader, task *
         if err == io.EOF {
             if bb.Len() > 0 {
                 pc := &protocolContent{
-                    TaskId:       task.TaskId,
+                    TaskID:       task.TaskID,
                     pieceNum:     curPieceNum,
                     pieceSize:    task.PieceSize,
                     pieceContent: bb,
                 }
                 jobCh <- pc
                 curPieceNum++
-                logger.WithTaskID(task.TaskId).Debugf("send the last protocolContent, pieceNum: %d", curPieceNum)
+                logger.WithTaskID(task.TaskID).Debugf("send the last protocolContent, pieceNum: %d", curPieceNum)
             }
-            logger.WithTaskID(task.TaskId).Info("send all protocolContents and wait for cdnWriter")
+            logger.WithTaskID(task.TaskID).Info("send all protocolContents and wait for cdnWriter")
             break
         }
         if err != nil {
@@ -127,12 +127,12 @@ func (cw *cacheWriter) startWriter(ctx context.Context, reader io.Reader, task *
     close(jobCh)
     wg.Wait()
-    storageInfo, err := cw.cacheDataManager.statDownloadFile(ctx, task.TaskId)
+    storageInfo, err := cw.cacheDataManager.statDownloadFile(ctx, task.TaskID)
     if err != nil {
         return &downloadMetadata{backSourceLength: backSourceFileLength}, errors.Wrapf(err, "failed to get cdn file length")
     }
-    pieceMd5Sign, _, err := cw.cacheDataManager.getPieceMd5Sign(ctx, task.TaskId)
+    pieceMd5Sign, _, err := cw.cacheDataManager.getPieceMd5Sign(ctx, task.TaskID)
     if err != nil {
         return &downloadMetadata{backSourceLength: backSourceFileLength}, errors.Wrapf(err, "failed to get piece md5 sign")
     }


@@ -69,8 +69,8 @@ func (cw *cacheWriter) writerPool(ctx context.Context, wg *sync.WaitGroup, write
         pieceLen := originPieceLen // the real length written to the storage medium after processing
         pieceStyle := types.PlainUnspecified
-        if err := cw.writeToFile(ctx, job.TaskId, waitToWriteContent, int64(job.pieceNum)*int64(job.pieceSize), pieceMd5); err != nil {
-            logger.WithTaskID(job.TaskId).Errorf("failed to write file, pieceNum %d: %v", job.pieceNum, err)
+        if err := cw.writeToFile(ctx, job.TaskID, waitToWriteContent, int64(job.pieceNum)*int64(job.pieceSize), pieceMd5); err != nil {
+            logger.WithTaskID(job.TaskID).Errorf("failed to write file, pieceNum %d: %v", job.pieceNum, err)
             // todo redo the job?
             continue
         }
@@ -95,16 +95,16 @@ func (cw *cacheWriter) writerPool(ctx context.Context, wg *sync.WaitGroup, write
         go func(record *storage.PieceMetaRecord) {
             defer wg.Done()
             // todo: push into a channel first, then write the file sequentially in a dedicated goroutine
-            if err := cw.cacheDataManager.appendPieceMetaData(ctx, job.TaskId, record); err != nil {
-                logger.WithTaskID(job.TaskId).Errorf("failed to append piece meta data to file:%v", err)
+            if err := cw.cacheDataManager.appendPieceMetaData(ctx, job.TaskID, record); err != nil {
+                logger.WithTaskID(job.TaskID).Errorf("failed to append piece meta data to file:%v", err)
             }
         }(pieceRecord)
         if cw.cdnReporter != nil {
-            if err := cw.cdnReporter.reportPieceMetaRecord(ctx, job.TaskId, pieceRecord,
+            if err := cw.cdnReporter.reportPieceMetaRecord(ctx, job.TaskID, pieceRecord,
                 DownloaderReport); err != nil {
                 // NOTE: should we do this job again?
-                logger.WithTaskID(job.TaskId).Errorf("failed to report piece status, pieceNum %d pieceMetaRecord %s: %v", job.pieceNum, pieceRecord, err)
+                logger.WithTaskID(job.TaskID).Errorf("failed to report piece status, pieceNum %d pieceMetaRecord %s: %v", job.pieceNum, pieceRecord, err)
                 continue
             }
         }


@@ -41,7 +41,7 @@ func (cm *Manager) download(task *types.SeedTask, detectResult *cacheResult) (io
             headers[RangeHeaderName] = fmt.Sprintf("bytes=%s", breakRange)
         }
     }
-    logger.WithTaskID(task.TaskId).Infof("start download url %s at range:%d-%d: with header: %+v", task.Url, detectResult.breakPoint,
+    logger.WithTaskID(task.TaskID).Infof("start download url %s at range:%d-%d: with header: %+v", task.URL, detectResult.breakPoint,
         task.SourceFileLength, task.Header)
-    return cm.resourceClient.Download(task.Url, headers)
+    return cm.resourceClient.Download(task.URL, headers)
 }


@@ -81,38 +81,38 @@ func NewManager(cfg *config.Config, cacheStore storage.Manager, progressMgr mgr.
 func (cm *Manager) TriggerCDN(ctx context.Context, task *types.SeedTask) (seedTask *types.SeedTask, err error) {
     // obtain taskId write lock
-    cm.cdnLocker.Lock(task.TaskId, false)
-    defer cm.cdnLocker.UnLock(task.TaskId, false)
+    cm.cdnLocker.Lock(task.TaskID, false)
+    defer cm.cdnLocker.UnLock(task.TaskID, false)
     // first: detect Cache
     detectResult, err := cm.detector.detectCache(ctx, task)
     if err != nil {
         return getUpdateTaskInfoWithStatusOnly(types.TaskInfoCdnStatusFailed), errors.Wrapf(err, "failed to detect cache")
     }
-    logger.WithTaskID(task.TaskId).Debugf("detects cache result: %+v", detectResult)
+    logger.WithTaskID(task.TaskID).Debugf("detects cache result: %+v", detectResult)
     // second: report detect result
-    err = cm.cdnReporter.reportCache(ctx, task.TaskId, detectResult)
+    err = cm.cdnReporter.reportCache(ctx, task.TaskID, detectResult)
     if err != nil {
-        logger.WithTaskID(task.TaskId).Errorf("failed to report cache, reset detectResult:%v", err)
+        logger.WithTaskID(task.TaskID).Errorf("failed to report cache, reset detectResult:%v", err)
     }
     // full cache
     if detectResult.breakPoint == -1 {
-        logger.WithTaskID(task.TaskId).Infof("cache full hit on local")
+        logger.WithTaskID(task.TaskID).Infof("cache full hit on local")
         return getUpdateTaskInfo(types.TaskInfoCdnStatusSuccess, detectResult.fileMetaData.SourceRealMd5, detectResult.fileMetaData.PieceMd5Sign,
             detectResult.fileMetaData.SourceFileLen, detectResult.fileMetaData.CdnFileLength), nil
     }
-    server.StatSeedStart(task.TaskId, task.Url)
+    server.StatSeedStart(task.TaskID, task.URL)
     start := time.Now()
     // third: start to download the source file
     body, expireInfo, err := cm.download(task, detectResult)
     // download fail
     if err != nil {
-        server.StatSeedFinish(task.TaskId, task.Url, false, err, start.Nanosecond(), time.Now().Nanosecond(), 0, 0)
+        server.StatSeedFinish(task.TaskID, task.URL, false, err, start.Nanosecond(), time.Now().Nanosecond(), 0, 0)
         return getUpdateTaskInfoWithStatusOnly(types.TaskInfoCdnStatusSourceError), err
     }
     defer body.Close()
     //update Expire info
-    cm.updateExpireInfo(ctx, task.TaskId, expireInfo)
+    cm.updateExpireInfo(ctx, task.TaskID, expireInfo)
     fileMd5 := md5.New()
     if detectResult.fileMd5 != nil {
         fileMd5 = detectResult.fileMd5
@@ -121,12 +121,12 @@ func (cm *Manager) TriggerCDN(ctx context.Context, task *types.SeedTask) (seedTa
     // forth: write to storage
     downloadMetadata, err := cm.writer.startWriter(ctx, reader, task, detectResult)
     if err != nil {
-        server.StatSeedFinish(task.TaskId, task.Url, false, err, start.Nanosecond(), time.Now().Nanosecond(), downloadMetadata.backSourceLength,
+        server.StatSeedFinish(task.TaskID, task.URL, false, err, start.Nanosecond(), time.Now().Nanosecond(), downloadMetadata.backSourceLength,
             downloadMetadata.realSourceFileLength)
-        logger.WithTaskID(task.TaskId).Errorf("failed to write for task: %v", err)
+        logger.WithTaskID(task.TaskID).Errorf("failed to write for task: %v", err)
         return getUpdateTaskInfoWithStatusOnly(types.TaskInfoCdnStatusFailed), err
     }
-    server.StatSeedFinish(task.TaskId, task.Url, true, nil, start.Nanosecond(), time.Now().Nanosecond(), downloadMetadata.backSourceLength,
+    server.StatSeedFinish(task.TaskID, task.URL, true, nil, start.Nanosecond(), time.Now().Nanosecond(), downloadMetadata.backSourceLength,
         downloadMetadata.realSourceFileLength)
     sourceMD5 := reader.Md5()
     // fifth: handle CDN result
@@ -147,7 +147,7 @@ func (cm *Manager) Delete(ctx context.Context, taskID string) error {
 }
 func (cm *Manager) handleCDNResult(ctx context.Context, task *types.SeedTask, sourceMd5 string, downloadMetadata *downloadMetadata) (bool, error) {
-    logger.WithTaskID(task.TaskId).Debugf("handle cdn result, downloadMetaData: %+v", downloadMetadata)
+    logger.WithTaskID(task.TaskID).Debugf("handle cdn result, downloadMetaData: %+v", downloadMetadata)
     var isSuccess = true
     var errorMsg string
     // check md5
@@ -165,7 +165,7 @@ func (cm *Manager) handleCDNResult(ctx context.Context, task *types.SeedTask, so
         isSuccess = false
     }
     if !stringutils.IsBlank(errorMsg) {
-        logger.WithTaskID(task.TaskId).Error(errorMsg)
+        logger.WithTaskID(task.TaskID).Error(errorMsg)
     }
     sourceFileLen := task.SourceFileLength
     if isSuccess && task.SourceFileLength <= 0 {
@@ -177,7 +177,7 @@ func (cm *Manager) handleCDNResult(ctx context.Context, task *types.SeedTask, so
     if !isSuccess {
         cdnFileLength = 0
     }
-    if err := cm.cacheDataManager.updateStatusAndResult(ctx, task.TaskId, &storage.FileMetaData{
+    if err := cm.cacheDataManager.updateStatusAndResult(ctx, task.TaskID, &storage.FileMetaData{
         Finish:        true,
         Success:       isSuccess,
         SourceRealMd5: sourceMd5,
@@ -193,7 +193,7 @@ func (cm *Manager) handleCDNResult(ctx context.Context, task *types.SeedTask, so
         return false, errors.New(errorMsg)
     }
-    logger.WithTaskID(task.TaskId).Infof("success to get task, downloadMetadata:%+v realMd5: %s", downloadMetadata, sourceMd5)
+    logger.WithTaskID(task.TaskID).Infof("success to get task, downloadMetadata:%+v realMd5: %s", downloadMetadata, sourceMd5)
     return true, nil
 }


@@ -243,7 +243,7 @@ func (s *diskStorageMgr) DeleteTask(ctx context.Context, taskID string) error {
 }
 func (s *diskStorageMgr) ResetRepo(ctx context.Context, task *types.SeedTask) error {
-    return s.DeleteTask(ctx, task.TaskId)
+    return s.DeleteTask(ctx, task.TaskID)
 }
 func init() {


@@ -251,13 +251,13 @@ func (h *hybridStorageMgr) CreateUploadLink(ctx context.Context, taskID string)
 }
 func (h *hybridStorageMgr) ResetRepo(ctx context.Context, task *types.SeedTask) error {
-    if err := h.deleteTaskFiles(ctx, task.TaskId, false, true); err != nil {
-        logger.WithTaskID(task.TaskId).Errorf("reset repo: failed to delete task files: %v", err)
+    if err := h.deleteTaskFiles(ctx, task.TaskID, false, true); err != nil {
+        logger.WithTaskID(task.TaskID).Errorf("reset repo: failed to delete task files: %v", err)
     }
     // check whether there is enough space to store the file
-    shmPath, err := h.tryShmSpace(ctx, task.Url, task.TaskId, task.SourceFileLength)
+    shmPath, err := h.tryShmSpace(ctx, task.URL, task.TaskID, task.SourceFileLength)
     if err == nil {
-        return fileutils.SymbolicLink(shmPath, h.diskStore.GetPath(storage.GetDownloadRaw(task.TaskId)))
+        return fileutils.SymbolicLink(shmPath, h.diskStore.GetPath(storage.GetDownloadRaw(task.TaskID)))
     }
     return nil
 }


@@ -144,7 +144,7 @@ func (cleaner *Cleaner) sortInert(ctx context.Context, gapTasks, intervalTasks *
     if metaData.Interval > 0 &&
         gap <= metaData.Interval+(int64(cleaner.Cfg.IntervalThreshold.Seconds())*int64(time.Millisecond)) {
-        info, err := cleaner.StorageMgr.StatDownloadFile(ctx, metaData.TaskId)
+        info, err := cleaner.StorageMgr.StatDownloadFile(ctx, metaData.TaskID)
         if err != nil {
             return err
         }
@@ -154,7 +154,7 @@ func (cleaner *Cleaner) sortInert(ctx context.Context, gapTasks, intervalTasks *
             v = make([]string, 0)
         }
         tasks := v.([]string)
-        tasks = append(tasks, metaData.TaskId)
+        tasks = append(tasks, metaData.TaskID)
         intervalTasks.Put(info.Size, tasks)
         return nil
     }
@@ -164,7 +164,7 @@ func (cleaner *Cleaner) sortInert(ctx context.Context, gapTasks, intervalTasks *
         v = make([]string, 0)
     }
     tasks := v.([]string)
-    tasks = append(tasks, metaData.TaskId)
+    tasks = append(tasks, metaData.TaskID)
     gapTasks.Put(gap, tasks)
     return nil
 }


@@ -65,7 +65,7 @@ type BuildOptions interface {
 // fileMetaData
 type FileMetaData struct {
-    TaskId        string `json:"taskId"`
+    TaskID        string `json:"taskId"`
     TaskURL       string `json:"taskUrl"`
     PieceSize     int32  `json:"pieceSize"`
     SourceFileLen int64  `json:"sourceFileLen"`


@@ -72,48 +72,48 @@ func NewManager(cfg *config.Config, cdnMgr mgr.CDNMgr, progressMgr mgr.SeedProgr
 func (tm *Manager) Register(ctx context.Context, req *types.TaskRegisterRequest) (pieceChan <-chan *types.SeedPiece, err error) {
     task, err := tm.addOrUpdateTask(ctx, req)
     if err != nil {
-        logger.WithTaskID(req.TaskId).Infof("failed to add or update task with req: %+v: %v", req, err)
+        logger.WithTaskID(req.TaskID).Infof("failed to add or update task with req: %+v: %v", req, err)
         return nil, err
     }
-    logger.WithTaskID(task.TaskId).Debugf("success get task info: %+v", task)
+    logger.WithTaskID(task.TaskID).Debugf("success get task info: %+v", task)
     // update accessTime for taskId
-    if err := tm.accessTimeMap.Add(task.TaskId, time.Now()); err != nil {
-        logger.WithTaskID(task.TaskId).Warnf("failed to update accessTime: %v", err)
+    if err := tm.accessTimeMap.Add(task.TaskID, time.Now()); err != nil {
+        logger.WithTaskID(task.TaskID).Warnf("failed to update accessTime: %v", err)
     }
     // trigger CDN
     if err := tm.triggerCdnSyncAction(ctx, task); err != nil {
         return nil, errors.Wrapf(err, "failed to trigger cdn")
     }
-    logger.WithTaskID(task.TaskId).Infof("successfully trigger cdn sync action")
+    logger.WithTaskID(task.TaskID).Infof("successfully trigger cdn sync action")
     // watch seed progress
-    return tm.progressMgr.WatchSeedProgress(ctx, task.TaskId)
+    return tm.progressMgr.WatchSeedProgress(ctx, task.TaskID)
 }
 // triggerCdnSyncAction
 func (tm *Manager) triggerCdnSyncAction(ctx context.Context, task *types.SeedTask) error {
-    synclock.Lock(task.TaskId, true)
+    synclock.Lock(task.TaskID, true)
     if !task.IsFrozen() {
-        logger.WithTaskID(task.TaskId).Infof("seedTask is running or has been downloaded successfully, status:%s", task.CdnStatus)
-        synclock.UnLock(task.TaskId, true)
+        logger.WithTaskID(task.TaskID).Infof("seedTask is running or has been downloaded successfully, status:%s", task.CdnStatus)
+        synclock.UnLock(task.TaskID, true)
         return nil
     }
-    synclock.UnLock(task.TaskId, true)
-    synclock.Lock(task.TaskId, false)
-    defer synclock.UnLock(task.TaskId, false)
+    synclock.UnLock(task.TaskID, true)
+    synclock.Lock(task.TaskID, false)
+    defer synclock.UnLock(task.TaskID, false)
     // reconfirm
     if !task.IsFrozen() {
-        logger.WithTaskID(task.TaskId).Infof("reconfirm find seedTask is running or has been downloaded successfully, status:%s", task.CdnStatus)
+        logger.WithTaskID(task.TaskID).Infof("reconfirm find seedTask is running or has been downloaded successfully, status:%s", task.CdnStatus)
         return nil
     }
     if task.IsWait() {
-        tm.progressMgr.InitSeedProgress(ctx, task.TaskId)
-        logger.WithTaskID(task.TaskId).Infof("successfully init seed progress for task")
+        tm.progressMgr.InitSeedProgress(ctx, task.TaskID)
+        logger.WithTaskID(task.TaskID).Infof("successfully init seed progress for task")
     }
-    updatedTask, err := tm.updateTask(task.TaskId, &types.SeedTask{
+    updatedTask, err := tm.updateTask(task.TaskID, &types.SeedTask{
         CdnStatus: types.TaskInfoCdnStatusRunning,
     })
     if err != nil {
@@ -123,15 +123,14 @@ func (tm *Manager) triggerCdnSyncAction(ctx context.Context, task *types.SeedTas
     go func() {
         updateTaskInfo, err := tm.cdnMgr.TriggerCDN(ctx, task)
         if err != nil {
-            logger.WithTaskID(task.TaskId).Errorf("trigger cdn get error: %v", err)
+            logger.WithTaskID(task.TaskID).Errorf("trigger cdn get error: %v", err)
         }
-        go tm.progressMgr.PublishTask(ctx, task.TaskId, updateTaskInfo)
-        updatedTask, err = tm.updateTask(task.TaskId, updateTaskInfo)
+        go tm.progressMgr.PublishTask(ctx, task.TaskID, updateTaskInfo)
+        updatedTask, err = tm.updateTask(task.TaskID, updateTaskInfo)
         if err != nil {
-            logger.WithTaskID(task.TaskId).Errorf("failed to update task:%v", err)
-        } else {
-            logger.WithTaskID(task.TaskId).Infof("successfully update task cdn updatedTask:%+v", updatedTask)
+            logger.WithTaskID(task.TaskID).Errorf("failed to update task:%v", err)
         }
+        logger.WithTaskID(task.TaskID).Infof("successfully update task cdn updatedTask:%+v", updatedTask)
     }()
     return nil
 }


@@ -40,7 +40,7 @@ func (tm *Manager) addOrUpdateTask(ctx context.Context, request *types.TaskRegis
     if request.Filter != nil {
         taskURL = urlutils.FilterURLParam(request.URL, request.Filter)
     }
-    taskID := request.TaskId
+    taskID := request.TaskID
     synclock.Lock(taskID, false)
     defer synclock.UnLock(taskID, false)
     if key, err := tm.taskURLUnReachableStore.Get(taskID); err == nil {
@@ -54,11 +54,11 @@ func (tm *Manager) addOrUpdateTask(ctx context.Context, request *types.TaskRegis
     }
     var task *types.SeedTask
     newTask := &types.SeedTask{
-        TaskId:           taskID,
+        TaskID:           taskID,
         Header:           request.Header,
         RequestMd5:       request.Md5,
-        Url:              request.URL,
-        TaskUrl:          taskURL,
+        URL:              request.URL,
+        TaskURL:          taskURL,
         CdnStatus:        types.TaskInfoCdnStatusWaiting,
         SourceFileLength: IllegalSourceFileLen,
     }
@@ -80,9 +80,9 @@ func (tm *Manager) addOrUpdateTask(ctx context.Context, request *types.TaskRegis
     }
     // get sourceContentLength with req.Header
-    sourceFileLength, err := tm.resourceClient.GetContentLength(task.Url, request.Header)
+    sourceFileLength, err := tm.resourceClient.GetContentLength(task.URL, request.Header)
     if err != nil {
-        logger.WithTaskID(task.TaskId).Errorf("failed to get url (%s) content length: %v", task.Url, err)
+        logger.WithTaskID(task.TaskID).Errorf("failed to get url (%s) content length: %v", task.URL, err)
         if cdnerrors.IsURLNotReachable(err) {
             tm.taskURLUnReachableStore.Add(taskID, time.Now())
@@ -103,7 +103,7 @@ func (tm *Manager) addOrUpdateTask(ctx context.Context, request *types.TaskRegis
         pieceSize := computePieceSize(task.SourceFileLength)
         task.PieceSize = pieceSize
     }
-    tm.taskStore.Add(task.TaskId, task)
+    tm.taskStore.Add(task.TaskID, task)
     logger.Debugf("success add task:%+v into taskStore", task)
     return task, nil
 }
@@ -169,7 +169,7 @@ func isSameTask(task1, task2 *types.SeedTask) bool {
     if task1 == task2 {
         return true
     }
-    if task1.TaskUrl != task2.TaskUrl {
+    if task1.TaskURL != task2.TaskURL {
         return false
     }


@@ -30,11 +30,17 @@ import (
     "d7y.io/dragonfly/v2/cdnsystem/plugins"
     "d7y.io/dragonfly/v2/cdnsystem/server/service"
     "d7y.io/dragonfly/v2/cdnsystem/source"
+
+    // Init http client
     _ "d7y.io/dragonfly/v2/cdnsystem/source/httpprotocol"
+
+    // Init OSS client
     _ "d7y.io/dragonfly/v2/cdnsystem/source/ossprotocol"
     "d7y.io/dragonfly/v2/pkg/basic/dfnet"
     "d7y.io/dragonfly/v2/pkg/rpc"
     "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/server"
+
+    // Server registered to grpc
     _ "d7y.io/dragonfly/v2/pkg/rpc/cdnsystem/server"
     "d7y.io/dragonfly/v2/pkg/rpc/manager"
     configServer "d7y.io/dragonfly/v2/pkg/rpc/manager/client"


@@ -72,7 +72,7 @@ func constructRegisterRequest(req *cdnsystem.SeedRequest) (*types.TaskRegisterRe
         Header: header,
         URL:    req.Url,
         Md5:    header["md5"],
-        TaskId: req.TaskId,
+        TaskID: req.TaskId,
         Filter: strings.Split(req.Filter, "&"),
     }, nil
 }
@@ -168,11 +168,11 @@ func (css *CdnSeedServer) GetPieceTasks(ctx context.Context, req *base.PieceTask
         return nil, dferrors.Newf(dfcodes.CdnError, "failed to get task(%s) from cdn: %v", req.TaskId, err)
     }
     if task.IsError() {
-        return nil, dferrors.Newf(dfcodes.CdnTaskDownloadFail, "fail to download task(%s), cdnStatus: %s", task.TaskId, task.CdnStatus)
+        return nil, dferrors.Newf(dfcodes.CdnTaskDownloadFail, "fail to download task(%s), cdnStatus: %s", task.TaskID, task.CdnStatus)
     }
     pieces, err := css.taskMgr.GetPieces(ctx, req.TaskId)
     if err != nil {
-        return nil, dferrors.Newf(dfcodes.CdnError, "failed to get pieces of task(%s) from cdn: %v", task.TaskId, err)
+        return nil, dferrors.Newf(dfcodes.CdnError, "failed to get pieces of task(%s) from cdn: %v", task.TaskID, err)
     }
     pieceInfos := make([]*base.PieceInfo, 0)
     var count int32 = 0


@@ -96,7 +96,7 @@ func (osc *ossSourceClient) IsExpired(url string, header, expireInfo map[string]
 }
 func (osc *ossSourceClient) Download(url string, header map[string]string) (io.ReadCloser, map[string]string, error) {
-    ossObject, err := ParseOssObject(url)
+    ossObject, err := ParseOSSObject(url)
     if err != nil {
         return nil, nil, errors.Wrapf(err, "failed to parse oss object from url:%s", url)
     }
@@ -153,7 +153,7 @@ func (osc *ossSourceClient) getMeta(url string, header map[string]string) (http.
     if err != nil {
         return nil, errors.Wrapf(err, "failed to get oss client")
     }
-    ossObject, err := ParseOssObject(url)
+    ossObject, err := ParseOSSObject(url)
     if err != nil {
         return nil, errors.Wrapf(cdnerrors.ErrURLNotReachable, "failed to parse oss object: %v", err)
     }
@@ -183,12 +183,12 @@ func getOptions(header map[string]string) []oss.Option {
     return opts
 }
-type ossObject struct {
+type OSSObject struct {
     bucket string
     object string
 }
-func ParseOssObject(ossURL string) (*ossObject, error) {
+func ParseOSSObject(ossURL string) (*OSSObject, error) {
     url, err := url.Parse(ossURL)
     if url.Scheme != "oss" {
         return nil, fmt.Errorf("url:%s is not oss object", ossURL)
@@ -196,7 +196,7 @@ func ParseOssObject(ossURL string) (*ossObject, error) {
     if err != nil {
         return nil, err
     }
-    return &ossObject{
+    return &OSSObject{
         bucket: url.Host,
         object: url.Path[1:],
     }, nil
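The hunks above also export the parsed type and its constructor (ossObject/ParseOssObject become OSSObject/ParseOSSObject). As the diff shows, the parser treats url.Host as the bucket and url.Path[1:] as the object key; here is a standalone sketch with a made-up oss:// URL, using net/url directly rather than the repository's function:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical OSS URL; the bucket/object split mirrors what the
	// exported OSSObject type in the diff stores.
	u, err := url.Parse("oss://my-bucket/dir/blob.tar")
	if err != nil {
		panic(err)
	}
	fmt.Println("bucket:", u.Host)     // my-bucket
	fmt.Println("object:", u.Path[1:]) // dir/blob.tar
}
```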


@@ -17,9 +17,9 @@
 package types
 type SeedTask struct {
-    TaskId           string `json:"taskId,omitempty"`
-    Url              string `json:"url,omitempty"`
-    TaskUrl          string `json:"taskUrl,omitempty"`
+    TaskID           string `json:"taskId,omitempty"`
+    URL              string `json:"url,omitempty"`
+    TaskURL          string `json:"taskUrl,omitempty"`
     SourceFileLength int64  `json:"sourceFileLength,omitempty"`
     CdnFileLength    int64  `json:"cdnFileLength,omitempty"`
     PieceSize        int32  `json:"pieceSize,omitempty"`
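Note that only the Go field names change in this struct; the json tags keep their original lowercase keys, so the serialized form is untouched by the rename. A trimmed stand-in for SeedTask (not the real type) to demonstrate:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// trimmedSeedTask stands in for the renamed struct above; the json tags are
// the ones shown in the diff, so the wire keys stay "taskId", "url", "taskUrl".
type trimmedSeedTask struct {
	TaskID  string `json:"taskId,omitempty"`
	URL     string `json:"url,omitempty"`
	TaskURL string `json:"taskUrl,omitempty"`
}

func main() {
	b, err := json.Marshal(trimmedSeedTask{TaskID: "t1", URL: "oss://bucket/object", TaskURL: "oss://bucket/object"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"taskId":"t1","url":"oss://bucket/object","taskUrl":"oss://bucket/object"}
}
```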


@@ -19,7 +19,7 @@ package types
 // TaskRegisterRequest
 type TaskRegisterRequest struct {
     URL    string   `json:"rawURL,omitempty"`
-    TaskId string   `json:"taskId,omitempty"`
+    TaskID string   `json:"taskId,omitempty"`
     Md5    string   `json:"md5,omitempty"`
     Filter []string `json:"filter,omitempty"`
     Header map[string]string `json:"header,omitempty"`


@@ -173,7 +173,7 @@ func (cfg *ClientOption) checkOutput() error {
     }
     dir, _ := path.Split(cfg.Output)
-    if err := MkdirAll(dir, 0777, basic.UserId, basic.UserGroup); err != nil {
+    if err := MkdirAll(dir, 0777, basic.UserID, basic.UserGroup); err != nil {
         return err
     }


@@ -21,11 +21,11 @@ package config
 import "d7y.io/dragonfly/v2/pkg/unit"
 var dfgetConfig = ClientOption{
     URL:           "",
     LockFile:      "/tmp/dfget.lock",
     Output:        "",
     Timeout:       0,
     BenchmarkRate: 128 * unit.KB,
     RateLimit:     0,
     Md5:           "",
     DigestMethod:  "",


@@ -19,9 +19,9 @@
 package config
 var dfgetConfig = ClientOption{
     URL:          "",
     LockFile:     "/var/run/dfget.lock",
     Output:       "",
     Timeout:      0,
     Md5:          "",
     DigestMethod: "",


@@ -67,14 +67,14 @@ type peerTask struct {
     // host info about current host
     host *scheduler.PeerHost
     // callback holds some actions, like init, done, fail actions
-    callback PeerTaskCallback
+    callback TaskCallback
     // schedule options
     schedulerOption config.SchedulerOption
     // peer task meta info
-    peerId          string
-    taskId          string
+    peerID          string
+    taskID          string
     contentLength   int64
     totalPiece      int32
     completedLength int64
@@ -124,16 +124,16 @@ func (pt *peerTask) ReportPieceResult(pieceTask *base.PieceInfo, pieceResult *sc
     panic("implement me")
 }
-func (pt *peerTask) SetCallback(callback PeerTaskCallback) {
+func (pt *peerTask) SetCallback(callback TaskCallback) {
     pt.callback = callback
 }
 func (pt *peerTask) GetPeerID() string {
-    return pt.peerId
+    return pt.peerID
 }
 func (pt *peerTask) GetTaskID() string {
-    return pt.taskId
+    return pt.taskID
 }
 func (pt *peerTask) GetContentLength() int64 {
@@ -164,7 +164,7 @@ func (pt *peerTask) Log() *logger.SugaredLoggerOnWith {
     return pt.SugaredLoggerOnWith
 }
-func (pt *peerTask) pullPieces(pti PeerTask, cleanUnfinishedFunc func()) {
+func (pt *peerTask) pullPieces(pti Task, cleanUnfinishedFunc func()) {
     // when there is a single piece, try to download first
     if pt.singlePiece != nil {
         go pt.pullSinglePiece(pti, cleanUnfinishedFunc)
@@ -292,7 +292,7 @@ func (pt *peerTask) isExitPeerPacketCode(pp *scheduler.PeerPacket) bool {
     return false
 }
-func (pt *peerTask) pullSinglePiece(pti PeerTask, cleanUnfinishedFunc func()) {
+func (pt *peerTask) pullSinglePiece(pti Task, cleanUnfinishedFunc func()) {
     pt.Infof("single piece, dest peer id: %s, piece num: %d, size: %d",
         pt.singlePiece.DstPid, pt.singlePiece.PieceInfo.PieceNum, pt.singlePiece.PieceInfo.RangeSize)
@@ -332,7 +332,7 @@ func (pt *peerTask) pullSinglePiece(pti PeerTask, cleanUnfinishedFunc func()) {
 // TODO when main peer is not available, switch to steel peers
 // piece manager need peer task interface, pti make it compatibility for stream peer task
-func (pt *peerTask) pullPiecesFromPeers(pti PeerTask, cleanUnfinishedFunc func()) {
+func (pt *peerTask) pullPiecesFromPeers(pti Task, cleanUnfinishedFunc func()) {
     defer func() {
         close(pt.failedPieceCh)
         cleanUnfinishedFunc()
@@ -387,8 +387,8 @@ loop:
         pt.Debugf("try to get pieces, number: %d, limit: %d", num, limit)
         piecePacket, err := pt.preparePieceTasks(
             &base.PieceTaskRequest{
-                TaskId:   pt.taskId,
-                SrcPid:   pt.peerId,
+                TaskId:   pt.taskID,
+                SrcPid:   pt.peerID,
                 StartNum: num,
                 Limit:    limit,
             })
@@ -507,7 +507,7 @@ loop:
     }
 }
-func (pt *peerTask) downloadPieceWorker(id int32, pti PeerTask, requests chan *DownloadPieceRequest) {
+func (pt *peerTask) downloadPieceWorker(id int32, pti Task, requests chan *DownloadPieceRequest) {
     for {
         select {
         case request := <-requests:
@@ -630,8 +630,8 @@ retry:
     }
     pt.Errorf("get piece task from peer(%s) error: %s, code: %d", peer.PeerId, err, code)
     perr := pt.peerPacketStream.Send(&scheduler.PieceResult{
-        TaskId:   pt.taskId,
-        SrcPid:   pt.peerId,
+        TaskId:   pt.taskID,
+        SrcPid:   pt.peerID,
         DstPid:   peer.PeerId,
         Success:  false,
         Code:     code,
@@ -672,8 +672,8 @@ func (pt *peerTask) getPieceTasks(span trace.Span, curPeerPacket *scheduler.Peer
     if len(pp.PieceInfos) == 0 {
         count++
         er := pt.peerPacketStream.Send(&scheduler.PieceResult{
-            TaskId:   pt.taskId,
-            SrcPid:   pt.peerId,
+            TaskId:   pt.taskID,
+            SrcPid:   pt.peerID,
             DstPid:   peer.PeerId,
             Success:  false,
             Code:     dfcodes.ClientWaitPieceReady,


@@ -42,7 +42,7 @@ type FilePeerTaskRequest struct {
 // FilePeerTask represents a peer task to download a file
 type FilePeerTask interface {
-    PeerTask
+    Task
     // Start start the special peer task, return a *FilePeerTaskProgress channel for updating download progress
     Start(ctx context.Context) (chan *FilePeerTaskProgress, error)
 }
@@ -62,7 +62,7 @@ type ProgressState struct {
 type FilePeerTaskProgress struct {
     State           *ProgressState
-    TaskId          string
+    TaskID          string
     PeerID          string
     ContentLength   int64
     CompletedLength int64
@@ -130,8 +130,8 @@ func newFilePeerTask(ctx context.Context,
     if piece, ok := result.DirectPiece.(*scheduler.RegisterResult_PieceContent); ok {
         return ctx, nil, &TinyData{
             span:    span,
-            TaskId:  result.TaskId,
-            PeerId:  request.PeerId,
+            TaskID:  result.TaskId,
+            PeerID:  request.PeerId,
             Content: piece.PieceContent,
         }, nil
     }
@@ -165,8 +165,8 @@ func newFilePeerTask(ctx context.Context,
         peerPacketStream: peerPacketStream,
         pieceManager:     pieceManager,
         peerPacketReady:  make(chan bool),
-        peerId:           request.PeerId,
-        taskId:           result.TaskId,
+        peerID:           request.PeerId,
+        taskID:           result.TaskId,
         singlePiece:      singlePiece,
         done:             make(chan struct{}),
         span:             span,
@@ -245,8 +245,8 @@ func (pt *filePeerTask) ReportPieceResult(piece *base.PieceInfo, pieceResult *sc
             Code: pieceResult.Code,
             Msg:  "downloading",
         },
-        TaskId:          pt.taskId,
-        PeerID:          pt.peerId,
+        TaskID:          pt.taskID,
+        PeerID:          pt.peerID,
         ContentLength:   pt.contentLength,
         CompletedLength: pt.completedLength,
         PeerTaskDone:    false,
@@ -275,7 +275,7 @@ func (pt *filePeerTask) finish() error {
     defer pt.recoverFromPanic()
     // send EOF piece result to scheduler
     _ = pt.peerPacketStream.Send(
-        scheduler.NewEndPieceResult(pt.taskId, pt.peerId, pt.readyPieces.Settled()))
+        scheduler.NewEndPieceResult(pt.taskID, pt.peerID, pt.readyPieces.Settled()))
     pt.Debugf("finish end piece result sent")
     var (
@@ -299,8 +299,8 @@ func (pt *filePeerTask) finish() error {
             Code: code,
             Msg:  message,
         },
-        TaskId:          pt.taskId,
-        PeerID:          pt.peerId,
+        TaskID:          pt.taskID,
+        PeerID:          pt.peerID,
         ContentLength:   pt.contentLength,
         CompletedLength: pt.completedLength,
         PeerTaskDone:    true,
@@ -345,7 +345,7 @@ func (pt *filePeerTask) cleanUnfinished() {
     defer pt.recoverFromPanic()
     // send EOF piece result to scheduler
     _ = pt.peerPacketStream.Send(
-        scheduler.NewEndPieceResult(pt.taskId, pt.peerId, pt.readyPieces.Settled()))
+        scheduler.NewEndPieceResult(pt.taskID, pt.peerID, pt.readyPieces.Settled()))
     pt.Debugf("clean up end piece result sent")
     pg := &FilePeerTaskProgress{
@@ -354,8 +354,8 @@ func (pt *filePeerTask) cleanUnfinished() {
             Code: pt.failedCode,
             Msg:  pt.failedReason,
         },
-        TaskId:          pt.taskId,
-        PeerID:          pt.peerId,
+        TaskID:          pt.taskID,
+        PeerID:          pt.peerID,
         ContentLength:   pt.contentLength,
         CompletedLength: pt.completedLength,
         PeerTaskDone:    true,


@@ -37,7 +37,7 @@ func (p *filePeerTaskCallback) GetStartTime() time.Time {
     return p.start
 }
-func (p *filePeerTaskCallback) Init(pt PeerTask) error {
+func (p *filePeerTaskCallback) Init(pt Task) error {
     // prepare storage
     err := p.ptm.storageManager.RegisterTask(p.ctx,
         storage.RegisterTaskRequest{
@@ -55,7 +55,7 @@ func (p *filePeerTaskCallback) Init(pt PeerTask) error {
     return err
 }
-func (p *filePeerTaskCallback) Update(pt PeerTask) error {
+func (p *filePeerTaskCallback) Update(pt Task) error {
     // update storage
     err := p.ptm.storageManager.UpdateTask(p.ctx,
         &storage.UpdateTaskRequest{
@@ -72,7 +72,7 @@ func (p *filePeerTaskCallback) Update(pt PeerTask) error {
     return err
 }
-func (p *filePeerTaskCallback) Done(pt PeerTask) error {
+func (p *filePeerTaskCallback) Done(pt Task) error {
     var cost = time.Now().Sub(p.start).Milliseconds()
     pt.Log().Infof("file peer task done, cost: %dms", cost)
     e := p.ptm.storageManager.Store(
@@ -111,7 +111,7 @@ func (p *filePeerTaskCallback) Done(pt PeerTask) error {
     return nil
 }
-func (p *filePeerTaskCallback) Fail(pt PeerTask, code base.Code, reason string) error {
+func (p *filePeerTaskCallback) Fail(pt Task, code base.Code, reason string) error {
     p.ptm.PeerTaskDone(p.req.PeerId)
     var end = time.Now()
     pt.Log().Errorf("file peer task failed, code: %d, reason: %s", code, reason)


@@ -38,8 +38,8 @@ import (
     schedulerclient "d7y.io/dragonfly/v2/pkg/rpc/scheduler/client"
 )
-// PeerTaskManager processes all peer tasks request
-type PeerTaskManager interface {
+// TaskManager processes all peer tasks request
+type TaskManager interface {
     // StartFilePeerTask starts a peer task to download a file
     // return a progress channel for request download progress
     // tiny stands task file is tiny and task is done
@@ -56,8 +56,8 @@ type PeerTaskManager interface {
     Stop(ctx context.Context) error
 }
-// PeerTask represents common interface to operate a peer task
-type PeerTask interface {
+// Task represents common interface to operate a peer task
+type Task interface {
     Context() context.Context
     Log() *logger.SugaredLoggerOnWith
     ReportPieceResult(pieceTask *base.PieceInfo, pieceResult *scheduler.PieceResult) error
@@ -67,25 +67,25 @@ type PeerTask interface {
     GetContentLength() int64
     // SetContentLength will called after download completed, when download from source without content length
     SetContentLength(int64) error
-    SetCallback(PeerTaskCallback)
+    SetCallback(TaskCallback)
     AddTraffic(int64)
    GetTraffic() int64
 }
-// PeerTaskCallback inserts some operations for peer task download lifecycle
-type PeerTaskCallback interface {
-    Init(pt PeerTask) error
-    Done(pt PeerTask) error
-    Update(pt PeerTask) error
-    Fail(pt PeerTask, code base.Code, reason string) error
+// TaskCallback inserts some operations for peer task download lifecycle
+type TaskCallback interface {
+    Init(pt Task) error
+    Done(pt Task) error
+    Update(pt Task) error
+    Fail(pt Task, code base.Code, reason string) error
     GetStartTime() time.Time
 }
 type TinyData struct {
     // span is used by peer task manager to record events without peer task
     span    trace.Span
-    TaskId  string
-    PeerId  string
+    TaskID  string
+    PeerID  string
     Content []byte
 }
@@ -113,7 +113,7 @@ func NewPeerTaskManager(
     storageManager storage.Manager,
     schedulerClient schedulerclient.SchedulerClient,
     schedulerOption config.SchedulerOption,
-    perPeerRateLimit rate.Limit) (PeerTaskManager, error) {
+    perPeerRateLimit rate.Limit) (TaskManager, error) {
     ptm := &peerTaskManager{
         host: host,
@@ -138,7 +138,7 @@ func (ptm *peerTaskManager) StartFilePeerTask(ctx context.Context, req *FilePeer
     // tiny file content is returned by scheduler, just write to output
     if tiny != nil {
         defer tiny.span.End()
-        log := logger.With("peer", tiny.PeerId, "task", tiny.TaskId, "component", "peerTaskManager")
+        log := logger.With("peer", tiny.PeerID, "task", tiny.TaskID, "component", "peerTaskManager")
         _, err = os.Stat(req.Output)
         if err == nil {
             // remove exist file


@@ -190,7 +190,7 @@ func (mr *MockPeerTaskMockRecorder) SetContentLength(arg0 interface{}) *gomock.C
 }
 // SetCallback mocks base method
-func (m *MockPeerTask) SetCallback(arg0 PeerTaskCallback) {
+func (m *MockPeerTask) SetCallback(arg0 TaskCallback) {
     m.ctrl.Call(m, "SetCallback", arg0)
 }
@@ -269,7 +269,7 @@ func (m *MockPeerTaskCallback) EXPECT() *MockPeerTaskCallbackMockRecorder {
 }
 // Init mocks base method
-func (m *MockPeerTaskCallback) Init(pt PeerTask) error {
+func (m *MockPeerTaskCallback) Init(pt Task) error {
     ret := m.ctrl.Call(m, "Init", pt)
     ret0, _ := ret[0].(error)
     return ret0
@@ -281,7 +281,7 @@ func (mr *MockPeerTaskCallbackMockRecorder) Init(pt interface{}) *gomock.Call {
 }
 // Done mocks base method
-func (m *MockPeerTaskCallback) Done(pt PeerTask) error {
+func (m *MockPeerTaskCallback) Done(pt Task) error {
     ret := m.ctrl.Call(m, "Done", pt)
     ret0, _ := ret[0].(error)
     return ret0
@@ -293,7 +293,7 @@ func (mr *MockPeerTaskCallbackMockRecorder) Done(pt interface{}) *gomock.Call {
 }
 // Update mocks base method
-func (m *MockPeerTaskCallback) Update(pt PeerTask) error {
+func (m *MockPeerTaskCallback) Update(pt Task) error {
     ret := m.ctrl.Call(m, "Update", pt)
     ret0, _ := ret[0].(error)
     return ret0
@@ -305,7 +305,7 @@ func (mr *MockPeerTaskCallbackMockRecorder) Update(pt interface{}) *gomock.Call
 }
 // Fail mocks base method
-func (m *MockPeerTaskCallback) Fail(pt PeerTask, reason string) error {
+func (m *MockPeerTaskCallback) Fail(pt Task, reason string) error {
     ret := m.ctrl.Call(m, "Fail", pt, reason)
     ret0, _ := ret[0].(error)
     return ret0


@@ -41,7 +41,7 @@ import (
 // StreamPeerTask represents a peer task with stream io for reading directly without once more disk io
 type StreamPeerTask interface {
-    PeerTask
+    Task
     // Start start the special peer task, return a io.Reader for stream io
     // when all data transferred, reader return a io.EOF
     // attribute stands some extra data, like HTTP response Header
@@ -111,8 +111,8 @@ func newStreamPeerTask(ctx context.Context,
     if piece, ok := result.DirectPiece.(*scheduler.RegisterResult_PieceContent); ok {
         return ctx, nil, &TinyData{
             span:    span,
-            TaskId:  result.TaskId,
-            PeerId:  request.PeerId,
+            TaskID:  result.TaskId,
+            PeerID:  request.PeerId,
             Content: piece.PieceContent,
         }, nil
     }
@@ -144,8 +144,8 @@ func newStreamPeerTask(ctx context.Context,
         peerPacketStream: peerPacketStream,
         pieceManager:     pieceManager,
         peerPacketReady:  make(chan bool),
-        peerId:           request.PeerId,
-        taskId:           result.TaskId,
+        peerID:           request.PeerId,
+        taskID:           result.TaskId,
         singlePiece:      singlePiece,
         done:             make(chan struct{}),
         span:             span,
@@ -266,8 +266,8 @@ func (s *streamPeerTask) Start(ctx context.Context) (io.Reader, map[string]strin
     } else {
         attr[headers.TransferEncoding] = "chunked"
     }
-    attr[config.HeaderDragonflyTask] = s.taskId
-    attr[config.HeaderDragonflyPeer] = s.peerId
+    attr[config.HeaderDragonflyTask] = s.taskID
+    attr[config.HeaderDragonflyPeer] = s.peerID
     go func(first int32) {
         defer func() {
@@ -364,7 +364,7 @@ func (s *streamPeerTask) finish() error {
     s.once.Do(func() {
         // send EOF piece result to scheduler
         _ = s.peerPacketStream.Send(
-            scheduler.NewEndPieceResult(s.taskId, s.peerId, s.readyPieces.Settled()))
+            scheduler.NewEndPieceResult(s.taskID, s.peerID, s.readyPieces.Settled()))
         s.Debugf("end piece result sent")
         close(s.done)
         //close(s.successPieceCh)
@@ -382,7 +382,7 @@ func (s *streamPeerTask) cleanUnfinished() {
     s.once.Do(func() {
         // send EOF piece result to scheduler
         _ = s.peerPacketStream.Send(
-            scheduler.NewEndPieceResult(s.taskId, s.peerId, s.readyPieces.Settled()))
+            scheduler.NewEndPieceResult(s.taskID, s.peerID, s.readyPieces.Settled()))
         s.Debugf("end piece result sent")
         close(s.done)
         //close(s.successPieceCh)
@@ -407,8 +407,8 @@ func (s *streamPeerTask) SetContentLength(i int64) error {
 func (s *streamPeerTask) writeTo(w io.Writer, pieceNum int32) (int64, error) {
     pr, pc, err := s.pieceManager.ReadPiece(s.ctx, &storage.ReadPieceRequest{
         PeerTaskMetaData: storage.PeerTaskMetaData{
-            PeerID: s.peerId,
-            TaskID: s.taskId,
+            PeerID: s.peerID,
+            TaskID: s.taskID,
         },
         PieceMetaData: storage.PieceMetaData{
             Num: pieceNum,

View File

@ -37,7 +37,7 @@ func (p *streamPeerTaskCallback) GetStartTime() time.Time {
return p.start return p.start
} }
func (p *streamPeerTaskCallback) Init(pt PeerTask) error { func (p *streamPeerTaskCallback) Init(pt Task) error {
// prepare storage // prepare storage
err := p.ptm.storageManager.RegisterTask(p.ctx, err := p.ptm.storageManager.RegisterTask(p.ctx,
storage.RegisterTaskRequest{ storage.RegisterTaskRequest{
@ -55,7 +55,7 @@ func (p *streamPeerTaskCallback) Init(pt PeerTask) error {
return err return err
} }
func (p *streamPeerTaskCallback) Update(pt PeerTask) error { func (p *streamPeerTaskCallback) Update(pt Task) error {
// update storage // update storage
err := p.ptm.storageManager.UpdateTask(p.ctx, err := p.ptm.storageManager.UpdateTask(p.ctx,
&storage.UpdateTaskRequest{ &storage.UpdateTaskRequest{
@ -72,7 +72,7 @@ func (p *streamPeerTaskCallback) Update(pt PeerTask) error {
return err return err
} }
func (p *streamPeerTaskCallback) Done(pt PeerTask) error { func (p *streamPeerTaskCallback) Done(pt Task) error {
var cost = time.Now().Sub(p.start).Milliseconds() var cost = time.Now().Sub(p.start).Milliseconds()
pt.Log().Infof("stream peer task done, cost: %dms", cost) pt.Log().Infof("stream peer task done, cost: %dms", cost)
e := p.ptm.storageManager.Store( e := p.ptm.storageManager.Store(
@ -110,7 +110,7 @@ func (p *streamPeerTaskCallback) Done(pt PeerTask) error {
return nil return nil
} }
func (p *streamPeerTaskCallback) Fail(pt PeerTask, code base.Code, reason string) error { func (p *streamPeerTaskCallback) Fail(pt Task, code base.Code, reason string) error {
p.ptm.PeerTaskDone(p.req.PeerId) p.ptm.PeerTaskDone(p.req.PeerId)
var end = time.Now() var end = time.Now()
pt.Log().Errorf("stream peer task failed, code: %d, reason: %s", code, reason) pt.Log().Errorf("stream peer task failed, code: %d, reason: %s", code, reason)

View File

@ -62,6 +62,7 @@ func (o optimizedPieceDownloader) DownloadPiece(ctx context.Context, request *Do
panic(err) panic(err)
} }
// TODO refactor httputil.NewClientConn // TODO refactor httputil.NewClientConn
//nolint
client := httputil.NewClientConn(conn, nil) client := httputil.NewClientConn(conn, nil)
// add default timeout // add default timeout
ctx, cancel := context.WithTimeout(ctx, 30*time.Second) ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
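`httputil.NewClientConn` is deprecated in the standard library, which is presumably what the linter objects to here; the hunk silences it with a bare `//nolint` until the TODO above is addressed. golangci-lint also accepts a scoped form, `//nolint:<linter> // reason`, which keeps every other check active on that line. A compile-only sketch (the linter name is an assumption — list whichever check actually fires):

```go
package main

import (
	"net"
	"net/http/httputil"
)

// newLegacyConn shows the scoped directive; the bare //nolint used in the
// diff disables all linters on its line instead of just one.
func newLegacyConn(conn net.Conn) *httputil.ClientConn {
	//nolint:staticcheck // NewClientConn is deprecated; kept until the refactor lands
	return httputil.NewClientConn(conn, nil)
}

func main() {}
```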

View File

@ -26,6 +26,8 @@ import (
cdnconfig "d7y.io/dragonfly/v2/cdnsystem/config" cdnconfig "d7y.io/dragonfly/v2/cdnsystem/config"
"d7y.io/dragonfly/v2/cdnsystem/source" "d7y.io/dragonfly/v2/cdnsystem/source"
// Init http client
_ "d7y.io/dragonfly/v2/cdnsystem/source/httpprotocol" _ "d7y.io/dragonfly/v2/cdnsystem/source/httpprotocol"
"d7y.io/dragonfly/v2/client/clientutil" "d7y.io/dragonfly/v2/client/clientutil"
"d7y.io/dragonfly/v2/client/config" "d7y.io/dragonfly/v2/client/config"
@ -38,8 +40,8 @@ import (
) )
type PieceManager interface { type PieceManager interface {
DownloadSource(ctx context.Context, pt PeerTask, request *scheduler.PeerTaskRequest) error DownloadSource(ctx context.Context, pt Task, request *scheduler.PeerTaskRequest) error
DownloadPiece(ctx context.Context, peerTask PeerTask, request *DownloadPieceRequest) bool DownloadPiece(ctx context.Context, peerTask Task, request *DownloadPieceRequest) bool
ReadPiece(ctx context.Context, req *storage.ReadPieceRequest) (io.Reader, io.Closer, error) ReadPiece(ctx context.Context, req *storage.ReadPieceRequest) (io.Reader, io.Closer, error)
} }
@ -95,7 +97,7 @@ func WithLimiter(limiter *rate.Limiter) func(*pieceManager) {
} }
} }
func (pm *pieceManager) DownloadPiece(ctx context.Context, pt PeerTask, request *DownloadPieceRequest) (success bool) { func (pm *pieceManager) DownloadPiece(ctx context.Context, pt Task, request *DownloadPieceRequest) (success bool) {
var ( var (
start = time.Now().UnixNano() start = time.Now().UnixNano()
end int64 end int64
@ -163,7 +165,7 @@ func (pm *pieceManager) DownloadPiece(ctx context.Context, pt PeerTask, request
return return
} }
func (pm *pieceManager) pushSuccessResult(peerTask PeerTask, dstPid string, piece *base.PieceInfo, start int64, end int64) { func (pm *pieceManager) pushSuccessResult(peerTask Task, dstPid string, piece *base.PieceInfo, start int64, end int64) {
err := peerTask.ReportPieceResult( err := peerTask.ReportPieceResult(
piece, piece,
&scheduler.PieceResult{ &scheduler.PieceResult{
@ -183,7 +185,7 @@ func (pm *pieceManager) pushSuccessResult(peerTask PeerTask, dstPid string, piec
} }
} }
func (pm *pieceManager) pushFailResult(peerTask PeerTask, dstPid string, piece *base.PieceInfo, start int64, end int64) { func (pm *pieceManager) pushFailResult(peerTask Task, dstPid string, piece *base.PieceInfo, start int64, end int64) {
err := peerTask.ReportPieceResult( err := peerTask.ReportPieceResult(
piece, piece,
&scheduler.PieceResult{ &scheduler.PieceResult{
@ -207,7 +209,7 @@ func (pm *pieceManager) ReadPiece(ctx context.Context, req *storage.ReadPieceReq
return pm.storageManager.ReadPiece(ctx, req) return pm.storageManager.ReadPiece(ctx, req)
} }
func (pm *pieceManager) processPieceFromSource(pt PeerTask, func (pm *pieceManager) processPieceFromSource(pt Task,
reader io.Reader, contentLength int64, pieceNum int32, pieceOffset uint64, pieceSize int32) (int64, error) { reader io.Reader, contentLength int64, pieceNum int32, pieceOffset uint64, pieceSize int32) (int64, error) {
var ( var (
success bool success bool
@ -290,7 +292,7 @@ func (pm *pieceManager) processPieceFromSource(pt PeerTask,
return n, nil return n, nil
} }
func (pm *pieceManager) DownloadSource(ctx context.Context, pt PeerTask, request *scheduler.PeerTaskRequest) error { func (pm *pieceManager) DownloadSource(ctx context.Context, pt Task, request *scheduler.PeerTaskRequest) error {
if request.UrlMata == nil { if request.UrlMata == nil {
request.UrlMata = &base.UrlMeta{ request.UrlMata = &base.UrlMeta{
Header: map[string]string{}, Header: map[string]string{},
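The import hunks in this file (and in the dfget and scheduler files below) add a comment in front of each blank `_` import, since golint wants side-effect-only imports justified. A self-contained illustration of the pattern, using a standard-library package as a stand-in for the Dragonfly source/RPC registrations:

```go
package main

import (
	"fmt"
	"net/http"

	// Blank import kept only for its init() side effect: it registers the
	// /debug/pprof handlers on http.DefaultServeMux, the same way the
	// httpprotocol import above registers the HTTP source client.
	_ "net/http/pprof"
)

func main() {
	fmt.Println("pprof handlers registered:", http.DefaultServeMux != nil)
}
```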

View File

@ -71,7 +71,7 @@ type peerHost struct {
StorageManager storage.Manager StorageManager storage.Manager
GCManager gc.Manager GCManager gc.Manager
PeerTaskManager peer.PeerTaskManager PeerTaskManager peer.TaskManager
PieceManager peer.PieceManager PieceManager peer.PieceManager
} }

View File

@ -72,7 +72,7 @@ type Proxy struct {
directHandler http.Handler directHandler http.Handler
// peerTaskManager is the peer task manager // peerTaskManager is the peer task manager
peerTaskManager peer.PeerTaskManager peerTaskManager peer.TaskManager
// peerHost is the peer host info // peerHost is the peer host info
peerHost *scheduler.PeerHost peerHost *scheduler.PeerHost
@ -104,7 +104,7 @@ func WithPeerHost(peerHost *scheduler.PeerHost) Option {
} }
// WithPeerTaskManager sets the peer.PeerTaskManager // WithPeerTaskManager sets the peer.PeerTaskManager
func WithPeerTaskManager(peerTaskManager peer.PeerTaskManager) Option { func WithPeerTaskManager(peerTaskManager peer.TaskManager) Option {
return func(p *Proxy) *Proxy { return func(p *Proxy) *Proxy {
p.peerTaskManager = peerTaskManager p.peerTaskManager = peerTaskManager
return p return p

View File

@ -48,7 +48,7 @@ type proxyManager struct {
config.ListenOption config.ListenOption
} }
func NewProxyManager(peerHost *scheduler.PeerHost, peerTaskManager peer.PeerTaskManager, opts *config.ProxyOption) (Manager, error) { func NewProxyManager(peerHost *scheduler.PeerHost, peerTaskManager peer.TaskManager, opts *config.ProxyOption) (Manager, error) {
registry := opts.RegistryMirror registry := opts.RegistryMirror
proxies := opts.Proxies proxies := opts.Proxies
hijackHTTPS := opts.HijackHTTPS hijackHTTPS := opts.HijackHTTPS

View File

@ -50,7 +50,7 @@ type Manager interface {
type manager struct { type manager struct {
clientutil.KeepAlive clientutil.KeepAlive
peerHost *scheduler.PeerHost peerHost *scheduler.PeerHost
peerTaskManager peer.PeerTaskManager peerTaskManager peer.TaskManager
storageManager storage.Manager storageManager storage.Manager
downloadServer rpc.Server downloadServer rpc.Server
@ -60,7 +60,7 @@ type manager struct {
var _ dfdaemonserver.DaemonServer = &manager{} var _ dfdaemonserver.DaemonServer = &manager{}
func NewManager(peerHost *scheduler.PeerHost, peerTaskManager peer.PeerTaskManager, storageManager storage.Manager, downloadOpts []grpc.ServerOption, peerOpts []grpc.ServerOption) (Manager, error) { func NewManager(peerHost *scheduler.PeerHost, peerTaskManager peer.TaskManager, storageManager storage.Manager, downloadOpts []grpc.ServerOption, peerOpts []grpc.ServerOption) (Manager, error) {
mgr := &manager{ mgr := &manager{
KeepAlive: clientutil.NewKeepAlive("service manager"), KeepAlive: clientutil.NewKeepAlive("service manager"),
peerHost: peerHost, peerHost: peerHost,
@ -155,8 +155,8 @@ func (m *manager) Download(ctx context.Context,
} }
if tiny != nil { if tiny != nil {
results <- &dfdaemongrpc.DownResult{ results <- &dfdaemongrpc.DownResult{
TaskId: tiny.TaskId, TaskId: tiny.TaskID,
PeerId: tiny.PeerId, PeerId: tiny.PeerID,
CompletedLength: uint64(len(tiny.Content)), CompletedLength: uint64(len(tiny.Content)),
Done: true, Done: true,
} }
@ -179,11 +179,11 @@ func (m *manager) Download(ctx context.Context,
return dferrors.New(dfcodes.UnknownError, err.Error()) return dferrors.New(dfcodes.UnknownError, err.Error())
} }
if !p.State.Success { if !p.State.Success {
log.Errorf("task %s/%s failed: %d/%s", p.PeerID, p.TaskId, p.State.Code, p.State.Msg) log.Errorf("task %s/%s failed: %d/%s", p.PeerID, p.TaskID, p.State.Code, p.State.Msg)
return dferrors.New(p.State.Code, p.State.Msg) return dferrors.New(p.State.Code, p.State.Msg)
} }
results <- &dfdaemongrpc.DownResult{ results <- &dfdaemongrpc.DownResult{
TaskId: p.TaskId, TaskId: p.TaskID,
PeerId: p.PeerID, PeerId: p.PeerID,
CompletedLength: uint64(p.CompletedLength), CompletedLength: uint64(p.CompletedLength),
Done: p.PeerTaskDone, Done: p.PeerTaskDone,
@ -191,7 +191,7 @@ func (m *manager) Download(ctx context.Context,
// peer task sets PeerTaskDone to true only once // peer task sets PeerTaskDone to true only once
if p.PeerTaskDone { if p.PeerTaskDone {
p.DoneCallback() p.DoneCallback()
log.Infof("task %s/%s done", p.PeerID, p.TaskId) log.Infof("task %s/%s done", p.PeerID, p.TaskID)
if req.Uid != 0 && req.Gid != 0 { if req.Uid != 0 && req.Gid != 0 {
log.Infof("change own to uid %d gid %d", req.Uid, req.Gid) log.Infof("change own to uid %d gid %d", req.Uid, req.Gid)
if err = os.Chown(req.Output, int(req.Uid), int(req.Gid)); err != nil { if err = os.Chown(req.Output, int(req.Uid), int(req.Gid)); err != nil {

View File

@ -61,7 +61,7 @@ func TestDownloadManager_ServeDownload(t *testing.T) {
State: &peer.ProgressState{ State: &peer.ProgressState{
Success: true, Success: true,
}, },
TaskId: "", TaskID: "",
PeerID: "", PeerID: "",
ContentLength: 100, ContentLength: 100,
CompletedLength: int64(i), CompletedLength: int64(i),

View File

@ -249,7 +249,7 @@ func (mr *MockPeerTaskMockRecorder) ReportPieceResult(pieceTask, pieceResult int
} }
// SetCallback mocks base method. // SetCallback mocks base method.
func (m *MockPeerTask) SetCallback(arg0 peer.PeerTaskCallback) { func (m *MockPeerTask) SetCallback(arg0 peer.TaskCallback) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
m.ctrl.Call(m, "SetCallback", arg0) m.ctrl.Call(m, "SetCallback", arg0)
} }
@ -298,7 +298,7 @@ func (m *MockPeerTaskCallback) EXPECT() *MockPeerTaskCallbackMockRecorder {
} }
// Done mocks base method. // Done mocks base method.
func (m *MockPeerTaskCallback) Done(pt peer.PeerTask) error { func (m *MockPeerTaskCallback) Done(pt peer.Task) error {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Done", pt) ret := m.ctrl.Call(m, "Done", pt)
ret0, _ := ret[0].(error) ret0, _ := ret[0].(error)
@ -312,7 +312,7 @@ func (mr *MockPeerTaskCallbackMockRecorder) Done(pt interface{}) *gomock.Call {
} }
// Fail mocks base method. // Fail mocks base method.
func (m *MockPeerTaskCallback) Fail(pt peer.PeerTask, code base.Code, reason string) error { func (m *MockPeerTaskCallback) Fail(pt peer.Task, code base.Code, reason string) error {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Fail", pt, code, reason) ret := m.ctrl.Call(m, "Fail", pt, code, reason)
ret0, _ := ret[0].(error) ret0, _ := ret[0].(error)
@ -340,7 +340,7 @@ func (mr *MockPeerTaskCallbackMockRecorder) GetStartTime() *gomock.Call {
} }
// Init mocks base method. // Init mocks base method.
func (m *MockPeerTaskCallback) Init(pt peer.PeerTask) error { func (m *MockPeerTaskCallback) Init(pt peer.Task) error {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Init", pt) ret := m.ctrl.Call(m, "Init", pt)
ret0, _ := ret[0].(error) ret0, _ := ret[0].(error)
@ -354,7 +354,7 @@ func (mr *MockPeerTaskCallbackMockRecorder) Init(pt interface{}) *gomock.Call {
} }
// Update mocks base method. // Update mocks base method.
func (m *MockPeerTaskCallback) Update(pt peer.PeerTask) error { func (m *MockPeerTaskCallback) Update(pt peer.Task) error {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Update", pt) ret := m.ctrl.Call(m, "Update", pt)
ret0, _ := ret[0].(error) ret0, _ := ret[0].(error)

View File

@ -48,7 +48,7 @@ type transport struct {
shouldUseDragonfly func(req *http.Request) bool shouldUseDragonfly func(req *http.Request) bool
// peerTaskManager is the peer task manager // peerTaskManager is the peer task manager
peerTaskManager peer.PeerTaskManager peerTaskManager peer.TaskManager
// peerHost is the peer host info // peerHost is the peer host info
peerHost *scheduler.PeerHost peerHost *scheduler.PeerHost
@ -72,7 +72,7 @@ func WithPeerHost(peerHost *scheduler.PeerHost) Option {
} }
// WithHTTPSHosts sets the rules for hijacking https requests // WithHTTPSHosts sets the rules for hijacking https requests
func WithPeerTaskManager(peerTaskManager peer.PeerTaskManager) Option { func WithPeerTaskManager(peerTaskManager peer.TaskManager) Option {
return func(rt *transport) *transport { return func(rt *transport) *transport {
rt.peerTaskManager = peerTaskManager rt.peerTaskManager = peerTaskManager
return rt return rt

View File

@ -33,6 +33,8 @@ import (
logger "d7y.io/dragonfly/v2/pkg/dflog" logger "d7y.io/dragonfly/v2/pkg/dflog"
"d7y.io/dragonfly/v2/pkg/rpc/base" "d7y.io/dragonfly/v2/pkg/rpc/base"
dfdaemongrpc "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon" dfdaemongrpc "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon"
// Init daemon rpc client
_ "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client" _ "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client"
dfclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client" dfclient "d7y.io/dragonfly/v2/pkg/rpc/dfdaemon/client"
"github.com/go-http-utils/headers" "github.com/go-http-utils/headers"
@ -74,7 +76,7 @@ func Download(cfg *config.DfgetConfig, client dfclient.DaemonClient) error {
Output: output, Output: output,
BizId: cfg.CallSystem, BizId: cfg.CallSystem,
Filter: filter, Filter: filter,
Uid: int64(basic.UserId), Uid: int64(basic.UserID),
Gid: int64(basic.UserGroup), Gid: int64(basic.UserGroup),
} }
var ( var (
@ -165,8 +167,8 @@ func downloadFromSource(cfg *config.DfgetConfig, hdr map[string]string) (err err
end = time.Now() end = time.Now()
fmt.Printf("Download from source success, time cost: %dms\n", end.Sub(start).Milliseconds()) fmt.Printf("Download from source success, time cost: %dms\n", end.Sub(start).Milliseconds())
// change permission // change permission
logger.Infof("change own to uid %d gid %d", basic.UserId, basic.UserGroup) logger.Infof("change own to uid %d gid %d", basic.UserID, basic.UserGroup)
if err = os.Chown(cfg.Output, basic.UserId, basic.UserGroup); err != nil { if err = os.Chown(cfg.Output, basic.UserID, basic.UserGroup); err != nil {
logger.Errorf("change own failed: %s", err) logger.Errorf("change own failed: %s", err)
return err return err
} }

View File

@ -109,18 +109,18 @@ func InitMonitor(verbose bool, pprofPort int, jaeger string) func() {
vm := statsview.New() vm := statsview.New()
if err := vm.Start(); err != nil { if err := vm.Start(); err != nil {
logger.Warnf("serve pprof error:%v", err) logger.Warnf("serve pprof error:%v", err)
} else {
fc <- func() { vm.Stop() }
} }
fc <- func() { vm.Stop() }
}() }()
} }
if jaeger != "" { if jaeger != "" {
if ff, err := initJaegerTracer(jaeger); err != nil { ff, err := initJaegerTracer(jaeger)
if err != nil {
logger.Warnf("init jaeger tracer error:%v", err) logger.Warnf("init jaeger tracer error:%v", err)
} else {
fc <- ff
} }
fc <- ff
} }
return func() { return func() {

View File

@ -160,10 +160,9 @@ func runDfget() error {
daemonClient, err := checkAndSpawnDaemon() daemonClient, err := checkAndSpawnDaemon()
if err != nil { if err != nil {
logger.Errorf("check and spawn daemon error:%v", err) logger.Errorf("check and spawn daemon error:%v", err)
} else {
logger.Info("check and spawn daemon success")
} }
logger.Info("check and spawn daemon success")
return dfget.Download(dfgetConfig, daemonClient) return dfget.Download(dfgetConfig, daemonClient)
} }

View File

@ -45,8 +45,8 @@ $ tail -f log/**/*.log
==> log/dragonfly/scheduler/core.log <== ==> log/dragonfly/scheduler/core.log <==
{"level":"info","ts":"2021-02-26 05:43:37.332","caller":"cmd/root.go:57","msg":"start to run scheduler"} {"level":"info","ts":"2021-02-26 05:43:37.332","caller":"cmd/root.go:57","msg":"start to run scheduler"}
{"level":"info","ts":"2021-02-26 05:43:37.338","caller":"server/server.go:35","msg":"start server at port %!s(int=8002)"} {"level":"info","ts":"2021-02-26 05:43:37.338","caller":"server/server.go:35","msg":"start server at port %!s(int=8002)"}
{"level":"info","ts":"2021-02-26 05:43:37.342","caller":"schedule_worker/sender.go:49","msg":"start sender worker : 50"} {"level":"info","ts":"2021-02-26 05:43:37.342","caller":"worker/sender.go:49","msg":"start sender worker : 50"}
{"level":"info","ts":"2021-02-26 05:43:37.343","caller":"schedule_worker/worker_group.go:64","msg":"start scheduler worker number:6"} {"level":"info","ts":"2021-02-26 05:43:37.343","caller":"worker/worker_group.go:64","msg":"start scheduler worker number:6"}
``` ```
### Step 4: Stop dragonfly ### Step 4: Stop dragonfly

2
go.mod
View File

@ -47,7 +47,7 @@ require (
go.opentelemetry.io/otel/trace v0.20.0 go.opentelemetry.io/otel/trace v0.20.0
go.uber.org/atomic v1.6.0 go.uber.org/atomic v1.6.0
go.uber.org/zap v1.16.0 go.uber.org/zap v1.16.0
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 // indirect golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 // indirect
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf

BIN
pkg/.DS_Store vendored

Binary file not shown.

View File

@ -29,7 +29,7 @@ var (
HomeDir string HomeDir string
TmpDir string TmpDir string
Username string Username string
UserId int UserID int
UserGroup int UserGroup int
) )
@ -40,7 +40,7 @@ func init() {
} }
Username = u.Username Username = u.Username
UserId, _ = strconv.Atoi(u.Uid) UserID, _ = strconv.Atoi(u.Uid)
UserGroup, _ = strconv.Atoi(u.Gid) UserGroup, _ = strconv.Atoi(u.Gid)
HomeDir = u.HomeDir HomeDir = u.HomeDir

View File

@ -124,7 +124,7 @@ func (sc *schedulerClient) RegisterPeerTask(ctx context.Context, ptr *scheduler.
func (sc *schedulerClient) doRegisterPeerTask(ctx context.Context, ptr *scheduler.PeerTaskRequest, exclusiveNodes []string, func (sc *schedulerClient) doRegisterPeerTask(ctx context.Context, ptr *scheduler.PeerTaskRequest, exclusiveNodes []string,
opts []grpc.CallOption) (rr *scheduler.RegisterResult, err error) { opts []grpc.CallOption) (rr *scheduler.RegisterResult, err error) {
var ( var (
taskId string taskID string
suc bool suc bool
code base.Code code base.Code
schedulerNode string schedulerNode string
@ -142,12 +142,12 @@ func (sc *schedulerClient) doRegisterPeerTask(ctx context.Context, ptr *schedule
return client.RegisterPeerTask(ctx, ptr, opts...) return client.RegisterPeerTask(ctx, ptr, opts...)
}, 0.5, 5.0, 5, nil); err == nil { }, 0.5, 5.0, 5, nil); err == nil {
rr = res.(*scheduler.RegisterResult) rr = res.(*scheduler.RegisterResult)
taskId = rr.TaskId taskID = rr.TaskId
suc = true suc = true
code = dfcodes.Success code = dfcodes.Success
if taskId != key { if taskID != key {
logger.WithPeerID(ptr.PeerId).Warnf("register peer task correct taskId from %s to %s", key, taskId) logger.WithPeerID(ptr.PeerId).Warnf("register peer task correct taskId from %s to %s", key, taskID)
sc.Connection.CorrectKey2NodeRelation(key, taskId) sc.Connection.CorrectKey2NodeRelation(key, taskID)
} }
} else { } else {
if de, ok := err.(*dferrors.DfError); ok { if de, ok := err.(*dferrors.DfError); ok {
@ -156,7 +156,7 @@ func (sc *schedulerClient) doRegisterPeerTask(ctx context.Context, ptr *schedule
} }
logger.With("peerId", ptr.PeerId, "errMsg", err). logger.With("peerId", ptr.PeerId, "errMsg", err).
Infof("register peer task result:%t[%d] for taskId:%s,url:%s,peerIp:%s,securityDomain:%s,idc:%s,scheduler:%s", Infof("register peer task result:%t[%d] for taskId:%s,url:%s,peerIp:%s,securityDomain:%s,idc:%s,scheduler:%s",
suc, int32(code), taskId, ptr.Url, ptr.PeerHost.Ip, ptr.PeerHost.SecurityDomain, ptr.PeerHost.Idc, schedulerNode) suc, int32(code), taskID, ptr.Url, ptr.PeerHost.Ip, ptr.PeerHost.SecurityDomain, ptr.PeerHost.Idc, schedulerNode)
if err != nil { if err != nil {
var preNode string var preNode string

View File

@ -52,9 +52,8 @@ func CtimeSec(info os.FileInfo) int64 {
func GetSysStat(info os.FileInfo) *syscall.Stat_t { func GetSysStat(info os.FileInfo) *syscall.Stat_t {
if stat, ok := info.Sys().(*syscall.Stat_t); ok { if stat, ok := info.Sys().(*syscall.Stat_t); ok {
return stat return stat
} else {
return nil
} }
return nil
} }
func FreeSpace(diskPath string) (unit.Bytes, error) { func FreeSpace(diskPath string) (unit.Bytes, error) {
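This hunk flattens an `else` that followed a `return`, the shape golint reports as indent-error-flow; because the `if` body already returns, dropping the `else` is purely cosmetic here. A minimal sketch of the early-return form:

```go
package main

import (
	"fmt"
	"os"
	"syscall"
)

// sysStat mirrors the flattened GetSysStat above: once the happy path has
// returned inside the if, the fallback no longer needs an else branch.
func sysStat(info os.FileInfo) *syscall.Stat_t {
	if stat, ok := info.Sys().(*syscall.Stat_t); ok {
		return stat
	}
	return nil
}

func main() {
	info, err := os.Stat(".")
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	fmt.Println("have raw stat:", sysStat(info) != nil)
}
```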

View File

@ -134,13 +134,13 @@ func (cm *CDNManager) TriggerTask(task *types.Task, callback func(peerTask *type
go safe.Call(func() { go safe.Call(func() {
stream, err := cm.client.ObtainSeeds(context.TODO(), &cdnsystem.SeedRequest{ stream, err := cm.client.ObtainSeeds(context.TODO(), &cdnsystem.SeedRequest{
TaskId: task.TaskId, TaskId: task.TaskID,
Url: task.Url, Url: task.URL,
Filter: task.Filter, Filter: task.Filter,
UrlMeta: task.UrlMata, UrlMeta: task.URLMata,
}) })
if err != nil { if err != nil {
logger.Warnf("receive a failure state from cdn: taskId[%s] error:%v", task.TaskId, err) logger.Warnf("receive a failure state from cdn: taskId[%s] error:%v", task.TaskID, err)
e, ok := err.(*dferrors.DfError) e, ok := err.(*dferrors.DfError)
if !ok { if !ok {
e = dferrors.New(dfcodes.CdnError, err.Error()) e = dferrors.New(dfcodes.CdnError, err.Error())
@ -176,7 +176,7 @@ func (cm *CDNManager) doCallback(task *types.Task, err *dferrors.DfError) {
if err != nil { if err != nil {
time.Sleep(time.Second * 5) time.Sleep(time.Second * 5)
cm.taskManager.Delete(task.TaskId) cm.taskManager.Delete(task.TaskID)
cm.taskManager.PeerTask.DeleteTask(task) cm.taskManager.PeerTask.DeleteTask(task)
} }
}) })
@ -219,20 +219,20 @@ func (cm *CDNManager) Work(task *types.Task, stream *client.PieceSeedStream) {
if !ok { if !ok {
dferr = dferrors.New(dfcodes.CdnError, err.Error()) dferr = dferrors.New(dfcodes.CdnError, err.Error())
} }
logger.Warnf("receive a failure state from cdn: taskId[%s] error:%v", task.TaskId, err) logger.Warnf("receive a failure state from cdn: taskId[%s] error:%v", task.TaskID, err)
cm.doCallback(task, dferr) cm.doCallback(task, dferr)
return return
} }
if ps == nil { if ps == nil {
logger.Warnf("receive a nil pieceSeed or state from cdn: taskId[%s]", task.TaskId) logger.Warnf("receive a nil pieceSeed or state from cdn: taskId[%s]", task.TaskID)
} else { } else {
pieceNum := int32(-1) pieceNum := int32(-1)
if ps.PieceInfo != nil { if ps.PieceInfo != nil {
pieceNum = ps.PieceInfo.PieceNum pieceNum = ps.PieceInfo.PieceNum
} }
cm.processPieceSeed(task, ps) cm.processPieceSeed(task, ps)
logger.Debugf("receive a pieceSeed from cdn: taskId[%s]-%d done [%v]", task.TaskId, pieceNum, ps.Done) logger.Debugf("receive a pieceSeed from cdn: taskId[%s]-%d done [%v]", task.TaskID, pieceNum, ps.Done)
if waitCallback { if waitCallback {
waitCallback = false waitCallback = false
@ -322,13 +322,20 @@ func (cm *CDNManager) getHostUUID(ps *cdnsystem.PieceSeed) string {
func (cm *CDNManager) createPiece(task *types.Task, ps *cdnsystem.PieceSeed, pt *types.PeerTask) *types.Piece { func (cm *CDNManager) createPiece(task *types.Task, ps *cdnsystem.PieceSeed, pt *types.PeerTask) *types.Piece {
p := task.GetOrCreatePiece(ps.PieceInfo.PieceNum) p := task.GetOrCreatePiece(ps.PieceInfo.PieceNum)
p.PieceInfo = *ps.PieceInfo p.PieceInfo = base.PieceInfo{
PieceNum: ps.PieceInfo.PieceNum,
RangeStart: ps.PieceInfo.RangeStart,
RangeSize: ps.PieceInfo.RangeSize,
PieceMd5: ps.PieceInfo.PieceMd5,
PieceOffset: ps.PieceInfo.PieceOffset,
PieceStyle: ps.PieceInfo.PieceStyle,
}
return p return p
} }
func (cm *CDNManager) getTinyFileContent(task *types.Task, cdnHost *types.Host) (content []byte, err error) { func (cm *CDNManager) getTinyFileContent(task *types.Task, cdnHost *types.Host) (content []byte, err error) {
resp, err := cm.client.GetPieceTasks(context.TODO(), dfnet.NetAddr{Type: dfnet.TCP, Addr: fmt.Sprintf("%s:%d", cdnHost.Ip, cdnHost.RpcPort)}, &base.PieceTaskRequest{ resp, err := cm.client.GetPieceTasks(context.TODO(), dfnet.NetAddr{Type: dfnet.TCP, Addr: fmt.Sprintf("%s:%d", cdnHost.Ip, cdnHost.RpcPort)}, &base.PieceTaskRequest{
TaskId: task.TaskId, TaskId: task.TaskID,
SrcPid: "scheduler", SrcPid: "scheduler",
StartNum: 0, StartNum: 0,
Limit: 2, Limit: 2,
@ -343,7 +350,7 @@ func (cm *CDNManager) getTinyFileContent(task *types.Task, cdnHost *types.Host)
// TODO download the tiny file // TODO download the tiny file
// http://host:port/download/{first 3 chars of taskId}/{taskId}?peerId={peerId}; // http://host:port/download/{first 3 chars of taskId}/{taskId}?peerId={peerId};
url := fmt.Sprintf("http://%s:%d/download/%s/%s?peerId=scheduler", url := fmt.Sprintf("http://%s:%d/download/%s/%s?peerId=scheduler",
cdnHost.Ip, cdnHost.DownPort, task.TaskId[:3], task.TaskId) cdnHost.Ip, cdnHost.DownPort, task.TaskID[:3], task.TaskID)
client := &http.Client{ client := &http.Client{
Timeout: time.Second * 5, Timeout: time.Second * 5,
} }
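The hunk above replaces `p.PieceInfo = *ps.PieceInfo` with an explicit field-by-field copy of the protobuf message; the `PeerHost` copy in the scheduler server below follows the same pattern. Generated message structs carry internal state alongside the payload, and copying the whole value can trip `go vet`-style checks; whether that or simply decoupling from the RPC type was the motivation here is an assumption. A sketch of the same idea on a stand-in type:

```go
package main

import (
	"fmt"
	"sync"
)

// pieceInfo stands in for a generated message: internal, non-copyable state
// (here a mutex) sits next to the exported payload fields.
type pieceInfo struct {
	mu          sync.Mutex
	Num         int32
	RangeStart  uint64
	RangeSize   int32
	PieceOffset uint64
}

// copyPayload assigns exported fields one by one, the way the hunk rebuilds
// base.PieceInfo, so the internal state is never copied.
func copyPayload(dst, src *pieceInfo) {
	dst.Num = src.Num
	dst.RangeStart = src.RangeStart
	dst.RangeSize = src.RangeSize
	dst.PieceOffset = src.PieceOffset
}

func main() {
	src := &pieceInfo{Num: 7, RangeSize: 4194304}
	dst := &pieceInfo{}
	copyPayload(dst, src)
	fmt.Println(dst.Num, dst.RangeSize)
}
```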

View File

@ -25,6 +25,14 @@ import (
) )
func TestCDNHostsToServers(t *testing.T) { func TestCDNHostsToServers(t *testing.T) {
mockServerInfo := &manager.ServerInfo{
HostInfo: &manager.HostInfo{
HostName: "foo",
},
RpcPort: 8002,
DownPort: 8001,
}
tests := []struct { tests := []struct {
name string name string
hosts []*manager.ServerInfo hosts []*manager.ServerInfo
@ -44,13 +52,7 @@ func TestCDNHostsToServers(t *testing.T) {
expect: func(t *testing.T, data interface{}) { expect: func(t *testing.T, data interface{}) {
assert := testifyassert.New(t) assert := testifyassert.New(t)
assert.EqualValues(map[string]*manager.ServerInfo{ assert.EqualValues(map[string]*manager.ServerInfo{
"foo": &manager.ServerInfo{ "foo": mockServerInfo,
HostInfo: &manager.HostInfo{
HostName: "foo",
},
RpcPort: 8002,
DownPort: 8001,
},
}, data) }, data)
}, },
}, },

View File

@ -43,11 +43,11 @@ func (m *HostManager) Add(host *types.Host) *types.Host {
return v.(*types.Host) return v.(*types.Host)
} }
copyHost := types.CopyHost(host) h := types.Init(host)
m.CalculateLoad(copyHost) m.CalculateLoad(h)
m.data.Store(host.Uuid, copyHost) m.data.Store(host.Uuid, h)
return copyHost return h
} }
func (m *HostManager) Delete(uuid string) { func (m *HostManager) Delete(uuid string) {
@ -72,8 +72,7 @@ func (m *HostManager) CalculateLoad(host *types.Host) {
if host.Type == types.HostTypePeer { if host.Type == types.HostTypePeer {
host.SetTotalUploadLoad(HostLoadPeer) host.SetTotalUploadLoad(HostLoadPeer)
host.SetTotalDownloadLoad(HostLoadPeer) host.SetTotalDownloadLoad(HostLoadPeer)
} else {
host.SetTotalUploadLoad(HostLoadCDN)
host.SetTotalDownloadLoad(HostLoadCDN)
} }
host.SetTotalUploadLoad(HostLoadCDN)
host.SetTotalDownloadLoad(HostLoadCDN)
} }

View File

@ -86,7 +86,7 @@ func (m *PeerTask) Add(pid string, task *types.Task, host *types.Host) *types.Pe
pt := types.NewPeerTask(pid, task, host, m.addToGCQueue) pt := types.NewPeerTask(pid, task, host, m.addToGCQueue)
m.data.Store(pid, pt) m.data.Store(pid, pt)
m.taskManager.Touch(task.TaskId) m.taskManager.Touch(task.TaskID)
r, ok := m.dataRanger.Load(pt.Task) r, ok := m.dataRanger.Load(pt.Task)
if ok { if ok {
@ -345,7 +345,7 @@ func (m *PeerTask) printDebugInfo() string {
} }
func (m *PeerTask) RefreshDownloadMonitor(pt *types.PeerTask) { func (m *PeerTask) RefreshDownloadMonitor(pt *types.PeerTask) {
logger.Debugf("[%s][%s] downloadMonitorWorkingLoop refresh ", pt.Task.TaskId, pt.Pid) logger.Debugf("[%s][%s] downloadMonitorWorkingLoop refresh ", pt.Task.TaskID, pt.Pid)
status := pt.GetNodeStatus() status := pt.GetNodeStatus()
if status != types.PeerTaskStatusHealth { if status != types.PeerTaskStatusHealth {
m.downloadMonitorQueue.AddAfter(pt, time.Second*2) m.downloadMonitorQueue.AddAfter(pt, time.Second*2)
@ -363,9 +363,8 @@ func (m *PeerTask) RefreshDownloadMonitor(pt *types.PeerTask) {
func (m *PeerTask) CDNCallback(pt *types.PeerTask, err *dferrors.DfError) { func (m *PeerTask) CDNCallback(pt *types.PeerTask, err *dferrors.DfError) {
if err != nil { if err != nil {
pt.SendError(err) pt.SendError(err)
} else {
m.downloadMonitorQueue.Add(pt)
} }
m.downloadMonitorQueue.Add(pt)
} }
func (m *PeerTask) SetDownloadingMonitorCallBack(callback func(*types.PeerTask)) { func (m *PeerTask) SetDownloadingMonitorCallBack(callback func(*types.PeerTask)) {
@ -381,7 +380,7 @@ func (m *PeerTask) downloadMonitorWorkingLoop() {
if m.downloadMonitorCallBack != nil { if m.downloadMonitorCallBack != nil {
pt, _ := v.(*types.PeerTask) pt, _ := v.(*types.PeerTask)
if pt != nil { if pt != nil {
logger.Debugf("[%s][%s] downloadMonitorWorkingLoop status[%d]", pt.Task.TaskId, pt.Pid, pt.GetNodeStatus()) logger.Debugf("[%s][%s] downloadMonitorWorkingLoop status[%d]", pt.Task.TaskID, pt.Pid, pt.GetNodeStatus())
if pt.Success || (pt.Host != nil && pt.Host.Type == types.HostTypeCdn) { if pt.Success || (pt.Host != nil && pt.Host.Type == types.HostTypeCdn) {
// clear from monitor // clear from monitor
} else { } else {

View File

@ -40,7 +40,7 @@ func TestTaskManager_Set(t *testing.T) {
}, },
key: "foo", key: "foo",
task: &types.Task{ task: &types.Task{
TaskId: "bar", TaskID: "bar",
}, },
expect: func(t *testing.T, d interface{}) { expect: func(t *testing.T, d interface{}) {
assert := assert.New(t) assert := assert.New(t)
@ -68,7 +68,7 @@ func TestTaskManager_Set(t *testing.T) {
}, },
key: "", key: "",
task: &types.Task{ task: &types.Task{
TaskId: "bar", TaskID: "bar",
}, },
expect: func(t *testing.T, d interface{}) { expect: func(t *testing.T, d interface{}) {
assert := assert.New(t) assert := assert.New(t)
@ -101,7 +101,7 @@ func TestTaskManager_Add(t *testing.T) {
}, },
key: "foo", key: "foo",
task: &types.Task{ task: &types.Task{
TaskId: "bar", TaskID: "bar",
}, },
expect: func(t *testing.T, d interface{}, err error) { expect: func(t *testing.T, d interface{}, err error) {
assert := assert.New(t) assert := assert.New(t)
@ -129,7 +129,7 @@ func TestTaskManager_Add(t *testing.T) {
}, },
key: "", key: "",
task: &types.Task{ task: &types.Task{
TaskId: "bar", TaskID: "bar",
}, },
expect: func(t *testing.T, d interface{}, err error) { expect: func(t *testing.T, d interface{}, err error) {
assert := assert.New(t) assert := assert.New(t)
@ -144,7 +144,7 @@ func TestTaskManager_Add(t *testing.T) {
}, },
key: "foo", key: "foo",
task: &types.Task{ task: &types.Task{
TaskId: "bar", TaskID: "bar",
}, },
expect: func(t *testing.T, d interface{}, err error) { expect: func(t *testing.T, d interface{}, err error) {
assert := assert.New(t) assert := assert.New(t)
@ -162,6 +162,10 @@ func TestTaskManager_Add(t *testing.T) {
} }
func TestTaskManager_Get(t *testing.T) { func TestTaskManager_Get(t *testing.T) {
mockTask := &types.Task{
TaskID: "bar",
}
tests := []struct { tests := []struct {
name string name string
taskManager *TaskManager taskManager *TaskManager
@ -172,15 +176,13 @@ func TestTaskManager_Get(t *testing.T) {
name: "get existing task", name: "get existing task",
taskManager: &TaskManager{ taskManager: &TaskManager{
lock: new(sync.RWMutex), lock: new(sync.RWMutex),
data: map[string]*types.Task{"foo": &types.Task{ data: map[string]*types.Task{"foo": mockTask},
TaskId: "bar",
}},
}, },
key: "foo", key: "foo",
expect: func(t *testing.T, task *types.Task, found bool) { expect: func(t *testing.T, task *types.Task, found bool) {
assert := assert.New(t) assert := assert.New(t)
assert.Equal(true, found) assert.Equal(true, found)
assert.Equal("bar", task.TaskId) assert.Equal("bar", task.TaskID)
}, },
}, },
{ {
@ -250,6 +252,10 @@ func TestTaskManager_Delete(t *testing.T) {
} }
func TestTaskManager_Touch(t *testing.T) { func TestTaskManager_Touch(t *testing.T) {
mockTask := &types.Task{
TaskID: "bar",
}
tests := []struct { tests := []struct {
name string name string
taskManager *TaskManager taskManager *TaskManager
@ -261,9 +267,7 @@ func TestTaskManager_Touch(t *testing.T) {
name: "touch existing task", name: "touch existing task",
taskManager: &TaskManager{ taskManager: &TaskManager{
lock: new(sync.RWMutex), lock: new(sync.RWMutex),
data: map[string]*types.Task{"foo": &types.Task{ data: map[string]*types.Task{"foo": mockTask},
TaskId: "bar",
}},
}, },
key: "foo", key: "foo",
expect: func(t *testing.T, task *types.Task, found bool) { expect: func(t *testing.T, task *types.Task, found bool) {
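These test hunks hoist the repeated `&types.Task{TaskID: "bar"}` literal (and, earlier, `&manager.ServerInfo{...}`) into a single shared fixture, which shortens the tables and removes the composite-literal form the linter flagged. A sketch of the same table-test layout, meant to sit in a `*_test.go` file of some package:

```go
package manager

import "testing"

type task struct{ TaskID string }

// One shared fixture keeps every table entry to a single reference instead
// of repeating the composite literal.
var mockTask = &task{TaskID: "bar"}

func TestGet(t *testing.T) {
	tests := []struct {
		name  string
		data  map[string]*task
		key   string
		found bool
	}{
		{name: "get existing task", data: map[string]*task{"foo": mockTask}, key: "foo", found: true},
		{name: "get missing task", data: map[string]*task{}, key: "foo", found: false},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			_, ok := tc.data[tc.key]
			if ok != tc.found {
				t.Fatalf("found = %v, want %v", ok, tc.found)
			}
		})
	}
}
```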

View File

@ -183,7 +183,7 @@ func (e *evaluator) selectParentCandidates(peer *types.PeerTask) (list []*types.
if pt == nil { if pt == nil {
return true return true
} else if peer.Task != pt.Task { } else if peer.Task != pt.Task {
msg = append(msg, fmt.Sprintf("%s task[%s] not same", pt.Pid, pt.Task.TaskId)) msg = append(msg, fmt.Sprintf("%s task[%s] not same", pt.Pid, pt.Task.TaskID))
return true return true
} else if pt.IsDown() { } else if pt.IsDown() {
msg = append(msg, fmt.Sprintf("%s is down", pt.Pid)) msg = append(msg, fmt.Sprintf("%s is down", pt.Pid))
@ -213,7 +213,7 @@ func (e *evaluator) selectParentCandidates(peer *types.PeerTask) (list []*types.
return true return true
}) })
if len(list) == 0 { if len(list) == 0 {
logger.Debugf("[%s][%s] scheduler failed: \n%s", peer.Task.TaskId, peer.Pid, strings.Join(msg, "\n")) logger.Debugf("[%s][%s] scheduler failed: \n%s", peer.Task.TaskID, peer.Pid, strings.Join(msg, "\n"))
} }
return return

View File

@ -68,7 +68,7 @@ func (ef *evaluatorFactory) get(task *types.Task) Evaluator {
if ef.abtest { if ef.abtest {
name := "" name := ""
if strings.HasSuffix(task.TaskId, idgen.TwinsBSuffix) { if strings.HasSuffix(task.TaskID, idgen.TwinsBSuffix) {
if ef.bscheduler != "" { if ef.bscheduler != "" {
name = ef.bscheduler name = ef.bscheduler
} }

View File

@ -125,15 +125,14 @@ func (s *Scheduler) ScheduleParent(peer *types.PeerTask) (primary *types.PeerTas
peer.AddParent(primary, 1) peer.AddParent(primary, 1)
s.taskManager.PeerTask.Update(primary) s.taskManager.PeerTask.Update(primary)
s.taskManager.PeerTask.Update(oldParent) s.taskManager.PeerTask.Update(oldParent)
} else {
logger.Debugf("[%s][%s]SchedulerParent scheduler a empty parent", peer.Task.TaskId, peer.Pid)
} }
logger.Debugf("[%s][%s]SchedulerParent scheduler a empty parent", peer.Task.TaskID, peer.Pid)
return return
} }
func (s *Scheduler) ScheduleBadNode(peer *types.PeerTask) (adjustNodes []*types.PeerTask, err error) { func (s *Scheduler) ScheduleBadNode(peer *types.PeerTask) (adjustNodes []*types.PeerTask, err error) {
logger.Debugf("[%s][%s]SchedulerBadNode scheduler node is bad", peer.Task.TaskId, peer.Pid) logger.Debugf("[%s][%s]SchedulerBadNode scheduler node is bad", peer.Task.TaskID, peer.Pid)
parent := peer.GetParent() parent := peer.GetParent()
if parent != nil && parent.DstPeerTask != nil { if parent != nil && parent.DstPeerTask != nil {
pNode := parent.DstPeerTask pNode := parent.DstPeerTask
@ -155,7 +154,7 @@ func (s *Scheduler) ScheduleBadNode(peer *types.PeerTask) (adjustNodes []*types.
if node.GetParent() != nil { if node.GetParent() != nil {
parentID = node.GetParent().DstPeerTask.Pid parentID = node.GetParent().DstPeerTask.Pid
} }
logger.Debugf("[%s][%s]SchedulerBadNode [%s] scheduler a new parent [%s]", peer.Task.TaskId, peer.Pid, logger.Debugf("[%s][%s]SchedulerBadNode [%s] scheduler a new parent [%s]", peer.Task.TaskID, peer.Pid,
node.Pid, parentID) node.Pid, parentID)
} }

View File

@ -28,13 +28,13 @@ import (
"d7y.io/dragonfly/v2/pkg/rpc/scheduler" "d7y.io/dragonfly/v2/pkg/rpc/scheduler"
"d7y.io/dragonfly/v2/scheduler/config" "d7y.io/dragonfly/v2/scheduler/config"
"d7y.io/dragonfly/v2/scheduler/service" "d7y.io/dragonfly/v2/scheduler/service"
"d7y.io/dragonfly/v2/scheduler/service/schedule_worker" "d7y.io/dragonfly/v2/scheduler/service/worker"
"d7y.io/dragonfly/v2/scheduler/types" "d7y.io/dragonfly/v2/scheduler/types"
) )
type SchedulerServer struct { type SchedulerServer struct {
service *service.SchedulerService service *service.SchedulerService
worker schedule_worker.IWorker worker worker.IWorker
config config.SchedulerConfig config config.SchedulerConfig
} }
@ -50,8 +50,8 @@ func WithSchedulerService(service *service.SchedulerService) Option {
} }
} }
// WithWorker sets the schedule_worker.IWorker // WithWorker sets the worker.IWorker
func WithWorker(worker schedule_worker.IWorker) Option { func WithWorker(worker worker.IWorker) Option {
return func(p *SchedulerServer) *SchedulerServer { return func(p *SchedulerServer) *SchedulerServer {
p.worker = worker p.worker = worker
@ -101,11 +101,11 @@ func (s *SchedulerServer) RegisterPeerTask(ctx context.Context, request *schedul
task, ok := s.service.GetTask(pkg.TaskId) task, ok := s.service.GetTask(pkg.TaskId)
if !ok { if !ok {
task, err = s.service.AddTask(&types.Task{ task, err = s.service.AddTask(&types.Task{
TaskId: pkg.TaskId, TaskID: pkg.TaskId,
Url: request.Url, URL: request.Url,
Filter: request.Filter, Filter: request.Filter,
BizId: request.BizId, BizID: request.BizId,
UrlMata: request.UrlMata, URLMata: request.UrlMata,
}) })
if err != nil { if err != nil {
dferror, _ := err.(*dferrors.DfError) dferror, _ := err.(*dferrors.DfError)
@ -122,7 +122,7 @@ func (s *SchedulerServer) RegisterPeerTask(ctx context.Context, request *schedul
return return
} }
pkg.TaskId = task.TaskId pkg.TaskId = task.TaskID
pkg.SizeScope = task.SizeScope pkg.SizeScope = task.SizeScope
// case base.SizeScope_TINY // case base.SizeScope_TINY
@ -134,10 +134,22 @@ func (s *SchedulerServer) RegisterPeerTask(ctx context.Context, request *schedul
// get or create host // get or create host
hostID := request.PeerHost.Uuid hostID := request.PeerHost.Uuid
host, _ := s.service.GetHost(hostID) host, _ := s.service.GetHost(hostID)
peerHost := request.PeerHost
if host == nil { if host == nil {
host = &types.Host{ host = &types.Host{
Type: types.HostTypePeer, Type: types.HostTypePeer,
PeerHost: *request.PeerHost, PeerHost: scheduler.PeerHost{
Uuid: peerHost.Uuid,
Ip: peerHost.Ip,
RpcPort: peerHost.RpcPort,
DownPort: peerHost.DownPort,
HostName: peerHost.HostName,
SecurityDomain: peerHost.SecurityDomain,
Location: peerHost.Location,
Idc: peerHost.Idc,
NetTopology: peerHost.NetTopology,
},
} }
if isCdn { if isCdn {
host.Type = types.HostTypeCdn host.Type = types.HostTypeCdn
@ -212,7 +224,7 @@ func (s *SchedulerServer) ReportPieceResult(stream scheduler.Scheduler_ReportPie
} }
return return
}() }()
err = schedule_worker.NewClient(stream, s.worker, s.service).Serve() err = worker.NewClient(stream, s.worker, s.service).Serve()
return return
} }
@ -233,7 +245,7 @@ func (s *SchedulerServer) ReportPeerResult(ctx context.Context, result *schedule
return return
}() }()
logger.Infof("[%s][%s]: receive a peer result [%+v]", result.TaskId, result.PeerId, *result) logger.Infof("[%s][%s]: receive a peer result [%+v]", result.TaskId, result.PeerId, result)
pid := result.PeerId pid := result.PeerId
peerTask, err := s.service.GetPeerTask(pid) peerTask, err := s.service.GetPeerTask(pid)

View File

@ -24,16 +24,18 @@ import (
"d7y.io/dragonfly/v2/pkg/rpc" "d7y.io/dragonfly/v2/pkg/rpc"
"d7y.io/dragonfly/v2/pkg/rpc/manager" "d7y.io/dragonfly/v2/pkg/rpc/manager"
"d7y.io/dragonfly/v2/pkg/rpc/manager/client" "d7y.io/dragonfly/v2/pkg/rpc/manager/client"
// Server registered to grpc
_ "d7y.io/dragonfly/v2/pkg/rpc/scheduler/server" _ "d7y.io/dragonfly/v2/pkg/rpc/scheduler/server"
"d7y.io/dragonfly/v2/pkg/util/net/iputils" "d7y.io/dragonfly/v2/pkg/util/net/iputils"
"d7y.io/dragonfly/v2/scheduler/config" "d7y.io/dragonfly/v2/scheduler/config"
"d7y.io/dragonfly/v2/scheduler/service" "d7y.io/dragonfly/v2/scheduler/service"
"d7y.io/dragonfly/v2/scheduler/service/schedule_worker" "d7y.io/dragonfly/v2/scheduler/service/worker"
) )
type Server struct { type Server struct {
service *service.SchedulerService service *service.SchedulerService
worker schedule_worker.IWorker worker worker.IWorker
server *SchedulerServer server *SchedulerServer
config config.ServerConfig config config.ServerConfig
managerClient client.ManagerClient managerClient client.ManagerClient
@ -90,7 +92,7 @@ func New(cfg *config.Config) (*Server, error) {
return nil, err return nil, err
} }
s.worker = schedule_worker.NewWorkerGroup(cfg, s.service) s.worker = worker.NewGroup(cfg, s.service)
s.server = NewSchedulerServer(cfg, WithSchedulerService(s.service), s.server = NewSchedulerServer(cfg, WithSchedulerService(s.service),
WithWorker(s.worker)) WithWorker(s.worker))

View File

@ -64,13 +64,13 @@ func (s *SchedulerService) GetTask(taskID string) (*types.Task, bool) {
func (s *SchedulerService) AddTask(task *types.Task) (*types.Task, error) { func (s *SchedulerService) AddTask(task *types.Task) (*types.Task, error) {
// Task already exists // Task already exists
if ret, ok := s.TaskManager.Get(task.TaskId); ok { if ret, ok := s.TaskManager.Get(task.TaskID); ok {
s.TaskManager.PeerTask.AddTask(ret) s.TaskManager.PeerTask.AddTask(ret)
return ret, nil return ret, nil
} }
// Task does not exist // Task does not exist
ret := s.TaskManager.Set(task.TaskId, task) ret := s.TaskManager.Set(task.TaskID, task)
if err := s.CDNManager.TriggerTask(ret, s.TaskManager.PeerTask.CDNCallback); err != nil { if err := s.CDNManager.TriggerTask(ret, s.TaskManager.PeerTask.CDNCallback); err != nil {
return nil, err return nil, err
} }

View File

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package schedule_worker package worker
import ( import (
"io" "io"

View File

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package schedule_worker package worker
import ( import (
"fmt" "fmt"
@ -107,14 +107,14 @@ func (s *Sender) doSend() {
err = peerTask.Send() err = peerTask.Send()
}() }()
if err != nil && err.Error() == "empty client" { if err != nil && err.Error() == "empty client" {
logger.Warnf("[%s][%s]: client is empty : %v", peerTask.Task.TaskId, peerTask.Pid, err.Error()) logger.Warnf("[%s][%s]: client is empty : %v", peerTask.Task.TaskID, peerTask.Pid, err.Error())
break break
} else if err != nil { } else if err != nil {
//TODO error //TODO error
logger.Warnf("[%s][%s]: send result failed : %v", peerTask.Task.TaskId, peerTask.Pid, err.Error()) logger.Warnf("[%s][%s]: send result failed : %v", peerTask.Task.TaskID, peerTask.Pid, err.Error())
break break
} else { } else {
logger.Debugf("[%s][%s]: send result success", peerTask.Task.TaskId, peerTask.Pid) logger.Debugf("[%s][%s]: send result success", peerTask.Task.TaskID, peerTask.Pid)
} }
if peerTask.Success { if peerTask.Success {
break break

View File

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package schedule_worker package worker
import ( import (
"fmt" "fmt"
@ -187,13 +187,13 @@ func (w *Worker) doSchedule(peerTask *types.PeerTask) {
startTm := time.Now() startTm := time.Now()
status := peerTask.GetNodeStatus() status := peerTask.GetNodeStatus()
logger.Debugf("[%s][%s]: begin do schedule [%d]", peerTask.Task.TaskId, peerTask.Pid, status) logger.Debugf("[%s][%s]: begin do schedule [%d]", peerTask.Task.TaskID, peerTask.Pid, status)
defer func() { defer func() {
err := recover() err := recover()
if err != nil { if err != nil {
logger.Errorf("[%s][%s]: do schedule panic: %v", peerTask.Task.TaskId, peerTask.Pid, err) logger.Errorf("[%s][%s]: do schedule panic: %v", peerTask.Task.TaskID, peerTask.Pid, err)
} }
logger.Infof("[%s][%s]: end do schedule [%d] cost: %d", peerTask.Task.TaskId, peerTask.Pid, status, time.Now().Sub(startTm).Nanoseconds()) logger.Infof("[%s][%s]: end do schedule [%d] cost: %d", peerTask.Task.TaskID, peerTask.Pid, status, time.Now().Sub(startTm).Nanoseconds())
}() }()
switch status { switch status {
@ -209,7 +209,7 @@ func (w *Worker) doSchedule(peerTask *types.PeerTask) {
case types.PeerTaskStatusNeedParent: case types.PeerTaskStatusNeedParent:
parent, _, err := w.schedulerService.Scheduler.ScheduleParent(peerTask) parent, _, err := w.schedulerService.Scheduler.ScheduleParent(peerTask)
if err != nil { if err != nil {
logger.Debugf("[%s][%s]: schedule parent failed: %v", peerTask.Task.TaskId, peerTask.Pid, err) logger.Debugf("[%s][%s]: schedule parent failed: %v", peerTask.Task.TaskID, peerTask.Pid, err)
} }
// retry scheduler parent later when this is no parent // retry scheduler parent later when this is no parent
if parent == nil || err != nil { if parent == nil || err != nil {
@ -222,7 +222,7 @@ func (w *Worker) doSchedule(peerTask *types.PeerTask) {
case types.PeerTaskStatusNeedChildren: case types.PeerTaskStatusNeedChildren:
children, err := w.schedulerService.Scheduler.ScheduleChildren(peerTask) children, err := w.schedulerService.Scheduler.ScheduleChildren(peerTask)
if err != nil { if err != nil {
logger.Debugf("[%s][%s]: schedule children failed: %v", peerTask.Task.TaskId, peerTask.Pid, err) logger.Debugf("[%s][%s]: schedule children failed: %v", peerTask.Task.TaskID, peerTask.Pid, err)
return return
} }
for i := range children { for i := range children {
@ -238,7 +238,7 @@ func (w *Worker) doSchedule(peerTask *types.PeerTask) {
case types.PeerTaskStatusBadNode: case types.PeerTaskStatusBadNode:
adjustNodes, err := w.schedulerService.Scheduler.ScheduleBadNode(peerTask) adjustNodes, err := w.schedulerService.Scheduler.ScheduleBadNode(peerTask)
if err != nil { if err != nil {
logger.Debugf("[%s][%s]: schedule bad node failed: %v", peerTask.Task.TaskId, peerTask.Pid, err) logger.Debugf("[%s][%s]: schedule bad node failed: %v", peerTask.Task.TaskID, peerTask.Pid, err)
w.sendJobLater(peerTask) w.sendJobLater(peerTask)
return return
} }
@ -258,7 +258,7 @@ func (w *Worker) doSchedule(peerTask *types.PeerTask) {
case types.PeerTaskStatusNeedAdjustNode: case types.PeerTaskStatusNeedAdjustNode:
_, _, err := w.schedulerService.Scheduler.ScheduleAdjustParentNode(peerTask) _, _, err := w.schedulerService.Scheduler.ScheduleAdjustParentNode(peerTask)
if err != nil { if err != nil {
logger.Debugf("[%s][%s]: schedule adjust node failed: %v", peerTask.Task.TaskId, peerTask.Pid, err) logger.Debugf("[%s][%s]: schedule adjust node failed: %v", peerTask.Task.TaskID, peerTask.Pid, err)
w.sendJobLater(peerTask) w.sendJobLater(peerTask)
return return
} }
@ -269,7 +269,7 @@ func (w *Worker) doSchedule(peerTask *types.PeerTask) {
if w.schedulerService.Scheduler.IsNodeBad(peerTask) && peerTask.GetSubTreeNodesNum() > 1 { if w.schedulerService.Scheduler.IsNodeBad(peerTask) && peerTask.GetSubTreeNodesNum() > 1 {
adjustNodes, err := w.schedulerService.Scheduler.ScheduleBadNode(peerTask) adjustNodes, err := w.schedulerService.Scheduler.ScheduleBadNode(peerTask)
if err != nil { if err != nil {
logger.Debugf("[%s][%s]: schedule bad node failed: %v", peerTask.Task.TaskId, peerTask.Pid, err) logger.Debugf("[%s][%s]: schedule bad node failed: %v", peerTask.Task.TaskID, peerTask.Pid, err)
peerTask.SetNodeStatus(types.PeerTaskStatusBadNode) peerTask.SetNodeStatus(types.PeerTaskStatusBadNode)
w.sendJobLater(peerTask) w.sendJobLater(peerTask)
return return
@ -286,7 +286,7 @@ func (w *Worker) doSchedule(peerTask *types.PeerTask) {
} else if w.schedulerService.Scheduler.NeedAdjustParent(peerTask) { } else if w.schedulerService.Scheduler.NeedAdjustParent(peerTask) {
_, _, err := w.schedulerService.Scheduler.ScheduleAdjustParentNode(peerTask) _, _, err := w.schedulerService.Scheduler.ScheduleAdjustParentNode(peerTask)
if err != nil { if err != nil {
logger.Debugf("[%s][%s]: schedule adjust node failed: %v", peerTask.Task.TaskId, peerTask.Pid, err) logger.Debugf("[%s][%s]: schedule adjust node failed: %v", peerTask.Task.TaskID, peerTask.Pid, err)
return return
} }
w.sendScheduleResult(peerTask) w.sendScheduleResult(peerTask)
@ -296,7 +296,7 @@ func (w *Worker) doSchedule(peerTask *types.PeerTask) {
case types.PeerTaskStatusDone: case types.PeerTaskStatusDone:
parent, err := w.schedulerService.Scheduler.ScheduleDone(peerTask) parent, err := w.schedulerService.Scheduler.ScheduleDone(peerTask)
if err != nil { if err != nil {
logger.Debugf("[%s][%s]: schedule adjust node failed: %v", peerTask.Task.TaskId, peerTask.Pid, err) logger.Debugf("[%s][%s]: schedule adjust node failed: %v", peerTask.Task.TaskID, peerTask.Pid, err)
w.sendJobLater(peerTask) w.sendJobLater(peerTask)
return return
} }
@ -308,12 +308,12 @@ func (w *Worker) doSchedule(peerTask *types.PeerTask) {
case types.PeerTaskStatusLeaveNode, types.PeerTaskStatusNodeGone: case types.PeerTaskStatusLeaveNode, types.PeerTaskStatusNodeGone:
adjustNodes, err := w.schedulerService.Scheduler.ScheduleLeaveNode(peerTask) adjustNodes, err := w.schedulerService.Scheduler.ScheduleLeaveNode(peerTask)
if err != nil { if err != nil {
logger.Debugf("[%s][%s]: schedule adjust node failed: %v", peerTask.Task.TaskId, peerTask.Pid, err) logger.Debugf("[%s][%s]: schedule adjust node failed: %v", peerTask.Task.TaskID, peerTask.Pid, err)
w.sendJobLater(peerTask) w.sendJobLater(peerTask)
return return
} }
w.schedulerService.TaskManager.PeerTask.Delete(peerTask.Pid) w.schedulerService.TaskManager.PeerTask.Delete(peerTask.Pid)
logger.Debugf("[%s][%s]: PeerTaskStatusLeaveNode", peerTask.Task.TaskId, peerTask.Pid) logger.Debugf("[%s][%s]: PeerTaskStatusLeaveNode", peerTask.Task.TaskID, peerTask.Pid)
for i := range adjustNodes { for i := range adjustNodes {
if adjustNodes[i].GetParent() != nil { if adjustNodes[i].GetParent() != nil {
w.sendScheduleResult(adjustNodes[i]) w.sendScheduleResult(adjustNodes[i])
@ -339,7 +339,7 @@ func (w *Worker) sendScheduleResult(peerTask *types.PeerTask) {
if peerTask.GetParent() != nil && peerTask.GetParent().DstPeerTask != nil { if peerTask.GetParent() != nil && peerTask.GetParent().DstPeerTask != nil {
parent = peerTask.GetParent().DstPeerTask.Pid parent = peerTask.GetParent().DstPeerTask.Pid
} }
logger.Infof("[%s][%s]: sendScheduleResult parent[%s] active time[%d] deep [%d]", peerTask.Task.TaskId, peerTask.Pid, parent, time.Now().UnixNano()-peerTask.GetStartTime(), peerTask.GetDeep()) logger.Infof("[%s][%s]: sendScheduleResult parent[%s] active time[%d] deep [%d]", peerTask.Task.TaskID, peerTask.Pid, parent, time.Now().UnixNano()-peerTask.GetStartTime(), peerTask.GetDeep())
w.sender.Send(peerTask) w.sender.Send(peerTask)
return return
} }

View File

@ -14,7 +14,7 @@
* limitations under the License. * limitations under the License.
*/ */
package schedule_worker package worker
import ( import (
"hash/crc32" "hash/crc32"
@ -34,7 +34,7 @@ type IWorker interface {
ReceiveUpdatePieceResult(pr *scheduler2.PieceResult) ReceiveUpdatePieceResult(pr *scheduler2.PieceResult)
} }
type WorkerGroup struct { type Group struct {
workerNum int workerNum int
chanSize int chanSize int
workerList []*Worker workerList []*Worker
@ -46,8 +46,8 @@ type WorkerGroup struct {
schedulerService *service.SchedulerService schedulerService *service.SchedulerService
} }
func NewWorkerGroup(cfg *config.Config, schedulerService *service.SchedulerService) *WorkerGroup { func NewGroup(cfg *config.Config, schedulerService *service.SchedulerService) *Group {
return &WorkerGroup{ return &Group{
workerNum: cfg.Worker.WorkerNum, workerNum: cfg.Worker.WorkerNum,
chanSize: cfg.Worker.WorkerJobPoolSize, chanSize: cfg.Worker.WorkerJobPoolSize,
sender: NewSender(cfg.Worker, schedulerService), sender: NewSender(cfg.Worker, schedulerService),
@ -56,7 +56,7 @@ func NewWorkerGroup(cfg *config.Config, schedulerService *service.SchedulerServi
} }
} }
func (wg *WorkerGroup) Serve() { func (wg *Group) Serve() {
wg.stopCh = make(chan struct{}) wg.stopCh = make(chan struct{})
wg.schedulerService.TaskManager.PeerTask.SetDownloadingMonitorCallBack(func(pt *types.PeerTask) { wg.schedulerService.TaskManager.PeerTask.SetDownloadingMonitorCallBack(func(pt *types.PeerTask) {
@ -85,22 +85,22 @@ func (wg *WorkerGroup) Serve() {
logger.Infof("start scheduler worker number:%d", wg.workerNum) logger.Infof("start scheduler worker number:%d", wg.workerNum)
} }
func (wg *WorkerGroup) Stop() { func (wg *Group) Stop() {
close(wg.stopCh) close(wg.stopCh)
wg.sender.Stop() wg.sender.Stop()
wg.triggerLoadQueue.ShutDown() wg.triggerLoadQueue.ShutDown()
logger.Infof("stop scheduler worker") logger.Infof("stop scheduler worker")
} }
func (wg *WorkerGroup) ReceiveJob(job *types.PeerTask) { func (wg *Group) ReceiveJob(job *types.PeerTask) {
if job == nil { if job == nil {
return return
} }
choiceWorkerID := crc32.ChecksumIEEE([]byte(job.Task.TaskId)) % uint32(wg.workerNum) choiceWorkerID := crc32.ChecksumIEEE([]byte(job.Task.TaskID)) % uint32(wg.workerNum)
wg.workerList[choiceWorkerID].ReceiveJob(job) wg.workerList[choiceWorkerID].ReceiveJob(job)
} }
func (wg *WorkerGroup) ReceiveUpdatePieceResult(pr *scheduler2.PieceResult) { func (wg *Group) ReceiveUpdatePieceResult(pr *scheduler2.PieceResult) {
if pr == nil { if pr == nil {
return return
} }
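Across these scheduler files the `schedule_worker` package becomes `worker` and `WorkerGroup` becomes `Group`: golint flags underscores in package names, and `worker.WorkerGroup` would repeat the package name at every call site. A compile-only sketch of the post-rename shape (fields illustrative):

```go
// Package worker shows how the renamed API reads; callers now write
// worker.NewGroup instead of schedule_worker.NewWorkerGroup.
package worker

// Group is the renamed WorkerGroup: worker.Group no longer stutters.
type Group struct {
	workerNum int
	chanSize  int
}

// NewGroup mirrors the renamed constructor in the hunk above.
func NewGroup(workerNum, chanSize int) *Group {
	return &Group{workerNum: workerNum, chanSize: chanSize}
}
```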

View File

@ -45,11 +45,10 @@ type Host struct {
ServiceDownTime int64 ServiceDownTime int64
} }
func CopyHost(h *Host) *Host { func Init(h *Host) *Host {
copyHost := *h h.loadLock = &sync.Mutex{}
copyHost.peerTaskMap = new(sync.Map) h.peerTaskMap = &sync.Map{}
copyHost.loadLock = new(sync.Mutex) return h
return &copyHost
} }
func (h *Host) AddPeerTask(peerTask *PeerTask) { func (h *Host) AddPeerTask(peerTask *PeerTask) {
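`CopyHost` used to duplicate the whole `Host` value and then swap in fresh sync primitives; the new `Init` wires the `sync.Mutex` and `sync.Map` onto the pointer it was given and returns it. That sidesteps copying a struct which also embeds an RPC message (a copy that vet-style checks tend to dislike), though whether that was the deciding reason is an assumption. A minimal sketch:

```go
package main

import (
	"fmt"
	"sync"
)

// host is a stand-in for scheduler/types.Host with only the fields the
// sketch needs.
type host struct {
	UUID        string
	loadLock    *sync.Mutex
	peerTaskMap *sync.Map
}

// initHost mirrors types.Init: it prepares shared state in place and hands
// back the same pointer, rather than returning a copy of the struct.
func initHost(h *host) *host {
	h.loadLock = &sync.Mutex{}
	h.peerTaskMap = &sync.Map{}
	return h
}

func main() {
	h := initHost(&host{UUID: "cdn-1"})
	fmt.Println(h.UUID, h.loadLock != nil, h.peerTaskMap != nil)
}
```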

View File

@ -299,7 +299,7 @@ func (pt *PeerTask) GetSendPkg() (pkg *scheduler.PeerPacket) {
// if pt != nil && pt.client != nil { // if pt != nil && pt.client != nil {
pkg = &scheduler.PeerPacket{ pkg = &scheduler.PeerPacket{
Code: dfcodes.Success, Code: dfcodes.Success,
TaskId: pt.Task.TaskId, TaskId: pt.Task.TaskID,
// source peer id // source peer id
SrcPid: pt.Pid, SrcPid: pt.Pid,
// concurrent downloading count from main peer // concurrent downloading count from main peer
@ -308,10 +308,9 @@ func (pt *PeerTask) GetSendPkg() (pkg *scheduler.PeerPacket) {
defer pt.lock.Unlock() defer pt.lock.Unlock()
if pt.parent != nil && pt.parent.DstPeerTask != nil && pt.parent.DstPeerTask.Host != nil { if pt.parent != nil && pt.parent.DstPeerTask != nil && pt.parent.DstPeerTask.Host != nil {
pkg.ParallelCount = int32(pt.parent.Concurrency) pkg.ParallelCount = int32(pt.parent.Concurrency)
peerHost := pt.parent.DstPeerTask.Host.PeerHost
pkg.MainPeer = &scheduler.PeerPacket_DestPeer{ pkg.MainPeer = &scheduler.PeerPacket_DestPeer{
Ip: peerHost.Ip, Ip: pt.parent.DstPeerTask.Host.PeerHost.Ip,
RpcPort: peerHost.RpcPort, RpcPort: pt.parent.DstPeerTask.Host.PeerHost.RpcPort,
PeerId: pt.parent.DstPeerTask.Pid, PeerId: pt.parent.DstPeerTask.Pid,
} }
} }

View File

@ -27,14 +27,14 @@ import (
) )
type Task struct { type Task struct {
TaskId string `json:"task_id,omitempty"` TaskID string `json:"task_id,omitempty"`
Url string `json:"url,omitempty"` URL string `json:"url,omitempty"`
// regex format, used for task id generator, assimilating different urls // regex format, used for task id generator, assimilating different urls
Filter string `json:"filter,omitempty"` Filter string `json:"filter,omitempty"`
// biz_id and md5 are used for task id generator to distinguish the same urls // biz_id and md5 are used for task id generator to distinguish the same urls
// md5 is also used to check consistency about file content // md5 is also used to check consistency about file content
BizId string `json:"biz_id,omitempty"` // caller's biz id that can be any string BizID string `json:"biz_id,omitempty"` // caller's biz id that can be any string
UrlMata *base.UrlMeta `json:"url_mata,omitempty"` // downloaded file content md5 URLMata *base.UrlMeta `json:"url_mata,omitempty"` // downloaded file content md5
SizeScope base.SizeScope SizeScope base.SizeScope
DirectPiece *scheduler.RegisterResult_PieceContent DirectPiece *scheduler.RegisterResult_PieceContent
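The bulk of this commit is the `Id` → `ID` rename (`TaskId` → `TaskID`, `PeerId` → `PeerID`, `UserId` → `UserID`, plus `Url` → `URL`): golint expects initialisms to keep consistent case in exported Go identifiers. The struct tags above stay `task_id`, `url` and `biz_id`, so serialized output is unchanged, and the protoc-generated RPC structs keep their original field names, which is why assignments such as `TaskID: result.TaskId` appear throughout the diff. A short sketch of the convention:

```go
package types

import "encoding/json"

// Task keeps golint-friendly Go names while the tags preserve the wire
// names, so JSON output such as {"task_id":"...","url":"..."} is unchanged.
type Task struct {
	TaskID string `json:"task_id,omitempty"`
	URL    string `json:"url,omitempty"`
	BizID  string `json:"biz_id,omitempty"`
}

// marshalTask is only here to show that serialization is unaffected.
func marshalTask(t Task) ([]byte, error) {
	return json.Marshal(t)
}
```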