chore: add lint errcheck and fix errcheck (#766)

* feat: add lint errcheck and fix errcheck

Signed-off-by: Gaius <gaius.qi@gmail.com>

* replace assert with require

Signed-off-by: 孙伟鹏 <weipeng.swp@alibaba-inc.com>

Co-authored-by: 孙伟鹏 <weipeng.swp@alibaba-inc.com>
Gaius 2021-11-15 15:41:03 +08:00
parent f310425962
commit e837ae9bdf
No known key found for this signature in database
GPG Key ID: 8B4E5D1290FA2FFB
50 changed files with 416 additions and 212 deletions


@@ -21,6 +21,7 @@ linters:
   - deadcode
   - gocyclo
   - staticcheck
+  - errcheck
 output:
   format: colored-line-number
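For reference, errcheck reports any call whose returned error value is silently dropped. A minimal, hypothetical sketch of the kind of code the new linter rejects and the fix applied throughout this commit (identifiers are illustrative, not taken from the diff):

```go
package main

import "os"

func save(path string, data []byte) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	// Before: errcheck would report these dropped errors.
	//   f.Write(data)
	//   f.Close()
	// After: every error is handled, returned, or explicitly suppressed.
	if _, err := f.Write(data); err != nil {
		f.Close() // nolint: errcheck
		return err
	}
	return f.Close()
}
```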


@@ -51,16 +51,19 @@ func (s *PluginsTestSuite) TestPluginBuilder() {
 	manager := NewManager()
 	var testFunc = func(pt PluginType, name string, b Builder, result bool) {
-		manager.AddBuilder(pt, name, b)
-		obj, _ := manager.GetBuilder(pt, name)
-		if result {
-			s.NotNil(obj)
+		err := manager.AddBuilder(pt, name, b)
+		if !result {
+			s.Require().NotNil(err)
+		}
+		obj, ok := manager.GetBuilder(pt, name)
+		if ok {
+			s.Require().NotNil(obj)
 			objVal := reflect.ValueOf(obj)
 			bVal := reflect.ValueOf(b)
-			s.Equal(objVal.Pointer(), bVal.Pointer())
+			s.Require().Equal(objVal.Pointer(), bVal.Pointer())
 			manager.DeleteBuilder(pt, name)
 		} else {
-			s.Nil(obj)
+			s.Require().Nil(obj)
 		}
 	}

@@ -76,14 +79,17 @@ func (s *PluginsTestSuite) TestManagerPlugin() {
 	manager := NewManager()
 	var testFunc = func(p Plugin, result bool) {
-		manager.AddPlugin(p)
-		obj, _ := manager.GetPlugin(p.Type(), p.Name())
-		if result {
-			s.NotNil(obj)
-			s.Equal(obj, p)
+		err := manager.AddPlugin(p)
+		if !result {
+			s.Require().NotNil(err)
+		}
+		obj, ok := manager.GetPlugin(p.Type(), p.Name())
+		if ok {
+			s.Require().NotNil(obj)
+			s.Require().Equal(obj, p)
 			manager.DeletePlugin(p.Type(), p.Name())
 		} else {
-			s.Nil(obj)
+			s.Require().Nil(obj)
 		}
 	}

@@ -128,16 +134,19 @@ func (s *PluginsTestSuite) TestRepositoryIml() {
 	repo := NewRepository()
 	for _, v := range cases {
-		repo.Add(v.pt, v.name, v.data)
-		data, _ := repo.Get(v.pt, v.name)
-		if v.addResult {
-			s.NotNil(data)
-			s.Equal(data, v.data)
+		err := repo.Add(v.pt, v.name, v.data)
+		if !v.addResult {
+			s.Require().NotNil(err)
+		}
+		data, ok := repo.Get(v.pt, v.name)
+		if ok {
+			s.Require().NotNil(data)
+			s.Require().Equal(data, v.data)
 			repo.Delete(v.pt, v.name)
 			data, _ = repo.Get(v.pt, v.name)
-			s.Nil(data)
+			s.Require().Nil(data)
 		} else {
-			s.Nil(data)
+			s.Require().Nil(data)
 		}
 	}
 }

@@ -159,7 +168,7 @@ func (s *PluginsTestSuite) TestValidate() {
 		)
 	}
 	for _, v := range cases {
-		s.Equal(validate(v.pt, v.name), v.expected)
+		s.Require().Equal(validate(v.pt, v.name), v.expected)
 	}
 }
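The test changes above also switch from the suite's assert-style helpers to Require(), which aborts the failing test immediately instead of recording the failure and continuing. A small illustrative sketch (not from the repository) of the difference:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

type DemoSuite struct {
	suite.Suite
}

func (s *DemoSuite) TestDifference() {
	var obj interface{}
	s.NotNil(obj)           // assert-style: marks the failure, keeps running
	s.Require().NotNil(obj) // require-style: stops this test right here
	_ = obj.(string)        // never reached with Require; would panic otherwise
}

func TestDemoSuite(t *testing.T) {
	suite.Run(t, new(DemoSuite))
}
```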


@@ -43,8 +43,13 @@ const (
 var fileLocker = synclock.NewLockerPool()

 func init() {
-	storedriver.Register(DiskDriverName, NewStorageDriver)
-	storedriver.Register(MemoryDriverName, NewStorageDriver)
+	if err := storedriver.Register(DiskDriverName, NewStorageDriver); err != nil {
+		logger.CoreLogger.Error(err)
+	}
+	if err := storedriver.Register(MemoryDriverName, NewStorageDriver); err != nil {
+		logger.CoreLogger.Error(err)
+	}
 }

 // driver is one of the implementations of storage Driver using local file system.
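The init functions above can only log the failure because init() has no error return. A hedged sketch of the registration shape this assumes (the real storedriver.Register may differ in detail):

```go
package storedriver

import "fmt"

// Driver is a stand-in for the real storage driver interface.
type Driver interface{}

// drivers is a hypothetical registry keyed by driver name.
var drivers = map[string]func() (Driver, error){}

// Register refuses duplicate names. Since init() cannot propagate an error,
// callers can only log (or panic on) the returned value, as the diff above does.
func Register(name string, builder func() (Driver, error)) error {
	if _, ok := drivers[name]; ok {
		return fmt.Errorf("storage driver %q is already registered", name)
	}
	drivers[name] = builder
	return nil
}
```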


@@ -50,7 +50,9 @@ var (
 )

 func init() {
-	storage.Register(StorageMode, newStorageManager)
+	if err := storage.Register(StorageMode, newStorageManager); err != nil {
+		logger.CoreLogger.Error(err)
+	}
 }

 func newStorageManager(cfg *storage.Config) (storage.Manager, error) {

@@ -265,13 +267,20 @@ func (s *diskStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
 			return nil
 		},
 	}
-	s.diskDriver.Walk(r)
+	if err := s.diskDriver.Walk(r); err != nil {
+		return false, err
+	}
 	enoughSpace := freeSpace.ToNumber()-remainder.Load() > fileLength
 	if !enoughSpace {
-		s.cleaner.GC("disk", true)
+		if _, err := s.cleaner.GC("disk", true); err != nil {
+			return false, err
+		}
 		remainder.Store(0)
-		s.diskDriver.Walk(r)
+		if err := s.diskDriver.Walk(r); err != nil {
+			return false, err
+		}
 		freeSpace, err = s.diskDriver.GetFreeSpace()
 		if err != nil {
 			return false, err


@@ -50,7 +50,9 @@ var _ storage.Manager = (*hybridStorageMgr)(nil)
 var _ gc.Executor = (*hybridStorageMgr)(nil)

 func init() {
-	storage.Register(StorageMode, newStorageManager)
+	if err := storage.Register(StorageMode, newStorageManager); err != nil {
+		logger.CoreLogger.Error(err)
+	}
 }

 // NewStorageManager performs initialization for storage manager and return a storage Manager.

@@ -325,13 +327,20 @@ func (h *hybridStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
 			return nil
 		},
 	}
-	h.diskDriver.Walk(r)
+	if err := h.diskDriver.Walk(r); err != nil {
+		return false, err
+	}
 	enoughSpace := diskFreeSpace.ToNumber()-remainder.Load() > fileLength
 	if !enoughSpace {
-		h.diskDriverCleaner.GC("hybrid", true)
+		if _, err := h.diskDriverCleaner.GC("hybrid", true); err != nil {
+			return false, err
+		}
 		remainder.Store(0)
-		h.diskDriver.Walk(r)
+		if err := h.diskDriver.Walk(r); err != nil {
+			return false, err
+		}
 		diskFreeSpace, err = h.diskDriver.GetFreeSpace()
 		if err != nil {
 			return false, err

@@ -397,7 +406,7 @@ func (h *hybridStorageMgr) deleteTaskFiles(taskID string, deleteUploadPath bool,
 func (h *hybridStorageMgr) tryShmSpace(url, taskID string, fileLength int64) (string, error) {
 	if h.shmSwitch.check(url, fileLength) && h.hasShm {
 		remainder := atomic.NewInt64(0)
-		h.memoryDriver.Walk(&storedriver.Raw{
+		if err := h.memoryDriver.Walk(&storedriver.Raw{
 			WalkFn: func(filePath string, info os.FileInfo, err error) error {
 				if fileutils.IsRegular(filePath) {
 					taskID := strings.Split(path.Base(filePath), ".")[0]
@@ -416,12 +425,18 @@ func (h *hybridStorageMgr) tryShmSpace(url, taskID string, fileLength int64) (st
 				}
 				return nil
 			},
-		})
+		}); err != nil {
+			return "", err
+		}
 		canUseShm := h.getMemoryUsableSpace()-unit.Bytes(remainder.Load())-secureLevel >= unit.Bytes(
 			fileLength)
 		if !canUseShm {
 			// If the remaining space is too small, force one full GC and then re-check whether it is sufficient
-			h.memoryDriverCleaner.GC("hybrid", true)
+			if _, err := h.memoryDriverCleaner.GC("hybrid", true); err != nil {
+				return "", err
+			}
 			canUseShm = h.getMemoryUsableSpace()-unit.Bytes(remainder.Load())-secureLevel >= unit.Bytes(
 				fileLength)
 		}


@@ -190,7 +190,9 @@ func (tm Manager) Delete(taskID string) error {
 	tm.accessTimeMap.Delete(taskID)
 	tm.taskURLUnReachableStore.Delete(taskID)
 	tm.taskStore.Delete(taskID)
-	tm.progressMgr.Clear(taskID)
+	if err := tm.progressMgr.Clear(taskID); err != nil {
+		return err
+	}
 	return nil
 }

@@ -227,7 +229,10 @@ func (tm *Manager) GC() error {
 		}
 		// gc task memory data
 		logger.GcLogger.With("type", "meta").Infof("gc task: start to deal with task: %s", taskID)
-		tm.Delete(taskID)
+		if err := tm.Delete(taskID); err != nil {
+			logger.GcLogger.With("type", "meta").Infof("gc task: failed to delete task: %s", taskID)
+			continue
+		}
 		removedTaskCount++
 	}


@@ -93,7 +93,9 @@ func (tm *Manager) addOrUpdateTask(ctx context.Context, request *types.TaskRegis
 	if err != nil {
 		task.Log().Errorf("failed to get url (%s) content length: %v", task.URL, err)
 		if cdnerrors.IsURLNotReachable(err) {
-			tm.taskURLUnReachableStore.Add(taskID, time.Now())
+			if err := tm.taskURLUnReachableStore.Add(taskID, time.Now()); err != nil {
+				task.Log().Errorf("failed to add url (%s) to unreachable store: %v", task.URL, err)
+			}
 			return nil, err
 		}
 	}

@@ -119,9 +121,11 @@ func (tm *Manager) addOrUpdateTask(ctx context.Context, request *types.TaskRegis
 		pieceSize := cdnutil.ComputePieceSize(task.SourceFileLength)
 		task.PieceSize = pieceSize
 	}
-	tm.taskStore.Add(task.TaskID, task)
-	logger.Debugf("success add task: %+v into taskStore", task)
+	if err := tm.taskStore.Add(task.TaskID, task); err != nil {
+		return nil, err
+	}
+	logger.Debugf("success add task: %+v into taskStore", task)
 	return task, nil
 }


@@ -441,10 +441,14 @@ func (cd *clientDaemon) Stop() {
 		close(cd.done)
 		cd.GCManager.Stop()
 		cd.RPCManager.Stop()
-		cd.UploadManager.Stop()
+		if err := cd.UploadManager.Stop(); err != nil {
+			logger.Errorf("upload manager stop failed %s", err)
+		}

 		if cd.ProxyManager.IsEnabled() {
-			cd.ProxyManager.Stop()
+			if err := cd.ProxyManager.Stop(); err != nil {
+				logger.Errorf("proxy manager stop failed %s", err)
+			}
 		}

 		if !cd.Option.KeepStorage {


@@ -415,9 +415,13 @@ loop:
 			if pt.failedCode == failedCodeNotSet {
 				pt.failedReason = reasonContextCanceled
 				pt.failedCode = dfcodes.ClientContextCanceled
-				pt.callback.Fail(pt, pt.failedCode, pt.ctx.Err().Error())
+				if err := pt.callback.Fail(pt, pt.failedCode, pt.ctx.Err().Error()); err != nil {
+					pt.Errorf("peer task callback failed %s", err)
+				}
 			} else {
-				pt.callback.Fail(pt, pt.failedCode, pt.failedReason)
+				if err := pt.callback.Fail(pt, pt.failedCode, pt.failedReason); err != nil {
+					pt.Errorf("peer task callback failed %s", err)
+				}
 			}
 		}
 		break loop

@@ -661,7 +665,7 @@ func (pt *peerTask) downloadPieceWorker(id int32, pti Task, requests chan *Downl
 			pt.Errorf("request limiter error: %s", err)
 			waitSpan.RecordError(err)
 			waitSpan.End()
-			pti.ReportPieceResult(&pieceTaskResult{
+			if err := pti.ReportPieceResult(&pieceTaskResult{
 				piece: request.piece,
 				pieceResult: &scheduler.PieceResult{
 					TaskId: pt.GetTaskID(),

@@ -674,7 +678,10 @@ func (pt *peerTask) downloadPieceWorker(id int32, pti Task, requests chan *Downl
 					FinishedCount: 0, // update by peer task
 				},
 				err: err,
-			})
+			}); err != nil {
+				pt.Errorf("report piece result failed %s", err)
+			}
 			pt.failedReason = err.Error()
 			pt.failedCode = dfcodes.ClientRequestLimitFail
 			pt.cancel()


@@ -99,7 +99,13 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio
 		Type: "tcp",
 		Addr: fmt.Sprintf("0.0.0.0:%d", port),
 	})
-	go daemonserver.New(daemon).Serve(ln)
+	go func() {
+		if err := daemonserver.New(daemon).Serve(ln); err != nil {
+			panic(err)
+		}
+	}()
 	time.Sleep(100 * time.Millisecond)

 	// 2. setup a scheduler


@@ -22,7 +22,9 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"log"
 	"math"
+	"net"
 	"sync"
 	"testing"
 	"time"
@@ -94,7 +96,11 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte,
 		Type: "tcp",
 		Addr: fmt.Sprintf("0.0.0.0:%d", port),
 	})
-	go daemonserver.New(daemon).Serve(ln)
+	go func(daemon *mock_daemon.MockDaemonServer, ln net.Listener) {
+		if err := daemonserver.New(daemon).Serve(ln); err != nil {
+			log.Fatal(err)
+		}
+	}(daemon, ln)
 	time.Sleep(100 * time.Millisecond)

 	// 2. setup a scheduler
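Both test setups above run the mock daemon server in a goroutine; one panics on a Serve error and the other calls log.Fatal. An alternative, purely illustrative pattern (not what the diff does) is to hand the error back over a channel so the test itself can decide how to report it:

```go
// Sketch only: serveAsync is a hypothetical helper, not part of the repository.
func serveAsync(serve func() error) <-chan error {
	errCh := make(chan error, 1)
	go func() {
		// Buffered channel: the goroutine never blocks, and the test can
		// inspect or drain the channel after the listener is closed.
		errCh <- serve()
	}()
	return errCh
}
```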


@@ -59,7 +59,9 @@ func TestPieceDownloader_DownloadPiece(t *testing.T) {
 				assert.Equal(upload.PeerDownloadHTTPPathPrefix+"tas/"+"task-0", r.URL.Path)
 				data := []byte("test test ")
 				w.Header().Set(headers.ContentLength, fmt.Sprintf("%d", len(data)))
-				w.Write(data)
+				if _, err := w.Write(data); err != nil {
+					t.Error(err)
+				}
 			},
 			taskID: "task-0",
 			pieceRange: "bytes=0-9",

@@ -72,7 +74,9 @@ func TestPieceDownloader_DownloadPiece(t *testing.T) {
 				assert.Equal(upload.PeerDownloadHTTPPathPrefix+"tas/"+"task-1", r.URL.Path)
 				rg := clientutil.MustParseRange(r.Header.Get("Range"), math.MaxInt64)
 				w.Header().Set(headers.ContentLength, fmt.Sprintf("%d", rg.Length))
-				w.Write(testData[rg.Start : rg.Start+rg.Length])
+				if _, err := w.Write(testData[rg.Start : rg.Start+rg.Length]); err != nil {
+					t.Error(err)
+				}
 			},
 			taskID: "task-1",
 			pieceRange: "bytes=0-99",

@@ -85,7 +89,9 @@ func TestPieceDownloader_DownloadPiece(t *testing.T) {
 				assert.Equal(upload.PeerDownloadHTTPPathPrefix+"tas/"+"task-2", r.URL.Path)
 				rg := clientutil.MustParseRange(r.Header.Get("Range"), math.MaxInt64)
 				w.Header().Set(headers.ContentLength, fmt.Sprintf("%d", rg.Length))
-				w.Write(testData[rg.Start : rg.Start+rg.Length])
+				if _, err := w.Write(testData[rg.Start : rg.Start+rg.Length]); err != nil {
+					t.Error(err)
+				}
 			},
 			taskID: "task-2",
 			pieceRange: fmt.Sprintf("bytes=512-%d", len(testData)-1),

@@ -98,7 +104,9 @@ func TestPieceDownloader_DownloadPiece(t *testing.T) {
 				assert.Equal(upload.PeerDownloadHTTPPathPrefix+"tas/"+"task-3", r.URL.Path)
 				rg := clientutil.MustParseRange(r.Header.Get("Range"), math.MaxInt64)
 				w.Header().Set(headers.ContentLength, fmt.Sprintf("%d", rg.Length))
-				w.Write(testData[rg.Start : rg.Start+rg.Length])
+				if _, err := w.Write(testData[rg.Start : rg.Start+rg.Length]); err != nil {
+					t.Error(err)
+				}
 			},
 			taskID: "task-3",
 			pieceRange: "bytes=512-1024",


@@ -360,7 +360,7 @@ func (pm *pieceManager) DownloadSource(ctx context.Context, pt Task, request *sc
 		// last piece, piece size maybe 0
 		if n < int64(size) {
 			contentLength = int64(pieceNum*pieceSize) + n
-			pm.storageManager.UpdateTask(ctx,
+			if err := pm.storageManager.UpdateTask(ctx,
 				&storage.UpdateTaskRequest{
 					PeerTaskMetaData: storage.PeerTaskMetaData{
 						PeerID: pt.GetPeerID(),

@@ -368,7 +368,9 @@ func (pm *pieceManager) DownloadSource(ctx context.Context, pt Task, request *sc
 					},
 					ContentLength: contentLength,
 					GenPieceDigest: true,
-				})
+				}); err != nil {
+				log.Errorf("update task failed %s", err)
+			}
 			pt.SetTotalPieces(pieceNum + 1)
 			return pt.SetContentLength(contentLength)
 		}

@@ -397,8 +399,11 @@ func (pm *pieceManager) DownloadSource(ctx context.Context, pt Task, request *sc
 		}
 	}
 	pt.SetTotalPieces(maxPieceNum)
-	pt.SetContentLength(contentLength)
-	pm.storageManager.UpdateTask(ctx,
+	if err := pt.SetContentLength(contentLength); err != nil {
+		log.Errorf("set content length failed %s", err)
+	}
+	if err := pm.storageManager.UpdateTask(ctx,
 		&storage.UpdateTaskRequest{
 			PeerTaskMetaData: storage.PeerTaskMetaData{
 				PeerID: pt.GetPeerID(),

@@ -407,7 +412,9 @@ func (pm *pieceManager) DownloadSource(ctx context.Context, pt Task, request *sc
 			ContentLength: contentLength,
 			TotalPieces: maxPieceNum,
 			GenPieceDigest: true,
-		})
+		}); err != nil {
+		log.Errorf("update task failed %s", err)
+	}
 	log.Infof("download from source ok")
 	return nil
 }


@@ -463,7 +463,9 @@ func (proxy *Proxy) mirrorRegistry(w http.ResponseWriter, r *http.Request) {
 	reverseProxy.ErrorHandler = func(rw http.ResponseWriter, req *http.Request, err error) {
 		rw.WriteHeader(http.StatusInternalServerError)
 		// write error string to response body
-		rw.Write([]byte(err.Error()))
+		if _, err := rw.Write([]byte(err.Error())); err != nil {
+			logger.Errorf("write error string to response body failed %s", err)
+		}
 	}
 	reverseProxy.ServeHTTP(w, r)
 }

@@ -567,8 +569,15 @@ func tunnelHTTPS(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, err.Error(), http.StatusServiceUnavailable)
 	}

-	go copyAndClose(dst, clientConn)
-	copyAndClose(clientConn, dst)
+	go func() {
+		if err := copyAndClose(dst, clientConn); err != nil {
+			http.Error(w, err.Error(), http.StatusInternalServerError)
+		}
+	}()
+	if err := copyAndClose(clientConn, dst); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+	}
 }

 func copyAndClose(dst io.WriteCloser, src io.ReadCloser) error {


@@ -86,7 +86,9 @@ func TestDownloadManager_ServeDownload(t *testing.T) {
 	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
 	assert.Nil(err, "get free port should be ok")
 	go func() {
-		m.ServeDownload(ln)
+		if err := m.ServeDownload(ln); err != nil {
+			t.Error(err)
+		}
 	}()
 	time.Sleep(100 * time.Millisecond)

@@ -169,7 +171,9 @@ func TestDownloadManager_ServePeer(t *testing.T) {
 	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
 	assert.Nil(err, "get free port should be ok")
 	go func() {
-		m.ServePeer(ln)
+		if err := m.ServePeer(ln); err != nil {
+			t.Error(err)
+		}
 	}()
 	time.Sleep(100 * time.Millisecond)


@@ -64,7 +64,9 @@ func TestUploadManager_Serve(t *testing.T) {
 	addr := listen.Addr().String()
 	go func() {
-		um.Serve(listen)
+		if err := um.Serve(listen); err != nil {
+			t.Error(err)
+		}
 	}()

 	tests := []struct {


@@ -131,7 +131,11 @@ func runDaemon() error {
 			break
 		}
 	}
-	defer lock.Unlock()
+	defer func() {
+		if err := lock.Unlock(); err != nil {
+			logger.Errorf("flock unlock failed %s", err)
+		}
+	}()

 	logger.Infof("daemon is launched by pid: %d", viper.GetInt("launcher"))


@@ -200,8 +200,15 @@ func checkAndSpawnDaemon() (client.DaemonClient, error) {
 	}

 	lock := flock.New(dfpath.DfgetLockPath)
-	lock.Lock()
-	defer lock.Unlock()
+	if err := lock.Lock(); err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err := lock.Unlock(); err != nil {
+			logger.Errorf("flock unlock failed %s", err)
+		}
+	}()

 	// 2.Check with lock
 	if daemonClient.CheckHealth(context.Background(), target) == nil {


@@ -156,6 +156,10 @@ func Warnf(template string, args ...interface{}) {
 	CoreLogger.Warnf(template, args...)
 }

+func Warn(args ...interface{}) {
+	CoreLogger.Warn(args...)
+}
+
 func Errorf(template string, args ...interface{}) {
 	CoreLogger.Errorf(template, args...)
 }


@@ -20,6 +20,7 @@ import (
 	"errors"
 	"time"

+	logger "d7y.io/dragonfly/v2/internal/dflog"
 	"d7y.io/dragonfly/v2/pkg/cache"
 )

@@ -58,7 +59,9 @@ func (d *dynconfigManager) get() (interface{}, error) {
 	// Cache has expired
 	// Reload and ignore client request error
-	d.load()
+	if err := d.load(); err != nil {
+		logger.Warn("reload failed", err)
+	}

 	dynconfig, ok := d.cache.Get(defaultCacheKey)
 	if !ok {


@@ -66,17 +66,23 @@ func TestDynconfigUnmarshal_ManagerSourceType(t *testing.T) {
 			cleanFileCache: func(t *testing.T) {},
 			mock: func(m *mock_manager_client.MockmanagerClientMockRecorder) {
 				var d map[string]interface{}
-				mapstructure.Decode(TestDynconfig{
+				if err := mapstructure.Decode(TestDynconfig{
 					Scheduler: SchedulerOption{
 						Name: schedulerName,
 					},
-				}, &d)
+				}, &d); err != nil {
+					t.Error(err)
+				}
 				m.Get().Return(d, nil).AnyTimes()
 			},
 			expect: func(t *testing.T, data interface{}) {
 				assert := assert.New(t)
 				var d TestDynconfig
-				mapstructure.Decode(data, &d)
+				if err := mapstructure.Decode(data, &d); err != nil {
+					t.Error(err)
+				}
 				assert.EqualValues(d, TestDynconfig{
 					Scheduler: SchedulerOption{
 						Name: schedulerName,

@@ -102,11 +108,14 @@ func TestDynconfigUnmarshal_ManagerSourceType(t *testing.T) {
 			},
 			mock: func(m *mock_manager_client.MockmanagerClientMockRecorder) {
 				var d map[string]interface{}
-				mapstructure.Decode(TestDynconfig{
+				if err := mapstructure.Decode(TestDynconfig{
 					Scheduler: SchedulerOption{
 						Name: schedulerName,
 					},
-				}, &d)
+				}, &d); err != nil {
+					t.Error(err)
+				}
 				m.Get().Return(d, nil).Times(1)
 			},
 			expect: func(t *testing.T, data interface{}) {

@@ -136,11 +145,14 @@ func TestDynconfigUnmarshal_ManagerSourceType(t *testing.T) {
 			},
 			mock: func(m *mock_manager_client.MockmanagerClientMockRecorder) {
 				var d map[string]interface{}
-				mapstructure.Decode(TestDynconfig{
+				if err := mapstructure.Decode(TestDynconfig{
 					Scheduler: SchedulerOption{
 						Name: schedulerName,
 					},
-				}, &d)
+				}, &d); err != nil {
+					t.Error(err)
+				}
 				m.Get().Return(d, nil).Times(1)
 				m.Get().Return(nil, errors.New("manager serivce error")).Times(1)
 			},

@@ -208,7 +220,10 @@ func TestDynconfigUnmarshal_LocalSourceType(t *testing.T) {
 			expect: func(t *testing.T, data interface{}) {
 				assert := assert.New(t)
 				var d TestDynconfig
-				mapstructure.Decode(data, &d)
+				if err := mapstructure.Decode(data, &d); err != nil {
+					t.Error(err)
+				}
 				assert.EqualValues(d, TestDynconfig{
 					Scheduler: SchedulerOption{
 						Name: schedulerName,


@@ -47,10 +47,13 @@ func newGithub(name, clientID, clientSecret, redirectURL string) *oauthGithub {
 	}
 }

-func (g *oauthGithub) AuthCodeURL() string {
+func (g *oauthGithub) AuthCodeURL() (string, error) {
 	b := make([]byte, 16)
-	rand.Read(b)
-	return g.Config.AuthCodeURL(base64.URLEncoding.EncodeToString(b))
+	if _, err := rand.Read(b); err != nil {
+		return "", err
+	}
+	return g.Config.AuthCodeURL(base64.URLEncoding.EncodeToString(b)), nil
 }

 func (g *oauthGithub) Exchange(code string) (*oauth2.Token, error) {


@@ -48,10 +48,13 @@ func newGoogle(name, clientID, clientSecret, redirectURL string) *oauthGoogle {
 	}
 }

-func (g *oauthGoogle) AuthCodeURL() string {
+func (g *oauthGoogle) AuthCodeURL() (string, error) {
 	b := make([]byte, 16)
-	rand.Read(b)
-	return g.Config.AuthCodeURL(base64.URLEncoding.EncodeToString(b))
+	if _, err := rand.Read(b); err != nil {
+		return "", err
+	}
+	return g.Config.AuthCodeURL(base64.URLEncoding.EncodeToString(b)), nil
 }

 func (g *oauthGoogle) Exchange(code string) (*oauth2.Token, error) {


@@ -39,7 +39,7 @@ type User struct {
 }

 type Oauth interface {
-	AuthCodeURL() string
+	AuthCodeURL() (string, error)
 	Exchange(string) (*oauth2.Token, error)
 	GetUser(*oauth2.Token) (*User, error)
 }

@@ -62,7 +62,7 @@ func New(name, clientID, clientSecret, redirectURL string) (Oauth, error) {
 	return o, nil
 }

-func (g *oauth) AuthCodeURL() string {
+func (g *oauth) AuthCodeURL() (string, error) {
 	return g.Oauth.AuthCodeURL()
 }
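The interface change above propagates a crypto/rand failure out of AuthCodeURL instead of silently using a partially filled state buffer. A minimal sketch of what generating and consuming the OAuth state now looks like (illustrative, standard library only):

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// authCodeURL mirrors the new shape: any rand.Read failure is returned
// to the caller rather than ignored.
func authCodeURL(baseURL string) (string, error) {
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	state := base64.URLEncoding.EncodeToString(b)
	return baseURL + "?state=" + state, nil
}

func main() {
	url, err := authCodeURL("https://example.com/oauth/authorize")
	if err != nil {
		fmt.Println("generate auth URL:", err)
		return
	}
	fmt.Println(url)
}
```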


@@ -76,7 +76,13 @@ func TestManagerConfig_Load(t *testing.T) {
 	managerConfigYAML := &Config{}
 	contentYAML, _ := ioutil.ReadFile("./testdata/manager.yaml")
 	var dataYAML map[string]interface{}
-	yaml.Unmarshal(contentYAML, &dataYAML)
-	mapstructure.Decode(dataYAML, &managerConfigYAML)
+	if err := yaml.Unmarshal(contentYAML, &dataYAML); err != nil {
+		t.Fatal(err)
+	}
+	if err := mapstructure.Decode(dataYAML, &managerConfigYAML); err != nil {
+		t.Fatal(err)
+	}

 	assert.EqualValues(config, managerConfigYAML)
 }


@@ -46,7 +46,7 @@ func (h *Handlers) CreateCDN(ctx *gin.Context) {
 	cdn, err := h.service.CreateCDN(ctx.Request.Context(), json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -72,7 +72,7 @@ func (h *Handlers) DestroyCDN(ctx *gin.Context) {
 	}

 	if err := h.service.DestroyCDN(ctx.Request.Context(), params.ID); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -94,19 +94,19 @@ func (h *Handlers) DestroyCDN(ctx *gin.Context) {
 func (h *Handlers) UpdateCDN(ctx *gin.Context) {
 	var params types.CDNParams
 	if err := ctx.ShouldBindUri(&params); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	var json types.UpdateCDNRequest
 	if err := ctx.ShouldBindJSON(&json); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	cdn, err := h.service.UpdateCDN(ctx.Request.Context(), params.ID, json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -133,7 +133,7 @@ func (h *Handlers) GetCDN(ctx *gin.Context) {
 	cdn, err := h.service.GetCDN(ctx.Request.Context(), params.ID)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -162,7 +162,7 @@ func (h *Handlers) GetCDNs(ctx *gin.Context) {
 	h.setPaginationDefault(&query.Page, &query.PerPage)
 	cdns, count, err := h.service.GetCDNs(ctx.Request.Context(), query)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}
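In these manager handlers the commit silences errcheck with a nolint comment instead of handling the return value: gin's Context.Error returns the *gin.Error it has just recorded on the context, and the handlers only care about the recording side effect. A typical, illustrative pattern (not necessarily this project's middleware) for consuming those recorded errors later:

```go
package main

import (
	"errors"
	"net/http"

	"github.com/gin-gonic/gin"
)

// errorReporter turns errors recorded via ctx.Error into a JSON response
// after the handler has run; this is a common gin idiom, sketched here.
func errorReporter() gin.HandlerFunc {
	return func(ctx *gin.Context) {
		ctx.Next()
		if len(ctx.Errors) == 0 {
			return
		}
		ctx.JSON(http.StatusInternalServerError, gin.H{"message": ctx.Errors.Last().Error()})
	}
}

func main() {
	r := gin.New()
	r.Use(errorReporter())
	r.GET("/boom", func(ctx *gin.Context) {
		ctx.Error(errors.New("boom")) // nolint: errcheck (the returned *gin.Error is intentionally ignored)
	})
	_ = r.Run(":8080")
}
```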


@@ -47,7 +47,7 @@ func (h *Handlers) CreateCDNCluster(ctx *gin.Context) {
 	if json.SecurityGroupDomain != "" {
 		cdn, err := h.service.CreateCDNClusterWithSecurityGroupDomain(ctx.Request.Context(), json)
 		if err != nil {
-			ctx.Error(err)
+			ctx.Error(err) // nolint: errcheck
 			return
 		}

@@ -57,7 +57,7 @@ func (h *Handlers) CreateCDNCluster(ctx *gin.Context) {
 	cdnCluster, err := h.service.CreateCDNCluster(ctx.Request.Context(), json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -83,7 +83,7 @@ func (h *Handlers) DestroyCDNCluster(ctx *gin.Context) {
 	}

 	if err := h.service.DestroyCDNCluster(ctx.Request.Context(), params.ID); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -105,20 +105,20 @@ func (h *Handlers) DestroyCDNCluster(ctx *gin.Context) {
 func (h *Handlers) UpdateCDNCluster(ctx *gin.Context) {
 	var params types.CDNClusterParams
 	if err := ctx.ShouldBindUri(&params); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	var json types.UpdateCDNClusterRequest
 	if err := ctx.ShouldBindJSON(&json); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	if json.SecurityGroupDomain != "" {
 		cdn, err := h.service.UpdateCDNClusterWithSecurityGroupDomain(ctx.Request.Context(), params.ID, json)
 		if err != nil {
-			ctx.Error(err)
+			ctx.Error(err) // nolint: errcheck
 			return
 		}

@@ -128,7 +128,7 @@ func (h *Handlers) UpdateCDNCluster(ctx *gin.Context) {
 	cdnCluster, err := h.service.UpdateCDNCluster(ctx.Request.Context(), params.ID, json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -155,7 +155,7 @@ func (h *Handlers) GetCDNCluster(ctx *gin.Context) {
 	cdnCluster, err := h.service.GetCDNCluster(ctx.Request.Context(), params.ID)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -184,7 +184,7 @@ func (h *Handlers) GetCDNClusters(ctx *gin.Context) {
 	h.setPaginationDefault(&query.Page, &query.PerPage)
 	cdns, count, err := h.service.GetCDNClusters(ctx.Request.Context(), query)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -212,7 +212,7 @@ func (h *Handlers) AddCDNToCDNCluster(ctx *gin.Context) {
 	}

 	if err := h.service.AddCDNToCDNCluster(ctx.Request.Context(), params.ID, params.CDNID); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -239,7 +239,7 @@ func (h *Handlers) AddSchedulerClusterToCDNCluster(ctx *gin.Context) {
 	}

 	if err := h.service.AddSchedulerClusterToCDNCluster(ctx.Request.Context(), params.ID, params.SchedulerClusterID); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}


@@ -46,7 +46,7 @@ func (h *Handlers) CreateConfig(ctx *gin.Context) {
 	config, err := h.service.CreateConfig(ctx.Request.Context(), json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -72,7 +72,7 @@ func (h *Handlers) DestroyConfig(ctx *gin.Context) {
 	}

 	if err := h.service.DestroyConfig(ctx.Request.Context(), params.ID); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -94,19 +94,19 @@ func (h *Handlers) DestroyConfig(ctx *gin.Context) {
 func (h *Handlers) UpdateConfig(ctx *gin.Context) {
 	var params types.ConfigParams
 	if err := ctx.ShouldBindUri(&params); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	var json types.UpdateConfigRequest
 	if err := ctx.ShouldBindJSON(&json); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	config, err := h.service.UpdateConfig(ctx.Request.Context(), params.ID, json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -133,7 +133,7 @@ func (h *Handlers) GetConfig(ctx *gin.Context) {
 	config, err := h.service.GetConfig(ctx.Request.Context(), params.ID)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -162,7 +162,7 @@ func (h *Handlers) GetConfigs(ctx *gin.Context) {
 	h.setPaginationDefault(&query.Page, &query.PerPage)
 	configs, count, err := h.service.GetConfigs(ctx.Request.Context(), query)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}


@@ -39,7 +39,7 @@ func (h *Handlers) CreateJob(ctx *gin.Context) {
 	job, err := h.service.CreatePreheatJob(ctx.Request.Context(), json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -68,7 +68,7 @@ func (h *Handlers) DestroyJob(ctx *gin.Context) {
 	}

 	if err := h.service.DestroyJob(ctx.Request.Context(), params.ID); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -90,19 +90,19 @@ func (h *Handlers) DestroyJob(ctx *gin.Context) {
 func (h *Handlers) UpdateJob(ctx *gin.Context) {
 	var params types.JobParams
 	if err := ctx.ShouldBindUri(&params); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	var json types.UpdateJobRequest
 	if err := ctx.ShouldBindJSON(&json); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	job, err := h.service.UpdateJob(ctx.Request.Context(), params.ID, json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -129,7 +129,7 @@ func (h *Handlers) GetJob(ctx *gin.Context) {
 	job, err := h.service.GetJob(ctx.Request.Context(), params.ID)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -158,7 +158,7 @@ func (h *Handlers) GetJobs(ctx *gin.Context) {
 	h.setPaginationDefault(&query.Page, &query.PerPage)
 	jobs, count, err := h.service.GetJobs(ctx.Request.Context(), query)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}


@@ -46,7 +46,7 @@ func (h *Handlers) CreateOauth(ctx *gin.Context) {
 	oauth, err := h.service.CreateOauth(ctx.Request.Context(), json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -72,7 +72,7 @@ func (h *Handlers) DestroyOauth(ctx *gin.Context) {
 	}

 	if err := h.service.DestroyOauth(ctx.Request.Context(), params.ID); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -94,19 +94,19 @@ func (h *Handlers) DestroyOauth(ctx *gin.Context) {
 func (h *Handlers) UpdateOauth(ctx *gin.Context) {
 	var params types.OauthParams
 	if err := ctx.ShouldBindUri(&params); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	var json types.UpdateOauthRequest
 	if err := ctx.ShouldBindJSON(&json); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	oauth, err := h.service.UpdateOauth(ctx.Request.Context(), params.ID, json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -133,7 +133,7 @@ func (h *Handlers) GetOauth(ctx *gin.Context) {
 	oauth, err := h.service.GetOauth(ctx.Request.Context(), params.ID)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -162,7 +162,7 @@ func (h *Handlers) GetOauths(ctx *gin.Context) {
 	h.setPaginationDefault(&query.Page, &query.PerPage)
 	oauth, count, err := h.service.GetOauths(ctx.Request.Context(), query)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}


@@ -46,7 +46,7 @@ func (h *Handlers) CreateV1Preheat(ctx *gin.Context) {
 	preheat, err := h.service.CreateV1Preheat(ctx.Request.Context(), json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -73,7 +73,7 @@ func (h *Handlers) GetV1Preheat(ctx *gin.Context) {
 	preheat, err := h.service.GetV1Preheat(ctx.Request.Context(), params.ID)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}


@@ -44,7 +44,7 @@ func (h *Handlers) CreateRole(ctx *gin.Context) {
 	}

 	if err := h.service.CreateRole(ctx.Request.Context(), json); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -69,7 +69,7 @@ func (h *Handlers) DestroyRole(ctx *gin.Context) {
 	}

 	if ok, err := h.service.DestroyRole(ctx.Request.Context(), params.Role); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	} else if !ok {
 		ctx.Status(http.StatusNotFound)

@@ -138,7 +138,7 @@ func (h *Handlers) AddPermissionForRole(ctx *gin.Context) {
 	}

 	if ok, err := h.service.AddPermissionForRole(ctx.Request.Context(), params.Role, json); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	} else if !ok {
 		ctx.Status(http.StatusConflict)

@@ -173,7 +173,7 @@ func (h *Handlers) DeletePermissionForRole(ctx *gin.Context) {
 	}

 	if ok, err := h.service.DeletePermissionForRole(ctx.Request.Context(), params.Role, json); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	} else if !ok {
 		ctx.Status(http.StatusNotFound)


@@ -46,7 +46,7 @@ func (h *Handlers) CreateScheduler(ctx *gin.Context) {
 	scheduler, err := h.service.CreateScheduler(ctx.Request.Context(), json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -72,7 +72,7 @@ func (h *Handlers) DestroyScheduler(ctx *gin.Context) {
 	}

 	if err := h.service.DestroyScheduler(ctx.Request.Context(), params.ID); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -94,19 +94,19 @@ func (h *Handlers) DestroyScheduler(ctx *gin.Context) {
 func (h *Handlers) UpdateScheduler(ctx *gin.Context) {
 	var params types.SchedulerParams
 	if err := ctx.ShouldBindUri(&params); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	var json types.UpdateSchedulerRequest
 	if err := ctx.ShouldBindJSON(&json); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	scheduler, err := h.service.UpdateScheduler(ctx.Request.Context(), params.ID, json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -133,7 +133,7 @@ func (h *Handlers) GetScheduler(ctx *gin.Context) {
 	scheduler, err := h.service.GetScheduler(ctx.Request.Context(), params.ID)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -162,7 +162,7 @@ func (h *Handlers) GetSchedulers(ctx *gin.Context) {
 	h.setPaginationDefault(&query.Page, &query.PerPage)
 	schedulers, count, err := h.service.GetSchedulers(ctx.Request.Context(), query)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}


@@ -47,7 +47,7 @@ func (h *Handlers) CreateSchedulerCluster(ctx *gin.Context) {
 	if json.SecurityGroupDomain != "" {
 		scheduler, err := h.service.CreateSchedulerClusterWithSecurityGroupDomain(ctx.Request.Context(), json)
 		if err != nil {
-			ctx.Error(err)
+			ctx.Error(err) // nolint: errcheck
 			return
 		}

@@ -57,7 +57,7 @@ func (h *Handlers) CreateSchedulerCluster(ctx *gin.Context) {
 	schedulerCluster, err := h.service.CreateSchedulerCluster(ctx.Request.Context(), json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -83,7 +83,7 @@ func (h *Handlers) DestroySchedulerCluster(ctx *gin.Context) {
 	}

 	if err := h.service.DestroySchedulerCluster(ctx.Request.Context(), params.ID); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -105,20 +105,20 @@ func (h *Handlers) DestroySchedulerCluster(ctx *gin.Context) {
 func (h *Handlers) UpdateSchedulerCluster(ctx *gin.Context) {
 	var params types.SchedulerClusterParams
 	if err := ctx.ShouldBindUri(&params); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	var json types.UpdateSchedulerClusterRequest
 	if err := ctx.ShouldBindJSON(&json); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	if json.SecurityGroupDomain != "" {
 		scheduler, err := h.service.UpdateSchedulerClusterWithSecurityGroupDomain(ctx.Request.Context(), params.ID, json)
 		if err != nil {
-			ctx.Error(err)
+			ctx.Error(err) // nolint: errcheck
 			return
 		}

@@ -128,7 +128,7 @@ func (h *Handlers) UpdateSchedulerCluster(ctx *gin.Context) {
 	schedulerCluster, err := h.service.UpdateSchedulerCluster(ctx.Request.Context(), params.ID, json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -155,7 +155,7 @@ func (h *Handlers) GetSchedulerCluster(ctx *gin.Context) {
 	schedulerCluster, err := h.service.GetSchedulerCluster(ctx.Request.Context(), params.ID)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -184,7 +184,7 @@ func (h *Handlers) GetSchedulerClusters(ctx *gin.Context) {
 	h.setPaginationDefault(&query.Page, &query.PerPage)
 	schedulerClusters, count, err := h.service.GetSchedulerClusters(ctx.Request.Context(), query)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -213,7 +213,7 @@ func (h *Handlers) AddSchedulerToSchedulerCluster(ctx *gin.Context) {
 	err := h.service.AddSchedulerToSchedulerCluster(ctx.Request.Context(), params.ID, params.SchedulerID)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}


@@ -46,7 +46,7 @@ func (h *Handlers) CreateSecurityGroup(ctx *gin.Context) {
 	securityGroup, err := h.service.CreateSecurityGroup(ctx.Request.Context(), json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -72,7 +72,7 @@ func (h *Handlers) DestroySecurityGroup(ctx *gin.Context) {
 	}

 	if err := h.service.DestroySecurityGroup(ctx.Request.Context(), params.ID); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -94,19 +94,19 @@ func (h *Handlers) DestroySecurityGroup(ctx *gin.Context) {
 func (h *Handlers) UpdateSecurityGroup(ctx *gin.Context) {
 	var params types.SecurityGroupParams
 	if err := ctx.ShouldBindUri(&params); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	var json types.UpdateSecurityGroupRequest
 	if err := ctx.ShouldBindJSON(&json); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

 	securityGroup, err := h.service.UpdateSecurityGroup(ctx.Request.Context(), params.ID, json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -133,7 +133,7 @@ func (h *Handlers) GetSecurityGroup(ctx *gin.Context) {
 	securityGroup, err := h.service.GetSecurityGroup(ctx.Request.Context(), params.ID)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -162,7 +162,7 @@ func (h *Handlers) GetSecurityGroups(ctx *gin.Context) {
 	h.setPaginationDefault(&query.Page, &query.PerPage)
 	securityGroups, count, err := h.service.GetSecurityGroups(ctx.Request.Context(), query)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -191,7 +191,7 @@ func (h *Handlers) AddSchedulerClusterToSecurityGroup(ctx *gin.Context) {
 	err := h.service.AddSchedulerClusterToSecurityGroup(ctx.Request.Context(), params.ID, params.SchedulerClusterID)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -219,7 +219,7 @@ func (h *Handlers) AddCDNClusterToSecurityGroup(ctx *gin.Context) {
 	err := h.service.AddCDNClusterToSecurityGroup(ctx.Request.Context(), params.ID, params.CDNClusterID)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}


@@ -47,7 +47,7 @@ func (h *Handlers) GetUser(ctx *gin.Context) {
 	user, err := h.service.GetUser(ctx.Request.Context(), params.ID)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -76,7 +76,7 @@ func (h *Handlers) GetUsers(ctx *gin.Context) {
 	h.setPaginationDefault(&query.Page, &query.PerPage)
 	users, count, err := h.service.GetUsers(ctx.Request.Context(), query)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -103,7 +103,7 @@ func (h *Handlers) SignUp(ctx *gin.Context) {
 	user, err := h.service.SignUp(ctx.Request.Context(), json)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -134,7 +134,7 @@ func (h *Handlers) ResetPassword(ctx *gin.Context) {
 	}

 	if err := h.service.ResetPassword(ctx.Request.Context(), params.ID, json); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -161,7 +161,7 @@ func (h *Handlers) OauthSignin(ctx *gin.Context) {
 	authURL, err := h.service.OauthSignin(ctx.Request.Context(), params.Name)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -194,7 +194,7 @@ func (h *Handlers) OauthSigninCallback(j *jwt.GinJWTMiddleware) func(*gin.Contex
 	user, err := h.service.OauthSigninCallback(ctx.Request.Context(), params.Name, query.Code)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -221,7 +221,7 @@ func (h *Handlers) GetRolesForUser(ctx *gin.Context) {
 	roles, err := h.service.GetRolesForUser(ctx.Request.Context(), params.ID)
 	if err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	}

@@ -247,7 +247,7 @@ func (h *Handlers) AddRoleToUser(ctx *gin.Context) {
 	}

 	if ok, err := h.service.AddRoleForUser(ctx.Request.Context(), params); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	} else if !ok {
 		ctx.Status(http.StatusConflict)

@@ -276,7 +276,7 @@ func (h *Handlers) DeleteRoleForUser(ctx *gin.Context) {
 	}

 	if ok, err := h.service.DeleteRoleForUser(ctx.Request.Context(), params); err != nil {
-		ctx.Error(err)
+		ctx.Error(err) // nolint: errcheck
 		return
 	} else if !ok {
 		ctx.Status(http.StatusNotFound)


@ -180,7 +180,11 @@ func (p *preheat) getLayers(ctx context.Context, url string, filter string, head
if resp.StatusCode/100 != 2 { if resp.StatusCode/100 != 2 {
if resp.StatusCode == http.StatusUnauthorized { if resp.StatusCode == http.StatusUnauthorized {
token := getAuthToken(ctx, resp.Header) token, err := getAuthToken(ctx, resp.Header)
if err != nil {
return nil, err
}
bearer := "Bearer " + token bearer := "Bearer " + token
header.Add("Authorization", bearer) header.Add("Authorization", bearer)
@ -253,18 +257,18 @@ func (p *preheat) parseLayers(resp *http.Response, url, filter string, header ht
return layers, nil return layers, nil
} }
func getAuthToken(ctx context.Context, header http.Header) (token string) { func getAuthToken(ctx context.Context, header http.Header) (string, error) {
ctx, span := tracer.Start(ctx, config.SpanAuthWithRegistry, trace.WithSpanKind(trace.SpanKindProducer)) ctx, span := tracer.Start(ctx, config.SpanAuthWithRegistry, trace.WithSpanKind(trace.SpanKindProducer))
defer span.End() defer span.End()
authURL := authURL(header.Values("WWW-Authenticate")) authURL := authURL(header.Values("WWW-Authenticate"))
if len(authURL) == 0 { if len(authURL) == 0 {
return return "", errors.New("authURL is empty")
} }
req, err := http.NewRequestWithContext(ctx, "GET", authURL, nil) req, err := http.NewRequestWithContext(ctx, "GET", authURL, nil)
if err != nil { if err != nil {
return return "", err
} }
client := &http.Client{ client := &http.Client{
@ -276,17 +280,23 @@ func getAuthToken(ctx context.Context, header http.Header) (token string) {
resp, err := client.Do(req) resp, err := client.Do(req)
if err != nil { if err != nil {
return return "", err
} }
defer resp.Body.Close() defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body) body, _ := ioutil.ReadAll(resp.Body)
var result map[string]interface{} var result map[string]interface{}
json.Unmarshal(body, &result) if err := json.Unmarshal(body, &result); err != nil {
if result["token"] != nil { return "", err
token = fmt.Sprintf("%v", result["token"])
} }
return
if result["token"] == nil {
return "", errors.New("token is empty")
}
token := fmt.Sprintf("%v", result["token"])
return token, nil
} }
func authURL(wwwAuth []string) string { func authURL(wwwAuth []string) string {
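
getAuthToken now returns an explicit (string, error) pair instead of a named result with bare returns, so an empty WWW-Authenticate URL, a failed request, or a malformed token response surfaces to the caller. A small stand-alone sketch of the same shape; parseToken is illustrative and only mirrors the body-decoding half of the function:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// parseToken mirrors the body-decoding half of the new getAuthToken: decode
// failures and a missing "token" field become errors instead of a silently
// empty result.
func parseToken(body []byte) (string, error) {
	var result map[string]interface{}
	if err := json.Unmarshal(body, &result); err != nil {
		return "", err
	}
	if result["token"] == nil {
		return "", errors.New("token is empty")
	}
	return fmt.Sprintf("%v", result["token"]), nil
}

func main() {
	token, err := parseToken([]byte(`{"token":"abc"}`))
	fmt.Println(token, err)
}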

View File

@ -100,7 +100,7 @@ func (s *rest) CreatePreheatJob(ctx context.Context, json types.CreatePreheatJob
func (s *rest) pollingJob(ctx context.Context, id uint, taskID string) { func (s *rest) pollingJob(ctx context.Context, id uint, taskID string) {
var job model.Job var job model.Job
retry.Run(ctx, func() (interface{}, bool, error) { if _, _, err := retry.Run(ctx, func() (interface{}, bool, error) {
groupJob, err := s.job.GetGroupJobState(taskID) groupJob, err := s.job.GetGroupJobState(taskID)
if err != nil { if err != nil {
logger.Errorf("polling job %d and task %s failed: %v", id, taskID, err) logger.Errorf("polling job %d and task %s failed: %v", id, taskID, err)
@ -124,9 +124,11 @@ func (s *rest) pollingJob(ctx context.Context, id uint, taskID string) {
default: default:
return nil, false, fmt.Errorf("polling job %d and task %s status is %s", id, taskID, job.Status) return nil, false, fmt.Errorf("polling job %d and task %s status is %s", id, taskID, job.Status)
} }
}, 5, 10, 120, nil) }, 5, 10, 120, nil); err != nil {
logger.Errorf("polling job %d and task %s failed %s", id, taskID, err)
}
// Polling timeout // Polling timeout and failed
if job.Status != machineryv1tasks.StateSuccess && job.Status != machineryv1tasks.StateFailure { if job.Status != machineryv1tasks.StateSuccess && job.Status != machineryv1tasks.StateFailure {
job := model.Job{} job := model.Job{}
if err := s.db.WithContext(ctx).First(&job, id).Updates(model.Job{ if err := s.db.WithContext(ctx).First(&job, id).Updates(model.Job{
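
The aggregate error from retry.Run is now checked and logged instead of discarded. As a rough illustration of why that value matters, here is a hypothetical stand-in for the retry helper (runWithRetry is not the repository's retry package) that returns the last attempt's error:

package main

import (
	"errors"
	"fmt"
	"time"
)

// runWithRetry reruns fn until fn reports done or the attempts run out, and
// hands the last error back to the caller so it can be logged rather than
// dropped on the floor.
func runWithRetry(fn func() (done bool, err error), attempts int, backoff time.Duration) error {
	var last error
	for i := 0; i < attempts; i++ {
		done, err := fn()
		if done {
			return err
		}
		last = err
		time.Sleep(backoff)
	}
	return last
}

func main() {
	err := runWithRetry(func() (bool, error) {
		return false, errors.New("job still pending")
	}, 3, 10*time.Millisecond)
	fmt.Println(err)
}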

View File

@ -133,7 +133,7 @@ func (s *rest) OauthSignin(ctx context.Context, name string) (string, error) {
return "", err return "", err
} }
return o.AuthCodeURL(), nil return o.AuthCodeURL()
} }
func (s *rest) OauthSigninCallback(ctx context.Context, name, code string) (*model.User, error) { func (s *rest) OauthSigninCallback(ctx context.Context, name, code string) (*model.User, error) {

View File

@ -324,26 +324,39 @@ func testFillAndSerialize(t *testing.T, tc Cache) {
func TestFileSerialization(t *testing.T) { func TestFileSerialization(t *testing.T) {
tc := New(DefaultExpiration, 0) tc := New(DefaultExpiration, 0)
tc.Add("a", "a", DefaultExpiration) if err := tc.Add("a", "a", DefaultExpiration); err != nil {
tc.Add("b", "b", DefaultExpiration) t.Error(err)
}
if err := tc.Add("b", "b", DefaultExpiration); err != nil {
t.Error(err)
}
f, err := ioutil.TempFile("", "go-cache-cache.dat") f, err := ioutil.TempFile("", "go-cache-cache.dat")
if err != nil { if err != nil {
t.Fatal("Couldn't create cache file:", err) t.Fatal("Couldn't create cache file:", err)
} }
fname := f.Name() fname := f.Name()
f.Close() f.Close()
tc.SaveFile(fname)
if err := tc.SaveFile(fname); err != nil {
t.Fatal(err)
}
oc := New(DefaultExpiration, 0) oc := New(DefaultExpiration, 0)
oc.Add("a", "aa", 0) // this should not be overwritten // this should not be overwritten
err = oc.LoadFile(fname) if err := oc.Add("a", "aa", 0); err != nil {
if err != nil {
t.Error(err) t.Error(err)
} }
if err := oc.LoadFile(fname); err != nil {
t.Fatal(err)
}
a, found := oc.Get("a") a, found := oc.Get("a")
if !found { if !found {
t.Error("a was not found") t.Error("a was not found")
} }
astr := a.(string) astr := a.(string)
if astr != "aa" { if astr != "aa" {
if astr == "a" { if astr == "a" {
@ -352,10 +365,12 @@ func TestFileSerialization(t *testing.T) {
t.Error("a is not aa") t.Error("a is not aa")
} }
} }
b, found := oc.Get("b") b, found := oc.Get("b")
if !found { if !found {
t.Error("b was not found") t.Error("b was not found")
} }
if b.(string) != "b" { if b.(string) != "b" {
t.Error("b is not b") t.Error("b is not b")
} }

View File

@ -126,7 +126,9 @@ func TestGCRun(t *testing.T) {
ml.EXPECT().Infof(gomock.Any(), gomock.Eq("foo")).Do(func(template interface{}, args ...interface{}) { wg.Done() }).Times(1), ml.EXPECT().Infof(gomock.Any(), gomock.Eq("foo")).Do(func(template interface{}, args ...interface{}) { wg.Done() }).Times(1),
) )
gc.Run(id) if err := gc.Run(id); err != nil {
t.Error(err)
}
}, },
}, },
{ {
@ -149,7 +151,9 @@ func TestGCRun(t *testing.T) {
ml.EXPECT().Infof(gomock.Any(), gomock.Eq("foo")).Do(func(template interface{}, args ...interface{}) { wg.Done() }).Times(1), ml.EXPECT().Infof(gomock.Any(), gomock.Eq("foo")).Do(func(template interface{}, args ...interface{}) { wg.Done() }).Times(1),
) )
gc.Run(id) if err := gc.Run(id); err != nil {
t.Error(err)
}
}, },
}, },
{ {

View File

@ -138,7 +138,10 @@ retry:
SourceType: keepalive.SourceType, SourceType: keepalive.SourceType,
ClusterId: keepalive.ClusterId, ClusterId: keepalive.ClusterId,
}); err != nil { }); err != nil {
stream.CloseAndRecv() if _, err := stream.CloseAndRecv(); err != nil {
logger.Errorf("hostname %s cluster id %s close and recv stream failed", keepalive.HostName, keepalive.ClusterId, err)
}
cancel() cancel()
goto retry goto retry
} }

View File

@ -155,8 +155,7 @@ func (sc *schedulerClient) ReportPieceResult(ctx context.Context, taskID string,
logger.With("peerId", ptr.PeerId, "errMsg", err).Infof("start to report piece result for taskID: %s", taskID) logger.With("peerId", ptr.PeerId, "errMsg", err).Infof("start to report piece result for taskID: %s", taskID)
// trigger scheduling // trigger scheduling
pps.Send(scheduler.NewZeroPieceResult(taskID, ptr.PeerId)) return pps, pps.Send(scheduler.NewZeroPieceResult(taskID, ptr.PeerId))
return pps, err
} }
func (sc *schedulerClient) ReportPeerResult(ctx context.Context, pr *scheduler.PeerResult, opts ...grpc.CallOption) error { func (sc *schedulerClient) ReportPeerResult(ctx context.Context, pr *scheduler.PeerResult, opts ...grpc.CallOption) error {

View File

@ -73,21 +73,24 @@ func newPeerPacketStream(ctx context.Context, sc *schedulerClient, hashKey strin
return pps, nil return pps, nil
} }
func (pps *peerPacketStream) Send(pr *scheduler.PieceResult) (err error) { func (pps *peerPacketStream) Send(pr *scheduler.PieceResult) error {
pps.lastPieceResult = pr pps.lastPieceResult = pr
pps.sc.UpdateAccessNodeMapByHashKey(pps.hashKey) pps.sc.UpdateAccessNodeMapByHashKey(pps.hashKey)
err = pps.stream.Send(pr)
if err := pps.stream.Send(pr); err != nil {
if err := pps.closeSend(); err != nil {
return err
}
return err
}
if pr.PieceInfo.PieceNum == common.EndOfPiece { if pr.PieceInfo.PieceNum == common.EndOfPiece {
pps.closeSend() if err := pps.closeSend(); err != nil {
return return err
}
} }
if err != nil { return nil
pps.closeSend()
}
return
} }
func (pps *peerPacketStream) closeSend() error { func (pps *peerPacketStream) closeSend() error {
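
The rewritten Send propagates failures from both the underlying stream send and closeSend: when the send fails, the stream is closed and the close error, if any, takes precedence over the send error, and the final piece also triggers a close whose error reaches the caller. A compact stand-in with closures in place of the real stream; sendAndMaybeClose is illustrative only:

package main

import (
	"errors"
	"fmt"
)

// sendAndMaybeClose abstracts the rewritten Send: a failed send closes the
// stream and the close error (if any) wins; the last piece also closes the
// stream and returns that error to the caller.
func sendAndMaybeClose(send, closeSend func() error, last bool) error {
	if err := send(); err != nil {
		if cerr := closeSend(); cerr != nil {
			return cerr
		}
		return err
	}
	if last {
		return closeSend()
	}
	return nil
}

func main() {
	err := sendAndMaybeClose(
		func() error { return errors.New("send failed") },
		func() error { return nil },
		false,
	)
	fmt.Println(err)
}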

View File

@ -58,7 +58,10 @@ func TestHashFile(t *testing.T) {
f, err := fileutils.OpenFile(path, syscall.O_CREAT|syscall.O_TRUNC|syscall.O_RDWR, 0644) f, err := fileutils.OpenFile(path, syscall.O_CREAT|syscall.O_TRUNC|syscall.O_RDWR, 0644)
assert.Nil(t, err) assert.Nil(t, err)
f.Write([]byte("hello")) if _, err := f.Write([]byte("hello")); err != nil {
t.Error(err)
}
f.Close() f.Close()
assert.Equal(t, expected, HashFile(path, Md5Hash)) assert.Equal(t, expected, HashFile(path, Md5Hash))

View File

@ -59,14 +59,16 @@ func MoveFile(src, dst string) error {
return errors.Errorf("move %s to %s: src is not a regular file", src, dst) return errors.Errorf("move %s to %s: src is not a regular file", src, dst)
} }
var err error if err := os.Rename(src, dst); err != nil {
if err = os.Rename(src, dst); err != nil { if _, err := CopyFile(src, dst); err != nil {
if _, err = CopyFile(src, dst); err == nil { return errors.Wrapf(err, "failed to copy %s to %s", src, dst)
fileutils.DeleteFile(src) }
if err := fileutils.DeleteFile(src); err != nil {
return errors.Wrapf(err, "failed to delete %s", src)
} }
} }
return errors.Wrapf(err, "failed to move %s to %s", src, dst) return nil
} }
// CleanFile cleans content of the file. // CleanFile cleans content of the file.
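
MoveFile now returns nil on the happy path and wraps every fallback error: when the rename fails, the file is copied and the source deleted, and either of those failures is reported instead of the original rename error. A simplified, standard-library-only stand-in for the same control flow; moveFile below is illustrative, not the repository's implementation, which relies on its own CopyFile and DeleteFile helpers:

package main

import (
	"fmt"
	"io"
	"os"
)

// moveFile tries an atomic rename first and, on failure (for example across
// devices), falls back to copy-then-delete, returning every error instead of
// swallowing it.
func moveFile(src, dst string) error {
	if err := os.Rename(src, dst); err == nil {
		return nil
	}
	in, err := os.Open(src)
	if err != nil {
		return fmt.Errorf("open %s: %w", src, err)
	}
	defer in.Close()
	out, err := os.Create(dst)
	if err != nil {
		return fmt.Errorf("create %s: %w", dst, err)
	}
	if _, err := io.Copy(out, in); err != nil {
		out.Close()
		return fmt.Errorf("copy %s to %s: %w", src, dst, err)
	}
	if err := out.Close(); err != nil {
		return err
	}
	return os.Remove(src)
}

func main() {
	fmt.Println(moveFile("/tmp/example-src.txt", "/tmp/example-dst.txt"))
}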

View File

@ -132,7 +132,9 @@ func (s *FileUtilsTestSuite) TestIsEmptyDir() {
_, err = fileutils.IsEmptyDir(s.testDir) _, err = fileutils.IsEmptyDir(s.testDir)
s.Require().NotNil(err) s.Require().NotNil(err)
fileutils.MkdirAll(s.testDir) err = fileutils.MkdirAll(s.testDir)
s.Require().Nil(err)
b, err := fileutils.IsEmptyDir(s.testDir) b, err := fileutils.IsEmptyDir(s.testDir)
s.Require().Nil(err) s.Require().Nil(err)
s.Require().True(b) s.Require().True(b)
@ -144,7 +146,9 @@ func (s *FileUtilsTestSuite) TestCopyFile() {
f, err := fileutils.OpenFile(s.testFile, syscall.O_WRONLY|syscall.O_CREAT, 0644) f, err := fileutils.OpenFile(s.testFile, syscall.O_WRONLY|syscall.O_CREAT, 0644)
s.Require().Nil(err) s.Require().Nil(err)
f.WriteString("hello,world")
_, err = f.WriteString("hello,world")
s.Require().Nil(err)
f.Close() f.Close()
_, err = filerw.CopyFile(s.testFile, s.testFile+".new") _, err = filerw.CopyFile(s.testFile, s.testFile+".new")
@ -162,12 +166,14 @@ func (s *FileUtilsTestSuite) TestTryLock() {
f2, err := fileutils.NewFileLock(s.testFile) f2, err := fileutils.NewFileLock(s.testFile)
s.Require().Nil(err) s.Require().Nil(err)
f1.Lock() err = f1.Lock()
s.Require().Nil(err)
err = f2.TryLock() err = f2.TryLock()
s.Require().NotNil(err) s.Require().NotNil(err)
f1.Unlock() err = f1.Unlock()
s.Require().Nil(err)
err = f2.TryLock() err = f2.TryLock()
s.Require().Nil(err) s.Require().Nil(err)

View File

@ -81,8 +81,13 @@ func TestSchedulerConfig_Load(t *testing.T) {
schedulerConfigYAML := &Config{} schedulerConfigYAML := &Config{}
contentYAML, _ := ioutil.ReadFile("./testdata/scheduler.yaml") contentYAML, _ := ioutil.ReadFile("./testdata/scheduler.yaml")
var dataYAML map[string]interface{} var dataYAML map[string]interface{}
yaml.Unmarshal(contentYAML, &dataYAML) if err := yaml.Unmarshal(contentYAML, &dataYAML); err != nil {
mapstructure.Decode(dataYAML, &schedulerConfigYAML) t.Fatal(err)
}
if err := mapstructure.Decode(dataYAML, &schedulerConfigYAML); err != nil {
t.Fatal(err)
}
assert.True(reflect.DeepEqual(config, schedulerConfigYAML)) assert.True(reflect.DeepEqual(config, schedulerConfigYAML))
} }
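
The config test now fails fast if either decode step errors out. The same two-step decode, shown as a self-contained example; the config struct and YAML snippet are made up, and gopkg.in/yaml.v3 is assumed here even though the repository may pin a different yaml package:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
	"gopkg.in/yaml.v3"
)

type serverConfig struct {
	Port int `mapstructure:"port"`
}

func main() {
	// First decode YAML into a generic map, then map it onto the typed
	// struct; both steps can fail and both errors are checked.
	var raw map[string]interface{}
	if err := yaml.Unmarshal([]byte("port: 8002"), &raw); err != nil {
		panic(err)
	}
	var cfg serverConfig
	if err := mapstructure.Decode(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Port:8002}
}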

View File

@ -301,7 +301,9 @@ func (d *dynconfig) watch() {
for { for {
select { select {
case <-tick.C: case <-tick.C:
d.Notify() if err := d.Notify(); err != nil {
logger.Error("dynconfig notify failed", err)
}
case <-d.done: case <-d.done:
return return
} }

View File

@ -131,7 +131,9 @@ loop:
func debug() { func debug() {
debugAddr := fmt.Sprintf("%s:%d", iputils.HostIP, 18066) debugAddr := fmt.Sprintf("%s:%d", iputils.HostIP, 18066)
viewer.SetConfiguration(viewer.WithAddr(debugAddr)) viewer.SetConfiguration(viewer.WithAddr(debugAddr))
statsview.New().Start() if err := statsview.New().Start(); err != nil {
log.Println("stat view start failed", err)
}
} }
func forceExit(signals chan os.Signal) { func forceExit(signals chan os.Signal) {
@ -278,8 +280,10 @@ func saveToOutput(results []*Result) {
if v.PeerID == "" { if v.PeerID == "" {
v.PeerID = "unknown" v.PeerID = "unknown"
} }
out.WriteString(fmt.Sprintf("%s %s %d %v %d %d %s\n", if _, err := out.WriteString(fmt.Sprintf("%s %s %d %v %d %d %s\n",
v.TaskID, v.PeerID, v.StatusCode, v.Cost, v.TaskID, v.PeerID, v.StatusCode, v.Cost,
v.StartTime.UnixNano()/100, v.EndTime.UnixNano()/100, v.Message)) v.StartTime.UnixNano()/100, v.EndTime.UnixNano()/100, v.Message)); err != nil {
log.Panicln("write string failed", err)
}
} }
} }