chore: add lint errcheck and fix errcheck (#766)

* feat: add lint errcheck and fix errcheck

Signed-off-by: Gaius <gaius.qi@gmail.com>

* replace assert with require

Signed-off-by: 孙伟鹏 <weipeng.swp@alibaba-inc.com>
Co-authored-by: 孙伟鹏 <weipeng.swp@alibaba-inc.com>

parent f310425962, commit e837ae9bdf
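The diff below applies one recurring pattern: a return value that used to be silently discarded is now captured and either propagated, logged, or failed on, and test assertions move from assert-style to require-style so a failed precondition stops the test immediately. The following is a minimal, self-contained sketch of both patterns; the file, function, and test names are illustrative only and are not taken from this repository.

package example

import (
    "log"
    "os"
    "testing"

    "github.com/stretchr/testify/require"
)

// writeAll shows the errcheck remediation pattern: every error-returning
// call is either propagated, logged, or checked instead of being dropped.
func writeAll(path string, data []byte) error {
    f, err := os.Create(path)
    if err != nil {
        return err
    }
    defer func() {
        // Close errors are logged so an earlier write error is not masked.
        if cerr := f.Close(); cerr != nil {
            log.Printf("close %s failed: %v", path, cerr)
        }
    }()

    if _, err := f.Write(data); err != nil {
        return err
    }
    return nil
}

// TestWriteAll shows the assert-to-require switch: require.* aborts the test
// on the first failure, while assert.* records it and keeps going.
func TestWriteAll(t *testing.T) {
    require.NoError(t, writeAll(t.TempDir()+"/out.txt", []byte("hello")))
}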
@@ -21,6 +21,7 @@ linters:
     - deadcode
     - gocyclo
     - staticcheck
+    - errcheck
 
 output:
   format: colored-line-number
@@ -51,16 +51,19 @@ func (s *PluginsTestSuite) TestPluginBuilder() {
     manager := NewManager()
 
     var testFunc = func(pt PluginType, name string, b Builder, result bool) {
-        manager.AddBuilder(pt, name, b)
-        obj, _ := manager.GetBuilder(pt, name)
-        if result {
-            s.NotNil(obj)
+        err := manager.AddBuilder(pt, name, b)
+        if !result {
+            s.Require().NotNil(err)
+        }
+        obj, ok := manager.GetBuilder(pt, name)
+        if ok {
+            s.Require().NotNil(obj)
             objVal := reflect.ValueOf(obj)
             bVal := reflect.ValueOf(b)
-            s.Equal(objVal.Pointer(), bVal.Pointer())
+            s.Require().Equal(objVal.Pointer(), bVal.Pointer())
             manager.DeleteBuilder(pt, name)
         } else {
-            s.Nil(obj)
+            s.Require().Nil(obj)
         }
     }
 

@@ -76,14 +79,17 @@ func (s *PluginsTestSuite) TestManagerPlugin() {
     manager := NewManager()
 
     var testFunc = func(p Plugin, result bool) {
-        manager.AddPlugin(p)
-        obj, _ := manager.GetPlugin(p.Type(), p.Name())
-        if result {
-            s.NotNil(obj)
-            s.Equal(obj, p)
+        err := manager.AddPlugin(p)
+        if !result {
+            s.Require().NotNil(err)
+        }
+        obj, ok := manager.GetPlugin(p.Type(), p.Name())
+        if ok {
+            s.Require().NotNil(obj)
+            s.Require().Equal(obj, p)
             manager.DeletePlugin(p.Type(), p.Name())
         } else {
-            s.Nil(obj)
+            s.Require().Nil(obj)
         }
     }
 

@@ -128,16 +134,19 @@ func (s *PluginsTestSuite) TestRepositoryIml() {
 
     repo := NewRepository()
     for _, v := range cases {
-        repo.Add(v.pt, v.name, v.data)
-        data, _ := repo.Get(v.pt, v.name)
-        if v.addResult {
-            s.NotNil(data)
-            s.Equal(data, v.data)
+        err := repo.Add(v.pt, v.name, v.data)
+        if !v.addResult {
+            s.Require().NotNil(err)
+        }
+        data, ok := repo.Get(v.pt, v.name)
+        if ok {
+            s.Require().NotNil(data)
+            s.Require().Equal(data, v.data)
             repo.Delete(v.pt, v.name)
             data, _ = repo.Get(v.pt, v.name)
-            s.Nil(data)
+            s.Require().Nil(data)
         } else {
-            s.Nil(data)
+            s.Require().Nil(data)
         }
     }
 }

@@ -159,7 +168,7 @@ func (s *PluginsTestSuite) TestValidate() {
         )
     }
     for _, v := range cases {
-        s.Equal(validate(v.pt, v.name), v.expected)
+        s.Require().Equal(validate(v.pt, v.name), v.expected)
     }
 }
@@ -43,8 +43,13 @@ const (
 var fileLocker = synclock.NewLockerPool()
 
 func init() {
-    storedriver.Register(DiskDriverName, NewStorageDriver)
-    storedriver.Register(MemoryDriverName, NewStorageDriver)
+    if err := storedriver.Register(DiskDriverName, NewStorageDriver); err != nil {
+        logger.CoreLogger.Error(err)
+    }
+
+    if err := storedriver.Register(MemoryDriverName, NewStorageDriver); err != nil {
+        logger.CoreLogger.Error(err)
+    }
 }
 
 // driver is one of the implementations of storage Driver using local file system.
@@ -50,7 +50,9 @@ var (
 )
 
 func init() {
-    storage.Register(StorageMode, newStorageManager)
+    if err := storage.Register(StorageMode, newStorageManager); err != nil {
+        logger.CoreLogger.Error(err)
+    }
 }
 
 func newStorageManager(cfg *storage.Config) (storage.Manager, error) {

@@ -265,13 +267,20 @@ func (s *diskStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
             return nil
         },
     }
-    s.diskDriver.Walk(r)
+    if err := s.diskDriver.Walk(r); err != nil {
+        return false, err
+    }
+
     enoughSpace := freeSpace.ToNumber()-remainder.Load() > fileLength
     if !enoughSpace {
-        s.cleaner.GC("disk", true)
+        if _, err := s.cleaner.GC("disk", true); err != nil {
+            return false, err
+        }
+
         remainder.Store(0)
-        s.diskDriver.Walk(r)
+        if err := s.diskDriver.Walk(r); err != nil {
+            return false, err
+        }
         freeSpace, err = s.diskDriver.GetFreeSpace()
         if err != nil {
             return false, err
@@ -50,7 +50,9 @@ var _ storage.Manager = (*hybridStorageMgr)(nil)
 var _ gc.Executor = (*hybridStorageMgr)(nil)
 
 func init() {
-    storage.Register(StorageMode, newStorageManager)
+    if err := storage.Register(StorageMode, newStorageManager); err != nil {
+        logger.CoreLogger.Error(err)
+    }
 }
 
 // NewStorageManager performs initialization for storage manager and return a storage Manager.

@@ -325,13 +327,20 @@ func (h *hybridStorageMgr) TryFreeSpace(fileLength int64) (bool, error) {
             return nil
         },
     }
-    h.diskDriver.Walk(r)
+    if err := h.diskDriver.Walk(r); err != nil {
+        return false, err
+    }
+
     enoughSpace := diskFreeSpace.ToNumber()-remainder.Load() > fileLength
     if !enoughSpace {
-        h.diskDriverCleaner.GC("hybrid", true)
+        if _, err := h.diskDriverCleaner.GC("hybrid", true); err != nil {
+            return false, err
+        }
+
         remainder.Store(0)
-        h.diskDriver.Walk(r)
+        if err := h.diskDriver.Walk(r); err != nil {
+            return false, err
+        }
         diskFreeSpace, err = h.diskDriver.GetFreeSpace()
         if err != nil {
             return false, err

@@ -397,7 +406,7 @@ func (h *hybridStorageMgr) deleteTaskFiles(taskID string, deleteUploadPath bool,
 func (h *hybridStorageMgr) tryShmSpace(url, taskID string, fileLength int64) (string, error) {
     if h.shmSwitch.check(url, fileLength) && h.hasShm {
         remainder := atomic.NewInt64(0)
-        h.memoryDriver.Walk(&storedriver.Raw{
+        if err := h.memoryDriver.Walk(&storedriver.Raw{
             WalkFn: func(filePath string, info os.FileInfo, err error) error {
                 if fileutils.IsRegular(filePath) {
                     taskID := strings.Split(path.Base(filePath), ".")[0]

@@ -416,12 +425,18 @@ func (h *hybridStorageMgr) tryShmSpace(url, taskID string, fileLength int64) (st
                 }
                 return nil
             },
-        })
+        }); err != nil {
+            return "", err
+        }
+
         canUseShm := h.getMemoryUsableSpace()-unit.Bytes(remainder.Load())-secureLevel >= unit.Bytes(
             fileLength)
         if !canUseShm {
             // if the remaining space is too small, force a full gc and then re-check whether it is enough
-            h.memoryDriverCleaner.GC("hybrid", true)
+            if _, err := h.memoryDriverCleaner.GC("hybrid", true); err != nil {
+                return "", err
+            }
+
             canUseShm = h.getMemoryUsableSpace()-unit.Bytes(remainder.Load())-secureLevel >= unit.Bytes(
                 fileLength)
         }
@@ -190,7 +190,9 @@ func (tm Manager) Delete(taskID string) error {
     tm.accessTimeMap.Delete(taskID)
     tm.taskURLUnReachableStore.Delete(taskID)
     tm.taskStore.Delete(taskID)
-    tm.progressMgr.Clear(taskID)
+    if err := tm.progressMgr.Clear(taskID); err != nil {
+        return err
+    }
     return nil
 }
 

@@ -227,7 +229,10 @@ func (tm *Manager) GC() error {
         }
         // gc task memory data
         logger.GcLogger.With("type", "meta").Infof("gc task: start to deal with task: %s", taskID)
-        tm.Delete(taskID)
+        if err := tm.Delete(taskID); err != nil {
+            logger.GcLogger.With("type", "meta").Infof("gc task: failed to delete task: %s", taskID)
+            continue
+        }
         removedTaskCount++
     }
 
@@ -93,7 +93,9 @@ func (tm *Manager) addOrUpdateTask(ctx context.Context, request *types.TaskRegis
     if err != nil {
         task.Log().Errorf("failed to get url (%s) content length: %v", task.URL, err)
         if cdnerrors.IsURLNotReachable(err) {
-            tm.taskURLUnReachableStore.Add(taskID, time.Now())
+            if err := tm.taskURLUnReachableStore.Add(taskID, time.Now()); err != nil {
+                task.Log().Errorf("failed to add url (%s) to unreachable store: %v", task.URL, err)
+            }
             return nil, err
         }
     }

@@ -119,9 +121,11 @@ func (tm *Manager) addOrUpdateTask(ctx context.Context, request *types.TaskRegis
         pieceSize := cdnutil.ComputePieceSize(task.SourceFileLength)
         task.PieceSize = pieceSize
     }
-    tm.taskStore.Add(task.TaskID, task)
-    logger.Debugf("success add task: %+v into taskStore", task)
+    if err := tm.taskStore.Add(task.TaskID, task); err != nil {
+        return nil, err
+    }
+
+    logger.Debugf("success add task: %+v into taskStore", task)
     return task, nil
 }
 
@@ -441,10 +441,14 @@ func (cd *clientDaemon) Stop() {
         close(cd.done)
         cd.GCManager.Stop()
         cd.RPCManager.Stop()
-        cd.UploadManager.Stop()
+        if err := cd.UploadManager.Stop(); err != nil {
+            logger.Errorf("upload manager stop failed %s", err)
+        }
+
         if cd.ProxyManager.IsEnabled() {
-            cd.ProxyManager.Stop()
+            if err := cd.ProxyManager.Stop(); err != nil {
+                logger.Errorf("proxy manager stop failed %s", err)
+            }
         }
 
         if !cd.Option.KeepStorage {
@@ -415,9 +415,13 @@ loop:
             if pt.failedCode == failedCodeNotSet {
                 pt.failedReason = reasonContextCanceled
                 pt.failedCode = dfcodes.ClientContextCanceled
-                pt.callback.Fail(pt, pt.failedCode, pt.ctx.Err().Error())
+                if err := pt.callback.Fail(pt, pt.failedCode, pt.ctx.Err().Error()); err != nil {
+                    pt.Errorf("peer task callback failed %s", err)
+                }
             } else {
-                pt.callback.Fail(pt, pt.failedCode, pt.failedReason)
+                if err := pt.callback.Fail(pt, pt.failedCode, pt.failedReason); err != nil {
+                    pt.Errorf("peer task callback failed %s", err)
+                }
             }
         }
         break loop

@@ -661,7 +665,7 @@ func (pt *peerTask) downloadPieceWorker(id int32, pti Task, requests chan *Downl
             pt.Errorf("request limiter error: %s", err)
             waitSpan.RecordError(err)
             waitSpan.End()
-            pti.ReportPieceResult(&pieceTaskResult{
+            if err := pti.ReportPieceResult(&pieceTaskResult{
                 piece: request.piece,
                 pieceResult: &scheduler.PieceResult{
                     TaskId: pt.GetTaskID(),

@@ -674,7 +678,10 @@ func (pt *peerTask) downloadPieceWorker(id int32, pti Task, requests chan *Downl
                     FinishedCount: 0, // update by peer task
                 },
                 err: err,
-            })
+            }); err != nil {
+                pt.Errorf("report piece result failed %s", err)
+            }
+
             pt.failedReason = err.Error()
             pt.failedCode = dfcodes.ClientRequestLimitFail
             pt.cancel()
@@ -99,7 +99,13 @@ func setupPeerTaskManagerComponents(ctrl *gomock.Controller, opt componentsOptio
         Type: "tcp",
         Addr: fmt.Sprintf("0.0.0.0:%d", port),
     })
-    go daemonserver.New(daemon).Serve(ln)
+
+    go func() {
+        if err := daemonserver.New(daemon).Serve(ln); err != nil {
+            panic(err)
+        }
+    }()
+
     time.Sleep(100 * time.Millisecond)
 
     // 2. setup a scheduler
@@ -22,7 +22,9 @@ import (
     "fmt"
     "io"
     "io/ioutil"
+    "log"
     "math"
+    "net"
     "sync"
     "testing"
     "time"
 

@@ -94,7 +96,11 @@ func setupBackSourcePartialComponents(ctrl *gomock.Controller, testBytes []byte,
         Type: "tcp",
         Addr: fmt.Sprintf("0.0.0.0:%d", port),
     })
-    go daemonserver.New(daemon).Serve(ln)
+    go func(daemon *mock_daemon.MockDaemonServer, ln net.Listener) {
+        if err := daemonserver.New(daemon).Serve(ln); err != nil {
+            log.Fatal(err)
+        }
+    }(daemon, ln)
     time.Sleep(100 * time.Millisecond)
 
     // 2. setup a scheduler
@@ -59,7 +59,9 @@ func TestPieceDownloader_DownloadPiece(t *testing.T) {
                 assert.Equal(upload.PeerDownloadHTTPPathPrefix+"tas/"+"task-0", r.URL.Path)
                 data := []byte("test test ")
                 w.Header().Set(headers.ContentLength, fmt.Sprintf("%d", len(data)))
-                w.Write(data)
+                if _, err := w.Write(data); err != nil {
+                    t.Error(err)
+                }
             },
             taskID:     "task-0",
             pieceRange: "bytes=0-9",

@@ -72,7 +74,9 @@ func TestPieceDownloader_DownloadPiece(t *testing.T) {
                 assert.Equal(upload.PeerDownloadHTTPPathPrefix+"tas/"+"task-1", r.URL.Path)
                 rg := clientutil.MustParseRange(r.Header.Get("Range"), math.MaxInt64)
                 w.Header().Set(headers.ContentLength, fmt.Sprintf("%d", rg.Length))
-                w.Write(testData[rg.Start : rg.Start+rg.Length])
+                if _, err := w.Write(testData[rg.Start : rg.Start+rg.Length]); err != nil {
+                    t.Error(err)
+                }
             },
             taskID:     "task-1",
             pieceRange: "bytes=0-99",

@@ -85,7 +89,9 @@ func TestPieceDownloader_DownloadPiece(t *testing.T) {
                 assert.Equal(upload.PeerDownloadHTTPPathPrefix+"tas/"+"task-2", r.URL.Path)
                 rg := clientutil.MustParseRange(r.Header.Get("Range"), math.MaxInt64)
                 w.Header().Set(headers.ContentLength, fmt.Sprintf("%d", rg.Length))
-                w.Write(testData[rg.Start : rg.Start+rg.Length])
+                if _, err := w.Write(testData[rg.Start : rg.Start+rg.Length]); err != nil {
+                    t.Error(err)
+                }
             },
             taskID:     "task-2",
             pieceRange: fmt.Sprintf("bytes=512-%d", len(testData)-1),

@@ -98,7 +104,9 @@ func TestPieceDownloader_DownloadPiece(t *testing.T) {
                 assert.Equal(upload.PeerDownloadHTTPPathPrefix+"tas/"+"task-3", r.URL.Path)
                 rg := clientutil.MustParseRange(r.Header.Get("Range"), math.MaxInt64)
                 w.Header().Set(headers.ContentLength, fmt.Sprintf("%d", rg.Length))
-                w.Write(testData[rg.Start : rg.Start+rg.Length])
+                if _, err := w.Write(testData[rg.Start : rg.Start+rg.Length]); err != nil {
+                    t.Error(err)
+                }
             },
             taskID:     "task-3",
             pieceRange: "bytes=512-1024",
@@ -360,7 +360,7 @@ func (pm *pieceManager) DownloadSource(ctx context.Context, pt Task, request *sc
             // last piece, piece size maybe 0
             if n < int64(size) {
                 contentLength = int64(pieceNum*pieceSize) + n
-                pm.storageManager.UpdateTask(ctx,
+                if err := pm.storageManager.UpdateTask(ctx,
                     &storage.UpdateTaskRequest{
                         PeerTaskMetaData: storage.PeerTaskMetaData{
                             PeerID: pt.GetPeerID(),

@@ -368,7 +368,9 @@ func (pm *pieceManager) DownloadSource(ctx context.Context, pt Task, request *sc
                         },
                         ContentLength:  contentLength,
                         GenPieceDigest: true,
-                    })
+                    }); err != nil {
+                    log.Errorf("update task failed %s", err)
+                }
                 pt.SetTotalPieces(pieceNum + 1)
                 return pt.SetContentLength(contentLength)
             }

@@ -397,8 +399,11 @@ func (pm *pieceManager) DownloadSource(ctx context.Context, pt Task, request *sc
         }
     }
     pt.SetTotalPieces(maxPieceNum)
-    pt.SetContentLength(contentLength)
-    pm.storageManager.UpdateTask(ctx,
+    if err := pt.SetContentLength(contentLength); err != nil {
+        log.Errorf("set content length failed %s", err)
+    }
+
+    if err := pm.storageManager.UpdateTask(ctx,
         &storage.UpdateTaskRequest{
             PeerTaskMetaData: storage.PeerTaskMetaData{
                 PeerID: pt.GetPeerID(),

@@ -407,7 +412,9 @@ func (pm *pieceManager) DownloadSource(ctx context.Context, pt Task, request *sc
             ContentLength:  contentLength,
             TotalPieces:    maxPieceNum,
             GenPieceDigest: true,
-        })
+        }); err != nil {
+        log.Errorf("update task failed %s", err)
+    }
     log.Infof("download from source ok")
     return nil
 }
@@ -463,7 +463,9 @@ func (proxy *Proxy) mirrorRegistry(w http.ResponseWriter, r *http.Request) {
     reverseProxy.ErrorHandler = func(rw http.ResponseWriter, req *http.Request, err error) {
         rw.WriteHeader(http.StatusInternalServerError)
         // write error string to response body
-        rw.Write([]byte(err.Error()))
+        if _, err := rw.Write([]byte(err.Error())); err != nil {
+            logger.Errorf("write error string to response body failed %s", err)
+        }
     }
     reverseProxy.ServeHTTP(w, r)
 }

@@ -567,8 +569,15 @@ func tunnelHTTPS(w http.ResponseWriter, r *http.Request) {
         http.Error(w, err.Error(), http.StatusServiceUnavailable)
     }
 
-    go copyAndClose(dst, clientConn)
-    copyAndClose(clientConn, dst)
+    go func() {
+        if err := copyAndClose(dst, clientConn); err != nil {
+            http.Error(w, err.Error(), http.StatusInternalServerError)
+        }
+    }()
+
+    if err := copyAndClose(clientConn, dst); err != nil {
+        http.Error(w, err.Error(), http.StatusInternalServerError)
+    }
 }
 
 func copyAndClose(dst io.WriteCloser, src io.ReadCloser) error {
@@ -86,7 +86,9 @@ func TestDownloadManager_ServeDownload(t *testing.T) {
     ln, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
     assert.Nil(err, "get free port should be ok")
     go func() {
-        m.ServeDownload(ln)
+        if err := m.ServeDownload(ln); err != nil {
+            t.Error(err)
+        }
     }()
     time.Sleep(100 * time.Millisecond)
 

@@ -169,7 +171,9 @@ func TestDownloadManager_ServePeer(t *testing.T) {
     ln, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
     assert.Nil(err, "get free port should be ok")
     go func() {
-        m.ServePeer(ln)
+        if err := m.ServePeer(ln); err != nil {
+            t.Error(err)
+        }
     }()
     time.Sleep(100 * time.Millisecond)
 
@@ -64,7 +64,9 @@ func TestUploadManager_Serve(t *testing.T) {
     addr := listen.Addr().String()
 
     go func() {
-        um.Serve(listen)
+        if err := um.Serve(listen); err != nil {
+            t.Error(err)
+        }
     }()
 
     tests := []struct {
@@ -131,7 +131,11 @@ func runDaemon() error {
             break
         }
     }
-    defer lock.Unlock()
+    defer func() {
+        if err := lock.Unlock(); err != nil {
+            logger.Errorf("flock unlock failed %s", err)
+        }
+    }()
 
     logger.Infof("daemon is launched by pid: %d", viper.GetInt("launcher"))
 
@@ -200,8 +200,15 @@ func checkAndSpawnDaemon() (client.DaemonClient, error) {
     }
 
     lock := flock.New(dfpath.DfgetLockPath)
-    lock.Lock()
-    defer lock.Unlock()
+    if err := lock.Lock(); err != nil {
+        return nil, err
+    }
+
+    defer func() {
+        if err := lock.Unlock(); err != nil {
+            logger.Errorf("flock unlock failed %s", err)
+        }
+    }()
 
     // 2.Check with lock
     if daemonClient.CheckHealth(context.Background(), target) == nil {
@@ -156,6 +156,10 @@ func Warnf(template string, args ...interface{}) {
     CoreLogger.Warnf(template, args...)
 }
 
+func Warn(args ...interface{}) {
+    CoreLogger.Warn(args...)
+}
+
 func Errorf(template string, args ...interface{}) {
     CoreLogger.Errorf(template, args...)
 }
@@ -20,6 +20,7 @@ import (
     "errors"
     "time"
 
+    logger "d7y.io/dragonfly/v2/internal/dflog"
     "d7y.io/dragonfly/v2/pkg/cache"
 )
 

@@ -58,7 +59,9 @@ func (d *dynconfigManager) get() (interface{}, error) {
 
     // Cache has expired
     // Reload and ignore client request error
-    d.load()
+    if err := d.load(); err != nil {
+        logger.Warn("reload failed", err)
+    }
 
     dynconfig, ok := d.cache.Get(defaultCacheKey)
     if !ok {
@@ -66,17 +66,23 @@ func TestDynconfigUnmarshal_ManagerSourceType(t *testing.T) {
             cleanFileCache: func(t *testing.T) {},
             mock: func(m *mock_manager_client.MockmanagerClientMockRecorder) {
                 var d map[string]interface{}
-                mapstructure.Decode(TestDynconfig{
+                if err := mapstructure.Decode(TestDynconfig{
                     Scheduler: SchedulerOption{
                         Name: schedulerName,
                     },
-                }, &d)
+                }, &d); err != nil {
+                    t.Error(err)
+                }
+
                 m.Get().Return(d, nil).AnyTimes()
             },
             expect: func(t *testing.T, data interface{}) {
                 assert := assert.New(t)
                 var d TestDynconfig
-                mapstructure.Decode(data, &d)
+                if err := mapstructure.Decode(data, &d); err != nil {
+                    t.Error(err)
+                }
+
                 assert.EqualValues(d, TestDynconfig{
                     Scheduler: SchedulerOption{
                         Name: schedulerName,

@@ -102,11 +108,14 @@ func TestDynconfigUnmarshal_ManagerSourceType(t *testing.T) {
             },
             mock: func(m *mock_manager_client.MockmanagerClientMockRecorder) {
                 var d map[string]interface{}
-                mapstructure.Decode(TestDynconfig{
+                if err := mapstructure.Decode(TestDynconfig{
                     Scheduler: SchedulerOption{
                         Name: schedulerName,
                     },
-                }, &d)
+                }, &d); err != nil {
+                    t.Error(err)
+                }
+
                 m.Get().Return(d, nil).Times(1)
             },
             expect: func(t *testing.T, data interface{}) {

@@ -136,11 +145,14 @@ func TestDynconfigUnmarshal_ManagerSourceType(t *testing.T) {
             },
             mock: func(m *mock_manager_client.MockmanagerClientMockRecorder) {
                 var d map[string]interface{}
-                mapstructure.Decode(TestDynconfig{
+                if err := mapstructure.Decode(TestDynconfig{
                     Scheduler: SchedulerOption{
                         Name: schedulerName,
                     },
-                }, &d)
+                }, &d); err != nil {
+                    t.Error(err)
+                }
+
                 m.Get().Return(d, nil).Times(1)
                 m.Get().Return(nil, errors.New("manager serivce error")).Times(1)
             },

@@ -208,7 +220,10 @@ func TestDynconfigUnmarshal_LocalSourceType(t *testing.T) {
             expect: func(t *testing.T, data interface{}) {
                 assert := assert.New(t)
                 var d TestDynconfig
-                mapstructure.Decode(data, &d)
+                if err := mapstructure.Decode(data, &d); err != nil {
+                    t.Error(err)
+                }
+
                 assert.EqualValues(d, TestDynconfig{
                     Scheduler: SchedulerOption{
                         Name: schedulerName,
@@ -47,10 +47,13 @@ func newGithub(name, clientID, clientSecret, redirectURL string) *oauthGithub {
     }
 }
 
-func (g *oauthGithub) AuthCodeURL() string {
+func (g *oauthGithub) AuthCodeURL() (string, error) {
     b := make([]byte, 16)
-    rand.Read(b)
-    return g.Config.AuthCodeURL(base64.URLEncoding.EncodeToString(b))
+    if _, err := rand.Read(b); err != nil {
+        return "", err
+    }
+
+    return g.Config.AuthCodeURL(base64.URLEncoding.EncodeToString(b)), nil
 }
 
 func (g *oauthGithub) Exchange(code string) (*oauth2.Token, error) {
@@ -48,10 +48,13 @@ func newGoogle(name, clientID, clientSecret, redirectURL string) *oauthGoogle {
     }
 }
 
-func (g *oauthGoogle) AuthCodeURL() string {
+func (g *oauthGoogle) AuthCodeURL() (string, error) {
     b := make([]byte, 16)
-    rand.Read(b)
-    return g.Config.AuthCodeURL(base64.URLEncoding.EncodeToString(b))
+    if _, err := rand.Read(b); err != nil {
+        return "", err
+    }
+
+    return g.Config.AuthCodeURL(base64.URLEncoding.EncodeToString(b)), nil
 }
 
 func (g *oauthGoogle) Exchange(code string) (*oauth2.Token, error) {
@@ -39,7 +39,7 @@ type User struct {
 }
 
 type Oauth interface {
-    AuthCodeURL() string
+    AuthCodeURL() (string, error)
     Exchange(string) (*oauth2.Token, error)
     GetUser(*oauth2.Token) (*User, error)
 }

@@ -62,7 +62,7 @@ func New(name, clientID, clientSecret, redirectURL string) (Oauth, error) {
     return o, nil
 }
 
-func (g *oauth) AuthCodeURL() string {
+func (g *oauth) AuthCodeURL() (string, error) {
     return g.Oauth.AuthCodeURL()
 }
 
@@ -76,7 +76,13 @@ func TestManagerConfig_Load(t *testing.T) {
     managerConfigYAML := &Config{}
     contentYAML, _ := ioutil.ReadFile("./testdata/manager.yaml")
     var dataYAML map[string]interface{}
-    yaml.Unmarshal(contentYAML, &dataYAML)
-    mapstructure.Decode(dataYAML, &managerConfigYAML)
+    if err := yaml.Unmarshal(contentYAML, &dataYAML); err != nil {
+        t.Fatal(err)
+    }
+
+    if err := mapstructure.Decode(dataYAML, &managerConfigYAML); err != nil {
+        t.Fatal(err)
+    }
+
     assert.EqualValues(config, managerConfigYAML)
 }
@@ -46,7 +46,7 @@ func (h *Handlers) CreateCDN(ctx *gin.Context) {
 
     cdn, err := h.service.CreateCDN(ctx.Request.Context(), json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -72,7 +72,7 @@ func (h *Handlers) DestroyCDN(ctx *gin.Context) {
     }
 
     if err := h.service.DestroyCDN(ctx.Request.Context(), params.ID); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -94,19 +94,19 @@ func (h *Handlers) DestroyCDN(ctx *gin.Context) {
 func (h *Handlers) UpdateCDN(ctx *gin.Context) {
     var params types.CDNParams
     if err := ctx.ShouldBindUri(&params); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     var json types.UpdateCDNRequest
     if err := ctx.ShouldBindJSON(&json); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     cdn, err := h.service.UpdateCDN(ctx.Request.Context(), params.ID, json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -133,7 +133,7 @@ func (h *Handlers) GetCDN(ctx *gin.Context) {
 
     cdn, err := h.service.GetCDN(ctx.Request.Context(), params.ID)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -162,7 +162,7 @@ func (h *Handlers) GetCDNs(ctx *gin.Context) {
     h.setPaginationDefault(&query.Page, &query.PerPage)
     cdns, count, err := h.service.GetCDNs(ctx.Request.Context(), query)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
@@ -47,7 +47,7 @@ func (h *Handlers) CreateCDNCluster(ctx *gin.Context) {
     if json.SecurityGroupDomain != "" {
         cdn, err := h.service.CreateCDNClusterWithSecurityGroupDomain(ctx.Request.Context(), json)
         if err != nil {
-            ctx.Error(err)
+            ctx.Error(err) // nolint: errcheck
             return
         }
 

@@ -57,7 +57,7 @@ func (h *Handlers) CreateCDNCluster(ctx *gin.Context) {
 
     cdnCluster, err := h.service.CreateCDNCluster(ctx.Request.Context(), json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -83,7 +83,7 @@ func (h *Handlers) DestroyCDNCluster(ctx *gin.Context) {
     }
 
     if err := h.service.DestroyCDNCluster(ctx.Request.Context(), params.ID); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -105,20 +105,20 @@ func (h *Handlers) DestroyCDNCluster(ctx *gin.Context) {
 func (h *Handlers) UpdateCDNCluster(ctx *gin.Context) {
     var params types.CDNClusterParams
     if err := ctx.ShouldBindUri(&params); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     var json types.UpdateCDNClusterRequest
     if err := ctx.ShouldBindJSON(&json); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     if json.SecurityGroupDomain != "" {
         cdn, err := h.service.UpdateCDNClusterWithSecurityGroupDomain(ctx.Request.Context(), params.ID, json)
         if err != nil {
-            ctx.Error(err)
+            ctx.Error(err) // nolint: errcheck
             return
         }
 

@@ -128,7 +128,7 @@ func (h *Handlers) UpdateCDNCluster(ctx *gin.Context) {
 
     cdnCluster, err := h.service.UpdateCDNCluster(ctx.Request.Context(), params.ID, json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -155,7 +155,7 @@ func (h *Handlers) GetCDNCluster(ctx *gin.Context) {
 
     cdnCluster, err := h.service.GetCDNCluster(ctx.Request.Context(), params.ID)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -184,7 +184,7 @@ func (h *Handlers) GetCDNClusters(ctx *gin.Context) {
     h.setPaginationDefault(&query.Page, &query.PerPage)
     cdns, count, err := h.service.GetCDNClusters(ctx.Request.Context(), query)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -212,7 +212,7 @@ func (h *Handlers) AddCDNToCDNCluster(ctx *gin.Context) {
     }
 
     if err := h.service.AddCDNToCDNCluster(ctx.Request.Context(), params.ID, params.CDNID); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -239,7 +239,7 @@ func (h *Handlers) AddSchedulerClusterToCDNCluster(ctx *gin.Context) {
     }
 
     if err := h.service.AddSchedulerClusterToCDNCluster(ctx.Request.Context(), params.ID, params.SchedulerClusterID); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
@@ -46,7 +46,7 @@ func (h *Handlers) CreateConfig(ctx *gin.Context) {
 
     config, err := h.service.CreateConfig(ctx.Request.Context(), json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -72,7 +72,7 @@ func (h *Handlers) DestroyConfig(ctx *gin.Context) {
     }
 
     if err := h.service.DestroyConfig(ctx.Request.Context(), params.ID); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -94,19 +94,19 @@ func (h *Handlers) DestroyConfig(ctx *gin.Context) {
 func (h *Handlers) UpdateConfig(ctx *gin.Context) {
     var params types.ConfigParams
     if err := ctx.ShouldBindUri(&params); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     var json types.UpdateConfigRequest
     if err := ctx.ShouldBindJSON(&json); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     config, err := h.service.UpdateConfig(ctx.Request.Context(), params.ID, json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -133,7 +133,7 @@ func (h *Handlers) GetConfig(ctx *gin.Context) {
 
     config, err := h.service.GetConfig(ctx.Request.Context(), params.ID)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -162,7 +162,7 @@ func (h *Handlers) GetConfigs(ctx *gin.Context) {
     h.setPaginationDefault(&query.Page, &query.PerPage)
     configs, count, err := h.service.GetConfigs(ctx.Request.Context(), query)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
@@ -39,7 +39,7 @@ func (h *Handlers) CreateJob(ctx *gin.Context) {
 
     job, err := h.service.CreatePreheatJob(ctx.Request.Context(), json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -68,7 +68,7 @@ func (h *Handlers) DestroyJob(ctx *gin.Context) {
     }
 
     if err := h.service.DestroyJob(ctx.Request.Context(), params.ID); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -90,19 +90,19 @@ func (h *Handlers) DestroyJob(ctx *gin.Context) {
 func (h *Handlers) UpdateJob(ctx *gin.Context) {
     var params types.JobParams
     if err := ctx.ShouldBindUri(&params); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     var json types.UpdateJobRequest
     if err := ctx.ShouldBindJSON(&json); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     job, err := h.service.UpdateJob(ctx.Request.Context(), params.ID, json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -129,7 +129,7 @@ func (h *Handlers) GetJob(ctx *gin.Context) {
 
     job, err := h.service.GetJob(ctx.Request.Context(), params.ID)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -158,7 +158,7 @@ func (h *Handlers) GetJobs(ctx *gin.Context) {
     h.setPaginationDefault(&query.Page, &query.PerPage)
     jobs, count, err := h.service.GetJobs(ctx.Request.Context(), query)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
@@ -46,7 +46,7 @@ func (h *Handlers) CreateOauth(ctx *gin.Context) {
 
     oauth, err := h.service.CreateOauth(ctx.Request.Context(), json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -72,7 +72,7 @@ func (h *Handlers) DestroyOauth(ctx *gin.Context) {
     }
 
     if err := h.service.DestroyOauth(ctx.Request.Context(), params.ID); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -94,19 +94,19 @@ func (h *Handlers) DestroyOauth(ctx *gin.Context) {
 func (h *Handlers) UpdateOauth(ctx *gin.Context) {
     var params types.OauthParams
     if err := ctx.ShouldBindUri(&params); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     var json types.UpdateOauthRequest
     if err := ctx.ShouldBindJSON(&json); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     oauth, err := h.service.UpdateOauth(ctx.Request.Context(), params.ID, json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -133,7 +133,7 @@ func (h *Handlers) GetOauth(ctx *gin.Context) {
 
     oauth, err := h.service.GetOauth(ctx.Request.Context(), params.ID)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -162,7 +162,7 @@ func (h *Handlers) GetOauths(ctx *gin.Context) {
     h.setPaginationDefault(&query.Page, &query.PerPage)
     oauth, count, err := h.service.GetOauths(ctx.Request.Context(), query)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
@@ -46,7 +46,7 @@ func (h *Handlers) CreateV1Preheat(ctx *gin.Context) {
 
     preheat, err := h.service.CreateV1Preheat(ctx.Request.Context(), json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -73,7 +73,7 @@ func (h *Handlers) GetV1Preheat(ctx *gin.Context) {
 
     preheat, err := h.service.GetV1Preheat(ctx.Request.Context(), params.ID)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
@@ -44,7 +44,7 @@ func (h *Handlers) CreateRole(ctx *gin.Context) {
     }
 
     if err := h.service.CreateRole(ctx.Request.Context(), json); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -69,7 +69,7 @@ func (h *Handlers) DestroyRole(ctx *gin.Context) {
     }
 
     if ok, err := h.service.DestroyRole(ctx.Request.Context(), params.Role); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     } else if !ok {
         ctx.Status(http.StatusNotFound)

@@ -138,7 +138,7 @@ func (h *Handlers) AddPermissionForRole(ctx *gin.Context) {
     }
 
     if ok, err := h.service.AddPermissionForRole(ctx.Request.Context(), params.Role, json); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     } else if !ok {
         ctx.Status(http.StatusConflict)

@@ -173,7 +173,7 @@ func (h *Handlers) DeletePermissionForRole(ctx *gin.Context) {
     }
 
     if ok, err := h.service.DeletePermissionForRole(ctx.Request.Context(), params.Role, json); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     } else if !ok {
         ctx.Status(http.StatusNotFound)
@@ -46,7 +46,7 @@ func (h *Handlers) CreateScheduler(ctx *gin.Context) {
 
     scheduler, err := h.service.CreateScheduler(ctx.Request.Context(), json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -72,7 +72,7 @@ func (h *Handlers) DestroyScheduler(ctx *gin.Context) {
     }
 
     if err := h.service.DestroyScheduler(ctx.Request.Context(), params.ID); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -94,19 +94,19 @@ func (h *Handlers) DestroyScheduler(ctx *gin.Context) {
 func (h *Handlers) UpdateScheduler(ctx *gin.Context) {
     var params types.SchedulerParams
     if err := ctx.ShouldBindUri(&params); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     var json types.UpdateSchedulerRequest
     if err := ctx.ShouldBindJSON(&json); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     scheduler, err := h.service.UpdateScheduler(ctx.Request.Context(), params.ID, json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -133,7 +133,7 @@ func (h *Handlers) GetScheduler(ctx *gin.Context) {
 
     scheduler, err := h.service.GetScheduler(ctx.Request.Context(), params.ID)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -162,7 +162,7 @@ func (h *Handlers) GetSchedulers(ctx *gin.Context) {
     h.setPaginationDefault(&query.Page, &query.PerPage)
     schedulers, count, err := h.service.GetSchedulers(ctx.Request.Context(), query)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
@@ -47,7 +47,7 @@ func (h *Handlers) CreateSchedulerCluster(ctx *gin.Context) {
     if json.SecurityGroupDomain != "" {
         scheduler, err := h.service.CreateSchedulerClusterWithSecurityGroupDomain(ctx.Request.Context(), json)
         if err != nil {
-            ctx.Error(err)
+            ctx.Error(err) // nolint: errcheck
             return
         }
 

@@ -57,7 +57,7 @@ func (h *Handlers) CreateSchedulerCluster(ctx *gin.Context) {
 
     schedulerCluster, err := h.service.CreateSchedulerCluster(ctx.Request.Context(), json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -83,7 +83,7 @@ func (h *Handlers) DestroySchedulerCluster(ctx *gin.Context) {
     }
 
     if err := h.service.DestroySchedulerCluster(ctx.Request.Context(), params.ID); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -105,20 +105,20 @@ func (h *Handlers) DestroySchedulerCluster(ctx *gin.Context) {
 func (h *Handlers) UpdateSchedulerCluster(ctx *gin.Context) {
     var params types.SchedulerClusterParams
     if err := ctx.ShouldBindUri(&params); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     var json types.UpdateSchedulerClusterRequest
     if err := ctx.ShouldBindJSON(&json); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     if json.SecurityGroupDomain != "" {
         scheduler, err := h.service.UpdateSchedulerClusterWithSecurityGroupDomain(ctx.Request.Context(), params.ID, json)
         if err != nil {
-            ctx.Error(err)
+            ctx.Error(err) // nolint: errcheck
             return
         }
 

@@ -128,7 +128,7 @@ func (h *Handlers) UpdateSchedulerCluster(ctx *gin.Context) {
 
     schedulerCluster, err := h.service.UpdateSchedulerCluster(ctx.Request.Context(), params.ID, json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -155,7 +155,7 @@ func (h *Handlers) GetSchedulerCluster(ctx *gin.Context) {
 
     schedulerCluster, err := h.service.GetSchedulerCluster(ctx.Request.Context(), params.ID)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -184,7 +184,7 @@ func (h *Handlers) GetSchedulerClusters(ctx *gin.Context) {
     h.setPaginationDefault(&query.Page, &query.PerPage)
     schedulerClusters, count, err := h.service.GetSchedulerClusters(ctx.Request.Context(), query)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -213,7 +213,7 @@ func (h *Handlers) AddSchedulerToSchedulerCluster(ctx *gin.Context) {
 
     err := h.service.AddSchedulerToSchedulerCluster(ctx.Request.Context(), params.ID, params.SchedulerID)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
@@ -46,7 +46,7 @@ func (h *Handlers) CreateSecurityGroup(ctx *gin.Context) {
 
     securityGroup, err := h.service.CreateSecurityGroup(ctx.Request.Context(), json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -72,7 +72,7 @@ func (h *Handlers) DestroySecurityGroup(ctx *gin.Context) {
     }
 
     if err := h.service.DestroySecurityGroup(ctx.Request.Context(), params.ID); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -94,19 +94,19 @@ func (h *Handlers) DestroySecurityGroup(ctx *gin.Context) {
 func (h *Handlers) UpdateSecurityGroup(ctx *gin.Context) {
     var params types.SecurityGroupParams
     if err := ctx.ShouldBindUri(&params); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     var json types.UpdateSecurityGroupRequest
    if err := ctx.ShouldBindJSON(&json); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
     securityGroup, err := h.service.UpdateSecurityGroup(ctx.Request.Context(), params.ID, json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -133,7 +133,7 @@ func (h *Handlers) GetSecurityGroup(ctx *gin.Context) {
 
     securityGroup, err := h.service.GetSecurityGroup(ctx.Request.Context(), params.ID)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -162,7 +162,7 @@ func (h *Handlers) GetSecurityGroups(ctx *gin.Context) {
     h.setPaginationDefault(&query.Page, &query.PerPage)
     securityGroups, count, err := h.service.GetSecurityGroups(ctx.Request.Context(), query)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -191,7 +191,7 @@ func (h *Handlers) AddSchedulerClusterToSecurityGroup(ctx *gin.Context) {
 
     err := h.service.AddSchedulerClusterToSecurityGroup(ctx.Request.Context(), params.ID, params.SchedulerClusterID)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -219,7 +219,7 @@ func (h *Handlers) AddCDNClusterToSecurityGroup(ctx *gin.Context) {
 
     err := h.service.AddCDNClusterToSecurityGroup(ctx.Request.Context(), params.ID, params.CDNClusterID)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 
@@ -47,7 +47,7 @@ func (h *Handlers) GetUser(ctx *gin.Context) {
 
     user, err := h.service.GetUser(ctx.Request.Context(), params.ID)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -76,7 +76,7 @@ func (h *Handlers) GetUsers(ctx *gin.Context) {
     h.setPaginationDefault(&query.Page, &query.PerPage)
     users, count, err := h.service.GetUsers(ctx.Request.Context(), query)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -103,7 +103,7 @@ func (h *Handlers) SignUp(ctx *gin.Context) {
 
     user, err := h.service.SignUp(ctx.Request.Context(), json)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -134,7 +134,7 @@ func (h *Handlers) ResetPassword(ctx *gin.Context) {
     }
 
     if err := h.service.ResetPassword(ctx.Request.Context(), params.ID, json); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -161,7 +161,7 @@ func (h *Handlers) OauthSignin(ctx *gin.Context) {
 
     authURL, err := h.service.OauthSignin(ctx.Request.Context(), params.Name)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -194,7 +194,7 @@ func (h *Handlers) OauthSigninCallback(j *jwt.GinJWTMiddleware) func(*gin.Contex
 
     user, err := h.service.OauthSigninCallback(ctx.Request.Context(), params.Name, query.Code)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -221,7 +221,7 @@ func (h *Handlers) GetRolesForUser(ctx *gin.Context) {
 
     roles, err := h.service.GetRolesForUser(ctx.Request.Context(), params.ID)
     if err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     }
 

@@ -247,7 +247,7 @@ func (h *Handlers) AddRoleToUser(ctx *gin.Context) {
     }
 
     if ok, err := h.service.AddRoleForUser(ctx.Request.Context(), params); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     } else if !ok {
         ctx.Status(http.StatusConflict)

@@ -276,7 +276,7 @@ func (h *Handlers) DeleteRoleForUser(ctx *gin.Context) {
     }
 
     if ok, err := h.service.DeleteRoleForUser(ctx.Request.Context(), params); err != nil {
-        ctx.Error(err)
+        ctx.Error(err) // nolint: errcheck
         return
     } else if !ok {
         ctx.Status(http.StatusNotFound)
@@ -180,7 +180,11 @@ func (p *preheat) getLayers(ctx context.Context, url string, filter string, head
 
     if resp.StatusCode/100 != 2 {
         if resp.StatusCode == http.StatusUnauthorized {
-            token := getAuthToken(ctx, resp.Header)
+            token, err := getAuthToken(ctx, resp.Header)
+            if err != nil {
+                return nil, err
+            }
+
             bearer := "Bearer " + token
             header.Add("Authorization", bearer)
 

@@ -253,18 +257,18 @@ func (p *preheat) parseLayers(resp *http.Response, url, filter string, header ht
     return layers, nil
 }
 
-func getAuthToken(ctx context.Context, header http.Header) (token string) {
+func getAuthToken(ctx context.Context, header http.Header) (string, error) {
     ctx, span := tracer.Start(ctx, config.SpanAuthWithRegistry, trace.WithSpanKind(trace.SpanKindProducer))
     defer span.End()
 
     authURL := authURL(header.Values("WWW-Authenticate"))
     if len(authURL) == 0 {
-        return
+        return "", errors.New("authURL is empty")
     }
 
     req, err := http.NewRequestWithContext(ctx, "GET", authURL, nil)
     if err != nil {
-        return
+        return "", err
     }
 
     client := &http.Client{

@@ -276,17 +280,23 @@ func getAuthToken(ctx context.Context, header http.Header) (token string) {
 
     resp, err := client.Do(req)
     if err != nil {
-        return
+        return "", err
     }
     defer resp.Body.Close()
 
     body, _ := ioutil.ReadAll(resp.Body)
     var result map[string]interface{}
-    json.Unmarshal(body, &result)
-    if result["token"] != nil {
-        token = fmt.Sprintf("%v", result["token"])
+    if err := json.Unmarshal(body, &result); err != nil {
+        return "", err
     }
-    return
+
+    if result["token"] == nil {
+        return "", errors.New("token is empty")
+    }
+
+    token := fmt.Sprintf("%v", result["token"])
+    return token, nil
 }
 
 func authURL(wwwAuth []string) string {
@@ -100,7 +100,7 @@ func (s *rest) CreatePreheatJob(ctx context.Context, json types.CreatePreheatJob
 func (s *rest) pollingJob(ctx context.Context, id uint, taskID string) {
     var job model.Job
 
-    retry.Run(ctx, func() (interface{}, bool, error) {
+    if _, _, err := retry.Run(ctx, func() (interface{}, bool, error) {
         groupJob, err := s.job.GetGroupJobState(taskID)
         if err != nil {
             logger.Errorf("polling job %d and task %s failed: %v", id, taskID, err)

@@ -124,9 +124,11 @@ func (s *rest) pollingJob(ctx context.Context, id uint, taskID string) {
         default:
             return nil, false, fmt.Errorf("polling job %d and task %s status is %s", id, taskID, job.Status)
         }
-    }, 5, 10, 120, nil)
+    }, 5, 10, 120, nil); err != nil {
+        logger.Errorf("polling job %d and task %s failed %s", id, taskID, err)
+    }
 
-    // Polling timeout
+    // Polling timeout and failed
     if job.Status != machineryv1tasks.StateSuccess && job.Status != machineryv1tasks.StateFailure {
         job := model.Job{}
         if err := s.db.WithContext(ctx).First(&job, id).Updates(model.Job{
@@ -133,7 +133,7 @@ func (s *rest) OauthSignin(ctx context.Context, name string) (string, error) {
         return "", err
     }
 
-    return o.AuthCodeURL(), nil
+    return o.AuthCodeURL()
 }
 
 func (s *rest) OauthSigninCallback(ctx context.Context, name, code string) (*model.User, error) {
@@ -324,26 +324,39 @@ func testFillAndSerialize(t *testing.T, tc Cache) {
 
 func TestFileSerialization(t *testing.T) {
     tc := New(DefaultExpiration, 0)
-    tc.Add("a", "a", DefaultExpiration)
-    tc.Add("b", "b", DefaultExpiration)
+    if err := tc.Add("a", "a", DefaultExpiration); err != nil {
+        t.Error(err)
+    }
+    if err := tc.Add("b", "b", DefaultExpiration); err != nil {
+        t.Error(err)
+    }
+
     f, err := ioutil.TempFile("", "go-cache-cache.dat")
     if err != nil {
         t.Fatal("Couldn't create cache file:", err)
     }
     fname := f.Name()
     f.Close()
-    tc.SaveFile(fname)
+
+    if err := tc.SaveFile(fname); err != nil {
+        t.Fatal(err)
+    }
 
     oc := New(DefaultExpiration, 0)
-    oc.Add("a", "aa", 0) // this should not be overwritten
-    err = oc.LoadFile(fname)
-    if err != nil {
+    // this should not be overwritten
+    if err := oc.Add("a", "aa", 0); err != nil {
+        t.Error(err)
+    }
+
+    if err := oc.LoadFile(fname); err != nil {
+        t.Fatal(err)
+    }
+
     a, found := oc.Get("a")
     if !found {
         t.Error("a was not found")
     }
 
     astr := a.(string)
     if astr != "aa" {
         if astr == "a" {

@@ -352,10 +365,12 @@ func TestFileSerialization(t *testing.T) {
             t.Error("a is not aa")
         }
     }
+
     b, found := oc.Get("b")
     if !found {
         t.Error("b was not found")
     }
+
     if b.(string) != "b" {
         t.Error("b is not b")
     }
@@ -126,7 +126,9 @@ func TestGCRun(t *testing.T) {
                     ml.EXPECT().Infof(gomock.Any(), gomock.Eq("foo")).Do(func(template interface{}, args ...interface{}) { wg.Done() }).Times(1),
                 )
 
-                gc.Run(id)
+                if err := gc.Run(id); err != nil {
+                    t.Error(err)
+                }
             },
         },
         {

@@ -149,7 +151,9 @@ func TestGCRun(t *testing.T) {
                     ml.EXPECT().Infof(gomock.Any(), gomock.Eq("foo")).Do(func(template interface{}, args ...interface{}) { wg.Done() }).Times(1),
                 )
 
-                gc.Run(id)
+                if err := gc.Run(id); err != nil {
+                    t.Error(err)
+                }
             },
         },
         {
@@ -138,7 +138,10 @@ retry:
             SourceType: keepalive.SourceType,
             ClusterId:  keepalive.ClusterId,
         }); err != nil {
-            stream.CloseAndRecv()
+            if _, err := stream.CloseAndRecv(); err != nil {
+                logger.Errorf("hostname %s cluster id %s close and recv stream failed", keepalive.HostName, keepalive.ClusterId, err)
+            }
+
             cancel()
             goto retry
         }
@@ -155,8 +155,7 @@ func (sc *schedulerClient) ReportPieceResult(ctx context.Context, taskID string,
     logger.With("peerId", ptr.PeerId, "errMsg", err).Infof("start to report piece result for taskID: %s", taskID)
 
     // trigger scheduling
-    pps.Send(scheduler.NewZeroPieceResult(taskID, ptr.PeerId))
-    return pps, err
+    return pps, pps.Send(scheduler.NewZeroPieceResult(taskID, ptr.PeerId))
 }
 
 func (sc *schedulerClient) ReportPeerResult(ctx context.Context, pr *scheduler.PeerResult, opts ...grpc.CallOption) error {
@@ -73,21 +73,24 @@ func newPeerPacketStream(ctx context.Context, sc *schedulerClient, hashKey strin
     return pps, nil
 }
 
-func (pps *peerPacketStream) Send(pr *scheduler.PieceResult) (err error) {
+func (pps *peerPacketStream) Send(pr *scheduler.PieceResult) error {
     pps.lastPieceResult = pr
     pps.sc.UpdateAccessNodeMapByHashKey(pps.hashKey)
-    err = pps.stream.Send(pr)
+
+    if err := pps.stream.Send(pr); err != nil {
+        if err := pps.closeSend(); err != nil {
+            return err
+        }
+        return err
+    }
 
     if pr.PieceInfo.PieceNum == common.EndOfPiece {
-        pps.closeSend()
-        return
+        if err := pps.closeSend(); err != nil {
+            return err
+        }
     }
 
-    if err != nil {
-        pps.closeSend()
-    }
-
-    return
+    return nil
 }
 
 func (pps *peerPacketStream) closeSend() error {
@@ -58,7 +58,10 @@ func TestHashFile(t *testing.T) {
     f, err := fileutils.OpenFile(path, syscall.O_CREAT|syscall.O_TRUNC|syscall.O_RDWR, 0644)
     assert.Nil(t, err)
 
-    f.Write([]byte("hello"))
+    if _, err := f.Write([]byte("hello")); err != nil {
+        t.Error(err)
+    }
+
     f.Close()
 
     assert.Equal(t, expected, HashFile(path, Md5Hash))
@@ -59,14 +59,16 @@ func MoveFile(src, dst string) error {
         return errors.Errorf("move %s to %s: src is not a regular file", src, dst)
     }
 
-    var err error
-    if err = os.Rename(src, dst); err != nil {
-        if _, err = CopyFile(src, dst); err == nil {
-            fileutils.DeleteFile(src)
+    if err := os.Rename(src, dst); err != nil {
+        if _, err := CopyFile(src, dst); err != nil {
+            return errors.Wrapf(err, "failed to copy %s to %s", src, dst)
+        }
+        if err := fileutils.DeleteFile(src); err != nil {
+            return errors.Wrapf(err, "failed to delete %s", src)
         }
     }
 
-    return errors.Wrapf(err, "failed to move %s to %s", src, dst)
+    return nil
 }
 
 // CleanFile cleans content of the file.
@@ -132,7 +132,9 @@ func (s *FileUtilsTestSuite) TestIsEmptyDir() {
     _, err = fileutils.IsEmptyDir(s.testDir)
     s.Require().NotNil(err)
 
-    fileutils.MkdirAll(s.testDir)
+    err = fileutils.MkdirAll(s.testDir)
+    s.Require().Nil(err)
+
     b, err := fileutils.IsEmptyDir(s.testDir)
     s.Require().Nil(err)
     s.Require().True(b)

@@ -144,7 +146,9 @@ func (s *FileUtilsTestSuite) TestCopyFile() {
 
     f, err := fileutils.OpenFile(s.testFile, syscall.O_WRONLY|syscall.O_CREAT, 0644)
     s.Require().Nil(err)
-    f.WriteString("hello,world")
+
+    _, err = f.WriteString("hello,world")
+    s.Require().Nil(err)
     f.Close()
 
     _, err = filerw.CopyFile(s.testFile, s.testFile+".new")

@@ -162,12 +166,14 @@ func (s *FileUtilsTestSuite) TestTryLock() {
     f2, err := fileutils.NewFileLock(s.testFile)
     s.Require().Nil(err)
 
-    f1.Lock()
+    err = f1.Lock()
+    s.Require().Nil(err)
 
     err = f2.TryLock()
     s.Require().NotNil(err)
 
-    f1.Unlock()
+    err = f1.Unlock()
+    s.Require().Nil(err)
 
     err = f2.TryLock()
     s.Require().Nil(err)
@@ -81,8 +81,13 @@ func TestSchedulerConfig_Load(t *testing.T) {
     schedulerConfigYAML := &Config{}
     contentYAML, _ := ioutil.ReadFile("./testdata/scheduler.yaml")
     var dataYAML map[string]interface{}
-    yaml.Unmarshal(contentYAML, &dataYAML)
-    mapstructure.Decode(dataYAML, &schedulerConfigYAML)
+    if err := yaml.Unmarshal(contentYAML, &dataYAML); err != nil {
+        t.Fatal(err)
+    }
+
+    if err := mapstructure.Decode(dataYAML, &schedulerConfigYAML); err != nil {
+        t.Fatal(err)
+    }
     assert.True(reflect.DeepEqual(config, schedulerConfigYAML))
 }
 
@@ -301,7 +301,9 @@ func (d *dynconfig) watch() {
     for {
         select {
         case <-tick.C:
-            d.Notify()
+            if err := d.Notify(); err != nil {
+                logger.Error("dynconfig notify failed", err)
+            }
         case <-d.done:
             return
         }
@@ -131,7 +131,9 @@ loop:
 func debug() {
     debugAddr := fmt.Sprintf("%s:%d", iputils.HostIP, 18066)
     viewer.SetConfiguration(viewer.WithAddr(debugAddr))
-    statsview.New().Start()
+    if err := statsview.New().Start(); err != nil {
+        log.Println("stat view start failed", err)
+    }
 }
 
 func forceExit(signals chan os.Signal) {

@@ -278,8 +280,10 @@ func saveToOutput(results []*Result) {
         if v.PeerID == "" {
             v.PeerID = "unknown"
         }
-        out.WriteString(fmt.Sprintf("%s %s %d %v %d %d %s\n",
+        if _, err := out.WriteString(fmt.Sprintf("%s %s %d %v %d %d %s\n",
             v.TaskID, v.PeerID, v.StatusCode, v.Cost,
-            v.StartTime.UnixNano()/100, v.EndTime.UnixNano()/100, v.Message))
+            v.StartTime.UnixNano()/100, v.EndTime.UnixNano()/100, v.Message)); err != nil {
+            log.Panicln("write string failed", err)
+        }
     }
 }