test: dump goroutine in e2e (#980)
Signed-off-by: Jim Ma <majinjing3@gmail.com>
parent c38bba2612
commit 37b332a366
@@ -702,7 +702,7 @@ loop:
 			}
 
 			// update content length
-			if piecePacket.ContentLength > 0 {
+			if piecePacket.ContentLength > -1 {
 				pt.SetContentLength(piecePacket.ContentLength)
 				_ = pt.UpdateStorage(false)
 				pt.Debugf("update content length: %d", pt.GetContentLength())
@@ -731,7 +731,7 @@ loop:
 func (pt *peerTaskConductor) init(piecePacket *base.PiecePacket, pieceBufferSize uint32) (chan *DownloadPieceRequest, bool) {
 	pt.contentLength.Store(piecePacket.ContentLength)
-	if piecePacket.ContentLength > 0 {
+	if piecePacket.ContentLength > -1 {
 		pt.span.SetAttributes(config.AttributeTaskContentLength.Int64(piecePacket.ContentLength))
 	}
 	if err := pt.InitStorage(); err != nil {
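Both hunks above relax the content-length check from "> 0" to "> -1", which suggests the convention that -1 marks an unknown length while 0 is a valid, empty task whose length should still be recorded. A minimal sketch of that assumed convention; the helper name is illustrative and not from the codebase:

// contentLengthKnown is a hypothetical helper illustrating the assumed
// convention: -1 means "length not reported yet", while 0 is a valid
// length for an empty task and should still be stored.
func contentLengthKnown(length int64) bool {
	return length > -1
}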
@@ -169,6 +169,8 @@ func (ptm *peerTaskManager) getOrCreatePeerTaskConductor(ctx context.Context, ta
 	// double check
 	if p, ok := ptm.findPeerTaskConductor(taskID); ok {
 		ptm.conductorLock.Unlock()
+		logger.Debugf("same peer task found: %s/%s, cancel created peer task %s/%s",
+			p.taskID, p.peerID, ptc.taskID, ptc.peerID)
 		// cancel duplicate peer task
 		ptc.cancel(base.Code_ClientContextCanceled, reasonContextCanceled)
 		return p, nil
@@ -231,7 +233,7 @@ func (ptm *peerTaskManager) Stop(ctx context.Context) error {
 }
 
 func (ptm *peerTaskManager) PeerTaskDone(taskID string) {
-	logger.Debugf("delete task %s in running tasks", taskID)
+	logger.Debugf("delete done task %s in running tasks", taskID)
 	ptm.runningPeerTasks.Delete(taskID)
 }
@@ -343,7 +343,12 @@ func (proxy *Proxy) handleHTTP(span trace.Span, w http.ResponseWriter, req *http
 	w.WriteHeader(resp.StatusCode)
 	span.SetAttributes(semconv.HTTPStatusCodeKey.Int(resp.StatusCode))
 	if n, err := io.Copy(w, resp.Body); err != nil && err != io.EOF {
-		logger.Errorf("failed to write http body: %v", err)
+		if peerID := resp.Header.Get(config.HeaderDragonflyPeer); peerID != "" {
+			logger.Errorf("failed to write http body: %v, peer: %s, task: %s",
+				err, peerID, resp.Header.Get(config.HeaderDragonflyTask))
+		} else {
+			logger.Errorf("failed to write http body: %v", err)
+		}
 		span.RecordError(err)
 	} else {
 		span.SetAttributes(semconv.HTTPResponseContentLengthKey.Int64(n))
@@ -40,6 +40,7 @@ type server struct {
 	namespace  string
 	logDirName string
 	replicas   int
+	pprofPort  int
 }
 
 var servers = map[string]server{
@@ -66,11 +67,13 @@ var servers = map[string]server{
 		namespace:  dragonflyNamespace,
 		logDirName: "daemon",
 		replicas:   1,
+		pprofPort:  9999,
 	},
 	proxyServerName: {
 		name:       proxyServerName,
 		namespace:  dragonflyE2ENamespace,
 		logDirName: "daemon",
 		replicas:   3,
+		pprofPort:  9999,
 	},
 }
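The new pprofPort field and the pprofPort: 9999 entries above assume the daemon side serves Go's standard net/http/pprof handlers on that port. A minimal sketch of that standard pattern, not the daemon's actual wiring; the port value 9999 is taken from this change, everything else is illustrative:

package main

import (
	"fmt"
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* handlers on http.DefaultServeMux
)

func main() {
	pprofPort := 9999 // mirrors the pprofPort / pprof-port values set in this commit

	// Serving the default mux exposes /debug/pprof/goroutine?debug=1 and
	// ?debug=2, which the e2e helper later in this diff fetches with wget
	// from inside the pod.
	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", pprofPort), nil))
}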
@@ -59,6 +59,7 @@ func singleDfgetTest(name, ns, label, podNamePrefix, container string) {
 	}
 
 	for url, path := range urls {
+		fmt.Printf("--------------------------------------------------------------------------------\n\n")
 		fmt.Println("download url: " + url)
 		// get original file digest
 		out, err = e2eutil.DockerCommand("sha256sum", path).CombinedOutput()
@@ -75,6 +75,13 @@ var _ = AfterSuite(func() {
 				fmt.Printf("upload pod %s artifact prev stdout file error: %v\n", podName, err)
 			}
 
+			if server.pprofPort > 0 {
+				if out, err := e2eutil.UploadArtifactPProf(server.namespace, podName,
+					fmt.Sprintf("%s-%d", server.name, i), server.name, server.pprofPort); err != nil {
+					fmt.Printf("upload pod %s artifact pprof error: %v, output: %s\n", podName, err, out)
+				}
+			}
+
 		}
 	}
 })
@@ -66,3 +66,17 @@ func UploadArtifactPrevStdout(namespace, podName, logDirName, logPrefix string)
 	return nil
 }
+
+func UploadArtifactPProf(namespace, podName, logDirName, logPrefix string, pprofPort int) (string, error) {
+	logDirname := fmt.Sprintf("/tmp/artifact/%s/", logDirName)
+	out, err := KubeCtlCommand("-n", namespace, "exec", podName, "--", "sh", "-c", fmt.Sprintf(`
+set -x
+port=%d
+dir=%s
+prefix=%s
+ip=$(hostname -i)
+wget $ip:$port/debug/pprof/"goroutine?debug=1" -O $dir/$prefix-pprof-goroutine-1.log
+wget $ip:$port/debug/pprof/"goroutine?debug=2" -O $dir/$prefix-pprof-goroutine-2.log
+`, pprofPort, logDirname, logPrefix)).CombinedOutput()
+	return string(out), err
+}
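For reference, debug=1 returns one entry per unique call stack with a goroutine count, while debug=2 dumps every goroutine with its full stack, state, and wait reason; the same output can be produced in-process with runtime/pprof. A small illustrative example, not part of this change:

package main

import (
	"os"
	"runtime/pprof"
)

func main() {
	// debug=1: aggregated view, one stack trace per unique call stack plus a count.
	_ = pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)

	// debug=2: full dump of every goroutine, including state and wait reasons,
	// the same format the e2e helper saves as *-pprof-goroutine-2.log.
	_ = pprof.Lookup("goroutine").WriteTo(os.Stdout, 2)
}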
@@ -33,6 +33,8 @@ dfdaemon:
     hostPath:
       path: /tmp/artifact
   config:
+    verbose: true
+    pprofPort: 9999
     scheduler:
       disableAutoBackSource: true
     proxy:
@@ -7,9 +7,9 @@ data:
   dfget.yaml: |-
     aliveTime: 0s
     gcInterval: 1m0s
-    keepStorage: false
+    keepStorage: true
     verbose: true
-    pprof-port: 0
+    pprof-port: 9999
     scheduler:
       manager:
         enable: true
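With pprof-port set to 9999 in the dfget daemon config above, the same goroutine dump can also be pulled from outside the pod for local debugging, assuming the port is made reachable (for example via kubectl port-forward, which is not part of this change). A rough sketch:

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Assumes something like `kubectl port-forward <daemon-pod> 9999:9999`
	// is already running; the e2e test itself runs wget inside the pod instead.
	resp, err := http.Get("http://127.0.0.1:9999/debug/pprof/goroutine?debug=2")
	if err != nil {
		fmt.Fprintf(os.Stderr, "fetch goroutine dump: %v\n", err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	out, err := os.Create("goroutine-debug-2.log")
	if err != nil {
		fmt.Fprintf(os.Stderr, "create dump file: %v\n", err)
		os.Exit(1)
	}
	defer out.Close()

	if _, err := io.Copy(out, resp.Body); err != nil {
		fmt.Fprintf(os.Stderr, "write dump file: %v\n", err)
		os.Exit(1)
	}
}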