diff --git a/pkg/server/filters/timeout.go b/pkg/server/filters/timeout.go
index 432851a63..e94387322 100644
--- a/pkg/server/filters/timeout.go
+++ b/pkg/server/filters/timeout.go
@@ -137,9 +137,7 @@ func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 				}
 
 				metrics.RecordRequestPostTimeout(metrics.PostTimeoutSourceTimeoutHandler, status)
-				err := fmt.Errorf("post-timeout activity - time-elapsed: %s, %v %q result: %v",
-					time.Since(timedOutAt), r.Method, r.URL.Path, res)
-				utilruntime.HandleError(err)
+				utilruntime.HandleErrorWithContext(r.Context(), nil, "Post-timeout activity", "timeElapsed", time.Since(timedOutAt), "method", r.Method, "path", r.URL.Path, "result", res)
 			}()
 		}()
 		httplog.SetStacktracePredicate(r.Context(), func(status int) bool {
diff --git a/pkg/server/filters/timeout_test.go b/pkg/server/filters/timeout_test.go
index 78b4ba7a8..ffb15926b 100644
--- a/pkg/server/filters/timeout_test.go
+++ b/pkg/server/filters/timeout_test.go
@@ -408,7 +408,9 @@ func TestErrConnKilled(t *testing.T) {
 	if isStackTraceLoggedByRuntime(capturedOutput) {
 		t.Errorf("unexpected stack trace in log, actual = %v", capturedOutput)
 	}
-	if !strings.Contains(capturedOutput, `timeout or abort while handling: method=GET URI="/" audit-ID=""`) {
+	// For the sake of simplicity and clarity this matches the full log line.
+	// This is not part of the Kubernetes API and could change.
+	if !strings.Contains(capturedOutput, `"Timeout or abort while handling" logger="UnhandledError" method="GET" URI="/" auditID=""`) {
 		t.Errorf("unexpected output captured actual = %v", capturedOutput)
 	}
 }
@@ -501,7 +503,9 @@ func TestErrConnKilledHTTP2(t *testing.T) {
 	if isStackTraceLoggedByRuntime(capturedOutput) {
 		t.Errorf("unexpected stack trace in log, actual = %v", capturedOutput)
 	}
-	if !strings.Contains(capturedOutput, `timeout or abort while handling: method=GET URI="/" audit-ID=""`) {
+	// For the sake of simplicity and clarity this matches the full log line.
+	// This is not part of the Kubernetes API and could change.
+	if !strings.Contains(capturedOutput, `"Timeout or abort while handling" logger="UnhandledError" method="GET" URI="/" auditID=""`) {
 		t.Errorf("unexpected output captured actual = %v", capturedOutput)
 	}
 
diff --git a/pkg/server/filters/wrap.go b/pkg/server/filters/wrap.go
index a981eae78..73ce27026 100644
--- a/pkg/server/filters/wrap.go
+++ b/pkg/server/filters/wrap.go
@@ -17,7 +17,6 @@ limitations under the License.
 package filters
 
 import (
-	"fmt"
 	"net/http"
 
 	"k8s.io/apimachinery/pkg/util/runtime"
@@ -51,7 +50,7 @@ func WithPanicRecovery(handler http.Handler, resolver request.RequestInfoResolve
 			// This call can have different handlers, but the default chain rate limits. Call it after the metrics are updated
 			// in case the rate limit delays it. If you outrun the rate for this one timed out requests, something has gone
 			// seriously wrong with your server, but generally having a logging signal for timeouts is useful.
-			runtime.HandleError(fmt.Errorf("timeout or abort while handling: method=%v URI=%q audit-ID=%q", req.Method, req.RequestURI, audit.GetAuditIDTruncated(req.Context())))
+			runtime.HandleErrorWithContext(req.Context(), nil, "Timeout or abort while handling", "method", req.Method, "URI", req.RequestURI, "auditID", audit.GetAuditIDTruncated(req.Context()))
 			return
 		}
 		http.Error(w, "This request caused apiserver to panic. Look in the logs for details.", http.StatusInternalServerError)
diff --git a/pkg/util/proxy/websocket.go b/pkg/util/proxy/websocket.go
index 3b9746b3b..798ce1767 100644
--- a/pkg/util/proxy/websocket.go
+++ b/pkg/util/proxy/websocket.go
@@ -79,7 +79,7 @@ func webSocketServerStreams(req *http.Request, w http.ResponseWriter, opts Optio
 		if p := recover(); p != nil {
 			// Standard panic logging.
 			for _, fn := range runtime.PanicHandlers {
-				fn(p)
+				fn(req.Context(), p)
 			}
 		}
 	}()
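
For reference, a minimal standalone sketch of the contextual-logging migration these hunks apply, assuming k8s.io/apimachinery v0.31+ (where `HandleErrorWithContext` exists and `PanicHandlers` take a context). The handler and server below are illustrative only and not part of this patch:

```go
package main

import (
	"fmt"
	"net/http"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

// handle sketches the old and new logging styles side by side.
func handle(w http.ResponseWriter, req *http.Request) {
	// PanicHandlers now receive a context, so request-scoped values (such as
	// the audit ID) can be attached to the panic log entry.
	defer func() {
		if p := recover(); p != nil {
			for _, fn := range utilruntime.PanicHandlers {
				fn(req.Context(), p)
			}
		}
	}()

	// Old style: every detail flattened into one formatted error string.
	utilruntime.HandleError(fmt.Errorf("timeout or abort while handling: method=%v URI=%q", req.Method, req.RequestURI))

	// New style: pass the request context plus structured key/value pairs.
	// The error argument may be nil when the message alone carries the signal.
	utilruntime.HandleErrorWithContext(req.Context(), nil,
		"Timeout or abort while handling", "method", req.Method, "URI", req.RequestURI)

	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/", handle)
	_ = http.ListenAndServe("127.0.0.1:8080", nil) // illustrative only
}
```

The structured form is what produces log lines such as `"Timeout or abort while handling" logger="UnhandledError" method="GET" URI="/" auditID=""`, which is why the test expectations above change; as the added test comments note, that exact rendering is not part of the Kubernetes API and could change.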