Fix double counting issue for request metrics on timeout.

Currently we record request metrics during the normal request flow, and
we also manually invoke `Record` in the timeout handler to record
timeouts. As a result, we effectively double count whenever a request
times out. This PR renames the `Record` function to `RecordRequestError`
to more accurately reflect the intended side effect of the call, and has
it increment a dedicated `apiserver_request_error_total` counter instead
of re-recording the standard request metrics.
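
To make the failure mode concrete, here is a minimal, self-contained Go sketch of the old behaviour (illustrative names only, not apiserver code): the instrumentation middleware records a request when the inner handler returns, and the timeout wrapper records the same request again when the deadline fires, so one timed-out request is counted twice.

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
    "sync/atomic"
    "time"
)

var requestTotal int64 // stand-in for the real apiserver request metrics

// instrument plays the role of the normal metrics path: one count per request.
func instrument(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        next.ServeHTTP(w, r)
        atomic.AddInt64(&requestTotal, 1) // count #1: the normal request flow
    })
}

// withTimeout plays the role of the timeout filter: before this change it
// invoked the same Record call, producing a second count for the same request.
func withTimeout(next http.Handler, d time.Duration) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        done := make(chan struct{})
        go func() {
            defer close(done)
            next.ServeHTTP(httptest.NewRecorder(), r) // sketch: discard any late response
        }()
        select {
        case <-done:
        case <-time.After(d):
            atomic.AddInt64(&requestTotal, 1) // count #2: the timeout path records again
            w.WriteHeader(http.StatusGatewayTimeout)
        }
    })
}

func main() {
    slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        time.Sleep(100 * time.Millisecond) // always exceeds the 10ms budget
    })
    h := instrument(withTimeout(slow, 10*time.Millisecond))
    h.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "/slow", nil))
    fmt.Println("counts for one request:", atomic.LoadInt64(&requestTotal)) // prints 2
}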

Change-Id: Ie37fd0c1e501bd525640a434433d364a5fd6dde2

Kubernetes-commit: 4c6e7247878477a1f2efc26df7f141258010374f
Author:    Han Kang (2019-10-02 15:04:29 -07:00)
Committer: Kubernetes Publisher
Parent:    550b75f0da
Commit:    4a680138c0
3 changed files with 18 additions and 9 deletions


@@ -177,7 +177,7 @@ var (
         },
         []string{"group", "version", "kind"},
     )
-    // Because of volatality of the base metric this is pre-aggregated one. Instead of reporing current usage all the time
+    // Because of volatility of the base metric this is pre-aggregated one. Instead of reporting current usage all the time
     // it reports maximal usage during the last second.
     currentInflightRequests = compbasemetrics.NewGaugeVec(
         &compbasemetrics.GaugeOpts{
@@ -187,6 +187,15 @@ var (
         },
         []string{"requestKind"},
     )
+    requestErrorTotal = compbasemetrics.NewCounterVec(
+        &compbasemetrics.CounterOpts{
+            Name:           "apiserver_request_error_total",
+            Help:           "Number of requests which have resulted in an apiserver response error",
+            StabilityLevel: compbasemetrics.ALPHA,
+        },
+        []string{"verb", "group", "version", "resource", "subresource", "scope", "component", "code"},
+    )
     kubectlExeRegexp = regexp.MustCompile(`^.*((?i:kubectl\.exe))`)

     metrics = []resettableCollector{
@@ -203,6 +212,7 @@ var (
         WatchEvents,
         WatchEventsSizes,
         currentInflightRequests,
+        requestErrorTotal,
     }
 )
@@ -236,10 +246,9 @@ func UpdateInflightRequestMetrics(nonmutating, mutating int) {
     currentInflightRequests.WithLabelValues(MutatingKind).Set(float64(mutating))
 }

-// Record records a single request to the standard metrics endpoints. For use by handlers that perform their own
-// processing. All API paths should use InstrumentRouteFunc implicitly. Use this instead of MonitorRequest if
-// you already have a RequestInfo object.
-func Record(req *http.Request, requestInfo *request.RequestInfo, component, contentType string, code int, responseSizeInBytes int, elapsed time.Duration) {
+// RecordRequestError records the occurrence of a request error during apiserver handling (e.g. timeouts,
+// maxinflight throttling, proxyHandler errors).
+func RecordRequestError(req *http.Request, requestInfo *request.RequestInfo, component string, code int) {
     if requestInfo == nil {
         requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path}
     }
@@ -251,9 +260,9 @@ func Record(req *http.Request, requestInfo *request.RequestInfo, component, cont
     // However, we need to tweak it e.g. to differentiate GET from LIST.
     verb := canonicalVerb(strings.ToUpper(req.Method), scope)
     if requestInfo.IsResourceRequest {
-        MonitorRequest(req, verb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, contentType, code, responseSizeInBytes, elapsed)
+        requestErrorTotal.WithLabelValues(cleanVerb(verb, req), requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(code)).Inc()
     } else {
-        MonitorRequest(req, verb, "", "", "", requestInfo.Path, scope, component, contentType, code, responseSizeInBytes, elapsed)
+        requestErrorTotal.WithLabelValues(cleanVerb(verb, req), "", "", "", requestInfo.Path, scope, component, codeToString(code)).Inc()
     }
 }
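
For context, `compbasemetrics` (k8s.io/component-base/metrics) wraps prometheus/client_golang, so the `requestErrorTotal` counter defined above behaves like an ordinary Prometheus CounterVec. The following hedged sketch shows the equivalent pattern with the upstream client; the label values are illustrative, and the real code additionally normalizes them through the `cleanVerb` and `codeToString` helpers shown in the diff.

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

// requestErrorTotal mirrors the metric definition above using the plain
// Prometheus client; the name, help text, and label set match the diff.
var requestErrorTotal = prometheus.NewCounterVec(
    prometheus.CounterOpts{
        Name: "apiserver_request_error_total",
        Help: "Number of requests which have resulted in an apiserver response error",
    },
    []string{"verb", "group", "version", "resource", "subresource", "scope", "component", "code"},
)

func main() {
    reg := prometheus.NewRegistry()
    reg.MustRegister(requestErrorTotal)

    // One 504 on a namespaced pod GET, roughly as the timeout filter would record it.
    requestErrorTotal.WithLabelValues(
        "GET", "", "v1", "pods", "", "namespace", "apiserver", "504",
    ).Inc()

    // Gather and print the counter's value to show the labelled series that results.
    mfs, _ := reg.Gather()
    for _, mf := range mfs {
        fmt.Println(mf.GetName(), "=", mf.GetMetric()[0].GetCounter().GetValue())
    }
}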


@@ -178,7 +178,7 @@ func WithMaxInFlightLimit(
                 }
             }
         }
-        metrics.Record(r, requestInfo, metrics.APIServerComponent, "", http.StatusTooManyRequests, 0, 0)
+        metrics.RecordRequestError(r, requestInfo, metrics.APIServerComponent, http.StatusTooManyRequests)
         tooManyRequests(r, w)
     }
 }
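
`WithMaxInFlightLimit` rejects a saturated request before it ever reaches the instrumented handler chain, so the `RecordRequestError` call above is the only count such a request gets. A minimal sketch of that shape, with assumed names rather than the real filter internals:

package main

import (
    "log"
    "net/http"
)

// withMaxInFlight caps concurrent requests with a buffered channel acting as a
// semaphore; a rejected request is recorded exactly once, as an error, before
// the 429 is written, mirroring the order in the real filter.
func withMaxInFlight(next http.Handler, limit int) http.Handler {
    sem := make(chan struct{}, limit) // one slot per allowed in-flight request
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        select {
        case sem <- struct{}{}: // slot acquired: serve normally
            defer func() { <-sem }()
            next.ServeHTTP(w, r)
        default: // saturated: record the rejection, then answer 429
            recordRequestError(r, http.StatusTooManyRequests) // stand-in for metrics.RecordRequestError
            http.Error(w, "too many requests", http.StatusTooManyRequests)
        }
    })
}

// recordRequestError is a placeholder for the metrics call shown in the diff.
func recordRequestError(r *http.Request, code int) {
    log.Printf("request error: %s %s -> %d", r.Method, r.URL.Path, code)
}

func main() {
    ok := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("ok\n"))
    })
    log.Fatal(http.ListenAndServe(":8080", withMaxInFlight(ok, 100)))
}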


@@ -59,7 +59,7 @@ func WithTimeoutForNonLongRunningRequests(handler http.Handler, longRunning apir
         postTimeoutFn := func() {
             cancel()
-            metrics.Record(req, requestInfo, metrics.APIServerComponent, "", http.StatusGatewayTimeout, 0, 0)
+            metrics.RecordRequestError(req, requestInfo, metrics.APIServerComponent, http.StatusGatewayTimeout)
         }
         return req, time.After(timeout), postTimeoutFn, apierrors.NewTimeoutError(fmt.Sprintf("request did not complete within %s", timeout), 0)
     }
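
After this change, the post-timeout callback is the only place a timed-out request is recorded, and it is recorded as an error rather than as a second normal request. A hedged sketch of that structure with simplified names (the real filter also handles panics and guards the shared ResponseWriter):

package main

import (
    "context"
    "net/http"
    "net/http/httptest"
    "time"
)

// withTimeout mirrors the structure above: the normal path leaves recording to
// the standard request instrumentation, while the deadline path runs a
// post-timeout callback exactly once, so each request is counted once either way.
func withTimeout(next http.Handler, d time.Duration, postTimeoutFn func()) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        ctx, cancel := context.WithTimeout(r.Context(), d)
        defer cancel()

        done := make(chan struct{})
        go func() {
            defer close(done)
            next.ServeHTTP(httptest.NewRecorder(), r.WithContext(ctx)) // sketch: drop a late response
        }()

        select {
        case <-done: // finished in time; nothing recorded here
        case <-ctx.Done():
            postTimeoutFn() // the single place the timeout error is recorded
            w.WriteHeader(http.StatusGatewayTimeout)
        }
    })
}

func main() {
    slow := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        time.Sleep(time.Second)
    })
    h := withTimeout(slow, 50*time.Millisecond, func() {
        // stand-in for metrics.RecordRequestError(req, requestInfo, component, 504)
        println("recorded one 504")
    })
    h.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "/slow", nil))
}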