Merge pull request #88609 from yue9944882/chore/follow-up-metrics

Preserve legacy inflight metrics and fixes registration

Kubernetes-commit: d115206309fab67d22e6b17ee340b654077a9289
This commit is contained in:
Kubernetes Publisher 2020-03-02 14:50:06 -08:00
commit 5f79f6fd39
6 changed files with 66 additions and 38 deletions

2
Godeps/Godeps.json generated
View File

@ -600,7 +600,7 @@
},
{
"ImportPath": "k8s.io/component-base",
"Rev": "6f93f319a354"
"Rev": "77f056ceea66"
},
{
"ImportPath": "k8s.io/gengo",

5
go.mod
View File

@ -29,7 +29,6 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/pkg/errors v0.8.1
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021 // indirect
github.com/prometheus/client_golang v1.0.0
github.com/prometheus/client_model v0.2.0
github.com/sirupsen/logrus v1.4.2 // indirect
github.com/spf13/pflag v1.0.5
@ -47,7 +46,7 @@ require (
k8s.io/api v0.0.0-20200302082247-8f54d34188b0
k8s.io/apimachinery v0.0.0-20200302045842-b9f0d37e94c6
k8s.io/client-go v0.0.0-20200302082525-ca7edf3d8a93
k8s.io/component-base v0.0.0-20200227163006-6f93f319a354
k8s.io/component-base v0.0.0-20200302162701-77f056ceea66
k8s.io/klog v1.0.0
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c
k8s.io/utils v0.0.0-20200117235808-5f6fbceb4c31
@ -62,5 +61,5 @@ replace (
k8s.io/api => k8s.io/api v0.0.0-20200302082247-8f54d34188b0
k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20200302045842-b9f0d37e94c6
k8s.io/client-go => k8s.io/client-go v0.0.0-20200302082525-ca7edf3d8a93
k8s.io/component-base => k8s.io/component-base v0.0.0-20200227163006-6f93f319a354
k8s.io/component-base => k8s.io/component-base v0.0.0-20200302162701-77f056ceea66
)

2
go.sum
View File

@ -340,7 +340,7 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
k8s.io/api v0.0.0-20200302082247-8f54d34188b0/go.mod h1:+2jnw1NMpdXlVDlcy5KjXl7Gh4M0HZ0AhKakQo+KiV8=
k8s.io/apimachinery v0.0.0-20200302045842-b9f0d37e94c6/go.mod h1:5X8oEhnd931nEg6/Nkumo00nT6ZsCLp2h7Xwd7Ym6P4=
k8s.io/client-go v0.0.0-20200302082525-ca7edf3d8a93/go.mod h1:2DzSdhxUXsi3Ln8q5B+GHLG6b2cQN64WNsTnBYZ8Y4Y=
k8s.io/component-base v0.0.0-20200227163006-6f93f319a354/go.mod h1:HDvqB92UBNY8EfzZuIRA2S7PmDF5CfiRwS88LNkTRnE=
k8s.io/component-base v0.0.0-20200302162701-77f056ceea66/go.mod h1:TFzve7JW8Nl3KjrLk5oj8UA+qvh04n0NuTOciggyn1s=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=

View File

@ -20,11 +20,7 @@ import (
"context"
"fmt"
"net/http"
// TODO: decide whether to also generate the old metrics, which
// categorize according to mutating vs readonly.
// "k8s.io/apiserver/pkg/endpoints/metrics"
"sync/atomic"
fcv1a1 "k8s.io/api/flowcontrol/v1alpha1"
apitypes "k8s.io/apimachinery/pkg/types"
@ -38,8 +34,8 @@ type priorityAndFairnessKeyType int
const priorityAndFairnessKey priorityAndFairnessKeyType = iota
const (
responseHeaderMatchedPriorityLevelConfigurationUID = "X-Kubernetes-PF-PriorityLevelUID"
responseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchemaUID"
responseHeaderMatchedPriorityLevelConfigurationUID = "X-Kubernetes-PF-PriorityLevel-UID"
responseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID"
)
// PriorityAndFairnessClassification identifies the results of
@ -57,6 +53,8 @@ func GetClassification(ctx context.Context) *PriorityAndFairnessClassification {
return ctx.Value(priorityAndFairnessKey).(*PriorityAndFairnessClassification)
}
var atomicMutatingLen, atomicNonMutatingLen int32
// WithPriorityAndFairness limits the number of in-flight
// requests in a fine-grained way.
func WithPriorityAndFairness(
@ -68,7 +66,7 @@ func WithPriorityAndFairness(
klog.Warningf("priority and fairness support not found, skipping")
return handler
}
startOnce.Do(startRecordingUsage)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
requestInfo, ok := apirequest.RequestInfoFrom(ctx)
@ -97,8 +95,25 @@ func WithPriorityAndFairness(
PriorityLevelName: pl.Name,
PriorityLevelUID: pl.UID}
}
var served bool
isMutatingRequest := !nonMutatingRequestVerbs.Has(requestInfo.Verb)
execute := func() {
var mutatingLen, readOnlyLen int
if isMutatingRequest {
mutatingLen = int(atomic.AddInt32(&atomicMutatingLen, 1))
} else {
readOnlyLen = int(atomic.AddInt32(&atomicNonMutatingLen, 1))
}
defer func() {
if isMutatingRequest {
atomic.AddInt32(&atomicMutatingLen, -1)
watermark.recordMutating(mutatingLen)
} else {
atomic.AddInt32(&atomicNonMutatingLen, -1)
watermark.recordReadOnly(readOnlyLen)
}
}()
served = true
innerCtx := context.WithValue(ctx, priorityAndFairnessKey, classification)
innerReq := r.Clone(innerCtx)

View File

@ -23,6 +23,7 @@ import (
apimetrics "k8s.io/apiserver/pkg/endpoints/metrics"
"k8s.io/apiserver/pkg/server/mux"
etcd3metrics "k8s.io/apiserver/pkg/storage/etcd3/metrics"
flowcontrolmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics"
"k8s.io/component-base/metrics/legacyregistry"
)
@ -58,4 +59,5 @@ func (m MetricsWithReset) Install(c *mux.PathRecorderMux) {
func register() {
apimetrics.Register()
etcd3metrics.Register()
flowcontrolmetrics.Register()
}

View File

@ -17,9 +17,11 @@ limitations under the License.
package metrics
import (
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
compbasemetrics "k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
)
const (
@ -37,28 +39,29 @@ var (
requestDurationSecondsBuckets = []float64{0, 0.005, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 30}
)
func init() {
prometheus.MustRegister(apiserverRejectedRequests)
prometheus.MustRegister(apiserverCurrentInqueueRequests)
prometheus.MustRegister(apiserverRequestQueueLength)
prometheus.MustRegister(apiserverRequestConcurrencyLimit)
prometheus.MustRegister(apiserverCurrentExecutingRequests)
prometheus.MustRegister(apiserverRequestWaitingSeconds)
prometheus.MustRegister(apiserverRequestExecutionSeconds)
var registerMetrics sync.Once
// Register all metrics.
func Register() {
registerMetrics.Do(func() {
for _, metric := range metrics {
legacyregistry.MustRegister(metric)
}
})
}
var (
apiserverRejectedRequests = prometheus.NewCounterVec(
prometheus.CounterOpts{
apiserverRejectedRequestsTotal = compbasemetrics.NewCounterVec(
&compbasemetrics.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "rejected_requests",
Name: "rejected_requests_total",
Help: "Number of rejected requests by api priority and fairness system",
},
[]string{priorityLevel, "reason"},
)
apiserverCurrentInqueueRequests = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
apiserverCurrentInqueueRequests = compbasemetrics.NewGaugeVec(
&compbasemetrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "current_inqueue_requests",
@ -66,8 +69,8 @@ var (
},
[]string{priorityLevel},
)
apiserverRequestQueueLength = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
apiserverRequestQueueLength = compbasemetrics.NewHistogramVec(
&compbasemetrics.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "request_queue_length",
@ -76,8 +79,8 @@ var (
},
[]string{priorityLevel},
)
apiserverRequestConcurrencyLimit = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
apiserverRequestConcurrencyLimit = compbasemetrics.NewGaugeVec(
&compbasemetrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "request_concurrency_limit",
@ -85,8 +88,8 @@ var (
},
[]string{priorityLevel},
)
apiserverCurrentExecutingRequests = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
apiserverCurrentExecutingRequests = compbasemetrics.NewGaugeVec(
&compbasemetrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "current_executing_requests",
@ -94,8 +97,8 @@ var (
},
[]string{priorityLevel},
)
apiserverRequestWaitingSeconds = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
apiserverRequestWaitingSeconds = compbasemetrics.NewHistogramVec(
&compbasemetrics.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "request_wait_duration_seconds",
@ -104,8 +107,8 @@ var (
},
[]string{priorityLevel, flowSchema, "execute"},
)
apiserverRequestExecutionSeconds = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
apiserverRequestExecutionSeconds = compbasemetrics.NewHistogramVec(
&compbasemetrics.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "request_execution_seconds",
@ -114,6 +117,15 @@ var (
},
[]string{priorityLevel, flowSchema},
)
metrics = []compbasemetrics.Registerable{
apiserverRejectedRequestsTotal,
apiserverCurrentInqueueRequests,
apiserverRequestQueueLength,
apiserverRequestConcurrencyLimit,
apiserverCurrentExecutingRequests,
apiserverRequestWaitingSeconds,
apiserverRequestExecutionSeconds,
}
)
// UpdateFlowControlRequestsInQueue updates the value for the # of requests in the specified queues in flow control
@ -133,7 +145,7 @@ func UpdateSharedConcurrencyLimit(priorityLevel string, limit int) {
// AddReject increments the # of rejected requests for flow control
func AddReject(priorityLevel string, reason string) {
apiserverRejectedRequests.WithLabelValues(priorityLevel, reason).Add(1)
apiserverRejectedRequestsTotal.WithLabelValues(priorityLevel, reason).Add(1)
}
// ObserveQueueLength observes the queue length for flow control