add const label for metrics (#802)

Signed-off-by: zeminzhou <zhouzemin@pingcap.com>
Co-authored-by: disksing <i@disksing.com>
zzm 2023-06-07 13:48:17 +08:00 committed by GitHub
parent 28247160f4
commit cf07be2ba0
1 changed file with 345 additions and 279 deletions


@ -128,7 +128,7 @@ const (
LblDirection = "direction"
)
func initMetrics(namespace, subsystem string) {
func initMetrics(namespace, subsystem string, constLabels prometheus.Labels) {
TiKVTxnCmdHistogram = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: namespace,
@ -136,6 +136,7 @@ func initMetrics(namespace, subsystem string) {
Name: "txn_cmd_duration_seconds",
Help: "Bucketed histogram of processing time of txn cmds.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 29), // 0.5ms ~ 1.5days
ConstLabels: constLabels,
}, []string{LblType, LblScope})
TiKVBackoffHistogram = prometheus.NewHistogramVec(
@ -145,6 +146,7 @@ func initMetrics(namespace, subsystem string) {
Name: "backoff_seconds",
Help: "total backoff seconds of a single backoffer.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 29), // 0.5ms ~ 1.5days
ConstLabels: constLabels,
}, []string{LblType})
TiKVSendReqHistogram = prometheus.NewHistogramVec(
@ -154,6 +156,7 @@ func initMetrics(namespace, subsystem string) {
Name: "request_seconds",
Help: "Bucketed histogram of sending request duration.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 29), // 0.5ms ~ 1.5days
ConstLabels: constLabels,
}, []string{LblType, LblStore, LblStaleRead, LblScope})
TiKVSendReqCounter = prometheus.NewCounterVec(
@ -162,6 +165,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "request_counter",
Help: "Counter of sending request with multi dimensions.",
ConstLabels: constLabels,
}, []string{LblType, LblStore, LblStaleRead, LblSource, LblScope})
TiKVSendReqTimeCounter = prometheus.NewCounterVec(
@ -170,6 +174,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "request_time_counter",
Help: "Counter of request time with multi dimensions.",
ConstLabels: constLabels,
}, []string{LblType, LblStore, LblStaleRead, LblSource, LblScope})
TiKVRPCNetLatencyHistogram = prometheus.NewHistogramVec(
@ -179,6 +184,7 @@ func initMetrics(namespace, subsystem string) {
Name: "rpc_net_latency_seconds",
Help: "Bucketed histogram of time difference between TiDB and TiKV.",
Buckets: prometheus.ExponentialBuckets(5e-5, 2, 18), // 50us ~ 6.5s
ConstLabels: constLabels,
}, []string{LblStore, LblScope})
TiKVCoprocessorHistogram = prometheus.NewHistogramVec(
@ -188,6 +194,7 @@ func initMetrics(namespace, subsystem string) {
Name: "cop_duration_seconds",
Help: "Run duration of a single coprocessor task, includes backoff time.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 29), // 0.5ms ~ 1.5days
ConstLabels: constLabels,
}, []string{LblStore, LblStaleRead, LblScope})
TiKVLockResolverCounter = prometheus.NewCounterVec(
@ -196,6 +203,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "lock_resolver_actions_total",
Help: "Counter of lock resolver actions.",
ConstLabels: constLabels,
}, []string{LblType})
TiKVRegionErrorCounter = prometheus.NewCounterVec(
@ -204,6 +212,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "region_err_total",
Help: "Counter of region errors.",
ConstLabels: constLabels,
}, []string{LblType, LblScope})
TiKVTxnWriteKVCountHistogram = prometheus.NewHistogramVec(
@ -213,6 +222,7 @@ func initMetrics(namespace, subsystem string) {
Name: "txn_write_kv_num",
Help: "Count of kv pairs to write in a transaction.",
Buckets: prometheus.ExponentialBuckets(1, 4, 17), // 1 ~ 4G
ConstLabels: constLabels,
}, []string{LblScope})
TiKVTxnWriteSizeHistogram = prometheus.NewHistogramVec(
@ -222,6 +232,7 @@ func initMetrics(namespace, subsystem string) {
Name: "txn_write_size_bytes",
Help: "Size of kv pairs to write in a transaction.",
Buckets: prometheus.ExponentialBuckets(16, 4, 17), // 16Bytes ~ 64GB
ConstLabels: constLabels,
}, []string{LblScope})
TiKVRawkvCmdHistogram = prometheus.NewHistogramVec(
@ -231,6 +242,7 @@ func initMetrics(namespace, subsystem string) {
Name: "rawkv_cmd_seconds",
Help: "Bucketed histogram of processing time of rawkv cmds.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 29), // 0.5ms ~ 1.5days
ConstLabels: constLabels,
}, []string{LblType})
TiKVRawkvSizeHistogram = prometheus.NewHistogramVec(
@ -240,6 +252,7 @@ func initMetrics(namespace, subsystem string) {
Name: "rawkv_kv_size_bytes",
Help: "Size of key/value to put, in bytes.",
Buckets: prometheus.ExponentialBuckets(1, 2, 30), // 1Byte ~ 512MB
ConstLabels: constLabels,
}, []string{LblType})
TiKVTxnRegionsNumHistogram = prometheus.NewHistogramVec(
@ -249,6 +262,7 @@ func initMetrics(namespace, subsystem string) {
Name: "txn_regions_num",
Help: "Number of regions in a transaction.",
Buckets: prometheus.ExponentialBuckets(1, 2, 25), // 1 ~ 16M
ConstLabels: constLabels,
}, []string{LblType, LblScope})
TiKVLoadSafepointCounter = prometheus.NewCounterVec(
@ -257,6 +271,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "load_safepoint_total",
Help: "Counter of load safepoint.",
ConstLabels: constLabels,
}, []string{LblType})
TiKVSecondaryLockCleanupFailureCounter = prometheus.NewCounterVec(
@ -265,6 +280,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "lock_cleanup_task_total",
Help: "failure statistic of secondary lock cleanup task.",
ConstLabels: constLabels,
}, []string{LblType})
TiKVRegionCacheCounter = prometheus.NewCounterVec(
@ -273,6 +289,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "region_cache_operations_total",
Help: "Counter of region cache.",
ConstLabels: constLabels,
}, []string{LblType, LblResult})
TiKVLoadRegionCacheHistogram = prometheus.NewHistogramVec(
@ -282,6 +299,7 @@ func initMetrics(namespace, subsystem string) {
Name: "load_region_cache_seconds",
Help: "Load region information duration",
Buckets: prometheus.ExponentialBuckets(0.0001, 2, 20), // 0.1ms ~ 52s
ConstLabels: constLabels,
}, []string{LblType})
TiKVLocalLatchWaitTimeHistogram = prometheus.NewHistogram(
@ -291,6 +309,7 @@ func initMetrics(namespace, subsystem string) {
Name: "local_latch_wait_seconds",
Help: "Wait time of a get local latch.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 20), // 0.5ms ~ 262s
ConstLabels: constLabels,
})
TiKVStatusDuration = prometheus.NewHistogramVec(
@ -300,6 +319,7 @@ func initMetrics(namespace, subsystem string) {
Name: "kv_status_api_duration",
Help: "duration for kv status api.",
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 20), // 0.5ms ~ 262s
ConstLabels: constLabels,
}, []string{"store"})
TiKVStatusCounter = prometheus.NewCounterVec(
@ -308,6 +328,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "kv_status_api_count",
Help: "Counter of access kv status api.",
ConstLabels: constLabels,
}, []string{LblResult})
TiKVBatchWaitDuration = prometheus.NewHistogram(
@ -317,6 +338,7 @@ func initMetrics(namespace, subsystem string) {
Name: "batch_wait_duration",
Buckets: prometheus.ExponentialBuckets(1, 2, 34), // 1ns ~ 8s
Help: "batch wait duration",
ConstLabels: constLabels,
})
TiKVBatchSendLatency = prometheus.NewHistogram(
@ -326,6 +348,7 @@ func initMetrics(namespace, subsystem string) {
Name: "batch_send_latency",
Buckets: prometheus.ExponentialBuckets(1, 2, 34), // 1ns ~ 8s
Help: "batch send latency",
ConstLabels: constLabels,
})
TiKVBatchRecvLatency = prometheus.NewHistogramVec(
@ -335,6 +358,7 @@ func initMetrics(namespace, subsystem string) {
Name: "batch_recv_latency",
Buckets: prometheus.ExponentialBuckets(1000, 2, 34), // 1us ~ 8000s
Help: "batch recv latency",
ConstLabels: constLabels,
}, []string{LblResult})
TiKVBatchWaitOverLoad = prometheus.NewCounter(
@ -343,6 +367,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "batch_wait_overload",
Help: "event of tikv transport layer overload",
ConstLabels: constLabels,
})
TiKVBatchPendingRequests = prometheus.NewHistogramVec(
@ -352,6 +377,7 @@ func initMetrics(namespace, subsystem string) {
Name: "batch_pending_requests",
Buckets: prometheus.ExponentialBuckets(1, 2, 11), // 1 ~ 1024
Help: "number of requests pending in the batch channel",
ConstLabels: constLabels,
}, []string{"store"})
TiKVBatchRequests = prometheus.NewHistogramVec(
@ -361,6 +387,7 @@ func initMetrics(namespace, subsystem string) {
Name: "batch_requests",
Buckets: prometheus.ExponentialBuckets(1, 2, 11), // 1 ~ 1024
Help: "number of requests in one batch",
ConstLabels: constLabels,
}, []string{"store"})
TiKVBatchClientUnavailable = prometheus.NewHistogram(
@ -370,6 +397,7 @@ func initMetrics(namespace, subsystem string) {
Name: "batch_client_unavailable_seconds",
Buckets: prometheus.ExponentialBuckets(0.001, 2, 28), // 1ms ~ 1.5days
Help: "batch client unavailable",
ConstLabels: constLabels,
})
TiKVBatchClientWaitEstablish = prometheus.NewHistogram(
@ -379,6 +407,7 @@ func initMetrics(namespace, subsystem string) {
Name: "batch_client_wait_connection_establish",
Buckets: prometheus.ExponentialBuckets(0.001, 2, 28), // 1ms ~ 1.5days
Help: "batch client wait new connection establish",
ConstLabels: constLabels,
})
TiKVBatchClientRecycle = prometheus.NewHistogram(
@ -388,6 +417,7 @@ func initMetrics(namespace, subsystem string) {
Name: "batch_client_reset",
Buckets: prometheus.ExponentialBuckets(0.001, 2, 28), // 1ms ~ 1.5days
Help: "batch client recycle connection and reconnect duration",
ConstLabels: constLabels,
})
TiKVRangeTaskStats = prometheus.NewGaugeVec(
@ -396,6 +426,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "range_task_stats",
Help: "stat of range tasks",
ConstLabels: constLabels,
}, []string{LblType, LblResult})
TiKVRangeTaskPushDuration = prometheus.NewHistogramVec(
@ -405,6 +436,7 @@ func initMetrics(namespace, subsystem string) {
Name: "range_task_push_duration",
Buckets: prometheus.ExponentialBuckets(0.001, 2, 20), // 1ms ~ 524s
Help: "duration to push sub tasks to range task workers",
ConstLabels: constLabels,
}, []string{LblType})
TiKVTokenWaitDuration = prometheus.NewHistogram(
@ -414,6 +446,7 @@ func initMetrics(namespace, subsystem string) {
Name: "batch_executor_token_wait_duration",
Buckets: prometheus.ExponentialBuckets(1, 2, 34), // 1ns ~ 8s
Help: "tidb txn token wait duration to process batches",
ConstLabels: constLabels,
})
TiKVTxnHeartBeatHistogram = prometheus.NewHistogramVec(
@ -422,6 +455,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "txn_heart_beat",
Help: "Bucketed histogram of the txn_heartbeat request duration.",
ConstLabels: constLabels,
Buckets: prometheus.ExponentialBuckets(0.001, 2, 20), // 1ms ~ 524s
}, []string{LblType})
@ -431,6 +465,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "txn_ttl_manager",
Help: "Bucketed histogram of the txn ttl manager lifetime duration.",
ConstLabels: constLabels,
Buckets: prometheus.ExponentialBuckets(1, 2, 20), // 1s ~ 524288s
})
@ -441,6 +476,7 @@ func initMetrics(namespace, subsystem string) {
Name: "pessimistic_lock_keys_duration",
Buckets: prometheus.ExponentialBuckets(0.001, 2, 24), // 1ms ~ 8389s
Help: "tidb txn pessimistic lock keys duration",
ConstLabels: constLabels,
})
TiKVTTLLifeTimeReachCounter = prometheus.NewCounter(
@ -449,6 +485,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "ttl_lifetime_reach_total",
Help: "Counter of ttlManager live too long.",
ConstLabels: constLabels,
})
TiKVNoAvailableConnectionCounter = prometheus.NewCounter(
@ -457,6 +494,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "batch_client_no_available_connection_total",
Help: "Counter of no available batch client.",
ConstLabels: constLabels,
})
TiKVTwoPCTxnCounter = prometheus.NewCounterVec(
@ -465,6 +503,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "commit_txn_counter",
Help: "Counter of 2PC transactions.",
ConstLabels: constLabels,
}, []string{LblType})
TiKVAsyncCommitTxnCounter = prometheus.NewCounterVec(
@ -473,6 +512,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "async_commit_txn_counter",
Help: "Counter of async commit transactions.",
ConstLabels: constLabels,
}, []string{LblType})
TiKVOnePCTxnCounter = prometheus.NewCounterVec(
@ -481,6 +521,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "one_pc_txn_counter",
Help: "Counter of 1PC transactions.",
ConstLabels: constLabels,
}, []string{LblType})
TiKVStoreLimitErrorCounter = prometheus.NewCounterVec(
@ -489,6 +530,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "get_store_limit_token_error",
Help: "store token is up to the limit, probably because one of the stores is the hotspot or unavailable",
ConstLabels: constLabels,
}, []string{LblAddress, LblStore})
TiKVGRPCConnTransientFailureCounter = prometheus.NewCounterVec(
@ -497,6 +539,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "connection_transient_failure_count",
Help: "Counter of gRPC connection transient failure",
ConstLabels: constLabels,
}, []string{LblAddress, LblStore})
TiKVPanicCounter = prometheus.NewCounterVec(
@ -505,6 +548,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "panic_total",
Help: "Counter of panic.",
ConstLabels: constLabels,
}, []string{LblType})
TiKVForwardRequestCounter = prometheus.NewCounterVec(
@ -513,6 +557,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "forward_request_counter",
Help: "Counter of tikv request being forwarded through another node",
ConstLabels: constLabels,
}, []string{LblFromStore, LblToStore, LblType, LblResult})
TiKVTSFutureWaitDuration = prometheus.NewHistogram(
@ -521,6 +566,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "ts_future_wait_seconds",
Help: "Bucketed histogram of seconds cost for waiting timestamp future.",
ConstLabels: constLabels,
Buckets: prometheus.ExponentialBuckets(0.000005, 2, 30), // 5us ~ 2560s
})
@ -530,6 +576,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "safets_update_counter",
Help: "Counter of tikv safe_ts being updated.",
ConstLabels: constLabels,
}, []string{LblResult, LblStore})
TiKVMinSafeTSGapSeconds = prometheus.NewGaugeVec(
@ -538,6 +585,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "min_safets_gap_seconds",
Help: "The minimal (non-zero) SafeTS gap for each store.",
ConstLabels: constLabels,
}, []string{LblStore})
TiKVReplicaSelectorFailureCounter = prometheus.NewCounterVec(
@ -546,6 +594,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "replica_selector_failure_counter",
Help: "Counter of the reason why the replica selector cannot yield a potential leader.",
ConstLabels: constLabels,
}, []string{LblType})
TiKVRequestRetryTimesHistogram = prometheus.NewHistogram(
@ -554,6 +603,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "request_retry_times",
Help: "Bucketed histogram of how many times a region request retries.",
ConstLabels: constLabels,
Buckets: []float64{1, 2, 3, 4, 8, 16, 32, 64, 128, 256},
})
TiKVTxnCommitBackoffSeconds = prometheus.NewHistogram(
@ -562,6 +612,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "txn_commit_backoff_seconds",
Help: "Bucketed histogram of the total backoff duration in committing a transaction.",
ConstLabels: constLabels,
Buckets: prometheus.ExponentialBuckets(0.001, 2, 22), // 1ms ~ 2097s
})
TiKVTxnCommitBackoffCount = prometheus.NewHistogram(
@ -570,6 +621,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "txn_commit_backoff_count",
Help: "Bucketed histogram of the backoff count in committing a transaction.",
ConstLabels: constLabels,
Buckets: prometheus.ExponentialBuckets(1, 2, 12), // 1 ~ 2048
})
@ -580,6 +632,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: "sli", // Always use "sli" to make it compatible with TiDB.
Name: "tikv_small_read_duration",
Help: "Read time of TiKV small read.",
ConstLabels: constLabels,
Buckets: prometheus.ExponentialBuckets(0.0005, 2, 28), // 0.5ms ~ 74h
})
@ -589,6 +642,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: "sli",
Name: "tikv_read_throughput",
Help: "Read throughput of TiKV read in Bytes/s.",
ConstLabels: constLabels,
Buckets: prometheus.ExponentialBuckets(1024, 2, 13), // 1MB/s ~ 4GB/s
})
@ -598,6 +652,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "gc_unsafe_destroy_range_failures",
Help: "Counter of unsafe destroyrange failures",
ConstLabels: constLabels,
}, []string{LblType})
TiKVPrewriteAssertionUsageCounter = prometheus.NewCounterVec(
@ -606,6 +661,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "prewrite_assertion_count",
Help: "Counter of assertions used in prewrite requests",
ConstLabels: constLabels,
}, []string{LblType})
TiKVGrpcConnectionState = prometheus.NewGaugeVec(
@ -614,6 +670,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "grpc_connection_state",
Help: "State of gRPC connection",
ConstLabels: constLabels,
}, []string{"connection_id", "store_ip", "grpc_state"})
TiKVAggressiveLockedKeysCounter = prometheus.NewCounterVec(
@ -622,6 +679,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "aggressive_locking_count",
Help: "Counter of keys locked in aggressive locking mode",
ConstLabels: constLabels,
}, []string{LblType})
TiKVStoreSlowScoreGauge = prometheus.NewGaugeVec(
@ -630,6 +688,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "store_slow_score",
Help: "Slow scores of each tikv node based on RPC timecosts",
ConstLabels: constLabels,
}, []string{LblStore})
TiKVPreferLeaderFlowsGauge = prometheus.NewGaugeVec(
@ -638,6 +697,7 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "prefer_leader_flows_gauge",
Help: "Counter of flows under PreferLeader mode.",
ConstLabels: constLabels,
}, []string{LblType, LblStore})
TiKVStaleReadSizeSummary = prometheus.NewSummaryVec(
@ -646,18 +706,24 @@ func initMetrics(namespace, subsystem string) {
Subsystem: subsystem,
Name: "stale_read_bytes",
Help: "Size of stale read.",
ConstLabels: constLabels,
}, []string{LblResult, LblDirection})
initShortcuts()
}
func init() {
initMetrics("tikv", "client_go")
initMetrics("tikv", "client_go", nil)
}
// InitMetrics initializes metrics variables with given namespace and subsystem name.
func InitMetrics(namespace, subsystem string) {
initMetrics(namespace, subsystem)
initMetrics(namespace, subsystem, nil)
}
// InitMetricsWithConstLabels initializes metrics variables with given namespace, subsystem name and const labels.
func InitMetricsWithConstLabels(namespace, subsystem string, constLabels prometheus.Labels) {
initMetrics(namespace, subsystem, constLabels)
}
// RegisterMetrics registers all metrics variables.
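Taken together, the change lets callers attach constant labels to every client-go metric while keeping the existing InitMetrics entry point backward compatible. A minimal usage sketch, assuming the github.com/tikv/client-go/v2/metrics import path; the "cluster" label name and value are illustrative placeholders, not part of this change:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/tikv/client-go/v2/metrics"
)

func main() {
	// Re-initialize client-go metrics with a constant label attached to
	// every collector; the label name/value below are hypothetical.
	metrics.InitMetricsWithConstLabels("tikv", "client_go", prometheus.Labels{
		"cluster": "test-cluster",
	})
	// Register the re-initialized collectors with the default registry.
	metrics.RegisterMetrics()
}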