mirror of https://github.com/tikv/client-go.git
This reverts commit 1a44252779.
Signed-off-by: crazycs520 <crazycs520@gmail.com>
parent 3520f13fc0
commit 1362f1e875
@@ -70,7 +70,6 @@ type batchCommandsEntry struct {
     // canceled indicated the request is canceled or not.
     canceled int32
     err error
-    start time.Time
 }
 
 func (b *batchCommandsEntry) isCanceled() bool {
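A side note on the context lines above: canceled is an int32 rather than a bool because it is set and read from different goroutines, so access presumably goes through sync/atomic. A minimal, self-contained sketch of that pattern; the method bodies are illustrative, not copied from the repo:

package main

import (
    "fmt"
    "sync/atomic"
)

// entry mirrors the relevant fields of batchCommandsEntry from the diff.
type entry struct {
    canceled int32 // 0 = active, 1 = canceled; int32 so it works with sync/atomic
    err      error
}

// isCanceled reads the flag atomically, safe to call from any goroutine.
func (e *entry) isCanceled() bool {
    return atomic.LoadInt32(&e.canceled) == 1
}

// cancel sets the flag atomically.
func (e *entry) cancel() {
    atomic.StoreInt32(&e.canceled, 1)
}

func main() {
    e := &entry{}
    fmt.Println(e.isCanceled()) // false
    e.cancel()
    fmt.Println(e.isCanceled()) // true
}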
@@ -383,14 +382,11 @@ func (a *batchConn) getClientAndSend() {
     }
     defer cli.unlockForSend()
 
-    now := time.Now()
-    tiKVBatchWaitToSendDuration := metrics.TiKVBatchWaitDuration.WithLabelValues("wait-to-send", target)
     req, forwardingReqs := a.reqBuilder.build(func(id uint64, e *batchCommandsEntry) {
         cli.batched.Store(id, e)
         if trace.IsEnabled() {
             trace.Log(e.ctx, "rpc", "send")
         }
-        tiKVBatchWaitToSendDuration.Observe(float64(now.Sub(e.start)))
     })
     if req != nil {
         cli.send("", req)
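The kept lines in this hunk call into the standard library's runtime/trace package: when tracing is enabled, each request queued into the batch gets a log event attached to its context. A small sketch of that API in isolation; the output file, task name, and messages are made up for the example:

package main

import (
    "context"
    "os"
    "runtime/trace"
)

func main() {
    // Write an execution trace to a file; inspect it later with `go tool trace trace.out`.
    f, err := os.Create("trace.out")
    if err != nil {
        panic(err)
    }
    defer f.Close()
    if err := trace.Start(f); err != nil {
        panic(err)
    }
    defer trace.Stop()

    // Associate events with a task, similar to how the client logs per-request events.
    ctx, task := trace.NewTask(context.Background(), "rpc")
    defer task.End()

    if trace.IsEnabled() {
        // trace.Log attaches a (category, message) event to the task carried by ctx.
        trace.Log(ctx, "rpc", "send")
    }
}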
@@ -515,14 +511,6 @@ func (c *batchCommandsClient) isStopped() bool {
 }
 
 func (c *batchCommandsClient) send(forwardedHost string, req *tikvpb.BatchCommandsRequest) {
-    start := time.Now()
-    defer func() {
-        if forwardedHost == "" {
-            metrics.TiKVBatchConnSendDuration.WithLabelValues(c.target).Observe(time.Since(start).Seconds())
-        } else {
-            metrics.TiKVBatchConnSendDuration.WithLabelValues(forwardedHost).Observe(time.Since(start).Seconds())
-        }
-    }()
     err := c.initBatchClient(forwardedHost)
     if err != nil {
         logutil.BgLogger().Warn(
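The removed block timed each send and recorded it in seconds, keyed by the target or by the forwarded host. As a standalone illustration of that defer-plus-HistogramVec pattern, with placeholder metric and label names rather than the upstream definitions:

package main

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

// sendDuration is a placeholder histogram vector keyed by store address,
// standing in for the removed TiKVBatchConnSendDuration.
var sendDuration = prometheus.NewHistogramVec(
    prometheus.HistogramOpts{
        Name:    "example_batch_conn_send_seconds",
        Help:    "example: batch conn send duration",
        Buckets: prometheus.ExponentialBuckets(0.0005, 2, 22), // 0.5ms ~ 1048s
    }, []string{"store"})

func send(target, forwardedHost string) {
    start := time.Now()
    defer func() {
        // Pick the label the same way the removed block did: the forwarded
        // host if one is set, otherwise the client's own target.
        host := target
        if forwardedHost != "" {
            host = forwardedHost
        }
        sendDuration.WithLabelValues(host).Observe(time.Since(start).Seconds())
    }()

    // The actual gRPC send would happen here; sleep stands in for it.
    time.Sleep(10 * time.Millisecond)
}

func main() {
    prometheus.MustRegister(sendDuration)
    send("127.0.0.1:20160", "")
}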
@@ -789,7 +777,6 @@ func sendBatchRequest(
     req *tikvpb.BatchCommandsRequest_Request,
     timeout time.Duration,
 ) (*tikvrpc.Response, error) {
-    start := time.Now()
     entry := &batchCommandsEntry{
         ctx: ctx,
         req: req,
@@ -797,11 +784,11 @@ func sendBatchRequest(
         forwardedHost: forwardedHost,
         canceled: 0,
         err: nil,
-        start: start,
     }
     timer := time.NewTimer(timeout)
     defer timer.Stop()
 
+    start := time.Now()
     select {
     case batchConn.batchCommandsCh <- entry:
     case <-ctx.Done():
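After this hunk, start is taken immediately before the select, so the wait metric covers only the time the caller spends blocked on batchConn.batchCommandsCh rather than everything since the function was entered. A minimal sketch of timing a blocking channel send that way; the channel, timeout, and draining goroutine are invented for the example:

package main

import (
    "context"
    "fmt"
    "time"
)

func main() {
    // A small buffered channel stands in for batchConn.batchCommandsCh.
    ch := make(chan int, 1)
    ch <- 0 // fill it so the send below has to wait

    go func() {
        time.Sleep(50 * time.Millisecond)
        <-ch // free a slot after a while
    }()

    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    // Measure only the time spent blocked on the channel send,
    // mirroring where the restored code places time.Now().
    start := time.Now()
    select {
    case ch <- 1:
    case <-ctx.Done():
        fmt.Println("canceled while waiting:", ctx.Err())
        return
    }
    waitDuration := time.Since(start)
    fmt.Println("waited", waitDuration)
}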
@@ -812,7 +799,7 @@ func sendBatchRequest(
         return nil, errors.WithMessage(context.DeadlineExceeded, "wait sendLoop")
     }
     waitDuration := time.Since(start)
-    metrics.TiKVBatchWaitDuration.WithLabelValues("wait-to-chan", addr).Observe(float64(waitDuration))
+    metrics.TiKVBatchWaitDuration.Observe(float64(waitDuration))
 
     select {
     case res, ok := <-entry.res:
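This one-line change is the call-site counterpart of the type change in the metrics hunks below: a plain prometheus.Histogram is observed directly, while a HistogramVec has to be resolved to a child through WithLabelValues first. A hedged side-by-side sketch with placeholder metric names:

package main

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // Post-revert style: a single, unlabeled histogram.
    plain := prometheus.NewHistogram(prometheus.HistogramOpts{
        Name: "example_batch_wait_duration",
        Help: "example: batch wait duration",
    })

    // Pre-revert style: a histogram vector partitioned by type and store.
    vec := prometheus.NewHistogramVec(prometheus.HistogramOpts{
        Name: "example_batch_wait_duration_labeled",
        Help: "example: batch wait duration with labels",
    }, []string{"type", "store"})

    waitDuration := 250 * time.Microsecond

    // Observe directly on the Histogram; time.Duration is int64 nanoseconds,
    // so float64(waitDuration) records the value in ns.
    plain.Observe(float64(waitDuration))

    // Go through WithLabelValues on the HistogramVec.
    vec.WithLabelValues("wait-to-chan", "127.0.0.1:20160").Observe(float64(waitDuration))
}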
@@ -62,8 +62,7 @@ var (
     TiKVLocalLatchWaitTimeHistogram prometheus.Histogram
     TiKVStatusDuration *prometheus.HistogramVec
     TiKVStatusCounter *prometheus.CounterVec
-    TiKVBatchWaitDuration *prometheus.HistogramVec
-    TiKVBatchConnSendDuration *prometheus.HistogramVec
+    TiKVBatchWaitDuration prometheus.Histogram
     TiKVBatchSendLatency prometheus.Histogram
     TiKVBatchWaitOverLoad prometheus.Counter
     TiKVBatchPendingRequests *prometheus.HistogramVec
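A note on the declaration styles above: prometheus.Histogram is an interface, so NewHistogram's result is stored as-is, while prometheus.HistogramVec is a concrete type returned by pointer, hence the asterisk in the vector declarations. Illustrative declarations only:

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

// Declarations mirroring the two styles in the hunk above.
var (
    // prometheus.Histogram is an interface; NewHistogram's value is stored directly.
    ExampleWaitDuration prometheus.Histogram

    // prometheus.HistogramVec is a struct; its constructor returns *HistogramVec.
    ExamplePendingRequests *prometheus.HistogramVec
)

func main() {
    ExampleWaitDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
        Name: "example_wait", Help: "example"})
    ExamplePendingRequests = prometheus.NewHistogramVec(prometheus.HistogramOpts{
        Name: "example_pending", Help: "example"}, []string{"store"})
    fmt.Printf("%T %T\n", ExampleWaitDuration, ExamplePendingRequests)
}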
@@ -334,25 +333,15 @@ func initMetrics(namespace, subsystem string, constLabels prometheus.Labels) {
             ConstLabels: constLabels,
         }, []string{LblResult})
 
-    TiKVBatchWaitDuration = prometheus.NewHistogramVec(
+    TiKVBatchWaitDuration = prometheus.NewHistogram(
         prometheus.HistogramOpts{
             Namespace: namespace,
             Subsystem: subsystem,
             Name: "batch_wait_duration",
-            Buckets: prometheus.ExponentialBuckets(64, 2, 34), // 64ns ~ 549s
-            Help: "batch-cmd wait duration, unit is nanosecond",
+            Buckets: prometheus.ExponentialBuckets(1, 2, 34), // 1ns ~ 8s
+            Help: "batch wait duration",
             ConstLabels: constLabels,
-        }, []string{LblType, LblStore})
-
-    TiKVBatchConnSendDuration = prometheus.NewHistogramVec(
-        prometheus.HistogramOpts{
-            Namespace: namespace,
-            Subsystem: subsystem,
-            Name: "batch_conn_send_seconds",
-            Buckets: prometheus.ExponentialBuckets(0.0005, 2, 22), // 0.5ms ~ 1048s
-            Help: "batch conn send duration",
-            ConstLabels: constLabels,
-        }, []string{LblStore})
+        })
 
     TiKVBatchSendLatency = prometheus.NewHistogram(
         prometheus.HistogramOpts{
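The bucket comments in this hunk follow directly from prometheus.ExponentialBuckets(start, factor, count), which yields count buckets start, start*factor, ..., start*factor^(count-1): 1ns doubled 33 times tops out near 8.6s, and 64ns doubled 33 times near 549.8s. A quick arithmetic check using the same calls:

package main

import (
    "fmt"
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // Post-revert buckets: 1ns, 2ns, 4ns, ... up to 2^33 ns.
    restored := prometheus.ExponentialBuckets(1, 2, 34)
    fmt.Println(time.Duration(restored[len(restored)-1])) // 8.589934592s

    // Pre-revert buckets: 64ns doubling 33 times, i.e. up to 64*2^33 ns.
    reverted := prometheus.ExponentialBuckets(64, 2, 34)
    fmt.Println(time.Duration(reverted[len(reverted)-1])) // 9m9.755813888s (~549.8s)
}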
@@ -779,7 +768,6 @@ func RegisterMetrics() {
     prometheus.MustRegister(TiKVStatusDuration)
     prometheus.MustRegister(TiKVStatusCounter)
     prometheus.MustRegister(TiKVBatchWaitDuration)
-    prometheus.MustRegister(TiKVBatchConnSendDuration)
     prometheus.MustRegister(TiKVBatchSendLatency)
     prometheus.MustRegister(TiKVBatchRecvLatency)
     prometheus.MustRegister(TiKVBatchWaitOverLoad)
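RegisterMetrics mirrors initMetrics: each collector created there is registered exactly once, so dropping TiKVBatchConnSendDuration from the metric definitions also drops its registration here. For reference, the default registry rejects a second registration of the same fully-qualified metric, and MustRegister turns that error into a panic. A small sketch:

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    h := prometheus.NewHistogram(prometheus.HistogramOpts{
        Name: "example_batch_wait_duration",
        Help: "example: batch wait duration",
    })

    // First registration succeeds.
    prometheus.MustRegister(h)

    // Registering a collector with the same fully-qualified name again
    // returns AlreadyRegisteredError; MustRegister would panic on it.
    if err := prometheus.Register(h); err != nil {
        fmt.Println("second registration rejected:", err)
    }
}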