mirror of https://github.com/tikv/client-go.git
log: only log the first lock in every flush or prewrite response (#1295)
* log: only log the first lock in every flush or prewrite response
* log each conflicting txn at most once
* log: ttlManager

Signed-off-by: ekexium <eke@fastmail.com>
This commit is contained in:
parent 5b3e266723
commit e02b025475
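The diff below touches three spots: the ttlManager heartbeat logging in keepAlive, the pipelined flush path, and the prewrite path. The core of the lock-logging change is deduplication by owning transaction: within a single flush or prewrite response, only the first lock seen for each conflicting transaction is logged, and the message notes that further locks from the same transaction may be omitted. A minimal, self-contained sketch of that pattern (not the client-go code itself; the conflict type below is a hypothetical stand-in for txnlock.Lock):

package main

import "fmt"

// conflict is a hypothetical stand-in for txnlock.Lock; only the owning
// transaction's start TS (TxnID) matters for the deduplication.
type conflict struct {
    TxnID uint64
    Key   string
}

func main() {
    conflicts := []conflict{
        {TxnID: 100, Key: "a"},
        {TxnID: 100, Key: "b"}, // same transaction: not logged again
        {TxnID: 200, Key: "c"},
    }

    // logged records which conflicting transactions were already reported,
    // so each one is logged at most once per response.
    logged := make(map[uint64]struct{}, 1)
    for _, c := range conflicts {
        if _, ok := logged[c.TxnID]; ok {
            continue
        }
        logged[c.TxnID] = struct{}{}
        fmt.Printf("encounters lock: txnID=%d key=%s "+
            "(more locks from this txn may be omitted)\n", c.TxnID, c.Key)
    }
}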
@@ -1219,7 +1219,9 @@ func keepAlive(
     logutil.Logger(bo.GetCtx()).Info("ttlManager live up to its lifetime",
         zap.Uint64("txnStartTS", c.startTS),
         zap.Uint64("uptime", uptime),
-        zap.Uint64("maxTxnTTL", config.GetGlobalConfig().MaxTxnTTL))
+        zap.Uint64("maxTxnTTL", config.GetGlobalConfig().MaxTxnTTL),
+        zap.Bool("isPipelinedTxn", isPipelinedTxn),
+    )
     metrics.TiKVTTLLifeTimeReachCounter.Inc()
     // the pessimistic locks may expire if the ttl manager has timed out, set `LockExpired` flag
     // so that this transaction could only commit or rollback with no more statement executions
@@ -1231,7 +1233,10 @@ func keepAlive(
 
     newTTL := uptime + atomic.LoadUint64(&ManagedLockTTL)
     logutil.Logger(bo.GetCtx()).Info("send TxnHeartBeat",
-        zap.Uint64("startTS", c.startTS), zap.Uint64("newTTL", newTTL))
+        zap.Uint64("startTS", c.startTS),
+        zap.Uint64("newTTL", newTTL),
+        zap.Bool("isPipelinedTxn", isPipelinedTxn),
+    )
     startTime := time.Now()
     _, stopHeartBeat, err := sendTxnHeartBeat(bo, c.store, primaryKey, c.startTS, newTTL)
     if err != nil {
@@ -1239,12 +1244,16 @@ func keepAlive(
     metrics.TxnHeartBeatHistogramError.Observe(time.Since(startTime).Seconds())
     logutil.Logger(bo.GetCtx()).Debug("send TxnHeartBeat failed",
         zap.Error(err),
-        zap.Uint64("txnStartTS", c.startTS))
+        zap.Uint64("txnStartTS", c.startTS),
+        zap.Bool("isPipelinedTxn", isPipelinedTxn),
+    )
     if stopHeartBeat || keepFail > maxConsecutiveFailure {
         logutil.Logger(bo.GetCtx()).Warn("stop TxnHeartBeat",
             zap.Error(err),
             zap.Int("consecutiveFailure", keepFail),
-            zap.Uint64("txnStartTS", c.startTS))
+            zap.Uint64("txnStartTS", c.startTS),
+            zap.Bool("isPipelinedTxn", isPipelinedTxn),
+        )
         return
     }
     continue
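The three keepAlive hunks above also tag the ttlManager and TxnHeartBeat log lines with whether the transaction uses pipelined DML. A small standalone sketch of how such a structured zap call emits the new boolean field; the locally built logger and the sample values are illustrative only, while the real code logs through logutil.Logger(bo.GetCtx()):

package main

import "go.uber.org/zap"

func main() {
    // A locally constructed development logger stands in for the client's
    // logutil wrapper; the field layout mirrors the updated keepAlive calls.
    logger, err := zap.NewDevelopment()
    if err != nil {
        panic(err)
    }
    defer logger.Sync()

    var (
        startTS        uint64 = 434567890123456 // sample values, not real data
        newTTL         uint64 = 20000
        isPipelinedTxn        = true
    )
    logger.Info("send TxnHeartBeat",
        zap.Uint64("startTS", startTS),
        zap.Uint64("newTTL", newTTL),
        zap.Bool("isPipelinedTxn", isPipelinedTxn), // field added by this commit
    )
}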
@@ -215,6 +215,7 @@ func (action actionPipelinedFlush) handleSingleBatch(
     }
     locks := make([]*txnlock.Lock, 0, len(keyErrs))
 
+    logged := make(map[uint64]struct{}, 1)
     for _, keyErr := range keyErrs {
         // Check already exists error
         if alreadyExist := keyErr.GetAlreadyExist(); alreadyExist != nil {
@@ -227,12 +228,16 @@ func (action actionPipelinedFlush) handleSingleBatch(
     if err1 != nil {
         return err1
     }
-    logutil.Logger(bo.GetCtx()).Info(
-        "[pipelined dml] encounters lock",
-        zap.Uint64("txnID", c.startTS),
-        zap.Uint64("generation", action.generation),
-        zap.Stringer("lock", lock),
-    )
+    if _, ok := logged[lock.TxnID]; !ok {
+        logutil.Logger(bo.GetCtx()).Info(
+            "[pipelined dml] flush encounters lock. "+
+                "More locks belonging to the same transaction may be omitted",
+            zap.Uint64("txnID", c.startTS),
+            zap.Uint64("generation", action.generation),
+            zap.Stringer("lock", lock),
+        )
+        logged[lock.TxnID] = struct{}{}
+    }
     // If an optimistic transaction encounters a lock with larger TS, this transaction will certainly
     // fail due to a WriteConflict error. So we can construct and return an error here early.
     // Pessimistic transactions don't need such an optimization. If this key needs a pessimistic lock,
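The prewrite hunks that follow apply the same once-per-transaction guard as the flush path above; the visible differences are that prewrite logs through logutil.BgLogger() with the session ID rather than the context logger with the flush generation, and that its logged map is created without a capacity hint.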
@@ -429,6 +429,7 @@ func (action actionPrewrite) handleSingleBatch(
         return nil
     }
     var locks []*txnlock.Lock
+    logged := make(map[uint64]struct{})
     for _, keyErr := range keyErrs {
         // Check already exists error
         if alreadyExist := keyErr.GetAlreadyExist(); alreadyExist != nil {
@@ -441,12 +442,16 @@ func (action actionPrewrite) handleSingleBatch(
     if err1 != nil {
         return err1
     }
-    logutil.BgLogger().Info(
-        "prewrite encounters lock",
-        zap.Uint64("session", c.sessionID),
-        zap.Uint64("txnID", c.startTS),
-        zap.Stringer("lock", lock),
-    )
+    if _, ok := logged[lock.TxnID]; !ok {
+        logutil.BgLogger().Info(
+            "prewrite encounters lock. "+
+                "More locks belonging to the same transaction may be omitted",
+            zap.Uint64("session", c.sessionID),
+            zap.Uint64("txnID", c.startTS),
+            zap.Stringer("lock", lock),
+        )
+        logged[lock.TxnID] = struct{}{}
+    }
     // If an optimistic transaction encounters a lock with larger TS, this transaction will certainly
     // fail due to a WriteConflict error. So we can construct and return an error here early.
     // Pessimistic transactions don't need such an optimization. If this key needs a pessimistic lock,