Bump go.uber.org/zap from 1.19.1 to 1.24.0 (#2774)

* Bump go.uber.org/zap from 1.19.1 to 1.24.0

Bumps [go.uber.org/zap](https://github.com/uber-go/zap) from 1.19.1 to 1.24.0.
- [Release notes](https://github.com/uber-go/zap/releases)
- [Changelog](https://github.com/uber-go/zap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/uber-go/zap/compare/v1.19.1...v1.24.0)

---
updated-dependencies:
- dependency-name: go.uber.org/zap
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Run ./hack/update-codegen.sh

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
dependabot[bot] 2023-08-02 01:38:52 +00:00 committed by GitHub
parent 7b50f3c433
commit 72f264817d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
45 changed files with 1700 additions and 377 deletions

3
go.mod

@ -31,7 +31,7 @@ require (
go.opencensus.io v0.24.0
go.uber.org/atomic v1.9.0
go.uber.org/automaxprocs v1.4.0
go.uber.org/zap v1.19.1
go.uber.org/zap v1.24.0
golang.org/x/net v0.12.0
golang.org/x/oauth2 v0.8.0
golang.org/x/sync v0.3.0
@ -57,6 +57,7 @@ require (
cloud.google.com/go v0.110.2 // indirect
cloud.google.com/go/compute v1.19.0 // indirect
cloud.google.com/go/iam v1.0.1 // indirect
github.com/benbjohnson/clock v1.1.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect

12
go.sum

@ -409,7 +409,6 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@ -426,15 +425,14 @@ go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0=
go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@ -479,7 +477,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
@ -517,7 +514,6 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@ -543,7 +539,6 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
@ -587,9 +582,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -661,7 +654,6 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8=
golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=

21
vendor/github.com/benbjohnson/clock/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Ben Johnson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

104
vendor/github.com/benbjohnson/clock/README.md generated vendored Normal file

@ -0,0 +1,104 @@
clock
=====
Clock is a small library for mocking time in Go. It provides an interface
around the standard library's [`time`][time] package so that the application
can use the realtime clock while tests can use the mock clock.
[time]: http://golang.org/pkg/time/
## Usage
### Realtime Clock
Your application can maintain a `Clock` variable that will allow realtime and
mock clocks to be interchangeable. For example, if you had an `Application` type:
```go
import "github.com/benbjohnson/clock"
type Application struct {
Clock clock.Clock
}
```
You could initialize it to use the realtime clock like this:
```go
var app Application
app.Clock = clock.New()
...
```
Then all timers and time-related functionality should be performed from the
`Clock` variable.
### Mocking time
In your tests, you will want to use a `Mock` clock:
```go
import (
"testing"
"github.com/benbjohnson/clock"
)
func TestApplication_DoSomething(t *testing.T) {
mock := clock.NewMock()
app := Application{Clock: mock}
...
}
```
Now that you've initialized your application to use the mock clock, you can
adjust the time programmatically. The mock clock always starts from the Unix
epoch (midnight UTC on Jan 1, 1970).
### Controlling time
The mock clock provides the same functions that the standard library's `time`
package provides. For example, to find the current time, you use the `Now()`
function:
```go
mock := clock.NewMock()
// Find the current time.
mock.Now().UTC() // 1970-01-01 00:00:00 +0000 UTC
// Move the clock forward.
mock.Add(2 * time.Hour)
// Check the time again. It's 2 hours later!
mock.Now().UTC() // 1970-01-01 02:00:00 +0000 UTC
```
Timers and Tickers are also controlled by this same mock clock. They will only
execute when the clock is moved forward:
```go
mock := clock.NewMock()
count := 0
// Kick off a timer to increment every 1 mock second.
go func() {
ticker := mock.Ticker(1 * time.Second)
for {
<-ticker.C
count++
}
}()
runtime.Gosched()
// Move the clock forward 10 seconds.
mock.Add(10 * time.Second)
// This prints 10.
fmt.Println(count)
```

340
vendor/github.com/benbjohnson/clock/clock.go generated vendored Normal file

@ -0,0 +1,340 @@
package clock
import (
"sort"
"sync"
"time"
)
// Clock represents an interface to the functions in the standard library time
// package. Two implementations are available in the clock package. The first
// is a real-time clock which simply wraps the time package's functions. The
// second is a mock clock which will only change when
// programmatically adjusted.
type Clock interface {
After(d time.Duration) <-chan time.Time
AfterFunc(d time.Duration, f func()) *Timer
Now() time.Time
Since(t time.Time) time.Duration
Sleep(d time.Duration)
Tick(d time.Duration) <-chan time.Time
Ticker(d time.Duration) *Ticker
Timer(d time.Duration) *Timer
}
// New returns an instance of a real-time clock.
func New() Clock {
return &clock{}
}
// clock implements a real-time clock by simply wrapping the time package functions.
type clock struct{}
func (c *clock) After(d time.Duration) <-chan time.Time { return time.After(d) }
func (c *clock) AfterFunc(d time.Duration, f func()) *Timer {
return &Timer{timer: time.AfterFunc(d, f)}
}
func (c *clock) Now() time.Time { return time.Now() }
func (c *clock) Since(t time.Time) time.Duration { return time.Since(t) }
func (c *clock) Sleep(d time.Duration) { time.Sleep(d) }
func (c *clock) Tick(d time.Duration) <-chan time.Time { return time.Tick(d) }
func (c *clock) Ticker(d time.Duration) *Ticker {
t := time.NewTicker(d)
return &Ticker{C: t.C, ticker: t}
}
func (c *clock) Timer(d time.Duration) *Timer {
t := time.NewTimer(d)
return &Timer{C: t.C, timer: t}
}
// Mock represents a mock clock that only moves forward programmatically.
// It can be preferable to a real-time clock when testing time-based functionality.
type Mock struct {
mu sync.Mutex
now time.Time // current time
timers clockTimers // tickers & timers
}
// NewMock returns an instance of a mock clock.
// The current time of the mock clock on initialization is the Unix epoch.
func NewMock() *Mock {
return &Mock{now: time.Unix(0, 0)}
}
// Add moves the current time of the mock clock forward by the specified duration.
// This should only be called from a single goroutine at a time.
func (m *Mock) Add(d time.Duration) {
// Calculate the final current time.
t := m.now.Add(d)
// Continue to execute timers until there are no more before the new time.
for {
if !m.runNextTimer(t) {
break
}
}
// Ensure that we end with the new time.
m.mu.Lock()
m.now = t
m.mu.Unlock()
// Give a small buffer to make sure that other goroutines get handled.
gosched()
}
// Set sets the current time of the mock clock to a specific one.
// This should only be called from a single goroutine at a time.
func (m *Mock) Set(t time.Time) {
// Continue to execute timers until there are no more before the new time.
for {
if !m.runNextTimer(t) {
break
}
}
// Ensure that we end with the new time.
m.mu.Lock()
m.now = t
m.mu.Unlock()
// Give a small buffer to make sure that other goroutines get handled.
gosched()
}
// runNextTimer executes the next timer in chronological order and moves the
// current time to the timer's next tick time. The timer is not executed if
// its next tick time is after the max time. Returns true if a timer was executed.
func (m *Mock) runNextTimer(max time.Time) bool {
m.mu.Lock()
// Sort timers by time.
sort.Sort(m.timers)
// If we have no more timers then exit.
if len(m.timers) == 0 {
m.mu.Unlock()
return false
}
// Retrieve next timer. Exit if next tick is after new time.
t := m.timers[0]
if t.Next().After(max) {
m.mu.Unlock()
return false
}
// Move "now" forward and unlock clock.
m.now = t.Next()
m.mu.Unlock()
// Execute timer.
t.Tick(m.now)
return true
}
// After waits for the duration to elapse and then sends the current time on the returned channel.
func (m *Mock) After(d time.Duration) <-chan time.Time {
return m.Timer(d).C
}
// AfterFunc waits for the duration to elapse and then executes a function.
// A Timer is returned that can be stopped.
func (m *Mock) AfterFunc(d time.Duration, f func()) *Timer {
t := m.Timer(d)
t.C = nil
t.fn = f
return t
}
// Now returns the current wall time on the mock clock.
func (m *Mock) Now() time.Time {
m.mu.Lock()
defer m.mu.Unlock()
return m.now
}
// Since returns time since the mock clock's wall time.
func (m *Mock) Since(t time.Time) time.Duration {
return m.Now().Sub(t)
}
// Sleep pauses the goroutine for the given duration on the mock clock.
// The clock must be moved forward in a separate goroutine.
func (m *Mock) Sleep(d time.Duration) {
<-m.After(d)
}
// Tick is a convenience function for Ticker().
// It will return a ticker channel that cannot be stopped.
func (m *Mock) Tick(d time.Duration) <-chan time.Time {
return m.Ticker(d).C
}
// Ticker creates a new instance of Ticker.
func (m *Mock) Ticker(d time.Duration) *Ticker {
m.mu.Lock()
defer m.mu.Unlock()
ch := make(chan time.Time, 1)
t := &Ticker{
C: ch,
c: ch,
mock: m,
d: d,
next: m.now.Add(d),
}
m.timers = append(m.timers, (*internalTicker)(t))
return t
}
// Timer creates a new instance of Timer.
func (m *Mock) Timer(d time.Duration) *Timer {
m.mu.Lock()
defer m.mu.Unlock()
ch := make(chan time.Time, 1)
t := &Timer{
C: ch,
c: ch,
mock: m,
next: m.now.Add(d),
stopped: false,
}
m.timers = append(m.timers, (*internalTimer)(t))
return t
}
func (m *Mock) removeClockTimer(t clockTimer) {
for i, timer := range m.timers {
if timer == t {
copy(m.timers[i:], m.timers[i+1:])
m.timers[len(m.timers)-1] = nil
m.timers = m.timers[:len(m.timers)-1]
break
}
}
sort.Sort(m.timers)
}
// clockTimer represents an object with an associated start time.
type clockTimer interface {
Next() time.Time
Tick(time.Time)
}
// clockTimers represents a list of sortable timers.
type clockTimers []clockTimer
func (a clockTimers) Len() int { return len(a) }
func (a clockTimers) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a clockTimers) Less(i, j int) bool { return a[i].Next().Before(a[j].Next()) }
// Timer represents a single event.
// The current time will be sent on C, unless the timer was created by AfterFunc.
type Timer struct {
C <-chan time.Time
c chan time.Time
timer *time.Timer // realtime impl, if set
next time.Time // next tick time
mock *Mock // mock clock, if set
fn func() // AfterFunc function, if set
stopped bool // True if stopped, false if running
}
// Stop turns off the timer.
func (t *Timer) Stop() bool {
if t.timer != nil {
return t.timer.Stop()
}
t.mock.mu.Lock()
registered := !t.stopped
t.mock.removeClockTimer((*internalTimer)(t))
t.stopped = true
t.mock.mu.Unlock()
return registered
}
// Reset changes the expiry time of the timer
func (t *Timer) Reset(d time.Duration) bool {
if t.timer != nil {
return t.timer.Reset(d)
}
t.mock.mu.Lock()
t.next = t.mock.now.Add(d)
defer t.mock.mu.Unlock()
registered := !t.stopped
if t.stopped {
t.mock.timers = append(t.mock.timers, (*internalTimer)(t))
}
t.stopped = false
return registered
}
type internalTimer Timer
func (t *internalTimer) Next() time.Time { return t.next }
func (t *internalTimer) Tick(now time.Time) {
t.mock.mu.Lock()
if t.fn != nil {
t.fn()
} else {
t.c <- now
}
t.mock.removeClockTimer((*internalTimer)(t))
t.stopped = true
t.mock.mu.Unlock()
gosched()
}
// Ticker holds a channel that receives "ticks" at regular intervals.
type Ticker struct {
C <-chan time.Time
c chan time.Time
ticker *time.Ticker // realtime impl, if set
next time.Time // next tick time
mock *Mock // mock clock, if set
d time.Duration // time between ticks
}
// Stop turns off the ticker.
func (t *Ticker) Stop() {
if t.ticker != nil {
t.ticker.Stop()
} else {
t.mock.mu.Lock()
t.mock.removeClockTimer((*internalTicker)(t))
t.mock.mu.Unlock()
}
}
// Reset resets the ticker to a new duration.
func (t *Ticker) Reset(dur time.Duration) {
if t.ticker != nil {
t.ticker.Reset(dur)
}
}
type internalTicker Ticker
func (t *internalTicker) Next() time.Time { return t.next }
func (t *internalTicker) Tick(now time.Time) {
select {
case t.c <- now:
default:
}
t.next = now.Add(t.d)
gosched()
}
// Sleep momentarily so that other goroutines can process.
func gosched() { time.Sleep(1 * time.Millisecond) }

12
vendor/go.uber.org/zap/.readme.tmpl generated vendored

@ -96,14 +96,14 @@ Released under the [MIT License](LICENSE.txt).
<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are
pinned in zap's [glide.lock][] file. [↩](#anchor-versions)
pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
[doc-img]: https://godoc.org/go.uber.org/zap?status.svg
[doc]: https://godoc.org/go.uber.org/zap
[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master
[ci]: https://travis-ci.com/uber-go/zap
[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
[doc]: https://pkg.go.dev/go.uber.org/zap
[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
[cov]: https://codecov.io/gh/uber-go/zap
[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock
[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod

103
vendor/go.uber.org/zap/CHANGELOG.md generated vendored

@ -3,9 +3,110 @@ All notable changes to this project will be documented in this file.
This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## 1.24.0 (30 Nov 2022)
Enhancements:
* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the
current minimum enabled log level.
* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically.
Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
contributions to this release.
[#1148]: https://github.com/uber-go/zap/pull/1148
[#1185]: https://github.com/uber-go/zap/pull/1185
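Not part of the upstream diff: a minimal sketch of how the 1.24.0 additions above might be exercised, assuming the `Logger.Level` method added in this commit's `logger.go` hunk and the sugared logger's automatic handling of bare `error` values; the message text and field names are illustrative only.
```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	// Logger.Level (new in 1.24.0) reports the minimum enabled level.
	if logger.Level() == zap.DebugLevel {
		logger.Debug("debug logging is enabled")
	}

	// As of 1.24.0 the SugaredLogger wraps a bare error passed in the
	// keys-and-values list in a zap.Error field instead of rejecting it.
	logger.Sugar().Infow("fetch failed", "attempt", 3, errors.New("connection reset"))
}
```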
## 1.23.0 (24 Aug 2022)
Enhancements:
* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a
`LevelEnabler` or `Core`.
* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects
that implement `String() string`.
[#1147]: https://github.com/uber-go/zap/pull/1147
[#1155]: https://github.com/uber-go/zap/pull/1155
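Again not part of the diff: a brief sketch of the 1.23.0 additions, using `net.IP` purely as an example of a value type that implements `fmt.Stringer`.
```go
package main

import (
	"net"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	// zapcore.LevelOf reports the level of a Core or LevelEnabler that can
	// report one; otherwise it returns zapcore.InvalidLevel.
	lvl := zapcore.LevelOf(logger.Core())
	logger.Info("core level", zap.Stringer("level", lvl))

	// zap.Stringers logs a slice of values whose type implements fmt.Stringer.
	peers := []net.IP{net.IPv4(10, 0, 0, 1), net.IPv4(10, 0, 0, 2)}
	logger.Info("connected", zap.Stringers("peers", peers))
}
```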
## 1.22.0 (8 Aug 2022)
Enhancements:
* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log
arrays of objects. With these two constructors, you don't need to implement
`zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement
`zapcore.ObjectMarshaler`.
* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing
`SugaredLogger` with the provided options applied.
* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level.
These functions provide a string joining behavior similar to `fmt.Println`.
* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the
logger for `Fatal`-level log entries. This defaults to exiting the program.
* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or
`NewDevelopment` to panic if the system was unable to build the logger.
* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for
a statement dynamically.
Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun
for their contributions to this release.
[#1071]: https://github.com/uber-go/zap/pull/1071
[#1079]: https://github.com/uber-go/zap/pull/1079
[#1080]: https://github.com/uber-go/zap/pull/1080
[#1088]: https://github.com/uber-go/zap/pull/1088
[#1108]: https://github.com/uber-go/zap/pull/1108
[#1118]: https://github.com/uber-go/zap/pull/1118
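For context only, a hedged sketch combining a few of the 1.22.0 additions (`zap.Must`, `Logger.Log`, `zap.Objects`, and the sugared `*ln` variants); the `author` type and its field are made up for illustration.
```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// author implements zapcore.ObjectMarshaler on the value receiver, so a
// []author can be logged with zap.Objects.
type author struct{ Name string }

func (a author) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", a.Name)
	return nil
}

func main() {
	// zap.Must panics instead of returning an error, which is convenient
	// for package-level logger variables.
	logger := zap.Must(zap.NewDevelopment())
	defer logger.Sync()

	// Logger.Log takes the level as a runtime value.
	lvl := zapcore.InfoLevel
	logger.Log(lvl, "loading article",
		zap.Objects("authors", []author{{Name: "ada"}, {Name: "grace"}}))

	// SugaredLogger gained fmt.Println-style *ln variants.
	logger.Sugar().Infoln("article", "loaded")
}
```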
## 1.21.0 (7 Feb 2022)
Enhancements:
* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string.
* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a
string.
Bugfixes:
* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset.
Other changes:
* [#1052][]: Improve encoding performance when the `AddCaller` and
`AddStacktrace` options are used together.
[#1047]: https://github.com/uber-go/zap/pull/1047
[#1048]: https://github.com/uber-go/zap/pull/1048
[#1052]: https://github.com/uber-go/zap/pull/1052
[#1058]: https://github.com/uber-go/zap/pull/1058
Thanks to @aerosol and @Techassi for their contributions to this release.
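Illustrative only: parsing log levels from text with the 1.21.0 helpers. `LOG_LEVEL` is an assumed environment variable name, not something this commit introduces.
```go
package main

import (
	"log"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// zapcore.ParseLevel turns a textual level into a zapcore.Level.
	lvl, err := zapcore.ParseLevel("warn")
	if err != nil {
		log.Fatal(err)
	}

	// zap.ParseAtomicLevel does the same for an AtomicLevel, which can be
	// adjusted at runtime. Fall back to the parsed static level if the
	// environment variable is unset or invalid.
	atom, err := zap.ParseAtomicLevel(os.Getenv("LOG_LEVEL"))
	if err != nil {
		atom = zap.NewAtomicLevelAt(lvl)
	}

	cfg := zap.NewProductionConfig()
	cfg.Level = atom
	logger, err := cfg.Build()
	if err != nil {
		log.Fatal(err)
	}
	defer logger.Sync()
	logger.Warn("logger built with a parsed level")
}
```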
## 1.20.0 (4 Jan 2022)
Enhancements:
* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline
characters between log statements.
* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON
encoding of reflected log fields.
Bugfixes:
* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON.
* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject`
methods when the methods return.
* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero.
Other changes:
* [#1028][]: Drop support for Go < 1.15.
[#554]: https://github.com/uber-go/zap/pull/554
[#989]: https://github.com/uber-go/zap/pull/989
[#1011]: https://github.com/uber-go/zap/pull/1011
[#1017]: https://github.com/uber-go/zap/pull/1017
[#1028]: https://github.com/uber-go/zap/pull/1028
[#1033]: https://github.com/uber-go/zap/pull/1033
[#1039]: https://github.com/uber-go/zap/pull/1039
Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release.
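A rough sketch of the 1.20.0 behavior, on the assumption that `SkipLineEnding` is a boolean field on `zapcore.EncoderConfig` as the entry above describes; none of this appears in the vendored hunks of this commit.
```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Start from the production defaults and opt out of the trailing
	// newline between JSON log statements (EncoderConfig.SkipLineEnding).
	encCfg := zap.NewProductionEncoderConfig()
	encCfg.SkipLineEnding = true

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(encCfg),
		zapcore.AddSync(os.Stdout),
		zapcore.InfoLevel,
	)
	logger := zap.New(core)
	defer logger.Sync()
	logger.Info("no newline is appended after this entry")
}
```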
## 1.19.1 (8 Sep 2021)
### Fixed
Bugfixes:
* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon.
* [#1003][]: JSON: Fix inaccurate precision when encoding float32.


@ -16,7 +16,7 @@ you to accept the CLA when you open your pull request.
[Fork][fork], then clone the repository:
```
```bash
mkdir -p $GOPATH/src/go.uber.org
cd $GOPATH/src/go.uber.org
git clone git@github.com:your_github_username/zap.git
@ -27,21 +27,16 @@ git fetch upstream
Make sure that the tests and the linters pass:
```
```bash
make test
make lint
```
If you're not using the minor version of Go specified in the Makefile's
`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is
fine, but it means that you'll only discover lint failures after you open your
pull request.
## Making Changes
Start by creating a new branch for your changes:
```
```bash
cd $GOPATH/src/go.uber.org/zap
git checkout master
git fetch upstream
@ -52,22 +47,22 @@ git checkout -b cool_new_feature
Make your changes, then ensure that `make lint` and `make test` still pass. If
you're satisfied with your changes, push them to your fork.
```
```bash
git push origin cool_new_feature
```
Then use the GitHub UI to open a pull request.
At this point, you're waiting on us to review your changes. We *try* to respond
At this point, you're waiting on us to review your changes. We _try_ to respond
to issues and pull requests within a few business days, and we may suggest some
improvements or alternatives. Once your changes are approved, one of the
project maintainers will merge them.
We're much more likely to approve your changes if you:
* Add tests for new functionality.
* Write a [good commit message][commit-message].
* Maintain backward compatibility.
- Add tests for new functionality.
- Write a [good commit message][commit-message].
- Maintain backward compatibility.
[fork]: https://github.com/uber-go/zap/fork
[open-issue]: https://github.com/uber-go/zap/issues/new

59
vendor/go.uber.org/zap/README.md generated vendored
View File

@ -54,7 +54,7 @@ and make many small allocations. Put differently, using `encoding/json` and
Zap takes a different approach. It includes a reflection-free, zero-allocation
JSON encoder, and the base `Logger` strives to avoid serialization overhead
and allocations wherever possible. By building the high-level `SugaredLogger`
on that foundation, zap lets users *choose* when they need to count every
on that foundation, zap lets users _choose_ when they need to count every
allocation and when they'd prefer a more familiar, loosely typed API.
As measured by its own [benchmarking suite][], not only is zap more performant
@ -64,40 +64,40 @@ id="anchor-versions">[1](#footnote-versions)</sup>
Log a message and 10 fields:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 862 ns/op | +0% | 5 allocs/op
| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op
| zerolog | 4021 ns/op | +366% | 76 allocs/op
| go-kit | 4542 ns/op | +427% | 105 allocs/op
| apex/log | 26785 ns/op | +3007% | 115 allocs/op
| logrus | 29501 ns/op | +3322% | 125 allocs/op
| log15 | 29906 ns/op | +3369% | 122 allocs/op
| Package | Time | Time % to zap | Objects Allocated |
| :------------------ | :---------: | :-----------: | :---------------: |
| :zap: zap | 2900 ns/op | +0% | 5 allocs/op |
| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op |
| zerolog | 10639 ns/op | +267% | 32 allocs/op |
| go-kit | 14434 ns/op | +398% | 59 allocs/op |
| logrus | 17104 ns/op | +490% | 81 allocs/op |
| apex/log | 32424 ns/op | +1018% | 66 allocs/op |
| log15 | 33579 ns/op | +1058% | 76 allocs/op |
Log a message with a logger that already has 10 fields of context:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 126 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op
| zerolog | 88 ns/op | -30% | 0 allocs/op
| go-kit | 5087 ns/op | +3937% | 103 allocs/op
| log15 | 18548 ns/op | +14621% | 73 allocs/op
| apex/log | 26012 ns/op | +20544% | 104 allocs/op
| logrus | 27236 ns/op | +21516% | 113 allocs/op
| Package | Time | Time % to zap | Objects Allocated |
| :------------------ | :---------: | :-----------: | :---------------: |
| :zap: zap | 373 ns/op | +0% | 0 allocs/op |
| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op |
| zerolog | 288 ns/op | -23% | 0 allocs/op |
| go-kit | 11785 ns/op | +3060% | 58 allocs/op |
| logrus | 19629 ns/op | +5162% | 70 allocs/op |
| log15 | 21866 ns/op | +5762% | 72 allocs/op |
| apex/log | 30890 ns/op | +8182% | 55 allocs/op |
Log a static string, without any context or `printf`-style templating:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
| :zap: zap | 118 ns/op | +0% | 0 allocs/op
| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op
| zerolog | 93 ns/op | -21% | 0 allocs/op
| go-kit | 280 ns/op | +137% | 11 allocs/op
| standard library | 499 ns/op | +323% | 2 allocs/op
| apex/log | 1990 ns/op | +1586% | 10 allocs/op
| logrus | 3129 ns/op | +2552% | 24 allocs/op
| log15 | 3887 ns/op | +3194% | 23 allocs/op
| Package | Time | Time % to zap | Objects Allocated |
| :------------------ | :--------: | :-----------: | :---------------: |
| :zap: zap | 381 ns/op | +0% | 0 allocs/op |
| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op |
| zerolog | 369 ns/op | -3% | 0 allocs/op |
| standard library | 385 ns/op | +1% | 2 allocs/op |
| go-kit | 606 ns/op | +59% | 11 allocs/op |
| logrus | 1730 ns/op | +354% | 25 allocs/op |
| apex/log | 1998 ns/op | +424% | 7 allocs/op |
| log15 | 4546 ns/op | +1093% | 22 allocs/op |
## Development Status: Stable
@ -131,4 +131,3 @@ pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
[cov]: https://codecov.io/gh/uber-go/zap
[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod

156
vendor/go.uber.org/zap/array_go118.go generated vendored Normal file
View File

@ -0,0 +1,156 @@
// Copyright (c) 2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:build go1.18
// +build go1.18
package zap
import (
"fmt"
"go.uber.org/zap/zapcore"
)
// Objects constructs a field with the given key, holding a list of the
// provided objects that can be marshaled by Zap.
//
// Note that these objects must implement zapcore.ObjectMarshaler directly.
// That is, if you're trying to marshal a []Request, the MarshalLogObject
// method must be declared on the Request type, not its pointer (*Request).
// If it's on the pointer, use ObjectValues.
//
// Given an object that implements MarshalLogObject on the value receiver, you
// can log a slice of those objects with Objects like so:
//
// type Author struct{ ... }
// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
// var authors []Author = ...
// logger.Info("loading article", zap.Objects("authors", authors))
//
// Similarly, given a type that implements MarshalLogObject on its pointer
// receiver, you can log a slice of pointers to that object with Objects like
// so:
//
// type Request struct{ ... }
// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
// var requests []*Request = ...
// logger.Info("sending requests", zap.Objects("requests", requests))
//
// If instead, you have a slice of values of such an object, use the
// ObjectValues constructor.
//
// var requests []Request = ...
// logger.Info("sending requests", zap.ObjectValues("requests", requests))
func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
return Array(key, objects[T](values))
}
type objects[T zapcore.ObjectMarshaler] []T
func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for _, o := range os {
if err := arr.AppendObject(o); err != nil {
return err
}
}
return nil
}
// ObjectMarshalerPtr is a constraint that specifies that the given type
// implements zapcore.ObjectMarshaler on a pointer receiver.
type ObjectMarshalerPtr[T any] interface {
*T
zapcore.ObjectMarshaler
}
// ObjectValues constructs a field with the given key, holding a list of the
// provided objects, where pointers to these objects can be marshaled by Zap.
//
// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
// That is, if you're trying to marshal a []Request, the MarshalLogObject
// method must be declared on the *Request type, not the value (Request).
// If it's on the value, use Objects.
//
// Given an object that implements MarshalLogObject on the pointer receiver,
// you can log a slice of those objects with ObjectValues like so:
//
// type Request struct{ ... }
// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
// var requests []Request = ...
// logger.Info("sending requests", zap.ObjectValues("requests", requests))
//
// If instead, you have a slice of pointers of such an object, use the Objects
// field constructor.
//
// var requests []*Request = ...
// logger.Info("sending requests", zap.Objects("requests", requests))
func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
return Array(key, objectValues[T, P](values))
}
type objectValues[T any, P ObjectMarshalerPtr[T]] []T
func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for i := range os {
// It is necessary for us to explicitly reference the "P" type.
// We cannot simply pass "&os[i]" to AppendObject because its type
// is "*T", which the type system does not consider as
// implementing ObjectMarshaler.
// Only the type "P" satisfies ObjectMarshaler, which we have
// to convert "*T" to explicitly.
var p P = &os[i]
if err := arr.AppendObject(p); err != nil {
return err
}
}
return nil
}
// Stringers constructs a field with the given key, holding a list of the
// output provided by the value's String method.
//
// Given an object that implements String on the value receiver, you
// can log a slice of those objects with Stringers like so:
//
// type Request struct{ ... }
// func (a Request) String() string
//
// var requests []Request = ...
// logger.Info("sending requests", zap.Stringers("requests", requests))
//
// Note that these objects must implement fmt.Stringer directly.
// That is, if you're trying to marshal a []Request, the String method
// must be declared on the Request type, not its pointer (*Request).
func Stringers[T fmt.Stringer](key string, values []T) Field {
return Array(key, stringers[T](values))
}
type stringers[T fmt.Stringer] []T
func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for _, o := range os {
arr.AppendString(o.String())
}
return nil
}

4
vendor/go.uber.org/zap/config.go generated vendored

@ -21,7 +21,7 @@
package zap
import (
"fmt"
"errors"
"sort"
"time"
@ -182,7 +182,7 @@ func (cfg Config) Build(opts ...Option) (*Logger, error) {
}
if cfg.Level == (AtomicLevel{}) {
return nil, fmt.Errorf("missing Level")
return nil, errors.New("missing Level")
}
log := New(

60
vendor/go.uber.org/zap/doc.go generated vendored

@ -32,7 +32,7 @@
// they need to count every allocation and when they'd prefer a more familiar,
// loosely typed API.
//
// Choosing a Logger
// # Choosing a Logger
//
// In contexts where performance is nice, but not critical, use the
// SugaredLogger. It's 4-10x faster than other structured logging packages and
@ -41,14 +41,15 @@
// variadic number of key-value pairs. (For more advanced use cases, they also
// accept strongly typed fields - see the SugaredLogger.With documentation for
// details.)
// sugar := zap.NewExample().Sugar()
// defer sugar.Sync()
// sugar.Infow("failed to fetch URL",
// "url", "http://example.com",
// "attempt", 3,
// "backoff", time.Second,
// )
// sugar.Infof("failed to fetch URL: %s", "http://example.com")
//
// sugar := zap.NewExample().Sugar()
// defer sugar.Sync()
// sugar.Infow("failed to fetch URL",
// "url", "http://example.com",
// "attempt", 3,
// "backoff", time.Second,
// )
// sugar.Infof("failed to fetch URL: %s", "http://example.com")
//
// By default, loggers are unbuffered. However, since zap's low-level APIs
// allow buffering, calling Sync before letting your process exit is a good
@ -57,32 +58,35 @@
// In the rare contexts where every microsecond and every allocation matter,
// use the Logger. It's even faster than the SugaredLogger and allocates far
// less, but it only supports strongly-typed, structured logging.
// logger := zap.NewExample()
// defer logger.Sync()
// logger.Info("failed to fetch URL",
// zap.String("url", "http://example.com"),
// zap.Int("attempt", 3),
// zap.Duration("backoff", time.Second),
// )
//
// logger := zap.NewExample()
// defer logger.Sync()
// logger.Info("failed to fetch URL",
// zap.String("url", "http://example.com"),
// zap.Int("attempt", 3),
// zap.Duration("backoff", time.Second),
// )
//
// Choosing between the Logger and SugaredLogger doesn't need to be an
// application-wide decision: converting between the two is simple and
// inexpensive.
// logger := zap.NewExample()
// defer logger.Sync()
// sugar := logger.Sugar()
// plain := sugar.Desugar()
//
// Configuring Zap
// logger := zap.NewExample()
// defer logger.Sync()
// sugar := logger.Sugar()
// plain := sugar.Desugar()
//
// # Configuring Zap
//
// The simplest way to build a Logger is to use zap's opinionated presets:
// NewExample, NewProduction, and NewDevelopment. These presets build a logger
// with a single function call:
// logger, err := zap.NewProduction()
// if err != nil {
// log.Fatalf("can't initialize zap logger: %v", err)
// }
// defer logger.Sync()
//
// logger, err := zap.NewProduction()
// if err != nil {
// log.Fatalf("can't initialize zap logger: %v", err)
// }
// defer logger.Sync()
//
// Presets are fine for small projects, but larger projects and organizations
// naturally require a bit more customization. For most users, zap's Config
@ -94,7 +98,7 @@
// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
// example for sample code.
//
// Extending Zap
// # Extending Zap
//
// The zap package itself is a relatively thin wrapper around the interfaces
// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
@ -106,7 +110,7 @@
// Similarly, package authors can use the high-performance Encoder and Core
// implementations in the zapcore package to build their own loggers.
//
// Frequently Asked Questions
// # Frequently Asked Questions
//
// An FAQ covering everything from installation errors to design decisions is
// available at https://github.com/uber-go/zap/blob/master/FAQ.md.

2
vendor/go.uber.org/zap/encoder.go generated vendored

@ -63,7 +63,7 @@ func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapco
func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil {
return nil, fmt.Errorf("missing EncodeTime in EncoderConfig")
return nil, errors.New("missing EncodeTime in EncoderConfig")
}
_encoderMutex.RLock()

1
vendor/go.uber.org/zap/global.go generated vendored

@ -31,6 +31,7 @@ import (
)
const (
_stdLogDefaultDepth = 1
_loggerWriterDepth = 2
_programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
"https://github.com/uber-go/zap/issues/new and reference this error: %v"


@ -22,6 +22,7 @@ package zap
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@ -32,22 +33,23 @@ import (
// ServeHTTP is a simple JSON endpoint that can report on or change the current
// logging level.
//
// GET
// # GET
//
// The GET request returns a JSON description of the current logging level like:
// {"level":"info"}
//
// PUT
// {"level":"info"}
//
// # PUT
//
// The PUT request changes the logging level. It is perfectly safe to change the
// logging level while a program is running. Two content types are supported:
//
// Content-Type: application/x-www-form-urlencoded
// Content-Type: application/x-www-form-urlencoded
//
// With this content type, the level can be provided through the request body or
// a query parameter. The log level is URL encoded like:
//
// level=debug
// level=debug
//
// The request body takes precedence over the query parameter, if both are
// specified.
@ -55,18 +57,17 @@ import (
// This content type is the default for a curl PUT request. Following are two
// example curl requests that both set the logging level to debug.
//
// curl -X PUT localhost:8080/log/level?level=debug
// curl -X PUT localhost:8080/log/level -d level=debug
// curl -X PUT localhost:8080/log/level?level=debug
// curl -X PUT localhost:8080/log/level -d level=debug
//
// For any other content type, the payload is expected to be JSON encoded and
// look like:
//
// {"level":"info"}
// {"level":"info"}
//
// An example curl request could look like this:
//
// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
//
// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
type errorResponse struct {
Error string `json:"error"`
@ -108,7 +109,7 @@ func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error
func decodePutURL(r *http.Request) (zapcore.Level, error) {
lvl := r.FormValue("level")
if lvl == "" {
return 0, fmt.Errorf("must specify logging level")
return 0, errors.New("must specify logging level")
}
var l zapcore.Level
if err := l.UnmarshalText([]byte(lvl)); err != nil {
@ -125,7 +126,7 @@ func decodePutJSON(body io.Reader) (zapcore.Level, error) {
return 0, fmt.Errorf("malformed request body: %v", err)
}
if pld.Level == nil {
return 0, fmt.Errorf("must specify logging level")
return 0, errors.New("must specify logging level")
}
return *pld.Level, nil


@ -24,24 +24,25 @@ package exit
import "os"
var real = func() { os.Exit(1) }
var _exit = os.Exit
// Exit normally terminates the process by calling os.Exit(1). If the package
// is stubbed, it instead records a call in the testing spy.
func Exit() {
real()
// With terminates the process by calling os.Exit(code). If the package is
// stubbed, it instead records a call in the testing spy.
func With(code int) {
_exit(code)
}
// A StubbedExit is a testing fake for os.Exit.
type StubbedExit struct {
Exited bool
prev func()
Code int
prev func(code int)
}
// Stub substitutes a fake for the call to os.Exit(1).
func Stub() *StubbedExit {
s := &StubbedExit{prev: real}
real = s.exit
s := &StubbedExit{prev: _exit}
_exit = s.exit
return s
}
@ -56,9 +57,10 @@ func WithStub(f func()) *StubbedExit {
// Unstub restores the previous exit function.
func (se *StubbedExit) Unstub() {
real = se.prev
_exit = se.prev
}
func (se *StubbedExit) exit() {
func (se *StubbedExit) exit(code int) {
se.Exited = true
se.Code = code
}


@ -1,4 +1,4 @@
// Copyright (c) 2019 Uber Technologies, Inc.
// Copyright (c) 2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -18,9 +18,18 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// See #682 for more information.
// +build go1.12
package internal
package zap
import "go.uber.org/zap/zapcore"
const _stdLogDefaultDepth = 1
// LeveledEnabler is an interface satisfied by LevelEnablers that are able to
// report their own level.
//
// This interface is defined to use more conveniently in tests and non-zapcore
// packages.
// This cannot be imported from zapcore because of the cyclic dependency.
type LeveledEnabler interface {
zapcore.LevelEnabler
Level() zapcore.Level
}

50
vendor/go.uber.org/zap/internal/ztest/clock.go generated vendored Normal file

@ -0,0 +1,50 @@
// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package ztest
import (
"time"
"github.com/benbjohnson/clock"
)
// MockClock provides control over the time.
type MockClock struct{ m *clock.Mock }
// NewMockClock builds a new mock clock that provides control of time.
func NewMockClock() *MockClock {
return &MockClock{clock.NewMock()}
}
// Now reports the current time.
func (c *MockClock) Now() time.Time {
return c.m.Now()
}
// NewTicker returns a time.Ticker that ticks at the specified frequency.
func (c *MockClock) NewTicker(d time.Duration) *time.Ticker {
return &time.Ticker{C: c.m.Ticker(d).C}
}
// Add progresses time by the given duration.
func (c *MockClock) Add(d time.Duration) {
c.m.Add(d)
}


@ -42,11 +42,11 @@ func Sleep(base time.Duration) {
// Initialize checks the environment and alters the timeout scale accordingly.
// It returns a function to undo the scaling.
func Initialize(factor string) func() {
original := _timeoutScale
fv, err := strconv.ParseFloat(factor, 64)
if err != nil {
panic(err)
}
original := _timeoutScale
_timeoutScale = fv
return func() { _timeoutScale = original }
}


@ -23,7 +23,7 @@ package ztest
import (
"bytes"
"errors"
"io/ioutil"
"io"
"strings"
)
@ -50,12 +50,12 @@ func (s *Syncer) Called() bool {
return s.called
}
// A Discarder sends all writes to ioutil.Discard.
// A Discarder sends all writes to io.Discard.
type Discarder struct{ Syncer }
// Write implements io.Writer.
func (d *Discarder) Write(b []byte) (int, error) {
return ioutil.Discard.Write(b)
return io.Discard.Write(b)
}
// FailWriter is a WriteSyncer that always returns an error on writes.

20
vendor/go.uber.org/zap/level.go generated vendored

@ -22,6 +22,7 @@ package zap
import (
"go.uber.org/atomic"
"go.uber.org/zap/internal"
"go.uber.org/zap/zapcore"
)
@ -70,6 +71,8 @@ type AtomicLevel struct {
l *atomic.Int32
}
var _ internal.LeveledEnabler = AtomicLevel{}
// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
// enabled.
func NewAtomicLevel() AtomicLevel {
@ -86,6 +89,23 @@ func NewAtomicLevelAt(l zapcore.Level) AtomicLevel {
return a
}
// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII
// representation of the log level. If the provided ASCII representation is
// invalid an error is returned.
//
// This is particularly useful when dealing with text input to configure log
// levels.
func ParseAtomicLevel(text string) (AtomicLevel, error) {
a := NewAtomicLevel()
l, err := zapcore.ParseLevel(text)
if err != nil {
return a, err
}
a.SetLevel(l)
return a, nil
}
// Enabled implements the zapcore.LevelEnabler interface, which allows the
// AtomicLevel to be used in place of traditional static levels.
func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {

126
vendor/go.uber.org/zap/logger.go generated vendored

@ -22,11 +22,11 @@ package zap
import (
"fmt"
"io/ioutil"
"io"
"os"
"runtime"
"strings"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/zapcore"
)
@ -42,7 +42,7 @@ type Logger struct {
development bool
addCaller bool
onFatal zapcore.CheckWriteAction // default is WriteThenFatal
onFatal zapcore.CheckWriteHook // default is WriteThenFatal
name string
errorOutput zapcore.WriteSyncer
@ -85,7 +85,7 @@ func New(core zapcore.Core, options ...Option) *Logger {
func NewNop() *Logger {
return &Logger{
core: zapcore.NewNopCore(),
errorOutput: zapcore.AddSync(ioutil.Discard),
errorOutput: zapcore.AddSync(io.Discard),
addStack: zapcore.FatalLevel + 1,
clock: zapcore.DefaultClock,
}
@ -107,6 +107,19 @@ func NewDevelopment(options ...Option) (*Logger, error) {
return NewDevelopmentConfig().Build(options...)
}
// Must is a helper that wraps a call to a function returning (*Logger, error)
// and panics if the error is non-nil. It is intended for use in variable
// initialization such as:
//
// var logger = zap.Must(zap.NewProduction())
func Must(logger *Logger, err error) *Logger {
if err != nil {
panic(err)
}
return logger
}
// NewExample builds a Logger that's designed for use in zap's testable
// examples. It writes DebugLevel and above logs to standard out as JSON, but
// omits the timestamp and calling function to keep example output
@ -170,6 +183,13 @@ func (log *Logger) With(fields ...Field) *Logger {
return l
}
// Level reports the minimum enabled level for this logger.
//
// For NopLoggers, this is [zapcore.InvalidLevel].
func (log *Logger) Level() zapcore.Level {
return zapcore.LevelOf(log.core)
}
// Check returns a CheckedEntry if logging a message at the specified level
// is enabled. It's a completely optional optimization; in high-performance
// applications, Check can help avoid allocating a slice to hold fields.
@ -177,6 +197,14 @@ func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
return log.check(lvl, msg)
}
// Log logs a message at the specified level. The message includes any fields
// passed at the log site, as well as any fields accumulated on the logger.
func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) {
if ce := log.check(lvl, msg); ce != nil {
ce.Write(fields...)
}
}
// Debug logs a message at DebugLevel. The message includes any fields passed
// at the log site, as well as any fields accumulated on the logger.
func (log *Logger) Debug(msg string, fields ...Field) {
@ -259,8 +287,10 @@ func (log *Logger) clone() *Logger {
}
func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// check must always be called directly by a method in the Logger interface
// (e.g., Check, Info, Fatal).
// Logger.check must always be called directly by a method in the
// Logger interface (e.g., Check, Info, Fatal).
// This skips Logger.check and the Info/Fatal/Check/etc. method that
// called it.
const callerSkipOffset = 2
// Check the level first to reduce the cost of disabled log calls.
@ -283,18 +313,27 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Set up any required terminal behavior.
switch ent.Level {
case zapcore.PanicLevel:
ce = ce.Should(ent, zapcore.WriteThenPanic)
ce = ce.After(ent, zapcore.WriteThenPanic)
case zapcore.FatalLevel:
onFatal := log.onFatal
// Noop is the default value for CheckWriteAction, and it leads to
// continued execution after a Fatal which is unexpected.
if onFatal == zapcore.WriteThenNoop {
// nil or WriteThenNoop will lead to continued execution after
// a Fatal log entry, which is unexpected. For example,
//
// f, err := os.Open(..)
// if err != nil {
// log.Fatal("cannot open", zap.Error(err))
// }
// fmt.Println(f.Name())
//
// The f.Name() will panic if we continue execution after the
// log.Fatal.
if onFatal == nil || onFatal == zapcore.WriteThenNoop {
onFatal = zapcore.WriteThenFatal
}
ce = ce.Should(ent, onFatal)
ce = ce.After(ent, onFatal)
case zapcore.DPanicLevel:
if log.development {
ce = ce.Should(ent, zapcore.WriteThenPanic)
ce = ce.After(ent, zapcore.WriteThenPanic)
}
}
@ -307,42 +346,55 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Thread the error output through to the CheckedEntry.
ce.ErrorOutput = log.errorOutput
if log.addCaller {
frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset)
if !defined {
addStack := log.addStack.Enabled(ce.Level)
if !log.addCaller && !addStack {
return ce
}
// Adding the caller or stack trace requires capturing the callers of
// this function. We'll share information between these two.
stackDepth := stacktraceFirst
if addStack {
stackDepth = stacktraceFull
}
stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth)
defer stack.Free()
if stack.Count() == 0 {
if log.addCaller {
fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
log.errorOutput.Sync()
}
return ce
}
ce.Entry.Caller = zapcore.EntryCaller{
Defined: defined,
frame, more := stack.Next()
if log.addCaller {
ce.Caller = zapcore.EntryCaller{
Defined: frame.PC != 0,
PC: frame.PC,
File: frame.File,
Line: frame.Line,
Function: frame.Function,
}
}
if log.addStack.Enabled(ce.Entry.Level) {
ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String
if addStack {
buffer := bufferpool.Get()
defer buffer.Free()
stackfmt := newStackFormatter(buffer)
// We've already extracted the first frame, so format that
// separately and defer to stackfmt for the rest.
stackfmt.FormatFrame(frame)
if more {
stackfmt.FormatStack(stack)
}
ce.Stack = buffer.String()
}
return ce
}
// getCallerFrame gets caller frame. The argument skip is the number of stack
// frames to ascend, with 0 identifying the caller of getCallerFrame. The
// boolean ok is false if it was not possible to recover the information.
//
// Note: This implementation is similar to runtime.Caller, but it returns the whole frame.
func getCallerFrame(skip int) (frame runtime.Frame, ok bool) {
const skipOffset = 2 // skip getCallerFrame and Callers
pc := make([]uintptr, 1)
numFrames := runtime.Callers(skip+skipOffset, pc)
if numFrames < 1 {
return
}
frame, _ = runtime.CallersFrames(pc).Next()
return frame, frame.PC != 0
}

vendor/go.uber.org/zap/options.go generated vendored

@ -133,9 +133,28 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
}
// OnFatal sets the action to take on fatal logs.
//
// Deprecated: Use [WithFatalHook] instead.
func OnFatal(action zapcore.CheckWriteAction) Option {
return WithFatalHook(action)
}
// WithFatalHook sets a CheckWriteHook to run on fatal logs.
// Zap will call this hook after writing a log statement with a Fatal level.
//
// For example, the following builds a logger that will exit the current
// goroutine after writing a fatal log message, but it will not exit the
// program.
//
// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
//
// It is important that the provided CheckWriteHook stops the control flow at
// the current statement to meet expectations of callers of the logger.
// We recommend calling os.Exit or runtime.Goexit inside custom hooks at
// minimum.
func WithFatalHook(hook zapcore.CheckWriteHook) Option {
return optionFunc(func(log *Logger) {
log.onFatal = action
log.onFatal = hook
})
}
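For illustration, a minimal, hedged sketch of wiring the new option into a logger; the core construction below (JSON encoder, stderr, InfoLevel) is an arbitrary choice, not part of this diff. WriteThenGoexit already satisfies CheckWriteHook, so fatal logs end only the current goroutine instead of the whole process.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// An ordinary JSON core writing to stderr; details are illustrative only.
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stderr),
		zapcore.InfoLevel,
	)

	// WriteThenGoexit implements zapcore.CheckWriteHook, so Fatal entries are
	// written and then the current goroutine exits via runtime.Goexit.
	logger := zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
	defer logger.Sync()

	logger.Info("fatal logs will stop the goroutine, not the process")
}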

vendor/go.uber.org/zap/sink.go generated vendored

@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc.
// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -26,6 +26,7 @@ import (
"io"
"net/url"
"os"
"path/filepath"
"strings"
"sync"
@ -34,23 +35,7 @@ import (
const schemeFile = "file"
var (
_sinkMutex sync.RWMutex
_sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme
)
func init() {
resetSinkRegistry()
}
func resetSinkRegistry() {
_sinkMutex.Lock()
defer _sinkMutex.Unlock()
_sinkFactories = map[string]func(*url.URL) (Sink, error){
schemeFile: newFileSink,
}
}
var _sinkRegistry = newSinkRegistry()
// Sink defines the interface to write to and close logger destinations.
type Sink interface {
@ -58,10 +43,6 @@ type Sink interface {
io.Closer
}
type nopCloserSink struct{ zapcore.WriteSyncer }
func (nopCloserSink) Close() error { return nil }
type errSinkNotFound struct {
scheme string
}
@ -70,16 +51,29 @@ func (e *errSinkNotFound) Error() string {
return fmt.Sprintf("no sink found for scheme %q", e.scheme)
}
// RegisterSink registers a user-supplied factory for all sinks with a
// particular scheme.
//
// All schemes must be ASCII, valid under section 3.1 of RFC 3986
// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
// have a factory registered. Zap automatically registers a factory for the
// "file" scheme.
func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
_sinkMutex.Lock()
defer _sinkMutex.Unlock()
type nopCloserSink struct{ zapcore.WriteSyncer }
func (nopCloserSink) Close() error { return nil }
type sinkRegistry struct {
mu sync.Mutex
factories map[string]func(*url.URL) (Sink, error) // keyed by scheme
openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
}
func newSinkRegistry() *sinkRegistry {
sr := &sinkRegistry{
factories: make(map[string]func(*url.URL) (Sink, error)),
openFile: os.OpenFile,
}
sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
return sr
}
// RegisterSink registers the given factory for the specific scheme.
func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
sr.mu.Lock()
defer sr.mu.Unlock()
if scheme == "" {
return errors.New("can't register a sink factory for empty string")
@ -88,14 +82,22 @@ func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
if err != nil {
return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
}
if _, ok := _sinkFactories[normalized]; ok {
if _, ok := sr.factories[normalized]; ok {
return fmt.Errorf("sink factory already registered for scheme %q", normalized)
}
_sinkFactories[normalized] = factory
sr.factories[normalized] = factory
return nil
}
func newSink(rawURL string) (Sink, error) {
func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
// URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
// the drive, and path is unset unless `c:/log.txt` is used.
// To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
// filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
if filepath.IsAbs(rawURL) {
return sr.newFileSinkFromPath(rawURL)
}
u, err := url.Parse(rawURL)
if err != nil {
return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
@ -104,16 +106,27 @@ func newSink(rawURL string) (Sink, error) {
u.Scheme = schemeFile
}
_sinkMutex.RLock()
factory, ok := _sinkFactories[u.Scheme]
_sinkMutex.RUnlock()
sr.mu.Lock()
factory, ok := sr.factories[u.Scheme]
sr.mu.Unlock()
if !ok {
return nil, &errSinkNotFound{u.Scheme}
}
return factory(u)
}
func newFileSink(u *url.URL) (Sink, error) {
// RegisterSink registers a user-supplied factory for all sinks with a
// particular scheme.
//
// All schemes must be ASCII, valid under section 3.1 of RFC 3986
// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
// have a factory registered. Zap automatically registers a factory for the
// "file" scheme.
func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
return _sinkRegistry.RegisterSink(scheme, factory)
}
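To show how the registry above is reached through the public API, here is a hedged sketch that registers a hypothetical "memory" scheme and routes a production config's output to it. The memorySink type, the scheme name, and the log message are inventions for this example.

package main

import (
	"bytes"
	"fmt"
	"net/url"

	"go.uber.org/zap"
)

// memorySink is a hypothetical Sink that keeps output in memory; embedding
// bytes.Buffer supplies Write, and Sync/Close are no-ops.
type memorySink struct{ bytes.Buffer }

func (*memorySink) Sync() error  { return nil }
func (*memorySink) Close() error { return nil }

func main() {
	sink := &memorySink{}

	// RegisterSink fails for empty, invalid, or already-registered schemes.
	if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
		return sink, nil
	}); err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.OutputPaths = []string{"memory://"}
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	logger.Info("captured in memory")
	fmt.Print(sink.String())
}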
func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
if u.User != nil {
return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
}
@ -130,13 +143,18 @@ func newFileSink(u *url.URL) (Sink, error) {
if hn := u.Hostname(); hn != "" && hn != "localhost" {
return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
}
switch u.Path {
return sr.newFileSinkFromPath(u.Path)
}
func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
switch path {
case "stdout":
return nopCloserSink{os.Stdout}, nil
case "stderr":
return nopCloserSink{os.Stderr}, nil
}
return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
}
func normalizeScheme(s string) (string, error) {

vendor/go.uber.org/zap/stacktrace.go generated vendored

@ -24,62 +24,153 @@ import (
"runtime"
"sync"
"go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
)
var (
_stacktracePool = sync.Pool{
New: func() interface{} {
return newProgramCounters(64)
},
}
var _stacktracePool = sync.Pool{
New: func() interface{} {
return &stacktrace{
storage: make([]uintptr, 64),
}
},
}
type stacktrace struct {
pcs []uintptr // program counters; always a subslice of storage
frames *runtime.Frames
// The size of pcs varies depending on requirements:
// it will be one if only the first frame was requested,
// and otherwise it will reflect the depth of the call stack.
//
// storage decouples the slice we need (pcs) from the slice we pool.
// We will always allocate a reasonably large storage, but we'll use
// only as much of it as we need.
storage []uintptr
}
// stacktraceDepth specifies how deep of a stack trace should be captured.
type stacktraceDepth int
const (
// stacktraceFirst captures only the first frame.
stacktraceFirst stacktraceDepth = iota
// stacktraceFull captures the entire call stack, allocating more
// storage for it if needed.
stacktraceFull
)
// captureStacktrace captures a stack trace of the specified depth, skipping
// the provided number of frames. skip=0 identifies the caller of
// captureStacktrace.
//
// The caller must call Free on the returned stacktrace after using it.
func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
stack := _stacktracePool.Get().(*stacktrace)
switch depth {
case stacktraceFirst:
stack.pcs = stack.storage[:1]
case stacktraceFull:
stack.pcs = stack.storage
}
// Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
// itself. +2 to skip captureStacktrace and runtime.Callers.
numFrames := runtime.Callers(
skip+2,
stack.pcs,
)
// runtime.Callers truncates the recorded stacktrace if there is no
// room in the provided slice. For the full stack trace, keep expanding
// storage until there are fewer frames than there is room.
if depth == stacktraceFull {
pcs := stack.pcs
for numFrames == len(pcs) {
pcs = make([]uintptr, len(pcs)*2)
numFrames = runtime.Callers(skip+2, pcs)
}
// Discard old storage instead of returning it to the pool.
// This will adjust the pool size over time if stack traces are
// consistently very deep.
stack.storage = pcs
stack.pcs = pcs[:numFrames]
} else {
stack.pcs = stack.pcs[:numFrames]
}
stack.frames = runtime.CallersFrames(stack.pcs)
return stack
}
// Free releases resources associated with this stacktrace
// and returns it back to the pool.
func (st *stacktrace) Free() {
st.frames = nil
st.pcs = nil
_stacktracePool.Put(st)
}
// Count reports the total number of frames in this stacktrace.
// Count DOES NOT change as Next is called.
func (st *stacktrace) Count() int {
return len(st.pcs)
}
// Next returns the next frame in the stack trace,
// and a boolean indicating whether there are more after it.
func (st *stacktrace) Next() (_ runtime.Frame, more bool) {
return st.frames.Next()
}
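These pooled helpers back the public caller and stack-trace options; a brief, hedged sketch of how they surface to users (the development config, the lowered stack threshold, and the "stacktrace" field key are arbitrary choices):

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// NewDevelopment enables AddCaller by default; lowering the stack-trace
	// threshold makes Info entries carry a stack captured by the pooled
	// machinery above.
	logger, err := zap.NewDevelopment(zap.AddStacktrace(zapcore.InfoLevel))
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Info("includes caller and stack trace")
	logger.Warn("an explicit stack field also works", zap.Stack("stacktrace"))
}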
func takeStacktrace(skip int) string {
stack := captureStacktrace(skip+1, stacktraceFull)
defer stack.Free()
buffer := bufferpool.Get()
defer buffer.Free()
programCounters := _stacktracePool.Get().(*programCounters)
defer _stacktracePool.Put(programCounters)
var numFrames int
for {
// Skip the call to runtime.Callers and takeStacktrace so that the
// program counters start at the caller of takeStacktrace.
numFrames = runtime.Callers(skip+2, programCounters.pcs)
if numFrames < len(programCounters.pcs) {
break
}
// Don't put the too-short counter slice back into the pool; this lets
// the pool adjust if we consistently take deep stacktraces.
programCounters = newProgramCounters(len(programCounters.pcs) * 2)
}
i := 0
frames := runtime.CallersFrames(programCounters.pcs[:numFrames])
// Note: On the last iteration, frames.Next() returns false, with a valid
// frame, but we ignore this frame. The last frame is a runtime frame which
// adds noise, since it's only either runtime.main or runtime.goexit.
for frame, more := frames.Next(); more; frame, more = frames.Next() {
if i != 0 {
buffer.AppendByte('\n')
}
i++
buffer.AppendString(frame.Function)
buffer.AppendByte('\n')
buffer.AppendByte('\t')
buffer.AppendString(frame.File)
buffer.AppendByte(':')
buffer.AppendInt(int64(frame.Line))
}
stackfmt := newStackFormatter(buffer)
stackfmt.FormatStack(stack)
return buffer.String()
}
type programCounters struct {
pcs []uintptr
// stackFormatter formats a stack trace into a readable string representation.
type stackFormatter struct {
b *buffer.Buffer
nonEmpty bool // whether we've written at least one frame already
}
func newProgramCounters(size int) *programCounters {
return &programCounters{make([]uintptr, size)}
// newStackFormatter builds a new stackFormatter.
func newStackFormatter(b *buffer.Buffer) stackFormatter {
return stackFormatter{b: b}
}
// FormatStack formats all remaining frames in the provided stacktrace -- minus
// the final runtime.main/runtime.goexit frame.
func (sf *stackFormatter) FormatStack(stack *stacktrace) {
// Note: On the last iteration, frames.Next() returns false, with a valid
// frame, but we ignore this frame. The last frame is a runtime frame which
// adds noise, since it's only either runtime.main or runtime.goexit.
for frame, more := stack.Next(); more; frame, more = stack.Next() {
sf.FormatFrame(frame)
}
}
// FormatFrame formats the given frame.
func (sf *stackFormatter) FormatFrame(frame runtime.Frame) {
if sf.nonEmpty {
sf.b.AppendByte('\n')
}
sf.nonEmpty = true
sf.b.AppendString(frame.Function)
sf.b.AppendByte('\n')
sf.b.AppendByte('\t')
sf.b.AppendString(frame.File)
sf.b.AppendByte(':')
sf.b.AppendInt(int64(frame.Line))
}

vendor/go.uber.org/zap/sugar.go generated vendored

@ -31,6 +31,7 @@ import (
const (
_oddNumberErrMsg = "Ignored key without a value."
_nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
_multipleErrMsg = "Multiple errors without a key."
)
// A SugaredLogger wraps the base Logger functionality in a slower, but less
@ -38,10 +39,19 @@ const (
// method.
//
// Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
// For each log level, it exposes three methods: one for loosely-typed
// structured logging, one for println-style formatting, and one for
// printf-style formatting. For example, SugaredLoggers can produce InfoLevel
// output with Infow ("info with" structured context), Info, or Infof.
// For each log level, it exposes four methods:
//
// - methods named after the log level for log.Print-style logging
// - methods ending in "w" for loosely-typed structured logging
// - methods ending in "f" for log.Printf-style logging
// - methods ending in "ln" for log.Println-style logging
//
// For example, the methods for InfoLevel are:
//
// Info(...any) Print-style logging
// Infow(...any) Structured logging (read as "info with")
// Infof(string, ...any) Printf-style logging
// Infoln(...any) Println-style logging
type SugaredLogger struct {
base *Logger
}
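A hedged sketch of the four flavors described above, assuming a production logger; the messages, keys, and values are arbitrary.

package main

import "go.uber.org/zap"

func main() {
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()
	sugar := logger.Sugar()

	user, attempts := "alice", 3

	sugar.Info("login ok for ", user)                                // fmt.Sprint-style
	sugar.Infof("login ok for %s after %d attempts", user, attempts) // fmt.Sprintf-style
	sugar.Infoln("login ok for", user)                               // fmt.Sprintln-style (added in this release)
	sugar.Infow("login ok", "user", user, "attempts", attempts)      // loosely-typed structured fields
}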
@ -61,27 +71,40 @@ func (s *SugaredLogger) Named(name string) *SugaredLogger {
return &SugaredLogger{base: s.base.Named(name)}
}
// WithOptions clones the current SugaredLogger, applies the supplied Options,
// and returns the result. It's safe to use concurrently.
func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger {
base := s.base.clone()
for _, opt := range opts {
opt.apply(base)
}
return &SugaredLogger{base: base}
}
// With adds a variadic number of fields to the logging context. It accepts a
// mix of strongly-typed Field objects and loosely-typed key-value pairs. When
// processing pairs, the first element of the pair is used as the field key
// and the second as the field value.
//
// For example,
// sugaredLogger.With(
// "hello", "world",
// "failure", errors.New("oh no"),
// Stack(),
// "count", 42,
// "user", User{Name: "alice"},
// )
//
// sugaredLogger.With(
// "hello", "world",
// "failure", errors.New("oh no"),
// Stack(),
// "count", 42,
// "user", User{Name: "alice"},
// )
//
// is the equivalent of
// unsugared.With(
// String("hello", "world"),
// String("failure", "oh no"),
// Stack(),
// Int("count", 42),
// Object("user", User{Name: "alice"}),
// )
//
// unsugared.With(
// String("hello", "world"),
// String("failure", "oh no"),
// Stack(),
// Int("count", 42),
// Object("user", User{Name: "alice"}),
// )
//
// Note that the keys in key-value pairs should be strings. In development,
// passing a non-string key panics. In production, the logger is more
@ -92,6 +115,13 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
}
// Level reports the minimum enabled level for this logger.
//
// For NopLoggers, this is [zapcore.InvalidLevel].
func (s *SugaredLogger) Level() zapcore.Level {
return zapcore.LevelOf(s.base.core)
}
// Debug uses fmt.Sprint to construct and log a message.
func (s *SugaredLogger) Debug(args ...interface{}) {
s.log(DebugLevel, "", args, nil)
@ -168,7 +198,8 @@ func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
// pairs are treated as they are in With.
//
// When debug-level logging is disabled, this is much faster than
// s.With(keysAndValues).Debug(msg)
//
// s.With(keysAndValues).Debug(msg)
func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
s.log(DebugLevel, msg, nil, keysAndValues)
}
@ -210,11 +241,48 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
s.log(FatalLevel, msg, nil, keysAndValues)
}
// Debugln uses fmt.Sprintln to construct and log a message.
func (s *SugaredLogger) Debugln(args ...interface{}) {
s.logln(DebugLevel, args, nil)
}
// Infoln uses fmt.Sprintln to construct and log a message.
func (s *SugaredLogger) Infoln(args ...interface{}) {
s.logln(InfoLevel, args, nil)
}
// Warnln uses fmt.Sprintln to construct and log a message.
func (s *SugaredLogger) Warnln(args ...interface{}) {
s.logln(WarnLevel, args, nil)
}
// Errorln uses fmt.Sprintln to construct and log a message.
func (s *SugaredLogger) Errorln(args ...interface{}) {
s.logln(ErrorLevel, args, nil)
}
// DPanicln uses fmt.Sprintln to construct and log a message. In development, the
// logger then panics. (See DPanicLevel for details.)
func (s *SugaredLogger) DPanicln(args ...interface{}) {
s.logln(DPanicLevel, args, nil)
}
// Panicln uses fmt.Sprintln to construct and log a message, then panics.
func (s *SugaredLogger) Panicln(args ...interface{}) {
s.logln(PanicLevel, args, nil)
}
// Fatalln uses fmt.Sprintln to construct and log a message, then calls os.Exit.
func (s *SugaredLogger) Fatalln(args ...interface{}) {
s.logln(FatalLevel, args, nil)
}
// Sync flushes any buffered log entries.
func (s *SugaredLogger) Sync() error {
return s.base.Sync()
}
// log message with Sprint, Sprintf, or neither.
func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
// If logging at this level is completely disabled, skip the overhead of
// string formatting.
@ -228,6 +296,18 @@ func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interf
}
}
// logln message with Sprintln
func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) {
if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
return
}
msg := getMessageln(fmtArgs)
if ce := s.base.Check(lvl, msg); ce != nil {
ce.Write(s.sweetenFields(context)...)
}
}
// getMessage format with Sprint, Sprintf, or neither.
func getMessage(template string, fmtArgs []interface{}) string {
if len(fmtArgs) == 0 {
@ -246,15 +326,24 @@ func getMessage(template string, fmtArgs []interface{}) string {
return fmt.Sprint(fmtArgs...)
}
// getMessageln format with Sprintln.
func getMessageln(fmtArgs []interface{}) string {
msg := fmt.Sprintln(fmtArgs...)
return msg[:len(msg)-1]
}
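A small, hedged illustration of why the "ln" family differs from the Sprint-based methods: fmt.Sprint only separates operands when neither is a string, while fmt.Sprintln always adds spaces (and getMessageln trims its trailing newline). The values are arbitrary.

package main

import "fmt"

func main() {
	fmt.Println(fmt.Sprint("count:", 42, "ok")) // count:42ok (no separators next to strings)

	msg := fmt.Sprintln("count:", 42, "ok") // "count: 42 ok\n"
	msg = msg[:len(msg)-1]                  // what getMessageln returns
	fmt.Printf("%q\n", msg)                 // "count: 42 ok"
}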
func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
if len(args) == 0 {
return nil
}
// Allocate enough space for the worst case; if users pass only structured
// fields, we shouldn't penalize them with extra allocations.
fields := make([]Field, 0, len(args))
var invalid invalidPairs
var (
// Allocate enough space for the worst case; if users pass only structured
// fields, we shouldn't penalize them with extra allocations.
fields = make([]Field, 0, len(args))
invalid invalidPairs
seenError bool
)
for i := 0; i < len(args); {
// This is a strongly-typed field. Consume it and move on.
@ -264,6 +353,18 @@ func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
continue
}
// If it is an error, consume it and move on.
if err, ok := args[i].(error); ok {
if !seenError {
seenError = true
fields = append(fields, Error(err))
} else {
s.base.Error(_multipleErrMsg, Error(err))
}
i++
continue
}
// Make sure this element isn't a dangling key.
if i == len(args)-1 {
s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))

vendor/go.uber.org/zap/writer.go generated vendored

@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc.
// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -23,7 +23,6 @@ package zap
import (
"fmt"
"io"
"io/ioutil"
"go.uber.org/zap/zapcore"
@ -69,9 +68,9 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
var openErr error
for _, path := range paths {
sink, err := newSink(path)
sink, err := _sinkRegistry.newSink(path)
if err != nil {
openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err))
openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err))
continue
}
writers = append(writers, sink)
@ -79,7 +78,7 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
}
if openErr != nil {
close()
return writers, nil, openErr
return nil, nil, openErr
}
return writers, close, nil
@ -93,7 +92,7 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
if len(writers) == 0 {
return zapcore.AddSync(ioutil.Discard)
return zapcore.AddSync(io.Discard)
}
return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
}
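A hedged sketch of CombineWriteSyncers duplicating output to a temporary file and stderr; the file name pattern, encoder, and level are illustrative only.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	f, err := os.CreateTemp("", "app-*.log")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Both destinations are wrapped in a single locked WriteSyncer.
	ws := zap.CombineWriteSyncers(zapcore.AddSync(f), zapcore.AddSync(os.Stderr))
	core := zapcore.NewCore(zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), ws, zapcore.InfoLevel)

	logger := zap.New(core)
	defer logger.Sync()
	logger.Info("written to the temp file and to stderr")
}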


@ -43,6 +43,37 @@ const (
//
// BufferedWriteSyncer is safe for concurrent use. You don't need to use
// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
//
// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log
// destination (*os.File is a valid WriteSyncer), wrap it with
// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the
// object.
//
// func main() {
// ws := ... // your log destination
// bws := &zapcore.BufferedWriteSyncer{WS: ws}
// defer bws.Stop()
//
// // ...
// core := zapcore.NewCore(enc, bws, lvl)
// logger := zap.New(core)
//
// // ...
// }
//
// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
// waiting at most 30 seconds between flushes.
// You can customize these parameters by setting the Size or FlushInterval
// fields.
// For example, the following buffers up to 512 kB of logs before flushing them
// to Stderr, with a maximum of one minute between each flush.
//
// ws := &BufferedWriteSyncer{
// WS: os.Stderr,
// Size: 512 * 1024, // 512 kB
// FlushInterval: time.Minute,
// }
// defer ws.Stop()
type BufferedWriteSyncer struct {
// WS is the WriteSyncer around which BufferedWriteSyncer will buffer
// writes.
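The example in the comment above leaves the destination abstract; here is a hedged, self-contained variant that buffers a JSON core writing to stderr (the size and flush interval are arbitrary).

package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	bws := &zapcore.BufferedWriteSyncer{
		WS:            zapcore.AddSync(os.Stderr),
		Size:          512 * 1024,       // flush once 512 kB are buffered...
		FlushInterval: 30 * time.Second, // ...or every 30 seconds, whichever comes first
	}
	defer bws.Stop() // flushes buffered logs and stops the background goroutine

	core := zapcore.NewCore(zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), bws, zapcore.InfoLevel)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Info("buffered until a flush is triggered")
}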


@ -20,9 +20,7 @@
package zapcore
import (
"time"
)
import "time"
// DefaultClock is the default clock used by Zap in operations that require
// time. This clock uses the system clock for all operations.


@ -125,11 +125,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
line.AppendString(ent.Stack)
}
if c.LineEnding != "" {
line.AppendString(c.LineEnding)
} else {
line.AppendString(DefaultLineEnding)
}
line.AppendString(c.LineEnding)
return line, nil
}


@ -69,6 +69,15 @@ type ioCore struct {
out WriteSyncer
}
var (
_ Core = (*ioCore)(nil)
_ leveledEnabler = (*ioCore)(nil)
)
func (c *ioCore) Level() Level {
return LevelOf(c.LevelEnabler)
}
func (c *ioCore) With(fields []Field) Core {
clone := c.clone()
addFields(clone.enc, fields)


@ -22,6 +22,7 @@ package zapcore
import (
"encoding/json"
"io"
"time"
"go.uber.org/zap/buffer"
@ -187,10 +188,13 @@ func (e *TimeEncoder) UnmarshalText(text []byte) error {
// UnmarshalYAML unmarshals YAML to a TimeEncoder.
// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout.
// timeEncoder:
// layout: 06/01/02 03:04pm
//
// timeEncoder:
// layout: 06/01/02 03:04pm
//
// If value is string, it uses UnmarshalText.
// timeEncoder: iso8601
//
// timeEncoder: iso8601
func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
var o struct {
Layout string `json:"layout" yaml:"layout"`
@ -312,14 +316,15 @@ func (e *NameEncoder) UnmarshalText(text []byte) error {
type EncoderConfig struct {
// Set the keys used for each log entry. If any key is empty, that portion
// of the entry is omitted.
MessageKey string `json:"messageKey" yaml:"messageKey"`
LevelKey string `json:"levelKey" yaml:"levelKey"`
TimeKey string `json:"timeKey" yaml:"timeKey"`
NameKey string `json:"nameKey" yaml:"nameKey"`
CallerKey string `json:"callerKey" yaml:"callerKey"`
FunctionKey string `json:"functionKey" yaml:"functionKey"`
StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
LineEnding string `json:"lineEnding" yaml:"lineEnding"`
MessageKey string `json:"messageKey" yaml:"messageKey"`
LevelKey string `json:"levelKey" yaml:"levelKey"`
TimeKey string `json:"timeKey" yaml:"timeKey"`
NameKey string `json:"nameKey" yaml:"nameKey"`
CallerKey string `json:"callerKey" yaml:"callerKey"`
FunctionKey string `json:"functionKey" yaml:"functionKey"`
StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"`
LineEnding string `json:"lineEnding" yaml:"lineEnding"`
// Configure the primitive representations of common complex types. For
// example, some users may want all time.Times serialized as floating-point
// seconds since epoch, while others may prefer ISO8601 strings.
@ -330,6 +335,9 @@ type EncoderConfig struct {
// Unlike the other primitive type encoders, EncodeName is optional. The
// zero value falls back to FullNameEncoder.
EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
// Configure the encoder for interface{} type objects.
// If not provided, objects are encoded using json.Encoder
NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"`
// Configures the field separator used by the console encoder. Defaults
// to tab.
ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"`
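A hedged sketch of the new NewReflectedEncoder hook: it swaps in a json.Encoder that re-enables HTML escaping, which zap's default reflected encoder turns off. The field name and payload are arbitrary.

package main

import (
	"encoding/json"
	"io"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionEncoderConfig()
	cfg.NewReflectedEncoder = func(w io.Writer) zapcore.ReflectedEncoder {
		enc := json.NewEncoder(w)
		enc.SetEscapeHTML(true) // differs from zap's default, which disables HTML escaping
		return enc
	}

	core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.Lock(os.Stdout), zapcore.DebugLevel)
	logger := zap.New(core)
	defer logger.Sync()

	// zap.Reflect forces the ReflectType path, so the custom encoder is used.
	logger.Info("reflected field", zap.Reflect("payload", map[string]string{"html": "<b>hi</b>"}))
}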


@ -27,10 +27,9 @@ import (
"sync"
"time"
"go.uber.org/multierr"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/exit"
"go.uber.org/multierr"
)
var (
@ -152,6 +151,27 @@ type Entry struct {
Stack string
}
// CheckWriteHook is a custom action that may be executed after an entry is
// written.
//
// Register one on a CheckedEntry with the After method.
//
// if ce := logger.Check(...); ce != nil {
// ce = ce.After(hook)
// ce.Write(...)
// }
//
// You can configure the hook for Fatal log statements at the logger level with
// the zap.WithFatalHook option.
type CheckWriteHook interface {
// OnWrite is invoked with the CheckedEntry that was written and a list
// of fields added with that entry.
//
// The list of fields DOES NOT include fields that were already added
// to the logger with the With method.
OnWrite(*CheckedEntry, []Field)
}
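A hedged sketch of a custom CheckWriteHook; exitCodeHook and its status code are invented for this example. As the guidance above requires, the hook stops control flow, here via os.Exit.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// exitCodeHook is a hypothetical hook that exits with a custom status code
// after a fatal entry has been written.
type exitCodeHook struct{ code int }

func (h exitCodeHook) OnWrite(*zapcore.CheckedEntry, []zapcore.Field) {
	os.Exit(h.code)
}

func main() {
	logger, err := zap.NewProduction(zap.WithFatalHook(exitCodeHook{code: 3}))
	if err != nil {
		panic(err)
	}
	logger.Info("running")
	logger.Fatal("unrecoverable") // written, then the process exits with status 3
}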
// CheckWriteAction indicates what action to take after a log entry is
// processed. Actions are ordered in increasing severity.
type CheckWriteAction uint8
@ -164,21 +184,36 @@ const (
WriteThenGoexit
// WriteThenPanic causes a panic after Write.
WriteThenPanic
// WriteThenFatal causes a fatal os.Exit after Write.
// WriteThenFatal causes an os.Exit(1) after Write.
WriteThenFatal
)
// OnWrite implements the OnWrite method to keep CheckWriteAction compatible
// with the new CheckWriteHook interface which deprecates CheckWriteAction.
func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) {
switch a {
case WriteThenGoexit:
runtime.Goexit()
case WriteThenPanic:
panic(ce.Message)
case WriteThenFatal:
exit.With(1)
}
}
var _ CheckWriteHook = CheckWriteAction(0)
// CheckedEntry is an Entry together with a collection of Cores that have
// already agreed to log it.
//
// CheckedEntry references should be created by calling AddCore or Should on a
// CheckedEntry references should be created by calling AddCore or After on a
// nil *CheckedEntry. References are returned to a pool after Write, and MUST
// NOT be retained after calling their Write method.
type CheckedEntry struct {
Entry
ErrorOutput WriteSyncer
dirty bool // best-effort detection of pool misuse
should CheckWriteAction
after CheckWriteHook
cores []Core
}
@ -186,7 +221,7 @@ func (ce *CheckedEntry) reset() {
ce.Entry = Entry{}
ce.ErrorOutput = nil
ce.dirty = false
ce.should = WriteThenNoop
ce.after = nil
for i := range ce.cores {
// don't keep references to cores
ce.cores[i] = nil
@ -224,17 +259,11 @@ func (ce *CheckedEntry) Write(fields ...Field) {
ce.ErrorOutput.Sync()
}
should, msg := ce.should, ce.Message
putCheckedEntry(ce)
switch should {
case WriteThenPanic:
panic(msg)
case WriteThenFatal:
exit.Exit()
case WriteThenGoexit:
runtime.Goexit()
hook := ce.after
if hook != nil {
hook.OnWrite(ce, fields)
}
putCheckedEntry(ce)
}
// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
@ -252,11 +281,20 @@ func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
// Core will panic or fatal after writing this log entry. Like AddCore, it's
// safe to call on nil CheckedEntry references.
//
// Deprecated: Use [CheckedEntry.After] instead.
func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
return ce.After(ent, should)
}
// After sets this CheckedEntry's CheckWriteHook, which will be called after this
// log entry has been written. It's safe to call this on nil CheckedEntry
// references.
func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry {
if ce == nil {
ce = getCheckedEntry()
ce.Entry = ent
}
ce.should = should
ce.after = hook
return ce
}


@ -36,13 +36,13 @@ import (
// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
// array of objects containing the errors this error was comprised of.
//
// {
// "error": err.Error(),
// "errorVerbose": fmt.Sprintf("%+v", err),
// "errorCauses": [
// ...
// ],
// }
// {
// "error": err.Error(),
// "errorVerbose": fmt.Sprintf("%+v", err),
// "errorCauses": [
// ...
// ],
// }
func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
// Try to capture panics (from nil references or otherwise) when calling
// the Error() method


@ -27,6 +27,11 @@ type hooked struct {
funcs []func(Entry) error
}
var (
_ Core = (*hooked)(nil)
_ leveledEnabler = (*hooked)(nil)
)
// RegisterHooks wraps a Core and runs a collection of user-defined callback
// hooks each time a message is logged. Execution of the callbacks is blocking.
//
@ -40,6 +45,10 @@ func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
}
}
func (h *hooked) Level() Level {
return LevelOf(h.Core)
}
func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
// Let the wrapped Core decide whether to log this message or not. This
// also gives the downstream a chance to register itself directly with the


@ -27,6 +27,11 @@ type levelFilterCore struct {
level LevelEnabler
}
var (
_ Core = (*levelFilterCore)(nil)
_ leveledEnabler = (*levelFilterCore)(nil)
)
// NewIncreaseLevelCore creates a core that can be used to increase the level of
// an existing Core. It cannot be used to decrease the logging level, as it acts
// as a filter before calling the underlying core. If level decreases the log level,
@ -45,6 +50,10 @@ func (c *levelFilterCore) Enabled(lvl Level) bool {
return c.level.Enabled(lvl)
}
func (c *levelFilterCore) Level() Level {
return LevelOf(c.level)
}
func (c *levelFilterCore) With(fields []Field) Core {
return &levelFilterCore{c.core.With(fields), c.level}
}


@ -22,7 +22,6 @@ package zapcore
import (
"encoding/base64"
"encoding/json"
"math"
"sync"
"time"
@ -64,7 +63,7 @@ type jsonEncoder struct {
// for encoding generic values by reflection
reflectBuf *buffer.Buffer
reflectEnc *json.Encoder
reflectEnc ReflectedEncoder
}
// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
@ -72,7 +71,9 @@ type jsonEncoder struct {
//
// Note that the encoder doesn't deduplicate keys, so it's possible to produce
// a message like
// {"foo":"bar","foo":"baz"}
//
// {"foo":"bar","foo":"baz"}
//
// This is permitted by the JSON specification, but not encouraged. Many
// libraries will ignore duplicate key-value pairs (typically keeping the last
// pair) when unmarshaling, but users should attempt to avoid adding duplicate
@ -82,6 +83,17 @@ func NewJSONEncoder(cfg EncoderConfig) Encoder {
}
func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
if cfg.SkipLineEnding {
cfg.LineEnding = ""
} else if cfg.LineEnding == "" {
cfg.LineEnding = DefaultLineEnding
}
// If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default
if cfg.NewReflectedEncoder == nil {
cfg.NewReflectedEncoder = defaultReflectedEncoder
}
return &jsonEncoder{
EncoderConfig: &cfg,
buf: bufferpool.Get(),
@ -118,6 +130,11 @@ func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
enc.AppendComplex128(val)
}
func (enc *jsonEncoder) AddComplex64(key string, val complex64) {
enc.addKey(key)
enc.AppendComplex64(val)
}
func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
enc.addKey(key)
enc.AppendDuration(val)
@ -141,10 +158,7 @@ func (enc *jsonEncoder) AddInt64(key string, val int64) {
func (enc *jsonEncoder) resetReflectBuf() {
if enc.reflectBuf == nil {
enc.reflectBuf = bufferpool.Get()
enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
// For consistency with our custom JSON encoder.
enc.reflectEnc.SetEscapeHTML(false)
enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf)
} else {
enc.reflectBuf.Reset()
}
@ -206,10 +220,16 @@ func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
}
func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
// Close ONLY new openNamespaces that are created during
// AppendObject().
old := enc.openNamespaces
enc.openNamespaces = 0
enc.addElementSeparator()
enc.buf.AppendByte('{')
err := obj.MarshalLogObject(enc)
enc.buf.AppendByte('}')
enc.closeOpenNamespaces()
enc.openNamespaces = old
return err
}
@ -225,20 +245,23 @@ func (enc *jsonEncoder) AppendByteString(val []byte) {
enc.buf.AppendByte('"')
}
func (enc *jsonEncoder) AppendComplex128(val complex128) {
// appendComplex appends the encoded form of the provided complex128 value.
// precision specifies the encoding precision for the real and imaginary
// components of the complex number.
func (enc *jsonEncoder) appendComplex(val complex128, precision int) {
enc.addElementSeparator()
// Cast to a platform-independent, fixed-size type.
r, i := float64(real(val)), float64(imag(val))
enc.buf.AppendByte('"')
// Because we're always in a quoted string, we can use strconv without
// special-casing NaN and +/-Inf.
enc.buf.AppendFloat(r, 64)
enc.buf.AppendFloat(r, precision)
// If the imaginary part is less than 0, the minus (-) sign is added by
// default by AppendFloat.
if i >= 0 {
enc.buf.AppendByte('+')
}
enc.buf.AppendFloat(i, 64)
enc.buf.AppendFloat(i, precision)
enc.buf.AppendByte('i')
enc.buf.AppendByte('"')
}
@ -301,28 +324,28 @@ func (enc *jsonEncoder) AppendUint64(val uint64) {
enc.buf.AppendUint(val)
}
func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) }
func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) }
func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) }
func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) Clone() Encoder {
clone := enc.clone()
@ -343,7 +366,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final := enc.clone()
final.buf.AppendByte('{')
if final.LevelKey != "" {
if final.LevelKey != "" && final.EncodeLevel != nil {
final.addKey(final.LevelKey)
cur := final.buf.Len()
final.EncodeLevel(ent.Level, final)
@ -404,11 +427,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final.AddString(final.StacktraceKey, ent.Stack)
}
final.buf.AppendByte('}')
if final.LineEnding != "" {
final.buf.AppendString(final.LineEnding)
} else {
final.buf.AppendString(DefaultLineEnding)
}
final.buf.AppendString(final.LineEnding)
ret := final.buf
putJSONEncoder(final)
@ -423,6 +442,7 @@ func (enc *jsonEncoder) closeOpenNamespaces() {
for i := 0; i < enc.openNamespaces; i++ {
enc.buf.AppendByte('}')
}
enc.openNamespaces = 0
}
func (enc *jsonEncoder) addKey(key string) {


@ -53,8 +53,62 @@ const (
_minLevel = DebugLevel
_maxLevel = FatalLevel
// InvalidLevel is an invalid value for Level.
//
// Core implementations may panic if they see messages of this level.
InvalidLevel = _maxLevel + 1
)
// ParseLevel parses a level based on the lower-case or all-caps ASCII
// representation of the log level. If the provided ASCII representation is
// invalid an error is returned.
//
// This is particularly useful when dealing with text input to configure log
// levels.
func ParseLevel(text string) (Level, error) {
var level Level
err := level.UnmarshalText([]byte(text))
return level, err
}
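A minimal sketch of ParseLevel with arbitrary inputs, such as a value read from a flag or environment variable.

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	lvl, err := zapcore.ParseLevel("warn") // all-caps "WARN" is accepted too
	if err != nil {
		panic(err)
	}
	fmt.Println(lvl) // warn

	// Unknown names return an error rather than silently defaulting.
	if _, err := zapcore.ParseLevel("verbose"); err != nil {
		fmt.Println("unrecognized level:", err)
	}
}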
type leveledEnabler interface {
LevelEnabler
Level() Level
}
// LevelOf reports the minimum enabled log level for the given LevelEnabler
// from Zap's supported log levels, or [InvalidLevel] if none of them are
// enabled.
//
// A LevelEnabler may implement a 'Level() Level' method to override the
// behavior of this function.
//
// func (c *core) Level() Level {
// return c.currentLevel
// }
//
// It is recommended that [Core] implementations that wrap other cores use
// LevelOf to retrieve the level of the wrapped core. For example,
//
// func (c *coreWrapper) Level() Level {
// return zapcore.LevelOf(c.wrappedCore)
// }
func LevelOf(enab LevelEnabler) Level {
if lvler, ok := enab.(leveledEnabler); ok {
return lvler.Level()
}
for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
if enab.Enabled(lvl) {
return lvl
}
}
return InvalidLevel
}
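A hedged sketch of LevelOf, using an AtomicLevel (which provides the optional Level method) together with the new SugaredLogger.Level helper; the warn threshold and destinations are arbitrary.

package main

import (
	"fmt"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// AtomicLevel implements the optional Level() method, so LevelOf returns
	// its value directly instead of probing Enabled for each level.
	atom := zap.NewAtomicLevelAt(zapcore.WarnLevel)
	core := zapcore.NewCore(zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), zapcore.Lock(os.Stderr), atom)

	fmt.Println(zapcore.LevelOf(core))         // warn
	fmt.Println(zap.New(core).Sugar().Level()) // warn, via SugaredLogger.Level

	// Cores that enable nothing report InvalidLevel.
	fmt.Println(zapcore.LevelOf(zapcore.NewNopCore()) == zapcore.InvalidLevel) // true
}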
// String returns a lower-case ASCII representation of the log level.
func (l Level) String() string {
switch l {


@ -1,4 +1,4 @@
// Copyright (c) 2019 Uber Technologies, Inc.
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -18,9 +18,24 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// See #682 for more information.
// +build !go1.12
package zapcore
package zap
import (
"encoding/json"
"io"
)
const _stdLogDefaultDepth = 2
// ReflectedEncoder serializes log fields that can't be serialized with Zap's
// JSON encoder. These have the ReflectType field type.
// Use EncoderConfig.NewReflectedEncoder to set this.
type ReflectedEncoder interface {
// Encode encodes and writes to the underlying data stream.
Encode(interface{}) error
}
func defaultReflectedEncoder(w io.Writer) ReflectedEncoder {
enc := json.NewEncoder(w)
// For consistency with our custom JSON encoder.
enc.SetEscapeHTML(false)
return enc
}


@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc.
// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -113,12 +113,12 @@ func nopSamplingHook(Entry, SamplingDecision) {}
// This hook may be used to get visibility into the performance of the sampler.
// For example, use it to track metrics of dropped versus sampled logs.
//
// var dropped atomic.Int64
// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
// if dec&zapcore.LogDropped > 0 {
// dropped.Inc()
// }
// })
// var dropped atomic.Int64
// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
// if dec&zapcore.LogDropped > 0 {
// dropped.Inc()
// }
// })
func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
return optionFunc(func(s *sampler) {
s.hook = hook
@ -133,10 +133,21 @@ func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
// each tick. If more Entries with the same level and message are seen during
// the same interval, every Mth message is logged and the rest are dropped.
//
// For example,
//
// core = NewSamplerWithOptions(core, time.Second, 10, 5)
//
// This will log the first 10 log entries with the same level and message
// in a one second interval as-is. Following that, it will allow through
// every 5th log entry with the same level and message in that interval.
//
// If thereafter is zero, the Core will drop all log entries after the first N
// in that interval.
//
// Sampler can be configured to report sampling decisions with the SamplerHook
// option.
//
// Keep in mind that zap's sampling implementation is optimized for speed over
// Keep in mind that Zap's sampling implementation is optimized for speed over
// absolute precision; under load, each tick may be slightly over- or
// under-sampled.
func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {
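A hedged sketch of NewSamplerWithOptions with a SamplerHook that counts drops. It uses the standard library's sync/atomic.Int64 (Go 1.19+) rather than the go.uber.org/atomic counter shown in the SamplerHook comment; the tick, first, and thereafter values are arbitrary.

package main

import (
	"os"
	"sync/atomic"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var dropped atomic.Int64

	base := zapcore.NewCore(zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), zapcore.Lock(os.Stderr), zapcore.InfoLevel)

	// Per tick and per distinct (level, message): keep the first 10 entries,
	// then every 5th, and count everything that gets dropped.
	sampled := zapcore.NewSamplerWithOptions(base, time.Second, 10, 5,
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				dropped.Add(1)
			}
		}),
	)

	logger := zap.New(sampled)
	defer logger.Sync()
	for i := 0; i < 100; i++ {
		logger.Info("hot loop message")
	}
	logger.Info("sampling summary", zap.Int64("dropped", dropped.Load()))
}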
@ -164,6 +175,11 @@ type sampler struct {
hook func(Entry, SamplingDecision)
}
var (
_ Core = (*sampler)(nil)
_ leveledEnabler = (*sampler)(nil)
)
// NewSampler creates a Core that samples incoming entries, which
// caps the CPU and I/O load of logging while attempting to preserve a
// representative subset of your logs.
@ -181,6 +197,10 @@ func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
return NewSamplerWithOptions(core, tick, first, thereafter)
}
func (s *sampler) Level() Level {
return LevelOf(s.Core)
}
func (s *sampler) With(fields []Field) Core {
return &sampler{
Core: s.Core.With(fields),
@ -200,7 +220,7 @@ func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
if ent.Level >= _minLevel && ent.Level <= _maxLevel {
counter := s.counts.get(ent.Level, ent.Message)
n := counter.IncCheckReset(ent.Time, s.tick)
if n > s.first && (n-s.first)%s.thereafter != 0 {
if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) {
s.hook(ent, LogDropped)
return ce
}


@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc.
// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -24,6 +24,11 @@ import "go.uber.org/multierr"
type multiCore []Core
var (
_ leveledEnabler = multiCore(nil)
_ Core = multiCore(nil)
)
// NewTee creates a Core that duplicates log entries into two or more
// underlying Cores.
//
@ -48,6 +53,16 @@ func (mc multiCore) With(fields []Field) Core {
return clone
}
func (mc multiCore) Level() Level {
minLvl := _maxLevel // mc is never empty
for i := range mc {
if lvl := LevelOf(mc[i]); lvl < minLvl {
minLvl = lvl
}
}
return minLvl
}
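A hedged sketch showing that a tee reports the most verbose level among its children; the two cores and their destinations are arbitrary.

package main

import (
	"fmt"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	debugCore := zapcore.NewCore(enc, zapcore.Lock(os.Stdout), zapcore.DebugLevel)
	errorCore := zapcore.NewCore(enc, zapcore.Lock(os.Stderr), zapcore.ErrorLevel)

	tee := zapcore.NewTee(debugCore, errorCore)

	// multiCore.Level picks the minimum (most verbose) of the wrapped cores.
	fmt.Println(zapcore.LevelOf(tee)) // debug
}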
func (mc multiCore) Enabled(lvl Level) bool {
for i := range mc {
if mc[i].Enabled(lvl) {


@ -61,7 +61,7 @@ func WrapOptions(zapOpts ...zap.Option) LoggerOption {
// NewLogger builds a new Logger that logs all messages to the given
// testing.TB.
//
// logger := zaptest.NewLogger(t)
// logger := zaptest.NewLogger(t)
//
// Use this with a *testing.T or *testing.B to get logs which get printed only
// if a test fails or if you ran go test -v.
@ -69,11 +69,11 @@ func WrapOptions(zapOpts ...zap.Option) LoggerOption {
// The returned logger defaults to logging debug level messages and above.
// This may be changed by passing a zaptest.Level during construction.
//
// logger := zaptest.NewLogger(t, zaptest.Level(zap.WarnLevel))
// logger := zaptest.NewLogger(t, zaptest.Level(zap.WarnLevel))
//
// You may also pass zap.Option's to customize test logger.
//
// logger := zaptest.NewLogger(t, zaptest.WrapOptions(zap.AddCaller()))
// logger := zaptest.NewLogger(t, zaptest.WrapOptions(zap.AddCaller()))
func NewLogger(t TestingT, opts ...LoggerOption) *zap.Logger {
cfg := loggerOptions{
Level: zapcore.DebugLevel,


@ -26,7 +26,7 @@ type (
// A Syncer is a spy for the Sync portion of zapcore.WriteSyncer.
Syncer = ztest.Syncer
// A Discarder sends all writes to ioutil.Discard.
// A Discarder sends all writes to io.Discard.
Discarder = ztest.Discarder
// FailWriter is a WriteSyncer that always returns an error on writes.

vendor/modules.txt vendored

@ -29,6 +29,9 @@ contrib.go.opencensus.io/exporter/prometheus
# contrib.go.opencensus.io/exporter/zipkin v0.1.2
## explicit
contrib.go.opencensus.io/exporter/zipkin
# github.com/benbjohnson/clock v1.1.0
## explicit; go 1.15
github.com/benbjohnson/clock
# github.com/beorn7/perks v1.0.1
## explicit; go 1.11
github.com/beorn7/perks/quantile
@ -299,10 +302,11 @@ go.uber.org/automaxprocs/maxprocs
# go.uber.org/multierr v1.6.0
## explicit; go 1.12
go.uber.org/multierr
# go.uber.org/zap v1.19.1
## explicit; go 1.13
# go.uber.org/zap v1.24.0
## explicit; go 1.19
go.uber.org/zap
go.uber.org/zap/buffer
go.uber.org/zap/internal
go.uber.org/zap/internal/bufferpool
go.uber.org/zap/internal/color
go.uber.org/zap/internal/exit