upgrade to latest dependencies (#1857)

bumping google.golang.org/genproto/googleapis/rpc 1744710...2d3300f:
  > 2d3300f chore(all): update actions/checkout action to v4 (# 1052)
  > b8732ec chore(all): auto-regenerate .pb.go files (# 1051)
  > ccc8af3 chore(all): auto-regenerate .pb.go files (# 1050)
  > 6bfd019 chore(all): auto-regenerate .pb.go files (# 1047)
  > ca7cfce fix(analytics/admin/v1alpha): remove broken aliases (# 1045)
bumping google.golang.org/grpc 87bf02a...62726d4:
  > 62726d4 update version to 1.58.1 (# 6629)
  > fa6d9ab cherry-pick 6610 and 6620 (# 6627)
  > 467fbf2 Change version to 1.58.1-dev (# 6580)
  > c2b0797 Change version to 1.58.0 (# 6579)
  > 0467e47 balancer/leastrequest: Cache atomic load and also add concurrent rpc test (# 6607)
  > 5d1c0ae leastrequest: fix data race in leastrequest picker (# 6606)
  > e26457d stream: swallow Header errors as we used to; RecvMsg can still return it (# 6591)
  > 4c9777c clusterresolver: fix deadlock when dns resolver responds inline with update or error at build time (# 6563)
  > 81b9df2 idle: move idleness manager to separate package and ~13s of tests into it (# 6566)
  > 7d35b8e test: speed up TestServiceConfigTimeoutTD from 1.8s to 0.03s (# 6571)
  > d51b3f4 interop/grpc_testing: update protos from grpc-proto repo (# 6567)
  > fe1519e client: fix ClientStream.Header() behavior (# 6557)
  > 8a2c220 cdsbalancer: test cleanup part 2/N (# 6554)
  > 7f66074 vet.sh: fix interface{} check for macos (# 6561)
  > b07bf5d cdsbalancer: test cleanup part 1/N (# 6546)
  > 33f9fa2 test: speed up two tests (# 6558)
  > aca07ce xds/internal/xdsclient: Add least request support in xDS (# 6517)
  > e5d8eac test: improve and speed up channelz keepalive test (# 6556)
  > ebf0b4e idle: speed up test by 5x even while running 2x more iterations (# 6555)
  > 7d3996f grpctest: use an interface instead of reflection (# 6553)
  > cc705fe interop: regenerate pb.gos (# 6551)
  > 3e92504 status: optimize GRPCStatus() calls (# 6539)
  > 402ba09 pick_first: de-experiment pick first (# 6549)
  > 2821d7f resolver: remove outdated Target examples (# 6547)
  > 53d1f23 benchmark: update proper benchmark binary to use larger buffers (# 6537)
  > fbff2ab *: update `interface{}` to `any` and `go.mod` version to `go 1.19` (# 6544)
  > e40da66 clientconn: release lock when returning from enterIdleMode() (# 6538)
  > dbbc983 balancer/leastrequest: Add least request balancer (# 6510)
  > a010079 *: remove references to old versions of go (# 6545)
  > 03d32b9 orca: update example and interop to use StateListener (# 6529)
  > c2bc22c testing: update Go versions tested to 1.19-1.21 (# 6543)
  > 879faf6 test: update client state subscriber test to be not flaky and more stressful about rapid updates (# 6512)
  > f3e94ec xds: improve error message when matched route on client is not of type RouteActionRoute (# 6248)
  > bb41067 balancergroup: do not cache closed sub-balancers by default (# 6523)
  > 68704f8 gracefulswitch, stub: remove last UpdateSubConnState references (# 6533)
  > 4900699 balancer/rls, xds/wrrlocality: stop forwarding UpdateSubConnState calls (# 6532)
  > ebc3c51 internal/balancergroup: remove usage of UpdateSubConnState (# 6528)
  > 5da2731 balancer/weightedtarget: stop forwarding UpdateSubConnState calls (# 6525)
  > 182b0ad interop/grpc_testing: regenerate protos (# 6534)
  > e274152 rls: fix flaky test introduced by # 6514 (# 6535)
  > 61a1f77 balancer/weightedroundrobin: migrate to StateListener (# 6530)
  > 175c84c xds/ringhash: use StateListener instead of UpdateSubConnState (# 6522)
  > 3fa17cc test: speed up test that was taking 10 seconds to timeout (# 6531)
  > 694cb64 xds/clusterresolver: stop forwarding UpdateSubConnState calls (# 6526)
  > 8f51ca8 tests: stop using UpdateSubConnState (# 6527)
  > cea77bb xds/clustermanager: stop forwarding UpdateSubConnState calls (# 6519)
  > ce68413 xds/priority: stop forwarding UpdateSubConnState calls (# 6521)
  > dceb6ee xds/clusterimpl: stop forwarding UpdateSubConnState calls (# 6518)
  > 8def12a xds/outlierdetection: Stop handling UpdateSubConnState forwarding (# 6520)
  > 67a8e73 multiple/test: use stub balancer instead of defining wrapped balancers (# 6514)
  > 92b481a test: allow set request/response size in interop soak test (# 6513)
  > 07609e1 benchmark: restore old buffer size values for published benchmarks (# 6516)
  > 2059c6e grpc: report connectivity state changes on the ClientConn for Subscribers (# 6437)
  > 4832deb test: clean up deadlines set in tests (# 6506)
  > 9c46304 xds/cdsbalancer: stop handling subconn state updates (# 6509)
  > e9a4e94 base: update base balancer for new APIs (# 6503)
  > 6c0c69e all: replace RemoveSubConn with Shutdown as much as possible (# 6505)
  > 28ac6ef xdsclient: make watch timer a no-op if authority is closed (# 6502)
  > d06ab0d pickfirst: receive state updates via callback instead of UpdateSubConnState (# 6495)
  > 7aceafc balancer: add SubConn.Shutdown; deprecate Balancer.RemoveSubConn (# 6493)
  > 4fe8d3d balancer: fix tests not properly updating subconn states (# 6501)
  > 8ebe462 outlierdetection: fix unconditional calls of child UpdateSubConnState (# 6500)
  > 5d3d9d7 grpc: perform a blocking close of the balancer in ccb (# 6497)
  > ecc5645 clusterresolver: fix a flaky test (# 6499)
  > b9356e3 client: fix race between connection error and subconn shutdown (# 6494)
  > 2db7b17 test/xds: increase default test timeout (# 6498)
  > 8f496b2 test/kokoro: Add bootstrap generator test into Go Kokoro script (# 6463)
  > 0246373 testutils: remove TestSubConns for future extensibility (# 6492)
  > c635404 balancer: add StateListener to NewSubConnOptions for SubConn state updates (# 6481)
  > 94df716 resolver: State: add Endpoints and deprecate Addresses (# 6471)
  > 20c51a9 pickfirst: add tests for resolver error scenarios (# 6484)
  > b8d36ca pickfirst: add prefix logging (# 6482)
  > 5ce5686 pickfirst: guard config parsing on GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG (# 6470)
  > 41d1232 resolver/weighted_round_robin: remove experimental suffix from name (# 6477)
  > 2aa2615 clusterresolver: comply with A37 for handling errors from discovery mechanisms (# 6461)
  > d7f45cd xds/server: create the xDS client when the xDS enabled gRPC server is created (# 6446)
  > f1fc2ca clientconn: add channel ID to some idleness logs (# 6459)
  > 9bb44fb transport: use a sync.Pool to share per-connection write buffer (# 6309)
  > d524b40 multiple: update dependencies after 1.57 branch cut (# 6452)
  > 7aab9c0 stats: Add RPC event for blocking for a picker update (# 6422)
  > 02946a3 resolver: remove deprecated AddressType (# 6451)
  > 919fe35 Change version to 1.58.0-dev (# 6450)

Signed-off-by: Knative Automation <automation@knative.team>
Authored by Knative Automation on 2023-09-19 10:38:57 +01:00; committed by GitHub
parent 20b9b03a30
commit 0b8c5d4159
131 changed files with 2447 additions and 1667 deletions

go.mod

@@ -12,7 +12,7 @@ require (
 github.com/spf13/pflag v1.0.5
 github.com/spf13/viper v1.16.0
 golang.org/x/mod v0.12.0
-golang.org/x/term v0.11.0
+golang.org/x/term v0.12.0
 gotest.tools/v3 v3.3.0
 k8s.io/api v0.26.5
 k8s.io/apiextensions-apiserver v0.26.5
@@ -20,12 +20,12 @@ require (
 k8s.io/cli-runtime v0.26.5
 k8s.io/client-go v0.26.5
 k8s.io/code-generator v0.26.5
-knative.dev/client-pkg v0.0.0-20230815131440-5abd12981b4b
-knative.dev/eventing v0.38.1-0.20230822134255-a2e2aa3d515d
-knative.dev/hack v0.0.0-20230818155117-9cc05a31e8c0
-knative.dev/networking v0.0.0-20230822003854-1d7920d27b9e
-knative.dev/pkg v0.0.0-20230821102121-81e4ee140363
-knative.dev/serving v0.38.1-0.20230823024257-eaff0b39b99e
+knative.dev/client-pkg v0.0.0-20230914131734-b21a925efce6
+knative.dev/eventing v0.38.1-0.20230918195630-5acf97450f78
+knative.dev/hack v0.0.0-20230914013105-0bb79ff2d162
+knative.dev/networking v0.0.0-20230918152419-6feaf0cf4a0e
+knative.dev/pkg v0.0.0-20230918163324-7fe699e4f743
+knative.dev/serving v0.38.1-0.20230918165406-7222bf9fe259
 sigs.k8s.io/yaml v1.3.0
 )
@@ -45,12 +45,12 @@ require (
 github.com/davecgh/go-spew v1.1.1 // indirect
 github.com/emicklei/go-restful/v3 v3.9.0 // indirect
 github.com/evanphx/json-patch v5.6.0+incompatible // indirect
-github.com/evanphx/json-patch/v5 v5.6.0 // indirect
+github.com/evanphx/json-patch/v5 v5.7.0 // indirect
 github.com/fsnotify/fsnotify v1.6.0 // indirect
 github.com/go-errors/errors v1.4.2 // indirect
 github.com/go-kit/log v0.2.1 // indirect
 github.com/go-logfmt/logfmt v0.5.1 // indirect
-github.com/go-logr/logr v1.2.3 // indirect
+github.com/go-logr/logr v1.2.4 // indirect
 github.com/go-openapi/jsonpointer v0.19.5 // indirect
 github.com/go-openapi/jsonreference v0.20.0 // indirect
 github.com/go-openapi/swag v0.22.3 // indirect
@@ -62,7 +62,7 @@ require (
 github.com/google/go-containerregistry v0.13.0 // indirect
 github.com/google/gofuzz v1.2.0 // indirect
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
-github.com/google/uuid v1.3.0 // indirect
+github.com/google/uuid v1.3.1 // indirect
 github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
 github.com/imdario/mergo v0.3.13 // indirect
@@ -80,7 +80,7 @@ require (
 github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 github.com/opencontainers/go-digest v1.0.0 // indirect
-github.com/openzipkin/zipkin-go v0.4.1 // indirect
+github.com/openzipkin/zipkin-go v0.4.2 // indirect
 github.com/pelletier/go-toml/v2 v2.0.9 // indirect
 github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 github.com/pkg/errors v0.9.1 // indirect
@@ -102,21 +102,21 @@ require (
 go.starlark.net v0.0.0-20220817180228-f738f5508c12 // indirect
 go.uber.org/atomic v1.10.0 // indirect
 go.uber.org/multierr v1.10.0 // indirect
-go.uber.org/zap v1.25.0 // indirect
-golang.org/x/net v0.14.0 // indirect
-golang.org/x/oauth2 v0.11.0 // indirect
+go.uber.org/zap v1.26.0 // indirect
+golang.org/x/net v0.15.0 // indirect
+golang.org/x/oauth2 v0.12.0 // indirect
 golang.org/x/sync v0.3.0 // indirect
-golang.org/x/sys v0.11.0 // indirect
-golang.org/x/text v0.12.0 // indirect
+golang.org/x/sys v0.12.0 // indirect
+golang.org/x/text v0.13.0 // indirect
 golang.org/x/time v0.3.0 // indirect
-golang.org/x/tools v0.12.0 // indirect
-gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect
-google.golang.org/api v0.138.0 // indirect
+golang.org/x/tools v0.13.0 // indirect
+gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
+google.golang.org/api v0.141.0 // indirect
 google.golang.org/appengine v1.6.7 // indirect
 google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect
 google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect
-google.golang.org/grpc v1.57.0 // indirect
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230911183012-2d3300fd4832 // indirect
+google.golang.org/grpc v1.58.1 // indirect
 google.golang.org/protobuf v1.31.0 // indirect
 gopkg.in/inf.v0 v0.9.1 // indirect
 gopkg.in/ini.v1 v1.67.0 // indirect

go.sum

@@ -52,7 +52,6 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8V
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves=
 github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
-github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -97,8 +96,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
 github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
-github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
+github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
+github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
 github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
 github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
 github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
@@ -122,8 +121,8 @@ github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
@@ -133,6 +132,7 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
 github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
 github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -207,12 +207,13 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf
 github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
@@ -234,7 +235,6 @@ github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
 github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -289,13 +289,12 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
-github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
+github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
+github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/openzipkin/zipkin-go v0.4.1 h1:kNd/ST2yLLWhaWrkgchya40TJabe8Hioj9udfPcEO5A=
-github.com/openzipkin/zipkin-go v0.4.1/go.mod h1:qY0VqDSN1pOBN94dBc6w2GJlWLiovAyg7Qt6/I9HecM=
+github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA=
+github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY=
 github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0=
 github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
 github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
@@ -421,8 +420,8 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/
 go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
 go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c=
-go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk=
+go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
+go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -507,8 +506,8 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
-golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -520,8 +519,8 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
-golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
+golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4=
+golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -586,12 +585,12 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
-golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
+golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -601,8 +600,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
-golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -658,14 +657,14 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
-golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
+golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gomodules.xyz/jsonpatch/v2 v2.3.0 h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc=
-gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
+gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
+gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
 google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
 google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
 google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -686,8 +685,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513
 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
 google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
 google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.138.0 h1:K/tVp05MxNVbHShRw9m7e9VJGdagNeTdMzqPH7AUqr0=
-google.golang.org/api v0.138.0/go.mod h1:4xyob8CxC+0GChNBvEUAk8VBKNvYOTWM9T3v3UfRxuY=
+google.golang.org/api v0.141.0 h1:Df6vfMgDoIM6ss0m7H4MPwFwY87WNXHfBIda/Bmfl4E=
+google.golang.org/api v0.141.0/go.mod h1:iZqLkdPlXKyG0b90eu6KxVSE4D/ccRF2e/doKD2CnQQ=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -739,8 +738,8 @@ google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWof
 google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
 google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44=
 google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230911183012-2d3300fd4832 h1:o4LtQxebKIJ4vkzyhtD2rfUNZ20Zf0ik5YVP5E7G7VE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230911183012-2d3300fd4832/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -760,8 +759,8 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5
 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
 google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
 google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
-google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
+google.golang.org/grpc v1.58.1 h1:OL+Vz23DTtrrldqHK49FUOPHyY75rvFqJfXC84NYW58=
+google.golang.org/grpc v1.58.1/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -833,18 +832,18 @@ k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+O
 k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
 k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 h1:GfD9OzL11kvZN5iArC6oTS7RTj7oJOIfnislxYlqTj8=
 k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-knative.dev/client-pkg v0.0.0-20230815131440-5abd12981b4b h1:Ln3mDUER89kC4sYHkCpoZVcwthGlMwjekQi2IdMH4vU=
-knative.dev/client-pkg v0.0.0-20230815131440-5abd12981b4b/go.mod h1:GRVZm1rKOy0lpPBGRM1ugK5joDbhXFEvULINHqPoA9U=
-knative.dev/eventing v0.38.1-0.20230822134255-a2e2aa3d515d h1:K8EcKbY3POUZ+D7uekPUqYY7m+Ci3nkYu1+SqpYiwdY=
-knative.dev/eventing v0.38.1-0.20230822134255-a2e2aa3d515d/go.mod h1:InEIckt+XICxXXYy2P4kqEJ4nMDXqI1iCUZ84vGDRbM=
-knative.dev/hack v0.0.0-20230818155117-9cc05a31e8c0 h1:n9YEGYuoj31pAkhGlNL+xTQAeXKYTLeMmIZLWA9fAeo=
-knative.dev/hack v0.0.0-20230818155117-9cc05a31e8c0/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
-knative.dev/networking v0.0.0-20230822003854-1d7920d27b9e h1:9CcYOXJKJSiZ5aeNm0AJZ1DjOlisadZhMSa/O/hOSmE=
-knative.dev/networking v0.0.0-20230822003854-1d7920d27b9e/go.mod h1:1voQlQD0tuot6U3Kldw+uch33mK2LV85fi3MfbV0CP8=
-knative.dev/pkg v0.0.0-20230821102121-81e4ee140363 h1:TI2hMwTM5Bl+yaWu1gN5bXAHSvc+FtH9cqm3NzmDBtY=
-knative.dev/pkg v0.0.0-20230821102121-81e4ee140363/go.mod h1:dA3TdhFTRm4KmmpvfknpGV43SbGNFkLHySjC8/+NczM=
-knative.dev/serving v0.38.1-0.20230823024257-eaff0b39b99e h1:LbHIIoOPcHXDwoJO/5OYqBx46ChN5heEujHiJK71GBc=
-knative.dev/serving v0.38.1-0.20230823024257-eaff0b39b99e/go.mod h1:/q/HksDFhDiPdwOiyFdDE464YF+GjOYL22x7q57NPB0=
+knative.dev/client-pkg v0.0.0-20230914131734-b21a925efce6 h1:TYwrsutijYhQcgP3SIwjjAkn/9r58P/2mboDxyKqVDc=
+knative.dev/client-pkg v0.0.0-20230914131734-b21a925efce6/go.mod h1:GRVZm1rKOy0lpPBGRM1ugK5joDbhXFEvULINHqPoA9U=
+knative.dev/eventing v0.38.1-0.20230918195630-5acf97450f78 h1:7ck/w/2+Gg0e9fWk6QrP7q0Jk3JvRwzuYUCfu3Sl1No=
+knative.dev/eventing v0.38.1-0.20230918195630-5acf97450f78/go.mod h1:drwtJr2Wan4oW+K9DLoN+mkG5LaRgH9LhuxV646hBKk=
+knative.dev/hack v0.0.0-20230914013105-0bb79ff2d162 h1:5IYUAmyfUsvSoP5ZQmqBetYLMv/EUEayC4uHEuhTnCo=
+knative.dev/hack v0.0.0-20230914013105-0bb79ff2d162/go.mod h1:dx0YG3YWqJu653e9tjcT0Q1ZdS9JJXLKbUhzr4EB0g8=
+knative.dev/networking v0.0.0-20230918152419-6feaf0cf4a0e h1:53Z4C003PKNKQGgfWIfKkveo7J0y6eRxN18K1TAgqaQ=
+knative.dev/networking v0.0.0-20230918152419-6feaf0cf4a0e/go.mod h1:t5rGgqqJ55N1KdGcaT/S/3mVJfttqQx0xa/wxcLC09w=
+knative.dev/pkg v0.0.0-20230918163324-7fe699e4f743 h1:AI7nF2yLlbzifEYbNpyQFqqnI2cvUVwxeLeTckD+/Fg=
+knative.dev/pkg v0.0.0-20230918163324-7fe699e4f743/go.mod h1:zIkWt0Gh4IZF4lWyU2UN46f+8icvkkjZBVP7Kfhz4oA=
+knative.dev/serving v0.38.1-0.20230918165406-7222bf9fe259 h1:2qBW7GM92sbBjszKLu/EJrhPJbPvQ6RSKXq3r/15DIw=
+knative.dev/serving v0.38.1-0.20230918165406-7222bf9fe259/go.mod h1:5wCFY88XFzaEMCtRGWUl8uWm7AnWPAGUZRJ2+DCGxBw=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=


@@ -180,7 +180,7 @@ func (n *partialDoc) UnmarshalJSON(data []byte) error {
 if t, err := d.Token(); err != nil {
     return err
 } else if t != startObject {
-    return &syntaxError{fmt.Sprintf("unexpected JSON token in document node: %s", t)}
+    return &syntaxError{fmt.Sprintf("unexpected JSON token in document node: %v", t)}
 }
 for d.More() {
     k, err := d.Token()
@@ -454,7 +454,11 @@ func (o Operation) value() *lazyNode {
 // ValueInterface decodes the operation value into an interface.
 func (o Operation) ValueInterface() (interface{}, error) {
-    if obj, ok := o["value"]; ok && obj != nil {
+    if obj, ok := o["value"]; ok {
+        if obj == nil {
+            return nil, nil
+        }
+
         var v interface{}
         err := json.Unmarshal(*obj, &v)
@@ -816,6 +820,43 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
     return nil
 }

+func validateOperation(op Operation) error {
+    switch op.Kind() {
+    case "add", "replace":
+        if _, err := op.ValueInterface(); err != nil {
+            return errors.Wrapf(err, "failed to decode 'value'")
+        }
+    case "move", "copy":
+        if _, err := op.From(); err != nil {
+            return errors.Wrapf(err, "failed to decode 'from'")
+        }
+    case "remove", "test":
+    default:
+        return fmt.Errorf("unsupported operation")
+    }
+
+    if _, err := op.Path(); err != nil {
+        return errors.Wrapf(err, "failed to decode 'path'")
+    }
+
+    return nil
+}
+
+func validatePatch(p Patch) error {
+    for _, op := range p {
+        if err := validateOperation(op); err != nil {
+            opData, infoErr := json.Marshal(op)
+            if infoErr != nil {
+                return errors.Wrapf(err, "invalid operation")
+            }
+
+            return errors.Wrapf(err, "invalid operation %s", opData)
+        }
+    }
+
+    return nil
+}
+
 func (p Patch) remove(doc *container, op Operation, options *ApplyOptions) error {
     path, err := op.Path()
     if err != nil {
@@ -965,7 +1006,7 @@ func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error {
 }

 if val == nil {
-    if op.value().raw == nil {
+    if op.value() == nil || op.value().raw == nil {
         return nil
     }
     return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
@@ -1044,6 +1085,10 @@ func DecodePatch(buf []byte) (Patch, error) {
     return nil, err
 }

+    if err := validatePatch(p); err != nil {
+        return nil, err
+    }
+
 return p, nil
 }
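
The json-patch v5.7.0 change shown above means malformed operations are now rejected when a patch is decoded rather than surfacing later during Apply. A minimal sketch of that behavior, assuming the upstream github.com/evanphx/json-patch/v5 API; the document and patch literals are made up for illustration:

package main

import (
    "fmt"

    jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
    // "frobnicate" is not a JSON Patch operation, so v5.7.0's DecodePatch
    // (which now runs validatePatch) rejects it up front.
    bad := []byte(`[{"op": "frobnicate", "path": "/spec/replicas"}]`)
    if _, err := jsonpatch.DecodePatch(bad); err != nil {
        fmt.Println("decode rejected:", err)
    }

    // A well-formed patch still decodes and applies as before.
    doc := []byte(`{"spec": {"replicas": 1}}`)
    good := []byte(`[{"op": "replace", "path": "/spec/replicas", "value": 3}]`)
    p, err := jsonpatch.DecodePatch(good)
    if err != nil {
        panic(err)
    }
    out, err := p.Apply(doc)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out)) // {"spec":{"replicas":3}}
}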


@@ -6,7 +6,6 @@ linters:
   disable-all: true
   enable:
     - asciicheck
-    - deadcode
     - errcheck
     - forcetypeassert
     - gocritic
@@ -18,10 +17,8 @@ linters:
     - misspell
     - revive
     - staticcheck
-    - structcheck
     - typecheck
     - unused
-    - varcheck

 issues:
   exclude-use-default: false


@@ -20,35 +20,5 @@ package logr
 // used whenever the caller is not interested in the logs. Logger instances
 // produced by this function always compare as equal.
 func Discard() Logger {
-    return Logger{
-        level: 0,
-        sink: discardLogSink{},
-    }
-}
-
-// discardLogSink is a LogSink that discards all messages.
-type discardLogSink struct{}
-
-// Verify that it actually implements the interface
-var _ LogSink = discardLogSink{}
-
-func (l discardLogSink) Init(RuntimeInfo) {
-}
-
-func (l discardLogSink) Enabled(int) bool {
-    return false
-}
-
-func (l discardLogSink) Info(int, string, ...interface{}) {
-}
-
-func (l discardLogSink) Error(error, string, ...interface{}) {
-}
-
-func (l discardLogSink) WithValues(...interface{}) LogSink {
-    return l
-}
-
-func (l discardLogSink) WithName(string) LogSink {
-    return l
+    return New(nil)
 }
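
For reference, logr 1.2.4 (pulled in by this bump) collapses the hand-written discard sink into logr.New(nil); logr.Discard() keeps its contract of returning a no-op Logger, so caller code does not change. A small sketch of typical use; the process helper is illustrative, not part of this repo:

package main

import "github.com/go-logr/logr"

// process accepts any logr.Logger; callers that do not care about logging
// can pass logr.Discard(), which drops everything (and, per the doc comment
// above, all Discard() loggers compare as equal).
func process(items []string, log logr.Logger) {
    for _, item := range items {
        log.V(1).Info("processing", "item", item)
    }
}

func main() {
    process([]string{"a", "b"}, logr.Discard())
}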


@ -21,7 +21,7 @@ limitations under the License.
// to back that API. Packages in the Go ecosystem can depend on this package, // to back that API. Packages in the Go ecosystem can depend on this package,
// while callers can implement logging with whatever backend is appropriate. // while callers can implement logging with whatever backend is appropriate.
// //
// Usage // # Usage
// //
// Logging is done using a Logger instance. Logger is a concrete type with // Logging is done using a Logger instance. Logger is a concrete type with
// methods, which defers the actual logging to a LogSink interface. The main // methods, which defers the actual logging to a LogSink interface. The main
@ -30,15 +30,19 @@ limitations under the License.
// "structured logging". // "structured logging".
// //
// With Go's standard log package, we might write: // With Go's standard log package, we might write:
//
// log.Printf("setting target value %s", targetValue) // log.Printf("setting target value %s", targetValue)
// //
// With logr's structured logging, we'd write: // With logr's structured logging, we'd write:
//
// logger.Info("setting target", "value", targetValue) // logger.Info("setting target", "value", targetValue)
// //
// Errors are much the same. Instead of: // Errors are much the same. Instead of:
//
// log.Printf("failed to open the pod bay door for user %s: %v", user, err) // log.Printf("failed to open the pod bay door for user %s: %v", user, err)
// //
// We'd write: // We'd write:
//
// logger.Error(err, "failed to open the pod bay door", "user", user) // logger.Error(err, "failed to open the pod bay door", "user", user)
// //
// Info() and Error() are very similar, but they are separate methods so that // Info() and Error() are very similar, but they are separate methods so that
@ -47,7 +51,7 @@ limitations under the License.
// always logged, regardless of the current verbosity. If there is no error // always logged, regardless of the current verbosity. If there is no error
// instance available, passing nil is valid. // instance available, passing nil is valid.
// //
// Verbosity // # Verbosity
// //
// Often we want to log information only when the application in "verbose // Often we want to log information only when the application in "verbose
// mode". To write log lines that are more verbose, Logger has a V() method. // mode". To write log lines that are more verbose, Logger has a V() method.
@ -58,14 +62,16 @@ limitations under the License.
// Error messages do not have a verbosity level and are always logged. // Error messages do not have a verbosity level and are always logged.
// //
// Where we might have written: // Where we might have written:
//
// if flVerbose >= 2 { // if flVerbose >= 2 {
// log.Printf("an unusual thing happened") // log.Printf("an unusual thing happened")
// } // }
// //
// We can write: // We can write:
//
// logger.V(2).Info("an unusual thing happened") // logger.V(2).Info("an unusual thing happened")
// //
// Logger Names // # Logger Names
// //
// Logger instances can have name strings so that all messages logged through // Logger instances can have name strings so that all messages logged through
// that instance have additional context. For example, you might want to add // that instance have additional context. For example, you might want to add
@ -82,17 +88,19 @@ limitations under the License.
// joining operation (e.g. whitespace, commas, periods, slashes, brackets, // joining operation (e.g. whitespace, commas, periods, slashes, brackets,
// quotes, etc). // quotes, etc).
// //
// Saved Values // # Saved Values
// //
// Logger instances can store any number of key/value pairs, which will be // Logger instances can store any number of key/value pairs, which will be
// logged alongside all messages logged through that instance. For example, // logged alongside all messages logged through that instance. For example,
// you might want to create a Logger instance per managed object: // you might want to create a Logger instance per managed object:
// //
// With the standard log package, we might write: // With the standard log package, we might write:
//
// log.Printf("decided to set field foo to value %q for object %s/%s", // log.Printf("decided to set field foo to value %q for object %s/%s",
// targetValue, object.Namespace, object.Name) // targetValue, object.Namespace, object.Name)
// //
// With logr we'd write: // With logr we'd write:
//
// // Elsewhere: set up the logger to log the object name. // // Elsewhere: set up the logger to log the object name.
// obj.logger = mainLogger.WithValues( // obj.logger = mainLogger.WithValues(
// "name", obj.name, "namespace", obj.namespace) // "name", obj.name, "namespace", obj.namespace)
@ -100,7 +108,7 @@ limitations under the License.
// // later on... // // later on...
// obj.logger.Info("setting foo", "value", targetValue) // obj.logger.Info("setting foo", "value", targetValue)
// //
// Best Practices // # Best Practices
// //
// Logger has very few hard rules, with the goal that LogSink implementations // Logger has very few hard rules, with the goal that LogSink implementations
// might have a lot of freedom to differentiate. There are, however, some // might have a lot of freedom to differentiate. There are, however, some
@ -124,15 +132,15 @@ limitations under the License.
// around. For cases where passing a logger is optional, a pointer to Logger // around. For cases where passing a logger is optional, a pointer to Logger
// should be used. // should be used.
// //
// Key Naming Conventions // # Key Naming Conventions
// //
// Keys are not strictly required to conform to any specification or regex, but // Keys are not strictly required to conform to any specification or regex, but
// it is recommended that they: // it is recommended that they:
// * be human-readable and meaningful (not auto-generated or simple ordinals) // - be human-readable and meaningful (not auto-generated or simple ordinals)
// * be constant (not dependent on input data) // - be constant (not dependent on input data)
// * contain only printable characters // - contain only printable characters
// * not contain whitespace or punctuation // - not contain whitespace or punctuation
// * use lower case for simple keys and lowerCamelCase for more complex ones // - use lower case for simple keys and lowerCamelCase for more complex ones
// //
// These guidelines help ensure that log data is processed properly regardless // These guidelines help ensure that log data is processed properly regardless
// of the log implementation. For example, log implementations will try to // of the log implementation. For example, log implementations will try to
@ -141,24 +149,25 @@ limitations under the License.
// While users are generally free to use key names of their choice, it's // While users are generally free to use key names of their choice, it's
// generally best to avoid using the following keys, as they're frequently used // generally best to avoid using the following keys, as they're frequently used
// by implementations: // by implementations:
// * "caller": the calling information (file/line) of a particular log line // - "caller": the calling information (file/line) of a particular log line
// * "error": the underlying error value in the `Error` method // - "error": the underlying error value in the `Error` method
// * "level": the log level // - "level": the log level
// * "logger": the name of the associated logger // - "logger": the name of the associated logger
// * "msg": the log message // - "msg": the log message
// * "stacktrace": the stack trace associated with a particular log line or // - "stacktrace": the stack trace associated with a particular log line or
// error (often from the `Error` message) // error (often from the `Error` message)
// * "ts": the timestamp for a log line // - "ts": the timestamp for a log line
// //
// Implementations are encouraged to make use of these keys to represent the // Implementations are encouraged to make use of these keys to represent the
// above concepts, when necessary (for example, in a pure-JSON output form, it // above concepts, when necessary (for example, in a pure-JSON output form, it
// would be necessary to represent at least message and timestamp as ordinary // would be necessary to represent at least message and timestamp as ordinary
// named values). // named values).
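For illustration only, a call site that follows these conventions might look like the sketch below (handleObject and its arguments are assumptions, not from the diff):

```go
package example

import "github.com/go-logr/logr"

// handleObject uses constant, lowerCamelCase keys without whitespace and
// avoids the reserved names ("msg", "error", "ts", ...) listed above.
func handleObject(log logr.Logger, name string, retries int, err error) {
	log.Info("processed object", "objectName", name, "retryCount", retries)
	if err != nil {
		log.Error(err, "failed to persist object", "objectName", name)
	}
}
```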
// //
// Break Glass // # Break Glass
// //
// Implementations may choose to give callers access to the underlying // Implementations may choose to give callers access to the underlying
// logging implementation. The recommended pattern for this is: // logging implementation. The recommended pattern for this is:
//
// // Underlier exposes access to the underlying logging implementation. // // Underlier exposes access to the underlying logging implementation.
// // Since callers only have a logr.Logger, they have to know which // // Since callers only have a logr.Logger, they have to know which
// // implementation is in use, so this interface is less of an abstraction // // implementation is in use, so this interface is less of an abstraction
@ -168,8 +177,9 @@ limitations under the License.
// } // }
// //
// Logger grants access to the sink to enable type assertions like this: // Logger grants access to the sink to enable type assertions like this:
//
// func DoSomethingWithImpl(log logr.Logger) { // func DoSomethingWithImpl(log logr.Logger) {
// if underlier, ok := log.GetSink()(impl.Underlier) { // if underlier, ok := log.GetSink().(impl.Underlier); ok {
// implLogger := underlier.GetUnderlying() // implLogger := underlier.GetUnderlying()
// ... // ...
// } // }
@ -177,11 +187,12 @@ limitations under the License.
// //
// Custom `With*` functions can be implemented by copying the complete // Custom `With*` functions can be implemented by copying the complete
// Logger struct and replacing the sink in the copy: // Logger struct and replacing the sink in the copy:
//
// // WithFooBar changes the foobar parameter in the log sink and returns a // // WithFooBar changes the foobar parameter in the log sink and returns a
// // new logger with that modified sink. It does nothing for loggers where // // new logger with that modified sink. It does nothing for loggers where
// // the sink doesn't support that parameter. // // the sink doesn't support that parameter.
// func WithFoobar(log logr.Logger, foobar int) logr.Logger { // func WithFoobar(log logr.Logger, foobar int) logr.Logger {
// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { // if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) // log = log.WithSink(foobarLogSink.WithFooBar(foobar))
// } // }
// return log // return log
@ -201,11 +212,14 @@ import (
) )
// New returns a new Logger instance. This is primarily used by libraries // New returns a new Logger instance. This is primarily used by libraries
// implementing LogSink, rather than end users. // implementing LogSink, rather than end users. Passing a nil sink will create
// a Logger which discards all log lines.
func New(sink LogSink) Logger { func New(sink LogSink) Logger {
logger := Logger{} logger := Logger{}
logger.setSink(sink) logger.setSink(sink)
if sink != nil {
sink.Init(runtimeInfo) sink.Init(runtimeInfo)
}
return logger return logger
} }
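A short sketch of the behaviour the updated doc comment describes, namely that a nil sink yields a logger that silently drops everything:

```go
package main

import (
	"errors"

	"github.com/go-logr/logr"
)

func main() {
	var log logr.Logger // zero value: no sink
	log.Info("discarded") // no-op, no panic
	logr.New(nil).Error(errors.New("boom"), "discarded too") // also a no-op
}
```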
@ -244,7 +258,7 @@ type Logger struct {
// Enabled tests whether this Logger is enabled. For example, commandline // Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info logs. // flags might be used to set the logging verbosity and disable some info logs.
func (l Logger) Enabled() bool { func (l Logger) Enabled() bool {
return l.sink.Enabled(l.level) return l.sink != nil && l.sink.Enabled(l.level)
} }
// Info logs a non-error message with the given key/value pairs as context. // Info logs a non-error message with the given key/value pairs as context.
@ -254,6 +268,9 @@ func (l Logger) Enabled() bool {
// information. The key/value pairs must alternate string keys and arbitrary // information. The key/value pairs must alternate string keys and arbitrary
// values. // values.
func (l Logger) Info(msg string, keysAndValues ...interface{}) { func (l Logger) Info(msg string, keysAndValues ...interface{}) {
if l.sink == nil {
return
}
if l.Enabled() { if l.Enabled() {
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()() withHelper.GetCallStackHelper()()
@ -273,6 +290,9 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) {
// triggered this log line, if present. The err parameter is optional // triggered this log line, if present. The err parameter is optional
// and nil may be passed instead of an error instance. // and nil may be passed instead of an error instance.
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
if l.sink == nil {
return
}
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()() withHelper.GetCallStackHelper()()
} }
@ -284,6 +304,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
// level means a log message is less important. Negative V-levels are treated // level means a log message is less important. Negative V-levels are treated
// as 0. // as 0.
func (l Logger) V(level int) Logger { func (l Logger) V(level int) Logger {
if l.sink == nil {
return l
}
if level < 0 { if level < 0 {
level = 0 level = 0
} }
@ -294,6 +317,9 @@ func (l Logger) V(level int) Logger {
// WithValues returns a new Logger instance with additional key/value pairs. // WithValues returns a new Logger instance with additional key/value pairs.
// See Info for documentation on how key/value pairs work. // See Info for documentation on how key/value pairs work.
func (l Logger) WithValues(keysAndValues ...interface{}) Logger { func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
if l.sink == nil {
return l
}
l.setSink(l.sink.WithValues(keysAndValues...)) l.setSink(l.sink.WithValues(keysAndValues...))
return l return l
} }
@ -304,6 +330,9 @@ func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
// contain only letters, digits, and hyphens (see the package documentation for // contain only letters, digits, and hyphens (see the package documentation for
// more information). // more information).
func (l Logger) WithName(name string) Logger { func (l Logger) WithName(name string) Logger {
if l.sink == nil {
return l
}
l.setSink(l.sink.WithName(name)) l.setSink(l.sink.WithName(name))
return l return l
} }
@ -324,6 +353,9 @@ func (l Logger) WithName(name string) Logger {
// WithCallDepth(1) because it works with implementations that support the // WithCallDepth(1) because it works with implementations that support the
// WithCallDepth(1) because it works with implementations that support the // WithCallDepth(1) because it works with implementations that support the
// CallDepthLogSink and/or CallStackHelperLogSink interfaces. // CallDepthLogSink and/or CallStackHelperLogSink interfaces.
func (l Logger) WithCallDepth(depth int) Logger { func (l Logger) WithCallDepth(depth int) Logger {
if l.sink == nil {
return l
}
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
l.setSink(withCallDepth.WithCallDepth(depth)) l.setSink(withCallDepth.WithCallDepth(depth))
} }
@ -345,6 +377,9 @@ func (l Logger) WithCallDepth(depth int) Logger {
// implementation does not support either of these, the original Logger will be // implementation does not support either of these, the original Logger will be
// returned. // returned.
func (l Logger) WithCallStackHelper() (func(), Logger) { func (l Logger) WithCallStackHelper() (func(), Logger) {
if l.sink == nil {
return func() {}, l
}
var helper func() var helper func()
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
l.setSink(withCallDepth.WithCallDepth(1)) l.setSink(withCallDepth.WithCallDepth(1))
@ -357,6 +392,11 @@ func (l Logger) WithCallStackHelper() (func(), Logger) {
return helper, l return helper, l
} }
// IsZero returns true if this logger is an uninitialized zero value.
func (l Logger) IsZero() bool {
return l.sink == nil
}
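A hedged sketch of how callers might use IsZero (newDefaultLogger is a hypothetical stand-in for whatever LogSink-backed constructor the application normally uses):

```go
package main

import "github.com/go-logr/logr"

// newDefaultLogger is a hypothetical placeholder constructor.
func newDefaultLogger() logr.Logger { return logr.Logger{} }

func run(log logr.Logger) {
	if log.IsZero() {
		log = newDefaultLogger() // caller passed no logger; fall back
	}
	log.Info("starting")
}

func main() { run(logr.Logger{}) }
```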
// contextKey is how we find Loggers in a context.Context. // contextKey is how we find Loggers in a context.Context.
type contextKey struct{} type contextKey struct{}
@ -442,7 +482,7 @@ type LogSink interface {
WithName(name string) LogSink WithName(name string) LogSink
} }
// CallDepthLogSink represents a Logger that knows how to climb the call stack // CallDepthLogSink represents a LogSink that knows how to climb the call stack
// to identify the original call site and can offset the depth by a specified // to identify the original call site and can offset the depth by a specified
// number of frames. This is useful for users who have helper functions // number of frames. This is useful for users who have helper functions
// between the "real" call site and the actual calls to Logger methods. // between the "real" call site and the actual calls to Logger methods.
@ -467,7 +507,7 @@ type CallDepthLogSink interface {
WithCallDepth(depth int) LogSink WithCallDepth(depth int) LogSink
} }
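For illustration, a hypothetical helper that relies on this interface (logWarning is not part of logr):

```go
package helpers

import "github.com/go-logr/logr"

// logWarning is a made-up helper. WithCallDepth(1) asks sinks that
// implement CallDepthLogSink to attribute the log line to logWarning's
// caller rather than to this function.
func logWarning(log logr.Logger, msg string, keysAndValues ...interface{}) {
	log.WithCallDepth(1).Info(msg, keysAndValues...)
}
```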
// CallStackHelperLogSink represents a Logger that knows how to climb // CallStackHelperLogSink represents a LogSink that knows how to climb
// the call stack to identify the original call site and can skip // the call stack to identify the original call site and can skip
// intermediate helper functions if they mark themselves as // intermediate helper functions if they mark themselves as
// helper. Go's testing package uses that approach. // helper. Go's testing package uses that approach.


@ -1,9 +0,0 @@
language: go
go:
- 1.4.3
- 1.5.3
- tip
script:
- go test -v ./...

vendor/github.com/google/uuid/CHANGELOG.md generated vendored Normal file

@ -0,0 +1,10 @@
# Changelog
## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
### Bug Fixes
* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
## Changelog


@ -2,6 +2,22 @@
We definitely welcome patches and contribution to this project! We definitely welcome patches and contribution to this project!
### Tips
Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
Always try to include a test case! If it is not possible or not necessary,
please explain why in the pull request description.
### Releasing
Commits that would precipitate a SemVer change, as described in the Conventional
Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
to create a release candidate pull request. Once submitted, `release-please`
will create a release.
For tips on how to work with `release-please`, see its documentation.
### Legal requirements ### Legal requirements
In order to protect both you and ourselves, you will need to sign the In order to protect both you and ourselves, you will need to sign the


@ -1,6 +1,6 @@
# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) # uuid
The uuid package generates and inspects UUIDs based on The uuid package generates and inspects UUIDs based on
[RFC 4122](http://tools.ietf.org/html/rfc4122) [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
and DCE 1.1: Authentication and Security Services. and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named This package is based on the github.com/pborman/uuid package (previously named
@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this
change is the ability to represent an invalid UUID (vs a NIL UUID). change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install ###### Install
`go get github.com/google/uuid` ```sh
go get github.com/google/uuid
```
###### Documentation ###### Documentation
[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) [![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here: installing this package by using the GoDoc site here:


@ -7,6 +7,6 @@
package uuid package uuid
// getHardwareInterface returns nil values for the JS version of the code. // getHardwareInterface returns nil values for the JS version of the code.
// This remvoves the "net" dependency, because it is not used in the browser. // This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. // Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil } func getHardwareInterface(name string) (string, []byte) { return "", nil }


@ -69,7 +69,7 @@ func Parse(s string) (UUID, error) {
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9: case 36 + 9:
if strings.ToLower(s[:9]) != "urn:uuid:" { if !strings.EqualFold(s[:9], "urn:uuid:") {
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
} }
s = s[9:] s = s[9:]
@ -101,7 +101,8 @@ func Parse(s string) (UUID, error) {
9, 11, 9, 11,
14, 16, 14, 16,
19, 21, 19, 21,
24, 26, 28, 30, 32, 34} { 24, 26, 28, 30, 32, 34,
} {
v, ok := xtob(s[x], s[x+1]) v, ok := xtob(s[x], s[x+1])
if !ok { if !ok {
return uuid, errors.New("invalid UUID format") return uuid, errors.New("invalid UUID format")
@ -117,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) {
switch len(b) { switch len(b) {
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
} }
b = b[9:] b = b[9:]
@ -145,7 +146,8 @@ func ParseBytes(b []byte) (UUID, error) {
9, 11, 9, 11,
14, 16, 14, 16,
19, 21, 19, 21,
24, 26, 28, 30, 32, 34} { 24, 26, 28, 30, 32, 34,
} {
v, ok := xtob(b[x], b[x+1]) v, ok := xtob(b[x], b[x+1])
if !ok { if !ok {
return uuid, errors.New("invalid UUID format") return uuid, errors.New("invalid UUID format")
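The prefix check now uses EqualFold, so the "urn:uuid:" prefix is matched case-insensitively without first lower-casing (and copying) the input. A small illustration, assuming the vendored google/uuid package (the UUID value itself is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Only the prefix handling is of interest here.
	u, err := uuid.Parse("URN:UUID:f47ac10b-58cc-4372-a567-0e02b2c3d479")
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // f47ac10b-58cc-4372-a567-0e02b2c3d479
}
```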

vendor/go.uber.org/zap/.golangci.yml generated vendored Normal file

@ -0,0 +1,77 @@
output:
# Make output more digestible with quickfix in vim/emacs/etc.
sort-results: true
print-issued-lines: false
linters:
# We'll track the golangci-lint default linters manually
# instead of letting them change without our control.
disable-all: true
enable:
# golangci-lint defaults:
- errcheck
- gosimple
- govet
- ineffassign
- staticcheck
- unused
# Our own extras:
- gofmt
- nolintlint # lints nolint directives
- revive
linters-settings:
govet:
# These govet checks are disabled by default, but they're useful.
enable:
- nilness
- reflectvaluecompare
- sortslice
- unusedwrite
errcheck:
exclude-functions:
# These methods can not fail.
# They operate on an in-memory buffer.
- (*go.uber.org/zap/buffer.Buffer).Write
- (*go.uber.org/zap/buffer.Buffer).WriteByte
- (*go.uber.org/zap/buffer.Buffer).WriteString
- (*go.uber.org/zap/zapio.Writer).Close
- (*go.uber.org/zap/zapio.Writer).Sync
- (*go.uber.org/zap/zapio.Writer).Write
# Write to zapio.Writer cannot fail,
# so io.WriteString on it cannot fail.
- io.WriteString(*go.uber.org/zap/zapio.Writer)
# Writing a plain string to a fmt.State cannot fail.
- io.WriteString(fmt.State)
issues:
# Print all issues reported by all linters.
max-issues-per-linter: 0
max-same-issues: 0
# Don't ignore some of the issues that golangci-lint considers okay.
# This includes documenting all exported entities.
exclude-use-default: false
exclude-rules:
# Don't warn on unused parameters.
# Parameter names are useful; replacing them with '_' is undesirable.
- linters: [revive]
text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _'
# staticcheck already has smarter checks for empty blocks.
# revive's empty-block linter has false positives.
# For example, as of writing this, the following is not allowed.
# for foo() { }
- linters: [revive]
text: 'empty-block: this block is empty, you can remove it'
# Ignore logger.Sync() errcheck failures in example_test.go
# since those are intended to be uncomplicated examples.
- linters: [errcheck]
path: example_test.go
text: 'Error return value of `logger.Sync` is not checked'

vendor/go.uber.org/zap/CHANGELOG.md generated vendored

@ -1,7 +1,18 @@
# Changelog # Changelog
All notable changes to this project will be documented in this file. All notable changes to this project will be documented in this file.
This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## 1.26.0 (14 Sep 2023)
Enhancements:
* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured
context.
* [#1350][]: String encoding is much (~50%) faster now.
Thanks to @jquirke, @cdvr1993 for their contributions to this release.
[#1319]: https://github.com/uber-go/zap/pull/1319
[#1350]: https://github.com/uber-go/zap/pull/1350
## 1.25.0 (1 Aug 2023) ## 1.25.0 (1 Aug 2023)
@ -48,7 +59,6 @@ Enhancements:
[#1147]: https://github.com/uber-go/zap/pull/1147 [#1147]: https://github.com/uber-go/zap/pull/1147
[#1155]: https://github.com/uber-go/zap/pull/1155 [#1155]: https://github.com/uber-go/zap/pull/1155
## 1.22.0 (8 Aug 2022) ## 1.22.0 (8 Aug 2022)
Enhancements: Enhancements:
@ -197,6 +207,16 @@ Enhancements:
Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release. Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
[#865]: https://github.com/uber-go/zap/pull/865
[#867]: https://github.com/uber-go/zap/pull/867
[#881]: https://github.com/uber-go/zap/pull/881
[#903]: https://github.com/uber-go/zap/pull/903
[#912]: https://github.com/uber-go/zap/pull/912
[#913]: https://github.com/uber-go/zap/pull/913
[#928]: https://github.com/uber-go/zap/pull/928
[#931]: https://github.com/uber-go/zap/pull/931
[#936]: https://github.com/uber-go/zap/pull/936
## 1.16.0 (1 Sep 2020) ## 1.16.0 (1 Sep 2020)
Bugfixes: Bugfixes:
@ -218,6 +238,17 @@ Enhancements:
Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release. Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release.
[#629]: https://github.com/uber-go/zap/pull/629
[#697]: https://github.com/uber-go/zap/pull/697
[#828]: https://github.com/uber-go/zap/pull/828
[#835]: https://github.com/uber-go/zap/pull/835
[#843]: https://github.com/uber-go/zap/pull/843
[#844]: https://github.com/uber-go/zap/pull/844
[#852]: https://github.com/uber-go/zap/pull/852
[#854]: https://github.com/uber-go/zap/pull/854
[#861]: https://github.com/uber-go/zap/pull/861
[#862]: https://github.com/uber-go/zap/pull/862
## 1.15.0 (23 Apr 2020) ## 1.15.0 (23 Apr 2020)
Bugfixes: Bugfixes:
@ -234,6 +265,11 @@ Enhancements:
Thanks to @danielbprice for their contributions to this release. Thanks to @danielbprice for their contributions to this release.
[#804]: https://github.com/uber-go/zap/pull/804
[#812]: https://github.com/uber-go/zap/pull/812
[#806]: https://github.com/uber-go/zap/pull/806
[#813]: https://github.com/uber-go/zap/pull/813
## 1.14.1 (14 Mar 2020) ## 1.14.1 (14 Mar 2020)
Bugfixes: Bugfixes:
@ -246,6 +282,10 @@ Bugfixes:
Thanks to @YashishDua for their contributions to this release. Thanks to @YashishDua for their contributions to this release.
[#791]: https://github.com/uber-go/zap/pull/791
[#795]: https://github.com/uber-go/zap/pull/795
[#799]: https://github.com/uber-go/zap/pull/799
## 1.14.0 (20 Feb 2020) ## 1.14.0 (20 Feb 2020)
Enhancements: Enhancements:
@ -256,6 +296,11 @@ Enhancements:
Thanks to @caibirdme for their contributions to this release. Thanks to @caibirdme for their contributions to this release.
[#771]: https://github.com/uber-go/zap/pull/771
[#773]: https://github.com/uber-go/zap/pull/773
[#775]: https://github.com/uber-go/zap/pull/775
[#786]: https://github.com/uber-go/zap/pull/786
## 1.13.0 (13 Nov 2019) ## 1.13.0 (13 Nov 2019)
Enhancements: Enhancements:
@ -264,11 +309,15 @@ Enhancements:
Thanks to @jbizzle for their contributions to this release. Thanks to @jbizzle for their contributions to this release.
[#758]: https://github.com/uber-go/zap/pull/758
## 1.12.0 (29 Oct 2019) ## 1.12.0 (29 Oct 2019)
Enhancements: Enhancements:
* [#751][]: Migrate to Go modules. * [#751][]: Migrate to Go modules.
[#751]: https://github.com/uber-go/zap/pull/751
## 1.11.0 (21 Oct 2019) ## 1.11.0 (21 Oct 2019)
Enhancements: Enhancements:
@ -277,6 +326,9 @@ Enhancements:
Thanks to @juicemia, @uhthomas for their contributions to this release. Thanks to @juicemia, @uhthomas for their contributions to this release.
[#725]: https://github.com/uber-go/zap/pull/725
[#736]: https://github.com/uber-go/zap/pull/736
## 1.10.0 (29 Apr 2019) ## 1.10.0 (29 Apr 2019)
Bugfixes: Bugfixes:
@ -294,12 +346,20 @@ Enhancements:
Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions
to this release. to this release.
[#657]: https://github.com/uber-go/zap/pull/657
[#706]: https://github.com/uber-go/zap/pull/706
[#610]: https://github.com/uber-go/zap/pull/610
[#675]: https://github.com/uber-go/zap/pull/675
[#704]: https://github.com/uber-go/zap/pull/704
## v1.9.1 (06 Aug 2018) ## v1.9.1 (06 Aug 2018)
Bugfixes: Bugfixes:
* [#614][]: MapObjectEncoder should not ignore empty slices. * [#614][]: MapObjectEncoder should not ignore empty slices.
[#614]: https://github.com/uber-go/zap/pull/614
## v1.9.0 (19 Jul 2018) ## v1.9.0 (19 Jul 2018)
Enhancements: Enhancements:
@ -309,6 +369,10 @@ Enhancements:
Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
@dimroc for their contributions to this release. @dimroc for their contributions to this release.
[#602]: https://github.com/uber-go/zap/pull/602
[#572]: https://github.com/uber-go/zap/pull/572
[#606]: https://github.com/uber-go/zap/pull/606
## v1.8.0 (13 Apr 2018) ## v1.8.0 (13 Apr 2018)
Enhancements: Enhancements:
@ -322,11 +386,18 @@ Bugfixes:
Thanks to @DiSiqueira and @djui for their contributions to this release. Thanks to @DiSiqueira and @djui for their contributions to this release.
[#508]: https://github.com/uber-go/zap/pull/508
[#518]: https://github.com/uber-go/zap/pull/518
[#577]: https://github.com/uber-go/zap/pull/577
[#574]: https://github.com/uber-go/zap/pull/574
## v1.7.1 (25 Sep 2017) ## v1.7.1 (25 Sep 2017)
Bugfixes: Bugfixes:
* [#504][]: Store strings when using AddByteString with the map encoder. * [#504][]: Store strings when using AddByteString with the map encoder.
[#504]: https://github.com/uber-go/zap/pull/504
## v1.7.0 (21 Sep 2017) ## v1.7.0 (21 Sep 2017)
Enhancements: Enhancements:
@ -334,6 +405,8 @@ Enhancements:
* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user * [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user
to specify the level of the logged messages. to specify the level of the logged messages.
[#487]: https://github.com/uber-go/zap/pull/487
## v1.6.0 (30 Aug 2017) ## v1.6.0 (30 Aug 2017)
Enhancements: Enhancements:
@ -342,6 +415,9 @@ Enhancements:
* [#490][]: Add a `ContextMap` method to observer logs for simpler * [#490][]: Add a `ContextMap` method to observer logs for simpler
field validation in tests. field validation in tests.
[#490]: https://github.com/uber-go/zap/pull/490
[#491]: https://github.com/uber-go/zap/pull/491
## v1.5.0 (22 Jul 2017) ## v1.5.0 (22 Jul 2017)
Enhancements: Enhancements:
@ -355,6 +431,11 @@ Bugfixes:
Thanks to @richard-tunein and @pavius for their contributions to this release. Thanks to @richard-tunein and @pavius for their contributions to this release.
[#477]: https://github.com/uber-go/zap/pull/477
[#465]: https://github.com/uber-go/zap/pull/465
[#460]: https://github.com/uber-go/zap/pull/460
[#470]: https://github.com/uber-go/zap/pull/470
## v1.4.1 (08 Jun 2017) ## v1.4.1 (08 Jun 2017)
This release fixes two bugs. This release fixes two bugs.
@ -364,6 +445,9 @@ Bugfixes:
* [#435][]: Support a variety of case conventions when unmarshaling levels. * [#435][]: Support a variety of case conventions when unmarshaling levels.
* [#444][]: Fix a panic in the observer. * [#444][]: Fix a panic in the observer.
[#435]: https://github.com/uber-go/zap/pull/435
[#444]: https://github.com/uber-go/zap/pull/444
## v1.4.0 (12 May 2017) ## v1.4.0 (12 May 2017)
This release adds a few small features and is fully backward-compatible. This release adds a few small features and is fully backward-compatible.
@ -376,6 +460,10 @@ Enhancements:
* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a * [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a
variety of operations a bit simpler. variety of operations a bit simpler.
[#424]: https://github.com/uber-go/zap/pull/424
[#425]: https://github.com/uber-go/zap/pull/425
[#431]: https://github.com/uber-go/zap/pull/431
## v1.3.0 (25 Apr 2017) ## v1.3.0 (25 Apr 2017)
This release adds an enhancement to zap's testing helpers as well as the This release adds an enhancement to zap's testing helpers as well as the
@ -387,6 +475,9 @@ Enhancements:
particularly useful when testing the `SugaredLogger`. particularly useful when testing the `SugaredLogger`.
* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. * [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`.
[#415]: https://github.com/uber-go/zap/pull/415
[#416]: https://github.com/uber-go/zap/pull/416
## v1.2.0 (13 Apr 2017) ## v1.2.0 (13 Apr 2017)
This release adds a gRPC compatibility wrapper. It is fully backward-compatible. This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
@ -396,6 +487,8 @@ Enhancements:
* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements * [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements
`grpclog.Logger`. `grpclog.Logger`.
[#402]: https://github.com/uber-go/zap/pull/402
## v1.1.0 (31 Mar 2017) ## v1.1.0 (31 Mar 2017)
This release fixes two bugs and adds some enhancements to zap's testing helpers. This release fixes two bugs and adds some enhancements to zap's testing helpers.
@ -413,6 +506,10 @@ Enhancements:
Thanks to @moitias for contributing to this release. Thanks to @moitias for contributing to this release.
[#385]: https://github.com/uber-go/zap/pull/385
[#396]: https://github.com/uber-go/zap/pull/396
[#386]: https://github.com/uber-go/zap/pull/386
## v1.0.0 (14 Mar 2017) ## v1.0.0 (14 Mar 2017)
This is zap's first stable release. All exported APIs are now final, and no This is zap's first stable release. All exported APIs are now final, and no
@ -458,6 +555,20 @@ Enhancements:
Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their
contributions to this release. contributions to this release.
[#366]: https://github.com/uber-go/zap/pull/366
[#364]: https://github.com/uber-go/zap/pull/364
[#371]: https://github.com/uber-go/zap/pull/371
[#362]: https://github.com/uber-go/zap/pull/362
[#369]: https://github.com/uber-go/zap/pull/369
[#347]: https://github.com/uber-go/zap/pull/347
[#373]: https://github.com/uber-go/zap/pull/373
[#348]: https://github.com/uber-go/zap/pull/348
[#327]: https://github.com/uber-go/zap/pull/327
[#376]: https://github.com/uber-go/zap/pull/376
[#346]: https://github.com/uber-go/zap/pull/346
[#365]: https://github.com/uber-go/zap/pull/365
[#372]: https://github.com/uber-go/zap/pull/372
## v1.0.0-rc.3 (7 Mar 2017) ## v1.0.0-rc.3 (7 Mar 2017)
This is the third release candidate for zap's stable release. There are no This is the third release candidate for zap's stable release. There are no
@ -479,6 +590,11 @@ Enhancements:
Thanks to @ansel1 and @suyash for their contributions to this release. Thanks to @ansel1 and @suyash for their contributions to this release.
[#339]: https://github.com/uber-go/zap/pull/339
[#307]: https://github.com/uber-go/zap/pull/307
[#353]: https://github.com/uber-go/zap/pull/353
[#311]: https://github.com/uber-go/zap/pull/311
## v1.0.0-rc.2 (21 Feb 2017) ## v1.0.0-rc.2 (21 Feb 2017)
This is the second release candidate for zap's stable release. It includes two This is the second release candidate for zap's stable release. It includes two
@ -516,6 +632,15 @@ Enhancements:
Thanks to @skipor and @chapsuk for their contributions to this release. Thanks to @skipor and @chapsuk for their contributions to this release.
[#316]: https://github.com/uber-go/zap/pull/316
[#309]: https://github.com/uber-go/zap/pull/309
[#317]: https://github.com/uber-go/zap/pull/317
[#321]: https://github.com/uber-go/zap/pull/321
[#325]: https://github.com/uber-go/zap/pull/325
[#333]: https://github.com/uber-go/zap/pull/333
[#326]: https://github.com/uber-go/zap/pull/326
[#300]: https://github.com/uber-go/zap/pull/300
## v1.0.0-rc.1 (14 Feb 2017) ## v1.0.0-rc.1 (14 Feb 2017)
This is the first release candidate for zap's stable release. There are multiple This is the first release candidate for zap's stable release. There are multiple
@ -544,95 +669,3 @@ backward compatibility concerns and all functionality is new.
Early zap adopters should pin to the 0.1.x minor version until they're ready to Early zap adopters should pin to the 0.1.x minor version until they're ready to
upgrade to the upcoming stable release. upgrade to the upcoming stable release.
[#316]: https://github.com/uber-go/zap/pull/316
[#309]: https://github.com/uber-go/zap/pull/309
[#317]: https://github.com/uber-go/zap/pull/317
[#321]: https://github.com/uber-go/zap/pull/321
[#325]: https://github.com/uber-go/zap/pull/325
[#333]: https://github.com/uber-go/zap/pull/333
[#326]: https://github.com/uber-go/zap/pull/326
[#300]: https://github.com/uber-go/zap/pull/300
[#339]: https://github.com/uber-go/zap/pull/339
[#307]: https://github.com/uber-go/zap/pull/307
[#353]: https://github.com/uber-go/zap/pull/353
[#311]: https://github.com/uber-go/zap/pull/311
[#366]: https://github.com/uber-go/zap/pull/366
[#364]: https://github.com/uber-go/zap/pull/364
[#371]: https://github.com/uber-go/zap/pull/371
[#362]: https://github.com/uber-go/zap/pull/362
[#369]: https://github.com/uber-go/zap/pull/369
[#347]: https://github.com/uber-go/zap/pull/347
[#373]: https://github.com/uber-go/zap/pull/373
[#348]: https://github.com/uber-go/zap/pull/348
[#327]: https://github.com/uber-go/zap/pull/327
[#376]: https://github.com/uber-go/zap/pull/376
[#346]: https://github.com/uber-go/zap/pull/346
[#365]: https://github.com/uber-go/zap/pull/365
[#372]: https://github.com/uber-go/zap/pull/372
[#385]: https://github.com/uber-go/zap/pull/385
[#396]: https://github.com/uber-go/zap/pull/396
[#386]: https://github.com/uber-go/zap/pull/386
[#402]: https://github.com/uber-go/zap/pull/402
[#415]: https://github.com/uber-go/zap/pull/415
[#416]: https://github.com/uber-go/zap/pull/416
[#424]: https://github.com/uber-go/zap/pull/424
[#425]: https://github.com/uber-go/zap/pull/425
[#431]: https://github.com/uber-go/zap/pull/431
[#435]: https://github.com/uber-go/zap/pull/435
[#444]: https://github.com/uber-go/zap/pull/444
[#477]: https://github.com/uber-go/zap/pull/477
[#465]: https://github.com/uber-go/zap/pull/465
[#460]: https://github.com/uber-go/zap/pull/460
[#470]: https://github.com/uber-go/zap/pull/470
[#487]: https://github.com/uber-go/zap/pull/487
[#490]: https://github.com/uber-go/zap/pull/490
[#491]: https://github.com/uber-go/zap/pull/491
[#504]: https://github.com/uber-go/zap/pull/504
[#508]: https://github.com/uber-go/zap/pull/508
[#518]: https://github.com/uber-go/zap/pull/518
[#577]: https://github.com/uber-go/zap/pull/577
[#574]: https://github.com/uber-go/zap/pull/574
[#602]: https://github.com/uber-go/zap/pull/602
[#572]: https://github.com/uber-go/zap/pull/572
[#606]: https://github.com/uber-go/zap/pull/606
[#614]: https://github.com/uber-go/zap/pull/614
[#657]: https://github.com/uber-go/zap/pull/657
[#706]: https://github.com/uber-go/zap/pull/706
[#610]: https://github.com/uber-go/zap/pull/610
[#675]: https://github.com/uber-go/zap/pull/675
[#704]: https://github.com/uber-go/zap/pull/704
[#725]: https://github.com/uber-go/zap/pull/725
[#736]: https://github.com/uber-go/zap/pull/736
[#751]: https://github.com/uber-go/zap/pull/751
[#758]: https://github.com/uber-go/zap/pull/758
[#771]: https://github.com/uber-go/zap/pull/771
[#773]: https://github.com/uber-go/zap/pull/773
[#775]: https://github.com/uber-go/zap/pull/775
[#786]: https://github.com/uber-go/zap/pull/786
[#791]: https://github.com/uber-go/zap/pull/791
[#795]: https://github.com/uber-go/zap/pull/795
[#799]: https://github.com/uber-go/zap/pull/799
[#804]: https://github.com/uber-go/zap/pull/804
[#812]: https://github.com/uber-go/zap/pull/812
[#806]: https://github.com/uber-go/zap/pull/806
[#813]: https://github.com/uber-go/zap/pull/813
[#629]: https://github.com/uber-go/zap/pull/629
[#697]: https://github.com/uber-go/zap/pull/697
[#828]: https://github.com/uber-go/zap/pull/828
[#835]: https://github.com/uber-go/zap/pull/835
[#843]: https://github.com/uber-go/zap/pull/843
[#844]: https://github.com/uber-go/zap/pull/844
[#852]: https://github.com/uber-go/zap/pull/852
[#854]: https://github.com/uber-go/zap/pull/854
[#861]: https://github.com/uber-go/zap/pull/861
[#862]: https://github.com/uber-go/zap/pull/862
[#865]: https://github.com/uber-go/zap/pull/865
[#867]: https://github.com/uber-go/zap/pull/867
[#881]: https://github.com/uber-go/zap/pull/881
[#903]: https://github.com/uber-go/zap/pull/903
[#912]: https://github.com/uber-go/zap/pull/912
[#913]: https://github.com/uber-go/zap/pull/913
[#928]: https://github.com/uber-go/zap/pull/928
[#931]: https://github.com/uber-go/zap/pull/931
[#936]: https://github.com/uber-go/zap/pull/936

vendor/go.uber.org/zap/Makefile generated vendored

@ -1,64 +1,62 @@
export GOBIN ?= $(shell pwd)/bin # Directory containing the Makefile.
PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
export GOBIN ?= $(PROJECT_ROOT)/bin
export PATH := $(GOBIN):$(PATH)
REVIVE = $(GOBIN)/revive
STATICCHECK = $(GOBIN)/staticcheck
GOVULNCHECK = $(GOBIN)/govulncheck GOVULNCHECK = $(GOBIN)/govulncheck
BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
# Directories containing independent Go modules. # Directories containing independent Go modules.
#
# We track coverage only for the main module.
MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test
# Many Go tools take file globs or directories as arguments instead of packages. # Directories that we want to track coverage for.
GO_FILES := $(shell \ COVER_DIRS = . ./exp
find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
-o -name '*.go' -print | cut -b3-)
.PHONY: all .PHONY: all
all: lint test all: lint test
.PHONY: lint .PHONY: lint
lint: $(REVIVE) $(STATICCHECK) lint: golangci-lint tidy-lint license-lint
@rm -rf lint.log
@echo "Checking formatting..."
@gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
@echo "Checking vet..."
@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log
@echo "Checking lint..."
@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && \
$(REVIVE) -set_exit_status ./... 2>&1) &&) true | tee -a lint.log
@echo "Checking staticcheck..."
@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log
@echo "Checking for unresolved FIXMEs..."
@git grep -i fixme | grep -v -e Makefile | tee -a lint.log
@echo "Checking for license headers..."
@./checklicense.sh | tee -a lint.log
@[ ! -s lint.log ]
@echo "Checking 'go mod tidy'..."
@make tidy
@if ! git diff --quiet; then \
echo "'go mod tidy' resulted in changes or working tree is dirty:"; \
git --no-pager diff; \
fi
$(REVIVE): .PHONY: golangci-lint
cd tools && go install github.com/mgechev/revive golangci-lint:
@$(foreach mod,$(MODULE_DIRS), \
(cd $(mod) && \
echo "[lint] golangci-lint: $(mod)" && \
golangci-lint run --path-prefix $(mod)) &&) true
.PHONY: tidy
tidy:
@$(foreach dir,$(MODULE_DIRS), \
(cd $(dir) && go mod tidy) &&) true
.PHONY: tidy-lint
tidy-lint:
@$(foreach mod,$(MODULE_DIRS), \
(cd $(mod) && \
echo "[lint] tidy: $(mod)" && \
go mod tidy && \
git diff --exit-code -- go.mod go.sum) &&) true
.PHONY: license-lint
license-lint:
./checklicense.sh
$(GOVULNCHECK): $(GOVULNCHECK):
cd tools && go install golang.org/x/vuln/cmd/govulncheck cd tools && go install golang.org/x/vuln/cmd/govulncheck
$(STATICCHECK):
cd tools && go install honnef.co/go/tools/cmd/staticcheck
.PHONY: test .PHONY: test
test: test:
@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true
.PHONY: cover .PHONY: cover
cover: cover:
go test -race -coverprofile=cover.out -coverpkg=./... ./... @$(foreach dir,$(COVER_DIRS), ( \
go tool cover -html=cover.out -o cover.html cd $(dir) && \
go test -race -coverprofile=cover.out -coverpkg=./... ./... \
&& go tool cover -html=cover.out -o cover.html) &&) true
.PHONY: bench .PHONY: bench
BENCH ?= . BENCH ?= .
@ -73,10 +71,6 @@ updatereadme:
rm -f README.md rm -f README.md
cat .readme.tmpl | go run internal/readme/readme.go > README.md cat .readme.tmpl | go run internal/readme/readme.go > README.md
.PHONY: tidy
tidy:
@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true
.PHONY: vulncheck .PHONY: vulncheck
vulncheck: $(GOVULNCHECK) vulncheck: $(GOVULNCHECK)
$(GOVULNCHECK) ./... $(GOVULNCHECK) ./...

vendor/go.uber.org/zap/array.go generated vendored

@ -21,6 +21,7 @@
package zap package zap
import ( import (
"fmt"
"time" "time"
"go.uber.org/zap/zapcore" "go.uber.org/zap/zapcore"
@ -94,11 +95,137 @@ func Int8s(key string, nums []int8) Field {
return Array(key, int8s(nums)) return Array(key, int8s(nums))
} }
// Objects constructs a field with the given key, holding a list of the
// provided objects that can be marshaled by Zap.
//
// Note that these objects must implement zapcore.ObjectMarshaler directly.
// That is, if you're trying to marshal a []Request, the MarshalLogObject
// method must be declared on the Request type, not its pointer (*Request).
// If it's on the pointer, use ObjectValues.
//
// Given an object that implements MarshalLogObject on the value receiver, you
// can log a slice of those objects with Objects like so:
//
// type Author struct{ ... }
// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
// var authors []Author = ...
// logger.Info("loading article", zap.Objects("authors", authors))
//
// Similarly, given a type that implements MarshalLogObject on its pointer
// receiver, you can log a slice of pointers to that object with Objects like
// so:
//
// type Request struct{ ... }
// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
// var requests []*Request = ...
// logger.Info("sending requests", zap.Objects("requests", requests))
//
// If instead, you have a slice of values of such an object, use the
// ObjectValues constructor.
//
// var requests []Request = ...
// logger.Info("sending requests", zap.ObjectValues("requests", requests))
func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
return Array(key, objects[T](values))
}
type objects[T zapcore.ObjectMarshaler] []T
func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for _, o := range os {
if err := arr.AppendObject(o); err != nil {
return err
}
}
return nil
}
// ObjectMarshalerPtr is a constraint that specifies that the given type
// implements zapcore.ObjectMarshaler on a pointer receiver.
type ObjectMarshalerPtr[T any] interface {
*T
zapcore.ObjectMarshaler
}
// ObjectValues constructs a field with the given key, holding a list of the
// provided objects, where pointers to these objects can be marshaled by Zap.
//
// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
// That is, if you're trying to marshal a []Request, the MarshalLogObject
// method must be declared on the *Request type, not the value (Request).
// If it's on the value, use Objects.
//
// Given an object that implements MarshalLogObject on the pointer receiver,
// you can log a slice of those objects with ObjectValues like so:
//
// type Request struct{ ... }
// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
// var requests []Request = ...
// logger.Info("sending requests", zap.ObjectValues("requests", requests))
//
// If instead, you have a slice of pointers of such an object, use the Objects
// field constructor.
//
// var requests []*Request = ...
// logger.Info("sending requests", zap.Objects("requests", requests))
func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
return Array(key, objectValues[T, P](values))
}
type objectValues[T any, P ObjectMarshalerPtr[T]] []T
func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for i := range os {
// It is necessary for us to explicitly reference the "P" type.
// We cannot simply pass "&os[i]" to AppendObject because its type
// is "*T", which the type system does not consider as
// implementing ObjectMarshaler.
// Only the type "P" satisfies ObjectMarshaler, which we have
// to convert "*T" to explicitly.
var p P = &os[i]
if err := arr.AppendObject(p); err != nil {
return err
}
}
return nil
}
// Strings constructs a field that carries a slice of strings. // Strings constructs a field that carries a slice of strings.
func Strings(key string, ss []string) Field { func Strings(key string, ss []string) Field {
return Array(key, stringArray(ss)) return Array(key, stringArray(ss))
} }
// Stringers constructs a field with the given key, holding a list of the
// output provided by the value's String method
//
// Given an object that implements String on the value receiver, you
// can log a slice of those objects with Objects like so:
//
// type Request struct{ ... }
// func (a Request) String() string
//
// var requests []Request = ...
// logger.Info("sending requests", zap.Stringers("requests", requests))
//
// Note that these objects must implement fmt.Stringer directly.
// That is, if you're trying to marshal a []Request, the String method
// must be declared on the Request type, not its pointer (*Request).
func Stringers[T fmt.Stringer](key string, values []T) Field {
return Array(key, stringers[T](values))
}
type stringers[T fmt.Stringer] []T
func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for _, o := range os {
arr.AppendString(o.String())
}
return nil
}
// Times constructs a field that carries a slice of time.Times. // Times constructs a field that carries a slice of time.Times.
func Times(key string, ts []time.Time) Field { func Times(key string, ts []time.Time) Field {
return Array(key, times(ts)) return Array(key, times(ts))

vendor/go.uber.org/zap/array_go118.go generated vendored

@ -1,156 +0,0 @@
// Copyright (c) 2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:build go1.18
// +build go1.18
package zap
import (
"fmt"
"go.uber.org/zap/zapcore"
)
// Objects constructs a field with the given key, holding a list of the
// provided objects that can be marshaled by Zap.
//
// Note that these objects must implement zapcore.ObjectMarshaler directly.
// That is, if you're trying to marshal a []Request, the MarshalLogObject
// method must be declared on the Request type, not its pointer (*Request).
// If it's on the pointer, use ObjectValues.
//
// Given an object that implements MarshalLogObject on the value receiver, you
// can log a slice of those objects with Objects like so:
//
// type Author struct{ ... }
// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
// var authors []Author = ...
// logger.Info("loading article", zap.Objects("authors", authors))
//
// Similarly, given a type that implements MarshalLogObject on its pointer
// receiver, you can log a slice of pointers to that object with Objects like
// so:
//
// type Request struct{ ... }
// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
// var requests []*Request = ...
// logger.Info("sending requests", zap.Objects("requests", requests))
//
// If instead, you have a slice of values of such an object, use the
// ObjectValues constructor.
//
// var requests []Request = ...
// logger.Info("sending requests", zap.ObjectValues("requests", requests))
func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
return Array(key, objects[T](values))
}
type objects[T zapcore.ObjectMarshaler] []T
func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for _, o := range os {
if err := arr.AppendObject(o); err != nil {
return err
}
}
return nil
}
// ObjectMarshalerPtr is a constraint that specifies that the given type
// implements zapcore.ObjectMarshaler on a pointer receiver.
type ObjectMarshalerPtr[T any] interface {
*T
zapcore.ObjectMarshaler
}
// ObjectValues constructs a field with the given key, holding a list of the
// provided objects, where pointers to these objects can be marshaled by Zap.
//
// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
// That is, if you're trying to marshal a []Request, the MarshalLogObject
// method must be declared on the *Request type, not the value (Request).
// If it's on the value, use Objects.
//
// Given an object that implements MarshalLogObject on the pointer receiver,
// you can log a slice of those objects with ObjectValues like so:
//
// type Request struct{ ... }
// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
//
// var requests []Request = ...
// logger.Info("sending requests", zap.ObjectValues("requests", requests))
//
// If instead, you have a slice of pointers of such an object, use the Objects
// field constructor.
//
// var requests []*Request = ...
// logger.Info("sending requests", zap.Objects("requests", requests))
func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
return Array(key, objectValues[T, P](values))
}
type objectValues[T any, P ObjectMarshalerPtr[T]] []T
func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for i := range os {
// It is necessary for us to explicitly reference the "P" type.
// We cannot simply pass "&os[i]" to AppendObject because its type
// is "*T", which the type system does not consider as
// implementing ObjectMarshaler.
// Only the type "P" satisfies ObjectMarshaler, which we have
// to convert "*T" to explicitly.
var p P = &os[i]
if err := arr.AppendObject(p); err != nil {
return err
}
}
return nil
}
// Stringers constructs a field with the given key, holding a list of the
// output provided by the value's String method
//
// Given an object that implements String on the value receiver, you
// can log a slice of those objects with Objects like so:
//
// type Request struct{ ... }
// func (a Request) String() string
//
// var requests []Request = ...
// logger.Info("sending requests", zap.Stringers("requests", requests))
//
// Note that these objects must implement fmt.Stringer directly.
// That is, if you're trying to marshal a []Request, the String method
// must be declared on the Request type, not its pointer (*Request).
func Stringers[T fmt.Stringer](key string, values []T) Field {
return Array(key, stringers[T](values))
}
type stringers[T fmt.Stringer] []T
func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
for _, o := range os {
arr.AppendString(o.String())
}
return nil
}


@ -42,6 +42,11 @@ func (b *Buffer) AppendByte(v byte) {
b.bs = append(b.bs, v) b.bs = append(b.bs, v)
} }
// AppendBytes writes the given slice of bytes to the Buffer.
func (b *Buffer) AppendBytes(v []byte) {
b.bs = append(b.bs, v...)
}
// AppendString writes a string to the Buffer. // AppendString writes a string to the Buffer.
func (b *Buffer) AppendString(s string) { func (b *Buffer) AppendString(s string) {
b.bs = append(b.bs, s...) b.bs = append(b.bs, s...)

vendor/go.uber.org/zap/error.go generated vendored

@ -61,9 +61,12 @@ func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
// allocating, pool the wrapper type. // allocating, pool the wrapper type.
elem := _errArrayElemPool.Get() elem := _errArrayElemPool.Get()
elem.error = errs[i] elem.error = errs[i]
arr.AppendObject(elem) err := arr.AppendObject(elem)
elem.error = nil elem.error = nil
_errArrayElemPool.Put(elem) _errArrayElemPool.Put(elem)
if err != nil {
return err
}
} }
return nil return nil
} }

vendor/go.uber.org/zap/field.go generated vendored

@ -25,6 +25,7 @@ import (
"math" "math"
"time" "time"
"go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore" "go.uber.org/zap/zapcore"
) )
@ -374,7 +375,7 @@ func StackSkip(key string, skip int) Field {
// from expanding the zapcore.Field union struct to include a byte slice. Since // from expanding the zapcore.Field union struct to include a byte slice. Since
// taking a stacktrace is already so expensive (~10us), the extra allocation // taking a stacktrace is already so expensive (~10us), the extra allocation
// is okay. // is okay.
return String(key, takeStacktrace(skip+1)) // skip StackSkip return String(key, stacktrace.Take(skip+1)) // skip StackSkip
} }
// Duration constructs a field with the given key and value. The encoder // Duration constructs a field with the given key and value. The encoder
@ -410,6 +411,26 @@ func Inline(val zapcore.ObjectMarshaler) Field {
} }
} }
// Dict constructs a field containing the provided key-value pairs.
// It acts similar to [Object], but with the fields specified as arguments.
func Dict(key string, val ...Field) Field {
return dictField(key, val)
}
// We need a function with the signature (string, T) for zap.Any.
func dictField(key string, val []Field) Field {
return Object(key, dictObject(val))
}
type dictObject []Field
func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error {
for _, f := range d {
f.AddTo(enc)
}
return nil
}
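A brief usage sketch for the new Dict constructor (the zap.NewExample logger and the field names are purely illustrative):

```go
package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	// Dict groups related fields under a single object-valued key.
	logger.Info("request finished",
		zap.Dict("http",
			zap.String("method", "GET"),
			zap.Int("status", 200),
		),
	)
}
```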
// We discovered an issue where zap.Any can cause a performance degradation // We discovered an issue where zap.Any can cause a performance degradation
// when used in new goroutines. // when used in new goroutines.
// //
@ -462,6 +483,8 @@ func Any(key string, value interface{}) Field {
c = anyFieldC[zapcore.ObjectMarshaler](Object) c = anyFieldC[zapcore.ObjectMarshaler](Object)
case zapcore.ArrayMarshaler: case zapcore.ArrayMarshaler:
c = anyFieldC[zapcore.ArrayMarshaler](Array) c = anyFieldC[zapcore.ArrayMarshaler](Array)
case []Field:
c = anyFieldC[[]Field](dictField)
case bool: case bool:
c = anyFieldC[bool](Bool) c = anyFieldC[bool](Bool)
case *bool: case *bool:


@ -69,6 +69,13 @@ import (
// //
// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' // curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if err := lvl.serveHTTP(w, r); err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "internal error: %v", err)
}
}
func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error {
type errorResponse struct { type errorResponse struct {
Error string `json:"error"` Error string `json:"error"`
} }
@ -80,19 +87,20 @@ func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
switch r.Method { switch r.Method {
case http.MethodGet: case http.MethodGet:
enc.Encode(payload{Level: lvl.Level()}) return enc.Encode(payload{Level: lvl.Level()})
case http.MethodPut: case http.MethodPut:
requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r)
if err != nil { if err != nil {
w.WriteHeader(http.StatusBadRequest) w.WriteHeader(http.StatusBadRequest)
enc.Encode(errorResponse{Error: err.Error()}) return enc.Encode(errorResponse{Error: err.Error()})
return
} }
lvl.SetLevel(requestedLvl) lvl.SetLevel(requestedLvl)
enc.Encode(payload{Level: lvl.Level()}) return enc.Encode(payload{Level: lvl.Level()})
default: default:
w.WriteHeader(http.StatusMethodNotAllowed) w.WriteHeader(http.StatusMethodNotAllowed)
enc.Encode(errorResponse{ return enc.Encode(errorResponse{
Error: "Only GET and PUT are supported.", Error: "Only GET and PUT are supported.",
}) })
} }
@ -129,5 +137,4 @@ func decodePutJSON(body io.Reader) (zapcore.Level, error) {
return 0, errors.New("must specify logging level") return 0, errors.New("must specify logging level")
} }
return *pld.Level, nil return *pld.Level, nil
} }
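Since AtomicLevel implements http.Handler, the handler refactored above can be mounted directly; a rough sketch, with the address and path chosen only for illustration:

```go
package main

import (
	"net/http"

	"go.uber.org/zap"
)

func main() {
	lvl := zap.NewAtomicLevel()
	// GET reports the current level, PUT changes it
	// (see the curl example in the doc comment above).
	http.Handle("/log/level", lvl)
	_ = http.ListenAndServe("localhost:8080", nil)
}
```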


@ -1,4 +1,4 @@
// Copyright (c) 2016 Uber Technologies, Inc. // Copyright (c) 2023 Uber Technologies, Inc.
// //
// Permission is hereby granted, free of charge, to any person obtaining a copy // Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal // of this software and associated documentation files (the "Software"), to deal
@ -18,7 +18,9 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE. // THE SOFTWARE.
package zap // Package stacktrace provides support for gathering stack traces
// efficiently.
package stacktrace
import ( import (
"runtime" "runtime"
@ -28,13 +30,14 @@ import (
"go.uber.org/zap/internal/pool" "go.uber.org/zap/internal/pool"
) )
var _stacktracePool = pool.New(func() *stacktrace { var _stackPool = pool.New(func() *Stack {
return &stacktrace{ return &Stack{
storage: make([]uintptr, 64), storage: make([]uintptr, 64),
} }
}) })
type stacktrace struct { // Stack is a captured stack trace.
type Stack struct {
pcs []uintptr // program counters; always a subslice of storage pcs []uintptr // program counters; always a subslice of storage
frames *runtime.Frames frames *runtime.Frames
@ -48,30 +51,30 @@ type stacktrace struct {
storage []uintptr storage []uintptr
} }
// stacktraceDepth specifies how deep of a stack trace should be captured. // Depth specifies how deep of a stack trace should be captured.
type stacktraceDepth int type Depth int
const ( const (
// stacktraceFirst captures only the first frame. // First captures only the first frame.
stacktraceFirst stacktraceDepth = iota First Depth = iota
// stacktraceFull captures the entire call stack, allocating more // Full captures the entire call stack, allocating more
// storage for it if needed. // storage for it if needed.
stacktraceFull Full
) )
// captureStacktrace captures a stack trace of the specified depth, skipping // Capture captures a stack trace of the specified depth, skipping
// the provided number of frames. skip=0 identifies the caller of // the provided number of frames. skip=0 identifies the caller of
// captureStacktrace. // Capture.
// //
// The caller must call Free on the returned stacktrace after using it. // The caller must call Free on the returned stacktrace after using it.
func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { func Capture(skip int, depth Depth) *Stack {
stack := _stacktracePool.Get() stack := _stackPool.Get()
switch depth { switch depth {
case stacktraceFirst: case First:
stack.pcs = stack.storage[:1] stack.pcs = stack.storage[:1]
case stacktraceFull: case Full:
stack.pcs = stack.storage stack.pcs = stack.storage
} }
@ -85,7 +88,7 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
// runtime.Callers truncates the recorded stacktrace if there is no // runtime.Callers truncates the recorded stacktrace if there is no
// room in the provided slice. For the full stack trace, keep expanding // room in the provided slice. For the full stack trace, keep expanding
// storage until there are fewer frames than there is room. // storage until there are fewer frames than there is room.
if depth == stacktraceFull { if depth == Full {
pcs := stack.pcs pcs := stack.pcs
for numFrames == len(pcs) { for numFrames == len(pcs) {
pcs = make([]uintptr, len(pcs)*2) pcs = make([]uintptr, len(pcs)*2)
@ -107,50 +110,54 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace {
// Free releases resources associated with this stacktrace // Free releases resources associated with this stacktrace
// and returns it back to the pool. // and returns it back to the pool.
func (st *stacktrace) Free() { func (st *Stack) Free() {
st.frames = nil st.frames = nil
st.pcs = nil st.pcs = nil
_stacktracePool.Put(st) _stackPool.Put(st)
} }
// Count reports the total number of frames in this stacktrace. // Count reports the total number of frames in this stacktrace.
// Count DOES NOT change as Next is called. // Count DOES NOT change as Next is called.
func (st *stacktrace) Count() int { func (st *Stack) Count() int {
return len(st.pcs) return len(st.pcs)
} }
// Next returns the next frame in the stack trace, // Next returns the next frame in the stack trace,
// and a boolean indicating whether there are more after it. // and a boolean indicating whether there are more after it.
func (st *stacktrace) Next() (_ runtime.Frame, more bool) { func (st *Stack) Next() (_ runtime.Frame, more bool) {
return st.frames.Next() return st.frames.Next()
} }
func takeStacktrace(skip int) string { // Take returns a string representation of the current stacktrace.
stack := captureStacktrace(skip+1, stacktraceFull) //
// skip is the number of frames to skip before recording the stack trace.
// skip=0 identifies the caller of Take.
func Take(skip int) string {
stack := Capture(skip+1, Full)
defer stack.Free() defer stack.Free()
buffer := bufferpool.Get() buffer := bufferpool.Get()
defer buffer.Free() defer buffer.Free()
stackfmt := newStackFormatter(buffer) stackfmt := NewFormatter(buffer)
stackfmt.FormatStack(stack) stackfmt.FormatStack(stack)
return buffer.String() return buffer.String()
} }
// stackFormatter formats a stack trace into a readable string representation. // Formatter formats a stack trace into a readable string representation.
type stackFormatter struct { type Formatter struct {
b *buffer.Buffer b *buffer.Buffer
nonEmpty bool // whether we've written at least one frame already nonEmpty bool // whether we've written at least one frame already
nonEmpty bool // whether we've written at least one frame already nonEmpty bool // whether we've written at least one frame already
} }
// newStackFormatter builds a new stackFormatter. // NewFormatter builds a new Formatter.
func newStackFormatter(b *buffer.Buffer) stackFormatter { func NewFormatter(b *buffer.Buffer) Formatter {
return stackFormatter{b: b} return Formatter{b: b}
} }
// FormatStack formats all remaining frames in the provided stacktrace -- minus // FormatStack formats all remaining frames in the provided stacktrace -- minus
// the final runtime.main/runtime.goexit frame. // the final runtime.main/runtime.goexit frame.
func (sf *stackFormatter) FormatStack(stack *stacktrace) { func (sf *Formatter) FormatStack(stack *Stack) {
// Note: On the last iteration, frames.Next() returns false, with a valid // Note: On the last iteration, frames.Next() returns false, with a valid
// frame, but we ignore this frame. The last frame is a runtime frame which // frame, but we ignore this frame. The last frame is a runtime frame which
// adds noise, since it's only either runtime.main or runtime.goexit. // adds noise, since it's only either runtime.main or runtime.goexit.
@ -160,7 +167,7 @@ func (sf *stackFormatter) FormatStack(stack *stacktrace) {
} }
// FormatFrame formats the given frame. // FormatFrame formats the given frame.
func (sf *stackFormatter) FormatFrame(frame runtime.Frame) { func (sf *Formatter) FormatFrame(frame runtime.Frame) {
if sf.nonEmpty { if sf.nonEmpty {
sf.b.AppendByte('\n') sf.b.AppendByte('\n')
} }
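
The capture-and-grow approach described in the comments above (keep doubling the program-counter buffer until runtime.Callers reports fewer frames than there is room) can be illustrated standalone. This sketch uses only the runtime package and is not the internal stacktrace API itself:

```go
package main

import (
	"fmt"
	"runtime"
)

// captureFullStack keeps doubling the program-counter buffer until
// runtime.Callers returns fewer frames than the buffer can hold,
// mirroring the strategy used by the internal stacktrace package.
func captureFullStack(skip int) []uintptr {
	pcs := make([]uintptr, 64)
	for {
		// +2 skips runtime.Callers and captureFullStack itself.
		n := runtime.Callers(skip+2, pcs)
		if n < len(pcs) {
			return pcs[:n]
		}
		pcs = make([]uintptr, len(pcs)*2)
	}
}

func main() {
	frames := runtime.CallersFrames(captureFullStack(0))
	for {
		frame, more := frames.Next()
		fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
		if !more {
			break
		}
	}
}
```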

vendor/go.uber.org/zap/logger.go generated vendored
View File

@ -27,6 +27,7 @@ import (
"strings" "strings"
"go.uber.org/zap/internal/bufferpool" "go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore" "go.uber.org/zap/zapcore"
) )
@ -173,7 +174,8 @@ func (log *Logger) WithOptions(opts ...Option) *Logger {
} }
// With creates a child logger and adds structured context to it. Fields added // With creates a child logger and adds structured context to it. Fields added
// to the child don't affect the parent, and vice versa. // to the child don't affect the parent, and vice versa. Any fields that
// require evaluation (such as Objects) are evaluated upon invocation of With.
func (log *Logger) With(fields ...Field) *Logger { func (log *Logger) With(fields ...Field) *Logger {
if len(fields) == 0 { if len(fields) == 0 {
return log return log
@ -183,6 +185,28 @@ func (log *Logger) With(fields ...Field) *Logger {
return l return l
} }
// WithLazy creates a child logger and adds structured context to it lazily.
//
// The fields are evaluated only if the logger is further chained with [With]
// or is written to with any of the log level methods.
// Until that occurs, the logger may retain references to objects inside the fields,
// and logging will reflect the state of an object at the time of logging,
// not the time of WithLazy().
//
// WithLazy provides a worthwhile performance optimization for contextual loggers
// when the likelihood of using the child logger is low,
// such as error paths and rarely taken branches.
//
// Similar to [With], fields added to the child don't affect the parent, and vice versa.
func (log *Logger) WithLazy(fields ...Field) *Logger {
if len(fields) == 0 {
return log
}
return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core {
return zapcore.NewLazyWith(core, fields)
}))
}
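
A hedged usage sketch of WithLazy on a rarely taken path; the request type and process function below are hypothetical placeholders:

```go
package main

import "go.uber.org/zap"

// request and process are hypothetical placeholders for this sketch.
type request struct{ ID string }

func process(*request) error { return nil }

func handle(log *zap.Logger, req *request) error {
	// The field is not encoded unless reqLog is actually written to
	// (or chained with With), which only happens on the error path.
	reqLog := log.WithLazy(zap.String("request_id", req.ID))

	if err := process(req); err != nil {
		reqLog.Error("processing failed", zap.Error(err))
		return err
	}
	return nil
}

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()
	_ = handle(logger, &request{ID: "abc123"})
}
```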
// Level reports the minimum enabled level for this logger. // Level reports the minimum enabled level for this logger.
// //
// For NopLoggers, this is [zapcore.InvalidLevel]. // For NopLoggers, this is [zapcore.InvalidLevel].
@ -199,6 +223,8 @@ func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Log logs a message at the specified level. The message includes any fields // Log logs a message at the specified level. The message includes any fields
// passed at the log site, as well as any fields accumulated on the logger. // passed at the log site, as well as any fields accumulated on the logger.
// Any Fields that require evaluation (such as Objects) are evaluated upon
// invocation of Log.
func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) { func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) {
if ce := log.check(lvl, msg); ce != nil { if ce := log.check(lvl, msg); ce != nil {
ce.Write(fields...) ce.Write(fields...)
@ -288,8 +314,8 @@ func (log *Logger) Name() string {
} }
func (log *Logger) clone() *Logger { func (log *Logger) clone() *Logger {
copy := *log clone := *log
return &copy return &clone
} }
func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
@ -360,17 +386,17 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Adding the caller or stack trace requires capturing the callers of // Adding the caller or stack trace requires capturing the callers of
// this function. We'll share information between these two. // this function. We'll share information between these two.
stackDepth := stacktraceFirst stackDepth := stacktrace.First
if addStack { if addStack {
stackDepth = stacktraceFull stackDepth = stacktrace.Full
} }
stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth) stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth)
defer stack.Free() defer stack.Free()
if stack.Count() == 0 { if stack.Count() == 0 {
if log.addCaller { if log.addCaller {
fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
log.errorOutput.Sync() _ = log.errorOutput.Sync()
} }
return ce return ce
} }
@ -391,7 +417,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
buffer := bufferpool.Get() buffer := bufferpool.Get()
defer buffer.Free() defer buffer.Free()
stackfmt := newStackFormatter(buffer) stackfmt := stacktrace.NewFormatter(buffer)
// We've already extracted the first frame, so format that // We've already extracted the first frame, so format that
// separately and defer to stackfmt for the rest. // separately and defer to stackfmt for the rest.

vendor/go.uber.org/zap/sink.go generated vendored
View File

@ -66,7 +66,8 @@ func newSinkRegistry() *sinkRegistry {
factories: make(map[string]func(*url.URL) (Sink, error)), factories: make(map[string]func(*url.URL) (Sink, error)),
openFile: os.OpenFile, openFile: os.OpenFile,
} }
sr.RegisterSink(schemeFile, sr.newFileSinkFromURL) // Infallible operation: the registry is empty, so we can't have a conflict.
_ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
return sr return sr
} }
@ -154,7 +155,7 @@ func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
case "stderr": case "stderr":
return nopCloserSink{os.Stderr}, nil return nopCloserSink{os.Stderr}, nil
} }
return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666)
} }
func normalizeScheme(s string) (string, error) { func normalizeScheme(s string) (string, error) {

vendor/go.uber.org/zap/writer.go generated vendored
View File

@ -48,21 +48,21 @@ import (
// os.Stdout and os.Stderr. When specified without a scheme, relative file // os.Stdout and os.Stderr. When specified without a scheme, relative file
// paths also work. // paths also work.
func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
writers, close, err := open(paths) writers, closeAll, err := open(paths)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
writer := CombineWriteSyncers(writers...) writer := CombineWriteSyncers(writers...)
return writer, close, nil return writer, closeAll, nil
} }
func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
writers := make([]zapcore.WriteSyncer, 0, len(paths)) writers := make([]zapcore.WriteSyncer, 0, len(paths))
closers := make([]io.Closer, 0, len(paths)) closers := make([]io.Closer, 0, len(paths))
close := func() { closeAll := func() {
for _, c := range closers { for _, c := range closers {
c.Close() _ = c.Close()
} }
} }
@ -77,11 +77,11 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
closers = append(closers, sink) closers = append(closers, sink)
} }
if openErr != nil { if openErr != nil {
close() closeAll()
return nil, nil, openErr return nil, nil, openErr
} }
return writers, close, nil return writers, closeAll, nil
} }
// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a // CombineWriteSyncers is a utility that combines multiple WriteSyncers into a
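
A hedged sketch of Open and its cleanup function, whose Close errors are now explicitly ignored; the paths and encoder choices are illustrative:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Open every sink; cleanup closes whatever was successfully opened.
	ws, cleanup, err := zap.Open("stderr", "/tmp/app.log") // paths are illustrative
	if err != nil {
		panic(err)
	}
	defer cleanup()

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		ws,
		zap.InfoLevel,
	)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Info("writing to both stderr and the log file")
}
```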

View File

@ -102,9 +102,9 @@ func (c *ioCore) Write(ent Entry, fields []Field) error {
return err return err
} }
if ent.Level > ErrorLevel { if ent.Level > ErrorLevel {
// Since we may be crashing the program, sync the output. Ignore Sync // Since we may be crashing the program, sync the output.
// errors, pending a clean solution to issue #370. // Ignore Sync errors, pending a clean solution to issue #370.
c.Sync() _ = c.Sync()
} }
return nil return nil
} }

View File

@ -242,7 +242,7 @@ func (ce *CheckedEntry) Write(fields ...Field) {
// CheckedEntry is being used after it was returned to the pool, // CheckedEntry is being used after it was returned to the pool,
// the message may be an amalgamation from multiple call sites. // the message may be an amalgamation from multiple call sites.
fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
ce.ErrorOutput.Sync() _ = ce.ErrorOutput.Sync() // ignore error
} }
return return
} }
@ -254,7 +254,7 @@ func (ce *CheckedEntry) Write(fields ...Field) {
} }
if err != nil && ce.ErrorOutput != nil { if err != nil && ce.ErrorOutput != nil {
fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
ce.ErrorOutput.Sync() _ = ce.ErrorOutput.Sync() // ignore error
} }
hook := ce.after hook := ce.after

View File

@ -98,8 +98,11 @@ func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
} }
el := newErrArrayElem(errs[i]) el := newErrArrayElem(errs[i])
arr.AppendObject(el) err := arr.AppendObject(el)
el.Free() el.Free()
if err != nil {
return err
}
} }
return nil return nil
} }
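
For reference, this marshaling path backs zap's Errors field; a small hedged sketch:

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()

	// zap.Errors encodes the slice via errArray.MarshalLogArray shown above,
	// which now propagates AppendObject errors instead of dropping them.
	logger.Error("batch failed", zap.Errors("causes", []error{
		errors.New("disk full"),
		errors.New("timeout"),
	}))
}
```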

View File

@ -486,73 +486,98 @@ func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
// Unlike the standard library's encoder, it doesn't attempt to protect the // Unlike the standard library's encoder, it doesn't attempt to protect the
// user from browser vulnerabilities or JSONP-related problems. // user from browser vulnerabilities or JSONP-related problems.
func (enc *jsonEncoder) safeAddString(s string) { func (enc *jsonEncoder) safeAddString(s string) {
for i := 0; i < len(s); { safeAppendStringLike(
if enc.tryAddRuneSelf(s[i]) { (*buffer.Buffer).AppendString,
i++ utf8.DecodeRuneInString,
continue enc.buf,
} s,
r, size := utf8.DecodeRuneInString(s[i:]) )
if enc.tryAddRuneError(r, size) {
i++
continue
}
enc.buf.AppendString(s[i : i+size])
i += size
}
} }
// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. // safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte.
func (enc *jsonEncoder) safeAddByteString(s []byte) { func (enc *jsonEncoder) safeAddByteString(s []byte) {
for i := 0; i < len(s); { safeAppendStringLike(
if enc.tryAddRuneSelf(s[i]) { (*buffer.Buffer).AppendBytes,
i++ utf8.DecodeRune,
continue enc.buf,
} s,
r, size := utf8.DecodeRune(s[i:]) )
if enc.tryAddRuneError(r, size) {
i++
continue
}
enc.buf.Write(s[i : i+size])
i += size
}
} }
// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. // safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString.
func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { // It appends a string or byte slice to the buffer, escaping all special characters.
if b >= utf8.RuneSelf { func safeAppendStringLike[S []byte | string](
return false // appendTo appends this string-like object to the buffer.
appendTo func(*buffer.Buffer, S),
// decodeRune decodes the next rune from the string-like object
// and returns its value and width in bytes.
decodeRune func(S) (rune, int),
buf *buffer.Buffer,
s S,
) {
// The encoding logic below works by skipping over characters
// that can be safely copied as-is,
// until a character is found that needs special handling.
// At that point, we copy everything we've seen so far,
// and then handle that special character.
//
// last is the index of the last byte that was copied to the buffer.
last := 0
for i := 0; i < len(s); {
if s[i] >= utf8.RuneSelf {
// Character >= RuneSelf may be part of a multi-byte rune.
// They need to be decoded before we can decide how to handle them.
r, size := decodeRune(s[i:])
if r != utf8.RuneError || size != 1 {
// No special handling required.
// Skip over this rune and continue.
i += size
continue
} }
if b >= 0x20 && b != '\\' && b != '"' {
enc.buf.AppendByte(b) // Invalid UTF-8 sequence.
return true // Replace it with the Unicode replacement character.
appendTo(buf, s[last:i])
buf.AppendString(`\ufffd`)
i++
last = i
} else {
// Character < RuneSelf is a single-byte UTF-8 rune.
if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' {
// No escaping necessary.
// Skip over this character and continue.
i++
continue
} }
switch b {
// This character needs to be escaped.
appendTo(buf, s[last:i])
switch s[i] {
case '\\', '"': case '\\', '"':
enc.buf.AppendByte('\\') buf.AppendByte('\\')
enc.buf.AppendByte(b) buf.AppendByte(s[i])
case '\n': case '\n':
enc.buf.AppendByte('\\') buf.AppendByte('\\')
enc.buf.AppendByte('n') buf.AppendByte('n')
case '\r': case '\r':
enc.buf.AppendByte('\\') buf.AppendByte('\\')
enc.buf.AppendByte('r') buf.AppendByte('r')
case '\t': case '\t':
enc.buf.AppendByte('\\') buf.AppendByte('\\')
enc.buf.AppendByte('t') buf.AppendByte('t')
default: default:
// Encode bytes < 0x20, except for the escape sequences above. // Encode bytes < 0x20, except for the escape sequences above.
enc.buf.AppendString(`\u00`) buf.AppendString(`\u00`)
enc.buf.AppendByte(_hex[b>>4]) buf.AppendByte(_hex[s[i]>>4])
enc.buf.AppendByte(_hex[b&0xF]) buf.AppendByte(_hex[s[i]&0xF])
}
return true
} }
func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { i++
if r == utf8.RuneError && size == 1 { last = i
enc.buf.AppendString(`\ufffd`)
return true
} }
return false }
// add remaining
appendTo(buf, s[last:])
} }
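
The batching idea above (skip over safe bytes, then flush the run and escape only the special byte) can be shown in isolation. This is a simplified sketch, not zap's implementation, and it ignores multi-byte rune validation:

```go
package main

import (
	"fmt"
	"strings"
)

// escape copies safe runs in bulk and handles special bytes individually,
// tracking the index of the last byte already copied, like safeAppendStringLike.
func escape(s string) string {
	var b strings.Builder
	last := 0
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c >= 0x20 && c != '\\' && c != '"' {
			continue // safe byte; keep scanning
		}
		b.WriteString(s[last:i]) // flush the safe run
		switch c {
		case '\\', '"':
			b.WriteByte('\\')
			b.WriteByte(c)
		case '\n':
			b.WriteString(`\n`)
		case '\t':
			b.WriteString(`\t`)
		default:
			fmt.Fprintf(&b, `\u%04x`, c)
		}
		last = i + 1
	}
	b.WriteString(s[last:])
	return b.String()
}

func main() {
	fmt.Println(escape("say \"hi\"\nbye")) // say \"hi\"\nbye
}
```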

vendor/go.uber.org/zap/zapcore/lazy_with.go generated vendored Normal file
View File

@ -0,0 +1,54 @@
// Copyright (c) 2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zapcore
import "sync"
type lazyWithCore struct {
Core
sync.Once
fields []Field
}
// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if
// the logger is written to (or is further chained in a non-lazy manner).
func NewLazyWith(core Core, fields []Field) Core {
return &lazyWithCore{
Core: core,
fields: fields,
}
}
func (d *lazyWithCore) initOnce() {
d.Once.Do(func() {
d.Core = d.Core.With(d.fields)
})
}
func (d *lazyWithCore) With(fields []Field) Core {
d.initOnce()
return d.Core.With(fields)
}
func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry {
d.initOnce()
return d.Core.Check(e, ce)
}
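
A minimal, hedged sketch of wrapping a core with NewLazyWith directly (normally Logger.WithLazy does this for you); the writer and encoder choices are illustrative:

```go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stdout),
		zap.DebugLevel,
	)

	// The "service" field is not encoded until the core is checked,
	// written to, or chained non-lazily.
	lazy := zapcore.NewLazyWith(base, []zapcore.Field{zap.String("service", "demo")})

	zap.New(lazy).Info("hello")
}
```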

View File

@ -1,51 +0,0 @@
#
# This Dockerfile builds a recent curl with HTTP/2 client support, using
# a recent nghttp2 build.
#
# See the Makefile for how to tag it. If Docker and that image is found, the
# Go tests use this curl binary for integration tests.
#
FROM ubuntu:trusty
RUN apt-get update && \
apt-get upgrade -y && \
apt-get install -y git-core build-essential wget
RUN apt-get install -y --no-install-recommends \
autotools-dev libtool pkg-config zlib1g-dev \
libcunit1-dev libssl-dev libxml2-dev libevent-dev \
automake autoconf
# The list of packages nghttp2 recommends for h2load:
RUN apt-get install -y --no-install-recommends make binutils \
autoconf automake autotools-dev \
libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
libev-dev libevent-dev libjansson-dev libjemalloc-dev \
cython python3.4-dev python-setuptools
# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
ENV NGHTTP2_VER 895da9a
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
WORKDIR /root/nghttp2
RUN git reset --hard $NGHTTP2_VER
RUN autoreconf -i
RUN automake
RUN autoconf
RUN ./configure
RUN make
RUN make install
WORKDIR /root
RUN wget https://curl.se/download/curl-7.45.0.tar.gz
RUN tar -zxvf curl-7.45.0.tar.gz
WORKDIR /root/curl-7.45.0
RUN ./configure --with-ssl --with-nghttp2=/usr/local
RUN make
RUN make install
RUN ldconfig
CMD ["-h"]
ENTRYPOINT ["/usr/local/bin/curl"]

View File

@ -1,3 +0,0 @@
curlimage:
docker build -t gohttp2/curl .

View File

@ -1012,14 +1012,6 @@ func (sc *serverConn) serve() {
} }
} }
func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
select {
case <-sc.doneServing:
case <-sharedCh:
close(privateCh)
}
}
type serverMessage int type serverMessage int
// Message values sent to serveMsgCh. // Message values sent to serveMsgCh.

View File

@ -292,7 +292,6 @@ func (t *Transport) initConnPool() {
type ClientConn struct { type ClientConn struct {
t *Transport t *Transport
tconn net.Conn // usually *tls.Conn, except specialized impls tconn net.Conn // usually *tls.Conn, except specialized impls
tconnClosed bool
tlsState *tls.ConnectionState // nil only for specialized impls tlsState *tls.ConnectionState // nil only for specialized impls
reused uint32 // whether conn is being reused; atomic reused uint32 // whether conn is being reused; atomic
singleUse bool // whether being used for a single http.Request singleUse bool // whether being used for a single http.Request

View File

@ -18,6 +18,7 @@ import (
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
"sync/atomic"
"time" "time"
) )
@ -115,41 +116,60 @@ const (
AuthStyleInHeader AuthStyle = 2 AuthStyleInHeader AuthStyle = 2
) )
// authStyleCache is the set of tokenURLs we've successfully used via // LazyAuthStyleCache is a backwards compatibility compromise to let Configs
// have a lazily-initialized AuthStyleCache.
//
// The two users of this, oauth2.Config and oauth2/clientcredentials.Config,
// both would ideally just embed an unexported AuthStyleCache but because both
// were historically allowed to be copied by value we can't retroactively add an
// uncopyable Mutex to them.
//
// We could use an atomic.Pointer, but that was added recently enough (in Go
// 1.18) that we'd break Go 1.17 users where the tests as of 2023-08-03
// still pass. By using an atomic.Value, it supports both Go 1.17 and
// copying by value, even if that's not ideal.
type LazyAuthStyleCache struct {
v atomic.Value // of *AuthStyleCache
}
func (lc *LazyAuthStyleCache) Get() *AuthStyleCache {
if c, ok := lc.v.Load().(*AuthStyleCache); ok {
return c
}
c := new(AuthStyleCache)
if !lc.v.CompareAndSwap(nil, c) {
c = lc.v.Load().(*AuthStyleCache)
}
return c
}
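
The lazy-initialization trick above (an atomic.Value holding a pointer, settled with CompareAndSwap) can be sketched generically. The sketch below uses generics purely for illustration and is not the oauth2 code:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// lazy holds a *T that is allocated on first Get. The zero value is usable
// and, before first use, safe to copy by value, mirroring LazyAuthStyleCache.
type lazy[T any] struct {
	v atomic.Value // holds *T
}

func (l *lazy[T]) Get() *T {
	if p, ok := l.v.Load().(*T); ok {
		return p
	}
	p := new(T)
	if !l.v.CompareAndSwap(nil, p) {
		// Another goroutine initialized it first; use that value instead.
		p = l.v.Load().(*T)
	}
	return p
}

type counter struct{ n int }

func main() {
	var c lazy[counter]
	c.Get().n++
	fmt.Println(c.Get().n) // 1
}
```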
// AuthStyleCache is the set of tokenURLs we've successfully used via
// RetrieveToken and which style auth we ended up using. // RetrieveToken and which style auth we ended up using.
// It's called a cache, but it doesn't (yet?) shrink. It's expected that // It's called a cache, but it doesn't (yet?) shrink. It's expected that
// the set of OAuth2 servers a program contacts over time is fixed and // the set of OAuth2 servers a program contacts over time is fixed and
// small. // small.
var authStyleCache struct { type AuthStyleCache struct {
sync.Mutex mu sync.Mutex
m map[string]AuthStyle // keyed by tokenURL m map[string]AuthStyle // keyed by tokenURL
} }
// ResetAuthCache resets the global authentication style cache used
// for AuthStyleUnknown token requests.
func ResetAuthCache() {
authStyleCache.Lock()
defer authStyleCache.Unlock()
authStyleCache.m = nil
}
// lookupAuthStyle reports which auth style we last used with tokenURL // lookupAuthStyle reports which auth style we last used with tokenURL
// when calling RetrieveToken and whether we have ever done so. // when calling RetrieveToken and whether we have ever done so.
func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) {
authStyleCache.Lock() c.mu.Lock()
defer authStyleCache.Unlock() defer c.mu.Unlock()
style, ok = authStyleCache.m[tokenURL] style, ok = c.m[tokenURL]
return return
} }
// setAuthStyle adds an entry to authStyleCache, documented above. // setAuthStyle adds an entry to authStyleCache, documented above.
func setAuthStyle(tokenURL string, v AuthStyle) { func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) {
authStyleCache.Lock() c.mu.Lock()
defer authStyleCache.Unlock() defer c.mu.Unlock()
if authStyleCache.m == nil { if c.m == nil {
authStyleCache.m = make(map[string]AuthStyle) c.m = make(map[string]AuthStyle)
} }
authStyleCache.m[tokenURL] = v c.m[tokenURL] = v
} }
// newTokenRequest returns a new *http.Request to retrieve a new token // newTokenRequest returns a new *http.Request to retrieve a new token
@ -189,10 +209,10 @@ func cloneURLValues(v url.Values) url.Values {
return v2 return v2
} }
func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) {
needsAuthStyleProbe := authStyle == 0 needsAuthStyleProbe := authStyle == 0
if needsAuthStyleProbe { if needsAuthStyleProbe {
if style, ok := lookupAuthStyle(tokenURL); ok { if style, ok := styleCache.lookupAuthStyle(tokenURL); ok {
authStyle = style authStyle = style
needsAuthStyleProbe = false needsAuthStyleProbe = false
} else { } else {
@ -222,7 +242,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
token, err = doTokenRoundTrip(ctx, req) token, err = doTokenRoundTrip(ctx, req)
} }
if needsAuthStyleProbe && err == nil { if needsAuthStyleProbe && err == nil {
setAuthStyle(tokenURL, authStyle) styleCache.setAuthStyle(tokenURL, authStyle)
} }
// Don't overwrite `RefreshToken` with an empty value // Don't overwrite `RefreshToken` with an empty value
// if this was a token refreshing request. // if this was a token refreshing request.

View File

@ -58,6 +58,10 @@ type Config struct {
// Scope specifies optional requested permissions. // Scope specifies optional requested permissions.
Scopes []string Scopes []string
// authStyleCache caches which auth style to use when Endpoint.AuthStyle is
// the zero value (AuthStyleAutoDetect).
authStyleCache internal.LazyAuthStyleCache
} }
// A TokenSource is anything that can return a token. // A TokenSource is anything that can return a token.

View File

@ -164,7 +164,7 @@ func tokenFromInternal(t *internal.Token) *Token {
// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
// with an error. // with an error.
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle)) tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get())
if err != nil { if err != nil {
if rErr, ok := err.(*internal.RetrieveError); ok { if rErr, ok := err.(*internal.RetrieveError); ok {
return nil, (*RetrieveError)(rErr) return nil, (*RetrieveError)(rErr)

View File

@ -583,6 +583,7 @@ ccflags="$@"
$2 ~ /^PERF_/ || $2 ~ /^PERF_/ ||
$2 ~ /^SECCOMP_MODE_/ || $2 ~ /^SECCOMP_MODE_/ ||
$2 ~ /^SEEK_/ || $2 ~ /^SEEK_/ ||
$2 ~ /^SCHED_/ ||
$2 ~ /^SPLICE_/ || $2 ~ /^SPLICE_/ ||
$2 ~ /^SYNC_FILE_RANGE_/ || $2 ~ /^SYNC_FILE_RANGE_/ ||
$2 !~ /IOC_MAGIC/ && $2 !~ /IOC_MAGIC/ &&

View File

@ -2471,6 +2471,29 @@ func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *
return pselect6(nfd, r, w, e, mutableTimeout, kernelMask) return pselect6(nfd, r, w, e, mutableTimeout, kernelMask)
} }
//sys schedSetattr(pid int, attr *SchedAttr, flags uint) (err error)
//sys schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error)
// SchedSetAttr is a wrapper for sched_setattr(2) syscall.
// https://man7.org/linux/man-pages/man2/sched_setattr.2.html
func SchedSetAttr(pid int, attr *SchedAttr, flags uint) error {
if attr == nil {
return EINVAL
}
attr.Size = SizeofSchedAttr
return schedSetattr(pid, attr, flags)
}
// SchedGetAttr is a wrapper for sched_getattr(2) syscall.
// https://man7.org/linux/man-pages/man2/sched_getattr.2.html
func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) {
attr := &SchedAttr{}
if err := schedGetattr(pid, attr, SizeofSchedAttr, flags); err != nil {
return nil, err
}
return attr, nil
}
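
A hedged sketch of the new wrappers; the policy value is illustrative, this is Linux-only, and changing policies may fail without appropriate privileges:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Read the current scheduling attributes of this thread (pid 0 = self).
	attr, err := unix.SchedGetAttr(0, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("policy=%d nice=%d\n", attr.Policy, attr.Nice)

	// Switch to SCHED_BATCH; Size is filled in by SchedSetAttr.
	attr.Policy = unix.SCHED_BATCH
	if err := unix.SchedSetAttr(0, attr, 0); err != nil {
		fmt.Println("sched_setattr:", err) // may fail depending on privileges
	}
}
```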
/* /*
* Unimplemented * Unimplemented
*/ */

View File

@ -549,6 +549,9 @@ func SetNonblock(fd int, nonblocking bool) (err error) {
if err != nil { if err != nil {
return err return err
} }
if (flag&O_NONBLOCK != 0) == nonblocking {
return nil
}
if nonblocking { if nonblocking {
flag |= O_NONBLOCK flag |= O_NONBLOCK
} else { } else {

View File

@ -2821,6 +2821,23 @@ const (
RWF_SUPPORTED = 0x1f RWF_SUPPORTED = 0x1f
RWF_SYNC = 0x4 RWF_SYNC = 0x4
RWF_WRITE_LIFE_NOT_SET = 0x0 RWF_WRITE_LIFE_NOT_SET = 0x0
SCHED_BATCH = 0x3
SCHED_DEADLINE = 0x6
SCHED_FIFO = 0x1
SCHED_FLAG_ALL = 0x7f
SCHED_FLAG_DL_OVERRUN = 0x4
SCHED_FLAG_KEEP_ALL = 0x18
SCHED_FLAG_KEEP_PARAMS = 0x10
SCHED_FLAG_KEEP_POLICY = 0x8
SCHED_FLAG_RECLAIM = 0x2
SCHED_FLAG_RESET_ON_FORK = 0x1
SCHED_FLAG_UTIL_CLAMP = 0x60
SCHED_FLAG_UTIL_CLAMP_MAX = 0x40
SCHED_FLAG_UTIL_CLAMP_MIN = 0x20
SCHED_IDLE = 0x5
SCHED_NORMAL = 0x0
SCHED_RESET_ON_FORK = 0x40000000
SCHED_RR = 0x2
SCM_CREDENTIALS = 0x2 SCM_CREDENTIALS = 0x2
SCM_RIGHTS = 0x1 SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x1d SCM_TIMESTAMP = 0x1d

View File

@ -2197,3 +2197,23 @@ func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) {
RawSyscallNoError(SYS_GETRESGID, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) RawSyscallNoError(SYS_GETRESGID, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid)))
return return
} }
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) {
_, _, e1 := Syscall(SYS_SCHED_SETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(flags))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) {
_, _, e1 := Syscall6(SYS_SCHED_GETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(size), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

View File

@ -5868,3 +5868,18 @@ const (
VIRTIO_NET_HDR_GSO_UDP_L4 = 0x5 VIRTIO_NET_HDR_GSO_UDP_L4 = 0x5
VIRTIO_NET_HDR_GSO_ECN = 0x80 VIRTIO_NET_HDR_GSO_ECN = 0x80
) )
type SchedAttr struct {
Size uint32
Policy uint32
Flags uint64
Nice int32
Priority uint32
Runtime uint64
Deadline uint64
Period uint64
Util_min uint32
Util_max uint32
}
const SizeofSchedAttr = 0x38

View File

@ -216,7 +216,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath //sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath
//sys TerminateProcess(handle Handle, exitcode uint32) (err error) //sys TerminateProcess(handle Handle, exitcode uint32) (err error)
//sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) //sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error)
//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW //sys getStartupInfo(startupInfo *StartupInfo) = GetStartupInfoW
//sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) //sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error)
//sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) //sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error)
//sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] //sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff]
@ -437,6 +437,10 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute //sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute
//sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute //sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute
// Windows Multimedia API
//sys TimeBeginPeriod (period uint32) (err error) [failretval != 0] = winmm.timeBeginPeriod
//sys TimeEndPeriod (period uint32) (err error) [failretval != 0] = winmm.timeEndPeriod
// syscall interface implementation for other packages // syscall interface implementation for other packages
// GetCurrentProcess returns the handle for the current process. // GetCurrentProcess returns the handle for the current process.
@ -1624,6 +1628,11 @@ func SetConsoleCursorPosition(console Handle, position Coord) error {
return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position))))
} }
func GetStartupInfo(startupInfo *StartupInfo) error {
getStartupInfo(startupInfo)
return nil
}
func (s NTStatus) Errno() syscall.Errno { func (s NTStatus) Errno() syscall.Errno {
return rtlNtStatusToDosErrorNoTeb(s) return rtlNtStatusToDosErrorNoTeb(s)
} }

View File

@ -55,6 +55,7 @@ var (
moduser32 = NewLazySystemDLL("user32.dll") moduser32 = NewLazySystemDLL("user32.dll")
moduserenv = NewLazySystemDLL("userenv.dll") moduserenv = NewLazySystemDLL("userenv.dll")
modversion = NewLazySystemDLL("version.dll") modversion = NewLazySystemDLL("version.dll")
modwinmm = NewLazySystemDLL("winmm.dll")
modwintrust = NewLazySystemDLL("wintrust.dll") modwintrust = NewLazySystemDLL("wintrust.dll")
modws2_32 = NewLazySystemDLL("ws2_32.dll") modws2_32 = NewLazySystemDLL("ws2_32.dll")
modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll")
@ -468,6 +469,8 @@ var (
procGetFileVersionInfoSizeW = modversion.NewProc("GetFileVersionInfoSizeW") procGetFileVersionInfoSizeW = modversion.NewProc("GetFileVersionInfoSizeW")
procGetFileVersionInfoW = modversion.NewProc("GetFileVersionInfoW") procGetFileVersionInfoW = modversion.NewProc("GetFileVersionInfoW")
procVerQueryValueW = modversion.NewProc("VerQueryValueW") procVerQueryValueW = modversion.NewProc("VerQueryValueW")
proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod")
proctimeEndPeriod = modwinmm.NewProc("timeEndPeriod")
procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx") procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx")
procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW")
procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW")
@ -2367,11 +2370,8 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin
return return
} }
func GetStartupInfo(startupInfo *StartupInfo) (err error) { func getStartupInfo(startupInfo *StartupInfo) {
r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0)
if r1 == 0 {
err = errnoErr(e1)
}
return return
} }
@ -4017,6 +4017,22 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint
return return
} }
func TimeBeginPeriod(period uint32) (err error) {
r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0)
if r1 != 0 {
err = errnoErr(e1)
}
return
}
func TimeEndPeriod(period uint32) (err error) {
r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0)
if r1 != 0 {
err = errnoErr(e1)
}
return
}
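
A hedged sketch of the new Windows multimedia timer wrappers; Windows-only, and the 1ms period is illustrative:

```go
//go:build windows

package main

import (
	"time"

	"golang.org/x/sys/windows"
)

func main() {
	// Request 1ms timer resolution for a latency-sensitive section,
	// then restore the default when done.
	if err := windows.TimeBeginPeriod(1); err != nil {
		panic(err)
	}
	defer windows.TimeEndPeriod(1)

	time.Sleep(10 * time.Millisecond)
}
```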
func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) {
r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
if r0 != 0 { if r0 != 0 {

View File

@ -29,7 +29,7 @@ var (
nfkcData = newNfkcTrie(0) nfkcData = newNfkcTrie(0)
) )
// lookupValue determines the type of block n and looks up the value for b. // lookup determines the type of block n and looks up the value for b.
// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block // For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
// is a list of ranges with an accompanying value. Given a matching range r, // is a list of ranges with an accompanying value. Given a matching range r,
// the value for b is by r.value + (b - r.lo) * stride. // the value for b is by r.value + (b - r.lo) * stride.

View File

@ -93,6 +93,7 @@ var stdlib = map[string][]string{
"Compare", "Compare",
"Contains", "Contains",
"ContainsAny", "ContainsAny",
"ContainsFunc",
"ContainsRune", "ContainsRune",
"Count", "Count",
"Cut", "Cut",
@ -147,6 +148,11 @@ var stdlib = map[string][]string{
"TrimSpace", "TrimSpace",
"TrimSuffix", "TrimSuffix",
}, },
"cmp": {
"Compare",
"Less",
"Ordered",
},
"compress/bzip2": { "compress/bzip2": {
"NewReader", "NewReader",
"StructuralError", "StructuralError",
@ -228,6 +234,7 @@ var stdlib = map[string][]string{
"Ring", "Ring",
}, },
"context": { "context": {
"AfterFunc",
"Background", "Background",
"CancelCauseFunc", "CancelCauseFunc",
"CancelFunc", "CancelFunc",
@ -239,8 +246,11 @@ var stdlib = map[string][]string{
"WithCancel", "WithCancel",
"WithCancelCause", "WithCancelCause",
"WithDeadline", "WithDeadline",
"WithDeadlineCause",
"WithTimeout", "WithTimeout",
"WithTimeoutCause",
"WithValue", "WithValue",
"WithoutCancel",
}, },
"crypto": { "crypto": {
"BLAKE2b_256", "BLAKE2b_256",
@ -445,6 +455,7 @@ var stdlib = map[string][]string{
"XORBytes", "XORBytes",
}, },
"crypto/tls": { "crypto/tls": {
"AlertError",
"Certificate", "Certificate",
"CertificateRequestInfo", "CertificateRequestInfo",
"CertificateVerificationError", "CertificateVerificationError",
@ -476,6 +487,7 @@ var stdlib = map[string][]string{
"LoadX509KeyPair", "LoadX509KeyPair",
"NewLRUClientSessionCache", "NewLRUClientSessionCache",
"NewListener", "NewListener",
"NewResumptionState",
"NoClientCert", "NoClientCert",
"PKCS1WithSHA1", "PKCS1WithSHA1",
"PKCS1WithSHA256", "PKCS1WithSHA256",
@ -484,6 +496,27 @@ var stdlib = map[string][]string{
"PSSWithSHA256", "PSSWithSHA256",
"PSSWithSHA384", "PSSWithSHA384",
"PSSWithSHA512", "PSSWithSHA512",
"ParseSessionState",
"QUICClient",
"QUICConfig",
"QUICConn",
"QUICEncryptionLevel",
"QUICEncryptionLevelApplication",
"QUICEncryptionLevelEarly",
"QUICEncryptionLevelHandshake",
"QUICEncryptionLevelInitial",
"QUICEvent",
"QUICEventKind",
"QUICHandshakeDone",
"QUICNoEvent",
"QUICRejectedEarlyData",
"QUICServer",
"QUICSessionTicketOptions",
"QUICSetReadSecret",
"QUICSetWriteSecret",
"QUICTransportParameters",
"QUICTransportParametersRequired",
"QUICWriteData",
"RecordHeaderError", "RecordHeaderError",
"RenegotiateFreelyAsClient", "RenegotiateFreelyAsClient",
"RenegotiateNever", "RenegotiateNever",
@ -493,6 +526,7 @@ var stdlib = map[string][]string{
"RequireAndVerifyClientCert", "RequireAndVerifyClientCert",
"RequireAnyClientCert", "RequireAnyClientCert",
"Server", "Server",
"SessionState",
"SignatureScheme", "SignatureScheme",
"TLS_AES_128_GCM_SHA256", "TLS_AES_128_GCM_SHA256",
"TLS_AES_256_GCM_SHA384", "TLS_AES_256_GCM_SHA384",
@ -523,6 +557,7 @@ var stdlib = map[string][]string{
"TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_RC4_128_SHA", "TLS_RSA_WITH_RC4_128_SHA",
"VerifyClientCertIfGiven", "VerifyClientCertIfGiven",
"VersionName",
"VersionSSL30", "VersionSSL30",
"VersionTLS10", "VersionTLS10",
"VersionTLS11", "VersionTLS11",
@ -618,6 +653,7 @@ var stdlib = map[string][]string{
"PureEd25519", "PureEd25519",
"RSA", "RSA",
"RevocationList", "RevocationList",
"RevocationListEntry",
"SHA1WithRSA", "SHA1WithRSA",
"SHA256WithRSA", "SHA256WithRSA",
"SHA256WithRSAPSS", "SHA256WithRSAPSS",
@ -1002,10 +1038,42 @@ var stdlib = map[string][]string{
"COMPRESS_LOOS", "COMPRESS_LOOS",
"COMPRESS_LOPROC", "COMPRESS_LOPROC",
"COMPRESS_ZLIB", "COMPRESS_ZLIB",
"COMPRESS_ZSTD",
"Chdr32", "Chdr32",
"Chdr64", "Chdr64",
"Class", "Class",
"CompressionType", "CompressionType",
"DF_1_CONFALT",
"DF_1_DIRECT",
"DF_1_DISPRELDNE",
"DF_1_DISPRELPND",
"DF_1_EDITED",
"DF_1_ENDFILTEE",
"DF_1_GLOBAL",
"DF_1_GLOBAUDIT",
"DF_1_GROUP",
"DF_1_IGNMULDEF",
"DF_1_INITFIRST",
"DF_1_INTERPOSE",
"DF_1_KMOD",
"DF_1_LOADFLTR",
"DF_1_NOCOMMON",
"DF_1_NODEFLIB",
"DF_1_NODELETE",
"DF_1_NODIRECT",
"DF_1_NODUMP",
"DF_1_NOHDR",
"DF_1_NOKSYMS",
"DF_1_NOOPEN",
"DF_1_NORELOC",
"DF_1_NOW",
"DF_1_ORIGIN",
"DF_1_PIE",
"DF_1_SINGLETON",
"DF_1_STUB",
"DF_1_SYMINTPOSE",
"DF_1_TRANS",
"DF_1_WEAKFILTER",
"DF_BIND_NOW", "DF_BIND_NOW",
"DF_ORIGIN", "DF_ORIGIN",
"DF_STATIC_TLS", "DF_STATIC_TLS",
@ -1144,6 +1212,7 @@ var stdlib = map[string][]string{
"Dyn32", "Dyn32",
"Dyn64", "Dyn64",
"DynFlag", "DynFlag",
"DynFlag1",
"DynTag", "DynTag",
"EI_ABIVERSION", "EI_ABIVERSION",
"EI_CLASS", "EI_CLASS",
@ -2111,6 +2180,7 @@ var stdlib = map[string][]string{
"R_PPC64_REL16_LO", "R_PPC64_REL16_LO",
"R_PPC64_REL24", "R_PPC64_REL24",
"R_PPC64_REL24_NOTOC", "R_PPC64_REL24_NOTOC",
"R_PPC64_REL24_P9NOTOC",
"R_PPC64_REL30", "R_PPC64_REL30",
"R_PPC64_REL32", "R_PPC64_REL32",
"R_PPC64_REL64", "R_PPC64_REL64",
@ -2848,6 +2918,7 @@ var stdlib = map[string][]string{
"MaxVarintLen16", "MaxVarintLen16",
"MaxVarintLen32", "MaxVarintLen32",
"MaxVarintLen64", "MaxVarintLen64",
"NativeEndian",
"PutUvarint", "PutUvarint",
"PutVarint", "PutVarint",
"Read", "Read",
@ -2963,6 +3034,7 @@ var stdlib = map[string][]string{
}, },
"errors": { "errors": {
"As", "As",
"ErrUnsupported",
"Is", "Is",
"Join", "Join",
"New", "New",
@ -2989,6 +3061,7 @@ var stdlib = map[string][]string{
"Arg", "Arg",
"Args", "Args",
"Bool", "Bool",
"BoolFunc",
"BoolVar", "BoolVar",
"CommandLine", "CommandLine",
"ContinueOnError", "ContinueOnError",
@ -3119,6 +3192,7 @@ var stdlib = map[string][]string{
"Inspect", "Inspect",
"InterfaceType", "InterfaceType",
"IsExported", "IsExported",
"IsGenerated",
"KeyValueExpr", "KeyValueExpr",
"LabeledStmt", "LabeledStmt",
"Lbl", "Lbl",
@ -3169,6 +3243,7 @@ var stdlib = map[string][]string{
"ArchChar", "ArchChar",
"Context", "Context",
"Default", "Default",
"Directive",
"FindOnly", "FindOnly",
"IgnoreVendor", "IgnoreVendor",
"Import", "Import",
@ -3184,6 +3259,7 @@ var stdlib = map[string][]string{
"go/build/constraint": { "go/build/constraint": {
"AndExpr", "AndExpr",
"Expr", "Expr",
"GoVersion",
"IsGoBuild", "IsGoBuild",
"IsPlusBuild", "IsPlusBuild",
"NotExpr", "NotExpr",
@ -3626,6 +3702,7 @@ var stdlib = map[string][]string{
"ErrBadHTML", "ErrBadHTML",
"ErrBranchEnd", "ErrBranchEnd",
"ErrEndContext", "ErrEndContext",
"ErrJSTemplate",
"ErrNoSuchTemplate", "ErrNoSuchTemplate",
"ErrOutputContext", "ErrOutputContext",
"ErrPartialCharset", "ErrPartialCharset",
@ -3870,6 +3947,8 @@ var stdlib = map[string][]string{
"FileInfo", "FileInfo",
"FileInfoToDirEntry", "FileInfoToDirEntry",
"FileMode", "FileMode",
"FormatDirEntry",
"FormatFileInfo",
"Glob", "Glob",
"GlobFS", "GlobFS",
"ModeAppend", "ModeAppend",
@ -3942,6 +4021,78 @@ var stdlib = map[string][]string{
"SetPrefix", "SetPrefix",
"Writer", "Writer",
}, },
"log/slog": {
"Any",
"AnyValue",
"Attr",
"Bool",
"BoolValue",
"Debug",
"DebugContext",
"Default",
"Duration",
"DurationValue",
"Error",
"ErrorContext",
"Float64",
"Float64Value",
"Group",
"GroupValue",
"Handler",
"HandlerOptions",
"Info",
"InfoContext",
"Int",
"Int64",
"Int64Value",
"IntValue",
"JSONHandler",
"Kind",
"KindAny",
"KindBool",
"KindDuration",
"KindFloat64",
"KindGroup",
"KindInt64",
"KindLogValuer",
"KindString",
"KindTime",
"KindUint64",
"Level",
"LevelDebug",
"LevelError",
"LevelInfo",
"LevelKey",
"LevelVar",
"LevelWarn",
"Leveler",
"Log",
"LogAttrs",
"LogValuer",
"Logger",
"MessageKey",
"New",
"NewJSONHandler",
"NewLogLogger",
"NewRecord",
"NewTextHandler",
"Record",
"SetDefault",
"Source",
"SourceKey",
"String",
"StringValue",
"TextHandler",
"Time",
"TimeKey",
"TimeValue",
"Uint64",
"Uint64Value",
"Value",
"Warn",
"WarnContext",
"With",
},
"log/syslog": { "log/syslog": {
"Dial", "Dial",
"LOG_ALERT", "LOG_ALERT",
@ -3977,6 +4128,13 @@ var stdlib = map[string][]string{
"Priority", "Priority",
"Writer", "Writer",
}, },
"maps": {
"Clone",
"Copy",
"DeleteFunc",
"Equal",
"EqualFunc",
},
"math": { "math": {
"Abs", "Abs",
"Acos", "Acos",
@ -4371,6 +4529,7 @@ var stdlib = map[string][]string{
"ErrNoLocation", "ErrNoLocation",
"ErrNotMultipart", "ErrNotMultipart",
"ErrNotSupported", "ErrNotSupported",
"ErrSchemeMismatch",
"ErrServerClosed", "ErrServerClosed",
"ErrShortBody", "ErrShortBody",
"ErrSkipAltProtocol", "ErrSkipAltProtocol",
@ -5084,6 +5243,8 @@ var stdlib = map[string][]string{
"NumCPU", "NumCPU",
"NumCgoCall", "NumCgoCall",
"NumGoroutine", "NumGoroutine",
"PanicNilError",
"Pinner",
"ReadMemStats", "ReadMemStats",
"ReadTrace", "ReadTrace",
"SetBlockProfileRate", "SetBlockProfileRate",
@ -5172,6 +5333,37 @@ var stdlib = map[string][]string{
"Task", "Task",
"WithRegion", "WithRegion",
}, },
"slices": {
"BinarySearch",
"BinarySearchFunc",
"Clip",
"Clone",
"Compact",
"CompactFunc",
"Compare",
"CompareFunc",
"Contains",
"ContainsFunc",
"Delete",
"DeleteFunc",
"Equal",
"EqualFunc",
"Grow",
"Index",
"IndexFunc",
"Insert",
"IsSorted",
"IsSortedFunc",
"Max",
"MaxFunc",
"Min",
"MinFunc",
"Replace",
"Reverse",
"Sort",
"SortFunc",
"SortStableFunc",
},
"sort": { "sort": {
"Find", "Find",
"Float64Slice", "Float64Slice",
@ -5242,6 +5434,7 @@ var stdlib = map[string][]string{
"Compare", "Compare",
"Contains", "Contains",
"ContainsAny", "ContainsAny",
"ContainsFunc",
"ContainsRune", "ContainsRune",
"Count", "Count",
"Cut", "Cut",
@ -5299,6 +5492,9 @@ var stdlib = map[string][]string{
"Mutex", "Mutex",
"NewCond", "NewCond",
"Once", "Once",
"OnceFunc",
"OnceValue",
"OnceValues",
"Pool", "Pool",
"RWMutex", "RWMutex",
"WaitGroup", "WaitGroup",
@ -9135,10 +9331,12 @@ var stdlib = map[string][]string{
"SYS_AIO_CANCEL", "SYS_AIO_CANCEL",
"SYS_AIO_ERROR", "SYS_AIO_ERROR",
"SYS_AIO_FSYNC", "SYS_AIO_FSYNC",
"SYS_AIO_MLOCK",
"SYS_AIO_READ", "SYS_AIO_READ",
"SYS_AIO_RETURN", "SYS_AIO_RETURN",
"SYS_AIO_SUSPEND", "SYS_AIO_SUSPEND",
"SYS_AIO_SUSPEND_NOCANCEL", "SYS_AIO_SUSPEND_NOCANCEL",
"SYS_AIO_WAITCOMPLETE",
"SYS_AIO_WRITE", "SYS_AIO_WRITE",
"SYS_ALARM", "SYS_ALARM",
"SYS_ARCH_PRCTL", "SYS_ARCH_PRCTL",
@ -9368,6 +9566,7 @@ var stdlib = map[string][]string{
"SYS_GET_MEMPOLICY", "SYS_GET_MEMPOLICY",
"SYS_GET_ROBUST_LIST", "SYS_GET_ROBUST_LIST",
"SYS_GET_THREAD_AREA", "SYS_GET_THREAD_AREA",
"SYS_GSSD_SYSCALL",
"SYS_GTTY", "SYS_GTTY",
"SYS_IDENTITYSVC", "SYS_IDENTITYSVC",
"SYS_IDLE", "SYS_IDLE",
@ -9411,8 +9610,24 @@ var stdlib = map[string][]string{
"SYS_KLDSYM", "SYS_KLDSYM",
"SYS_KLDUNLOAD", "SYS_KLDUNLOAD",
"SYS_KLDUNLOADF", "SYS_KLDUNLOADF",
"SYS_KMQ_NOTIFY",
"SYS_KMQ_OPEN",
"SYS_KMQ_SETATTR",
"SYS_KMQ_TIMEDRECEIVE",
"SYS_KMQ_TIMEDSEND",
"SYS_KMQ_UNLINK",
"SYS_KQUEUE", "SYS_KQUEUE",
"SYS_KQUEUE1", "SYS_KQUEUE1",
"SYS_KSEM_CLOSE",
"SYS_KSEM_DESTROY",
"SYS_KSEM_GETVALUE",
"SYS_KSEM_INIT",
"SYS_KSEM_OPEN",
"SYS_KSEM_POST",
"SYS_KSEM_TIMEDWAIT",
"SYS_KSEM_TRYWAIT",
"SYS_KSEM_UNLINK",
"SYS_KSEM_WAIT",
"SYS_KTIMER_CREATE", "SYS_KTIMER_CREATE",
"SYS_KTIMER_DELETE", "SYS_KTIMER_DELETE",
"SYS_KTIMER_GETOVERRUN", "SYS_KTIMER_GETOVERRUN",
@ -9504,11 +9719,14 @@ var stdlib = map[string][]string{
"SYS_NFSSVC", "SYS_NFSSVC",
"SYS_NFSTAT", "SYS_NFSTAT",
"SYS_NICE", "SYS_NICE",
"SYS_NLM_SYSCALL",
"SYS_NLSTAT", "SYS_NLSTAT",
"SYS_NMOUNT", "SYS_NMOUNT",
"SYS_NSTAT", "SYS_NSTAT",
"SYS_NTP_ADJTIME", "SYS_NTP_ADJTIME",
"SYS_NTP_GETTIME", "SYS_NTP_GETTIME",
"SYS_NUMA_GETAFFINITY",
"SYS_NUMA_SETAFFINITY",
"SYS_OABI_SYSCALL_BASE", "SYS_OABI_SYSCALL_BASE",
"SYS_OBREAK", "SYS_OBREAK",
"SYS_OLDFSTAT", "SYS_OLDFSTAT",
@ -9891,6 +10109,7 @@ var stdlib = map[string][]string{
"SYS___ACL_SET_FD", "SYS___ACL_SET_FD",
"SYS___ACL_SET_FILE", "SYS___ACL_SET_FILE",
"SYS___ACL_SET_LINK", "SYS___ACL_SET_LINK",
"SYS___CAP_RIGHTS_GET",
"SYS___CLONE", "SYS___CLONE",
"SYS___DISABLE_THREADSIGNAL", "SYS___DISABLE_THREADSIGNAL",
"SYS___GETCWD", "SYS___GETCWD",
@ -10574,6 +10793,7 @@ var stdlib = map[string][]string{
"Short", "Short",
"T", "T",
"TB", "TB",
"Testing",
"Verbose", "Verbose",
}, },
"testing/fstest": { "testing/fstest": {
@ -10603,6 +10823,9 @@ var stdlib = map[string][]string{
"SetupError", "SetupError",
"Value", "Value",
}, },
"testing/slogtest": {
"TestHandler",
},
"text/scanner": { "text/scanner": {
"Char", "Char",
"Comment", "Comment",
@ -10826,6 +11049,7 @@ var stdlib = map[string][]string{
"Cs", "Cs",
"Cuneiform", "Cuneiform",
"Cypriot", "Cypriot",
"Cypro_Minoan",
"Cyrillic", "Cyrillic",
"Dash", "Dash",
"Deprecated", "Deprecated",
@ -10889,6 +11113,7 @@ var stdlib = map[string][]string{
"Kaithi", "Kaithi",
"Kannada", "Kannada",
"Katakana", "Katakana",
"Kawi",
"Kayah_Li", "Kayah_Li",
"Kharoshthi", "Kharoshthi",
"Khitan_Small_Script", "Khitan_Small_Script",
@ -10943,6 +11168,7 @@ var stdlib = map[string][]string{
"Myanmar", "Myanmar",
"N", "N",
"Nabataean", "Nabataean",
"Nag_Mundari",
"Nandinagari", "Nandinagari",
"Nd", "Nd",
"New_Tai_Lue", "New_Tai_Lue",
@ -10964,6 +11190,7 @@ var stdlib = map[string][]string{
"Old_Sogdian", "Old_Sogdian",
"Old_South_Arabian", "Old_South_Arabian",
"Old_Turkic", "Old_Turkic",
"Old_Uyghur",
"Oriya", "Oriya",
"Osage", "Osage",
"Osmanya", "Osmanya",
@ -11038,6 +11265,7 @@ var stdlib = map[string][]string{
"Tai_Viet", "Tai_Viet",
"Takri", "Takri",
"Tamil", "Tamil",
"Tangsa",
"Tangut", "Tangut",
"Telugu", "Telugu",
"Terminal_Punctuation", "Terminal_Punctuation",
@ -11052,6 +11280,7 @@ var stdlib = map[string][]string{
"ToLower", "ToLower",
"ToTitle", "ToTitle",
"ToUpper", "ToUpper",
"Toto",
"TurkishCase", "TurkishCase",
"Ugaritic", "Ugaritic",
"Unified_Ideograph", "Unified_Ideograph",
@ -11061,6 +11290,7 @@ var stdlib = map[string][]string{
"Vai", "Vai",
"Variation_Selector", "Variation_Selector",
"Version", "Version",
"Vithkuqi",
"Wancho", "Wancho",
"Warang_Citi", "Warang_Citi",
"White_Space", "White_Space",

View File

@ -30,7 +30,7 @@ func (xl termlist) String() string {
var buf bytes.Buffer var buf bytes.Buffer
for i, x := range xl { for i, x := range xl {
if i > 0 { if i > 0 {
buf.WriteString(" ") buf.WriteString(" | ")
} }
buf.WriteString(x.String()) buf.WriteString(x.String())
} }

View File

@ -14,7 +14,6 @@ import "go/types"
// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) // 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
// T: &term{false, T} == {T} // set of type T // T: &term{false, T} == {T} // set of type T
// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t // ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
//
type term struct { type term struct {
tilde bool // valid if typ != nil tilde bool // valid if typ != nil
typ types.Type typ types.Type

View File

@ -1,6 +1,7 @@
package jsonpatch package jsonpatch
import ( import (
"bytes"
"encoding/json" "encoding/json"
"fmt" "fmt"
"reflect" "reflect"
@ -64,6 +65,9 @@ func NewOperation(op, path string, value interface{}) Operation {
// //
// An error will be returned if any of the two documents are invalid. // An error will be returned if any of the two documents are invalid.
func CreatePatch(a, b []byte) ([]Operation, error) { func CreatePatch(a, b []byte) ([]Operation, error) {
if bytes.Equal(a, b) {
return []Operation{}, nil
}
var aI interface{} var aI interface{}
var bI interface{} var bI interface{}
err := json.Unmarshal(a, &aI) err := json.Unmarshal(a, &aI)
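
A hedged usage sketch of CreatePatch with the new short-circuit for identical documents; the import path and Operation field names are assumed from the upstream jsonpatch package, not shown in this diff:

```go
package main

import (
	"fmt"

	jsonpatch "gomodules.xyz/jsonpatch/v2" // import path is an assumption
)

func main() {
	a := []byte(`{"name":"alpha","replicas":1}`)
	b := []byte(`{"name":"alpha","replicas":3}`)

	ops, err := jsonpatch.CreatePatch(a, b)
	if err != nil {
		panic(err)
	}
	for _, op := range ops {
		fmt.Printf("%s %s %v\n", op.Operation, op.Path, op.Value)
	}

	// Identical inputs now return an empty patch without any JSON diffing.
	same, _ := jsonpatch.CreatePatch(a, a)
	fmt.Println(len(same)) // 0
}
```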

View File

@ -14,21 +14,14 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the
## Installation ## Installation
With [Go module][] support (Go 1.11+), simply add the following import Simply add the following import to your code, and then `go [build|run|test]`
will automatically fetch the necessary dependencies:
```go ```go
import "google.golang.org/grpc" import "google.golang.org/grpc"
``` ```
to your code, and then `go [build|run|test]` will automatically fetch the
necessary dependencies.
Otherwise, to install the `grpc-go` package, run the following command:
```console
$ go get -u google.golang.org/grpc
```
> **Note:** If you are trying to access `grpc-go` from **China**, see the > **Note:** If you are trying to access `grpc-go` from **China**, see the
> [FAQ](#FAQ) below. > [FAQ](#FAQ) below.
@ -56,15 +49,6 @@ To build Go code, there are several options:
- Set up a VPN and access google.golang.org through that. - Set up a VPN and access google.golang.org through that.
- Without Go module support: `git clone` the repo manually:
```sh
git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc
```
You will need to do the same for all of grpc's dependencies in `golang.org`,
e.g. `golang.org/x/net`.
- With Go module support: it is possible to use the `replace` feature of `go - With Go module support: it is possible to use the `replace` feature of `go
mod` to create aliases for golang.org packages. In your project's directory: mod` to create aliases for golang.org packages. In your project's directory:
@ -76,33 +60,13 @@ To build Go code, there are several options:
``` ```
Again, this will need to be done for all transitive dependencies hosted on Again, this will need to be done for all transitive dependencies hosted on
golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). golang.org as well. For details, refer to [golang/go issue
#28652](https://github.com/golang/go/issues/28652).
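As a hedged illustration of the `replace` approach described above (the README's exact commands are elided in this hunk), aliasing golang.org modules to their GitHub mirrors in `go.mod` might look like the following; the mirror paths and versions are assumptions, and every transitive golang.org dependency needs its own entry:

```go
module <your module name>

require google.golang.org/grpc v1.58.1

replace (
	golang.org/x/net => github.com/golang/net v0.15.0
	golang.org/x/sys => github.com/golang/sys v0.12.0
)
```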
### Compiling error, undefined: grpc.SupportPackageIsVersion ### Compiling error, undefined: grpc.SupportPackageIsVersion
#### If you are using Go modules: Please update to the latest version of gRPC-Go using
`go get google.golang.org/grpc`.
Ensure your gRPC-Go version is `require`d at the appropriate version in
the same module containing the generated `.pb.go` files. For example,
`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file:
```go
module <your module name>
require (
google.golang.org/grpc v1.27.0
)
```
#### If you are *not* using Go modules:
Update the `proto` package, gRPC package, and rebuild the `.proto` files:
```sh
go get -u github.com/golang/protobuf/{proto,protoc-gen-go}
go get -u google.golang.org/grpc
protoc --go_out=plugins=grpc:. *.proto
```
### How to turn on logging ### How to turn on logging
@ -121,9 +85,11 @@ possible reasons, including:
1. mis-configured transport credentials, connection failed on handshaking 1. mis-configured transport credentials, connection failed on handshaking
1. bytes disrupted, possibly by a proxy in between 1. bytes disrupted, possibly by a proxy in between
1. server shutdown 1. server shutdown
1. Keepalive parameters caused connection shutdown, for example if you have configured 1. Keepalive parameters caused connection shutdown, for example if you have
your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). configured your server to terminate connections regularly to [trigger DNS
If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779).
If this is the case, you may want to increase your
[MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters),
to allow longer RPC calls to finish. to allow longer RPC calls to finish.
It can be tricky to debug this because the error happens on the client side but It can be tricky to debug this because the error happens on the client side but

View File

@ -34,26 +34,26 @@ import (
// key/value pairs. Keys must be hashable, and users should define their own // key/value pairs. Keys must be hashable, and users should define their own
// types for keys. Values should not be modified after they are added to an // types for keys. Values should not be modified after they are added to an
// Attributes or if they were received from one. If values implement 'Equal(o // Attributes or if they were received from one. If values implement 'Equal(o
// interface{}) bool', it will be called by (*Attributes).Equal to determine // any) bool', it will be called by (*Attributes).Equal to determine whether
// whether two values with the same key should be considered equal. // two values with the same key should be considered equal.
type Attributes struct { type Attributes struct {
m map[interface{}]interface{} m map[any]any
} }
// New returns a new Attributes containing the key/value pair. // New returns a new Attributes containing the key/value pair.
func New(key, value interface{}) *Attributes { func New(key, value any) *Attributes {
return &Attributes{m: map[interface{}]interface{}{key: value}} return &Attributes{m: map[any]any{key: value}}
} }
// WithValue returns a new Attributes containing the previous keys and values // WithValue returns a new Attributes containing the previous keys and values
// and the new key/value pair. If the same key appears multiple times, the // and the new key/value pair. If the same key appears multiple times, the
// last value overwrites all previous values for that key. To remove an // last value overwrites all previous values for that key. To remove an
// existing key, use a nil value. value should not be modified later. // existing key, use a nil value. value should not be modified later.
func (a *Attributes) WithValue(key, value interface{}) *Attributes { func (a *Attributes) WithValue(key, value any) *Attributes {
if a == nil { if a == nil {
return New(key, value) return New(key, value)
} }
n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} n := &Attributes{m: make(map[any]any, len(a.m)+1)}
for k, v := range a.m { for k, v := range a.m {
n.m[k] = v n.m[k] = v
} }
@ -63,20 +63,19 @@ func (a *Attributes) WithValue(key, value interface{}) *Attributes {
// Value returns the value associated with these attributes for key, or nil if // Value returns the value associated with these attributes for key, or nil if
// no value is associated with key. The returned value should not be modified. // no value is associated with key. The returned value should not be modified.
func (a *Attributes) Value(key interface{}) interface{} { func (a *Attributes) Value(key any) any {
if a == nil { if a == nil {
return nil return nil
} }
return a.m[key] return a.m[key]
} }
// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) // Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is
// bool' is implemented for a value in the attributes, it is called to // implemented for a value in the attributes, it is called to determine if the
// determine if the value matches the one stored in the other attributes. If // value matches the one stored in the other attributes. If Equal is not
// Equal is not implemented, standard equality is used to determine if the two // implemented, standard equality is used to determine if the two values are
// values are equal. Note that some types (e.g. maps) aren't comparable by // equal. Note that some types (e.g. maps) aren't comparable by default, so
// default, so they must be wrapped in a struct, or in an alias type, with Equal // they must be wrapped in a struct, or in an alias type, with Equal defined.
// defined.
func (a *Attributes) Equal(o *Attributes) bool { func (a *Attributes) Equal(o *Attributes) bool {
if a == nil && o == nil { if a == nil && o == nil {
return true return true
@ -93,7 +92,7 @@ func (a *Attributes) Equal(o *Attributes) bool {
// o missing element of a // o missing element of a
return false return false
} }
if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { if eq, ok := v.(interface{ Equal(o any) bool }); ok {
if !eq.Equal(ov) { if !eq.Equal(ov) {
return false return false
} }
@ -122,7 +121,7 @@ func (a *Attributes) String() string {
return sb.String() return sb.String()
} }
func str(x interface{}) string { func str(x any) string {
if v, ok := x.(fmt.Stringer); ok { if v, ok := x.(fmt.Stringer); ok {
return v.String() return v.String()
} else if v, ok := x.(string); ok { } else if v, ok := x.(string); ok {
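A minimal sketch of using the `any`-based attributes API shown above; the `regionKey` type and the values are illustrative, not part of gRPC:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// Keys should be unexported defined types so they cannot collide across packages.
type regionKey struct{}

func main() {
	a := attributes.New(regionKey{}, "us-east1")
	a = a.WithValue(regionKey{}, "europe-west4") // the last value for a key wins

	if region, ok := a.Value(regionKey{}).(string); ok {
		fmt.Println("region:", region) // region: europe-west4
	}
}
```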

View File

@ -105,8 +105,8 @@ type SubConn interface {
// //
// This will trigger a state transition for the SubConn. // This will trigger a state transition for the SubConn.
// //
// Deprecated: This method is now part of the ClientConn interface and will // Deprecated: this method will be removed. Create new SubConns for new
// eventually be removed from here. // addresses instead.
UpdateAddresses([]resolver.Address) UpdateAddresses([]resolver.Address)
// Connect starts the connecting for this SubConn. // Connect starts the connecting for this SubConn.
Connect() Connect()
@ -115,6 +115,13 @@ type SubConn interface {
// creates a new one and returns it. Returns a close function which must // creates a new one and returns it. Returns a close function which must
// be called when the Producer is no longer needed. // be called when the Producer is no longer needed.
GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
// Shutdown shuts down the SubConn gracefully. Any started RPCs will be
// allowed to complete. No future calls should be made on the SubConn.
// One final state update will be delivered to the StateListener (or
// UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
// indicate the shutdown operation. This may be delivered before
// in-progress RPCs are complete and the actual connection is closed.
Shutdown()
} }
// NewSubConnOptions contains options to create new SubConn. // NewSubConnOptions contains options to create new SubConn.
@ -129,6 +136,11 @@ type NewSubConnOptions struct {
// HealthCheckEnabled indicates whether health check service should be // HealthCheckEnabled indicates whether health check service should be
// enabled on this SubConn // enabled on this SubConn
HealthCheckEnabled bool HealthCheckEnabled bool
// StateListener is called when the state of the subconn changes. If nil,
// Balancer.UpdateSubConnState will be called instead. Will never be
// invoked until after Connect() is called on the SubConn created with
// these options.
StateListener func(SubConnState)
} }
// State contains the balancer's state relevant to the gRPC ClientConn. // State contains the balancer's state relevant to the gRPC ClientConn.
@ -150,16 +162,24 @@ type ClientConn interface {
// NewSubConn is called by balancer to create a new SubConn. // NewSubConn is called by balancer to create a new SubConn.
// It doesn't block and wait for the connections to be established. // It doesn't block and wait for the connections to be established.
// Behaviors of the SubConn can be controlled by options. // Behaviors of the SubConn can be controlled by options.
//
// Deprecated: please be aware that in a future version, SubConns will only
// support one address per SubConn.
NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
// RemoveSubConn removes the SubConn from ClientConn. // RemoveSubConn removes the SubConn from ClientConn.
// The SubConn will be shutdown. // The SubConn will be shutdown.
//
// Deprecated: use SubConn.Shutdown instead.
RemoveSubConn(SubConn) RemoveSubConn(SubConn)
// UpdateAddresses updates the addresses used in the passed in SubConn. // UpdateAddresses updates the addresses used in the passed in SubConn.
// gRPC checks if the currently connected address is still in the new list. // gRPC checks if the currently connected address is still in the new list.
// If so, the connection will be kept. Else, the connection will be // If so, the connection will be kept. Else, the connection will be
// gracefully closed, and a new connection will be created. // gracefully closed, and a new connection will be created.
// //
// This will trigger a state transition for the SubConn. // This may trigger a state transition for the SubConn.
//
// Deprecated: this method will be removed. Create new SubConns for new
// addresses instead.
UpdateAddresses(SubConn, []resolver.Address) UpdateAddresses(SubConn, []resolver.Address)
// UpdateState notifies gRPC that the balancer's internal state has // UpdateState notifies gRPC that the balancer's internal state has
@ -250,7 +270,7 @@ type DoneInfo struct {
// trailing metadata. // trailing metadata.
// //
// The only supported type now is *orca_v3.LoadReport. // The only supported type now is *orca_v3.LoadReport.
ServerLoad interface{} ServerLoad any
} }
var ( var (
@ -343,9 +363,13 @@ type Balancer interface {
ResolverError(error) ResolverError(error)
// UpdateSubConnState is called by gRPC when the state of a SubConn // UpdateSubConnState is called by gRPC when the state of a SubConn
// changes. // changes.
//
// Deprecated: Use NewSubConnOptions.StateListener when creating the
// SubConn instead.
UpdateSubConnState(SubConn, SubConnState) UpdateSubConnState(SubConn, SubConnState)
// Close closes the balancer. The balancer is not required to call // Close closes the balancer. The balancer is not currently required to
// ClientConn.RemoveSubConn for its existing SubConns. // call SubConn.Shutdown for its existing SubConns; however, this will be
// required in a future release, so it is recommended.
Close() Close()
} }
@ -390,15 +414,14 @@ var ErrBadResolverState = errors.New("bad resolver state")
type ProducerBuilder interface { type ProducerBuilder interface {
// Build creates a Producer. The first parameter is always a // Build creates a Producer. The first parameter is always a
// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
// associated SubConn), but is declared as interface{} to avoid a // associated SubConn), but is declared as `any` to avoid a dependency
// dependency cycle. Should also return a close function that will be // cycle. Should also return a close function that will be called when all
// called when all references to the Producer have been given up. // references to the Producer have been given up.
Build(grpcClientConnInterface interface{}) (p Producer, close func()) Build(grpcClientConnInterface any) (p Producer, close func())
} }
// A Producer is a type shared among potentially many consumers. It is // A Producer is a type shared among potentially many consumers. It is
// associated with a SubConn, and an implementation will typically contain // associated with a SubConn, and an implementation will typically contain
// other methods to provide additional functionality, e.g. configuration or // other methods to provide additional functionality, e.g. configuration or
// subscription registration. // subscription registration.
type Producer interface { type Producer any
}
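To show how the new surface fits together, here is a hedged sketch of a custom LB policy adopting `NewSubConnOptions.StateListener` and `SubConn.Shutdown` in place of `UpdateSubConnState` and `ClientConn.RemoveSubConn`; the `lbexample` package, `myBalancer`, and its fields are assumptions, not gRPC code:

```go
package lbexample

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// myBalancer is a hypothetical LB policy used only to illustrate the new API.
type myBalancer struct {
	cc       balancer.ClientConn
	subConns map[string]balancer.SubConn
}

func (b *myBalancer) handleState(sc balancer.SubConn, s balancer.SubConnState) {
	// React to connectivity changes for sc here (regenerate picker, reconnect, ...).
}

func (b *myBalancer) addAddress(addr resolver.Address) {
	var sc balancer.SubConn
	opts := balancer.NewSubConnOptions{
		// Called for every state change of this SubConn; replaces
		// Balancer.UpdateSubConnState for SubConns created with these options.
		StateListener: func(scs balancer.SubConnState) { b.handleState(sc, scs) },
	}
	sc, err := b.cc.NewSubConn([]resolver.Address{addr}, opts)
	if err != nil {
		return
	}
	b.subConns[addr.Addr] = sc
	sc.Connect()
}

func (b *myBalancer) removeAddress(addr resolver.Address) {
	if sc, ok := b.subConns[addr.Addr]; ok {
		sc.Shutdown() // replaces b.cc.RemoveSubConn(sc)
		delete(b.subConns, addr.Addr)
	}
}
```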

View File

@ -105,7 +105,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
addrsSet.Set(a, nil) addrsSet.Set(a, nil)
if _, ok := b.subConns.Get(a); !ok { if _, ok := b.subConns.Get(a); !ok {
// a is a new address (not existing in b.subConns). // a is a new address (not existing in b.subConns).
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) var sc balancer.SubConn
opts := balancer.NewSubConnOptions{
HealthCheckEnabled: b.config.HealthCheck,
StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) },
}
sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts)
if err != nil { if err != nil {
logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
continue continue
@ -121,10 +126,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
sc := sci.(balancer.SubConn) sc := sci.(balancer.SubConn)
// a was removed by resolver. // a was removed by resolver.
if _, ok := addrsSet.Get(a); !ok { if _, ok := addrsSet.Get(a); !ok {
b.cc.RemoveSubConn(sc) sc.Shutdown()
b.subConns.Delete(a) b.subConns.Delete(a)
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
// The entry will be deleted in UpdateSubConnState. // The entry will be deleted in updateSubConnState.
} }
} }
// If resolver state contains no addresses, return an error so ClientConn // If resolver state contains no addresses, return an error so ClientConn
@ -177,7 +182,12 @@ func (b *baseBalancer) regeneratePicker() {
b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
} }
// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn.
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state)
}
func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
s := state.ConnectivityState s := state.ConnectivityState
if logger.V(2) { if logger.V(2) {
logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
@ -204,8 +214,8 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
case connectivity.Idle: case connectivity.Idle:
sc.Connect() sc.Connect()
case connectivity.Shutdown: case connectivity.Shutdown:
// When an address was removed by resolver, b called RemoveSubConn but // When an address was removed by resolver, b called Shutdown but kept
// kept the sc's state in scStates. Remove state for this sc here. // the sc's state in scStates. Remove state for this sc here.
delete(b.scStates, sc) delete(b.scStates, sc)
case connectivity.TransientFailure: case connectivity.TransientFailure:
// Save error to be reported via picker. // Save error to be reported via picker.
@ -226,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
} }
// Close is a nop because base balancer doesn't have internal state to clean up, // Close is a nop because base balancer doesn't have internal state to clean up,
// and it doesn't need to call RemoveSubConn for the SubConns. // and it doesn't need to call Shutdown for the SubConns.
func (b *baseBalancer) Close() { func (b *baseBalancer) Close() {
} }

View File

@ -99,20 +99,6 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat
// lock held. But the lock guards only the scheduling part. The actual // lock held. But the lock guards only the scheduling part. The actual
// callback is called asynchronously without the lock being held. // callback is called asynchronously without the lock being held.
ok := ccb.serializer.Schedule(func(_ context.Context) { ok := ccb.serializer.Schedule(func(_ context.Context) {
// If the addresses specified in the update contain addresses of type
// "grpclb" and the selected LB policy is not "grpclb", these addresses
// will be filtered out and ccs will be modified with the updated
// address list.
if ccb.curBalancerName != grpclbName {
var addrs []resolver.Address
for _, addr := range ccs.ResolverState.Addresses {
if addr.Type == resolver.GRPCLB {
continue
}
addrs = append(addrs, addr)
}
ccs.ResolverState.Addresses = addrs
}
errCh <- ccb.balancer.UpdateClientConnState(*ccs) errCh <- ccb.balancer.UpdateClientConnState(*ccs)
}) })
if !ok { if !ok {
@ -139,7 +125,9 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat
func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
ccb.mu.Lock() ccb.mu.Lock()
ccb.serializer.Schedule(func(_ context.Context) { ccb.serializer.Schedule(func(_ context.Context) {
ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) // Even though it is optional for balancers, gracefulswitch ensures
// opts.StateListener is set, so this cannot ever be nil.
sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
}) })
ccb.mu.Unlock() ccb.mu.Unlock()
} }
@ -221,7 +209,7 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
} }
ccb.mode = m ccb.mode = m
done := ccb.serializer.Done done := ccb.serializer.Done()
b := ccb.balancer b := ccb.balancer
ok := ccb.serializer.Schedule(func(_ context.Context) { ok := ccb.serializer.Schedule(func(_ context.Context) {
// Close the serializer to ensure that no more calls from gRPC are sent // Close the serializer to ensure that no more calls from gRPC are sent
@ -238,11 +226,9 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
} }
ccb.mu.Unlock() ccb.mu.Unlock()
// Give enqueued callbacks a chance to finish. // Give enqueued callbacks a chance to finish before closing the balancer.
<-done <-done
// Spawn a goroutine to close the balancer (since it may block trying to b.Close()
// cleanup all allocated resources) and return early.
go b.Close()
} }
// exitIdleMode is invoked by grpc when the channel exits idle mode either // exitIdleMode is invoked by grpc when the channel exits idle mode either
@ -314,29 +300,19 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
return nil, err return nil, err
} }
acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} acbw := &acBalancerWrapper{
ccb: ccb,
ac: ac,
producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
stateListener: opts.StateListener,
}
ac.acbw = acbw ac.acbw = acbw
return acbw, nil return acbw, nil
} }
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
if ccb.isIdleOrClosed() { // The graceful switch balancer will never call this.
// It is safe to ignore this call when the balancer is closed or in idle logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly", sc)
// because the ClientConn takes care of closing the connections.
//
// Not returning early from here when the balancer is closed or in idle
// leads to a deadlock though, because of the following sequence of
// calls when holding cc.mu:
// cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
// ccb.RemoveAddrConn --> cc.removeAddrConn
return
}
acbw, ok := sc.(*acBalancerWrapper)
if !ok {
return
}
ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
} }
func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
@ -381,6 +357,8 @@ func (ccb *ccBalancerWrapper) Target() string {
// It implements balancer.SubConn interface. // It implements balancer.SubConn interface.
type acBalancerWrapper struct { type acBalancerWrapper struct {
ac *addrConn // read-only ac *addrConn // read-only
ccb *ccBalancerWrapper // read-only
stateListener func(balancer.SubConnState)
mu sync.Mutex mu sync.Mutex
producers map[balancer.ProducerBuilder]*refCountedProducer producers map[balancer.ProducerBuilder]*refCountedProducer
@ -398,6 +376,23 @@ func (acbw *acBalancerWrapper) Connect() {
go acbw.ac.connect() go acbw.ac.connect()
} }
func (acbw *acBalancerWrapper) Shutdown() {
ccb := acbw.ccb
if ccb.isIdleOrClosed() {
// It is safe to ignore this call when the balancer is closed or in idle
// because the ClientConn takes care of closing the connections.
//
// Not returning early from here when the balancer is closed or in idle
// leads to a deadlock though, because of the following sequence of
// calls when holding cc.mu:
// cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
// ccb.RemoveAddrConn --> cc.removeAddrConn
return
}
ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
}
// NewStream begins a streaming RPC on the addrConn. If the addrConn is not // NewStream begins a streaming RPC on the addrConn. If the addrConn is not
// ready, blocks until it is or ctx expires. Returns an error when the context // ready, blocks until it is or ctx expires. Returns an error when the context
// expires or the addrConn is shut down. // expires or the addrConn is shut down.
@ -411,7 +406,7 @@ func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc,
// Invoke performs a unary RPC. If the addrConn is not ready, returns // Invoke performs a unary RPC. If the addrConn is not ready, returns
// errSubConnNotReady. // errSubConnNotReady.
func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error {
cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
if err != nil { if err != nil {
return err return err

View File

@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT. // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: // versions:
// protoc-gen-go v1.30.0 // protoc-gen-go v1.31.0
// protoc v4.22.0 // protoc v4.22.0
// source: grpc/binlog/v1/binarylog.proto // source: grpc/binlog/v1/binarylog.proto

View File

@ -26,12 +26,7 @@ import (
// received. This is typically called by generated code. // received. This is typically called by generated code.
// //
// All errors returned by Invoke are compatible with the status package. // All errors returned by Invoke are compatible with the status package.
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error {
if err := cc.idlenessMgr.onCallBegin(); err != nil {
return err
}
defer cc.idlenessMgr.onCallEnd()
// allow interceptor to see all applicable call options, which means those // allow interceptor to see all applicable call options, which means those
// configured as defaults from dial option as well as per-call options // configured as defaults from dial option as well as per-call options
opts = combine(cc.dopts.callOptions, opts) opts = combine(cc.dopts.callOptions, opts)
@ -61,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption {
// received. This is typically called by generated code. // received. This is typically called by generated code.
// //
// DEPRECATED: Use ClientConn.Invoke instead. // DEPRECATED: Use ClientConn.Invoke instead.
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error {
return cc.Invoke(ctx, method, args, reply, opts...) return cc.Invoke(ctx, method, args, reply, opts...)
} }
var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
if err != nil { if err != nil {
return err return err

View File

@ -34,9 +34,11 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity" "google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/idle"
"google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/pretty"
iresolver "google.golang.org/grpc/internal/resolver" iresolver "google.golang.org/grpc/internal/resolver"
"google.golang.org/grpc/internal/transport" "google.golang.org/grpc/internal/transport"
@ -54,8 +56,6 @@ import (
const ( const (
// minimum time to give a connection to complete // minimum time to give a connection to complete
minConnectTimeout = 20 * time.Second minConnectTimeout = 20 * time.Second
// must match grpclbName in grpclb/grpclb.go
grpclbName = "grpclb"
) )
var ( var (
@ -138,7 +138,6 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
cc := &ClientConn{ cc := &ClientConn{
target: target, target: target,
csMgr: &connectivityStateManager{},
conns: make(map[*addrConn]struct{}), conns: make(map[*addrConn]struct{}),
dopts: defaultDialOptions(), dopts: defaultDialOptions(),
czData: new(channelzData), czData: new(channelzData),
@ -191,6 +190,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
// Register ClientConn with channelz. // Register ClientConn with channelz.
cc.channelzRegistration(target) cc.channelzRegistration(target)
cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID)
if err := cc.validateTransportCredentials(); err != nil { if err := cc.validateTransportCredentials(); err != nil {
return nil, err return nil, err
} }
@ -266,7 +267,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
// Configure idleness support with configured idle timeout or default idle // Configure idleness support with configured idle timeout or default idle
// timeout duration. Idleness can be explicitly disabled by the user, by // timeout duration. Idleness can be explicitly disabled by the user, by
// setting the dial option to 0. // setting the dial option to 0.
cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger})
// Return early for non-blocking dials. // Return early for non-blocking dials.
if !cc.dopts.block { if !cc.dopts.block {
@ -317,6 +318,16 @@ func (cc *ClientConn) addTraceEvent(msg string) {
channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
} }
type idler ClientConn
func (i *idler) EnterIdleMode() error {
return (*ClientConn)(i).enterIdleMode()
}
func (i *idler) ExitIdleMode() error {
return (*ClientConn)(i).exitIdleMode()
}
// exitIdleMode moves the channel out of idle mode by recreating the name // exitIdleMode moves the channel out of idle mode by recreating the name
// resolver and load balancer. // resolver and load balancer.
func (cc *ClientConn) exitIdleMode() error { func (cc *ClientConn) exitIdleMode() error {
@ -327,7 +338,7 @@ func (cc *ClientConn) exitIdleMode() error {
} }
if cc.idlenessState != ccIdlenessStateIdle { if cc.idlenessState != ccIdlenessStateIdle {
cc.mu.Unlock() cc.mu.Unlock()
logger.Info("ClientConn asked to exit idle mode when not in idle mode") channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState)
return nil return nil
} }
@ -350,7 +361,7 @@ func (cc *ClientConn) exitIdleMode() error {
cc.idlenessState = ccIdlenessStateExitingIdle cc.idlenessState = ccIdlenessStateExitingIdle
exitedIdle := false exitedIdle := false
if cc.blockingpicker == nil { if cc.blockingpicker == nil {
cc.blockingpicker = newPickerWrapper() cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers)
} else { } else {
cc.blockingpicker.exitIdleMode() cc.blockingpicker.exitIdleMode()
exitedIdle = true exitedIdle = true
@ -398,7 +409,8 @@ func (cc *ClientConn) enterIdleMode() error {
return ErrClientConnClosing return ErrClientConnClosing
} }
if cc.idlenessState != ccIdlenessStateActive { if cc.idlenessState != ccIdlenessStateActive {
logger.Error("ClientConn asked to enter idle mode when not active") channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState)
cc.mu.Unlock()
return nil return nil
} }
@ -475,7 +487,6 @@ func (cc *ClientConn) validateTransportCredentials() error {
func (cc *ClientConn) channelzRegistration(target string) { func (cc *ClientConn) channelzRegistration(target string) {
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
cc.addTraceEvent("created") cc.addTraceEvent("created")
cc.csMgr.channelzID = cc.channelzID
} }
// chainUnaryClientInterceptors chains all unary client interceptors into one. // chainUnaryClientInterceptors chains all unary client interceptors into one.
@ -492,7 +503,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) {
} else if len(interceptors) == 1 { } else if len(interceptors) == 1 {
chainedInt = interceptors[0] chainedInt = interceptors[0]
} else { } else {
chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
} }
} }
@ -504,7 +515,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final
if curr == len(interceptors)-1 { if curr == len(interceptors)-1 {
return finalInvoker return finalInvoker
} }
return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
} }
} }
@ -540,13 +551,27 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr
} }
} }
// newConnectivityStateManager creates a connectivityStateManager with
// the specified id.
func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager {
return &connectivityStateManager{
channelzID: id,
pubSub: grpcsync.NewPubSub(ctx),
}
}
// connectivityStateManager keeps the connectivity.State of ClientConn. // connectivityStateManager keeps the connectivity.State of ClientConn.
// This struct will eventually be exported so the balancers can access it. // This struct will eventually be exported so the balancers can access it.
//
// TODO: If possible, get rid of the `connectivityStateManager` type, and
// provide this functionality using the `PubSub`, to avoid keeping track of
// the connectivity state at two places.
type connectivityStateManager struct { type connectivityStateManager struct {
mu sync.Mutex mu sync.Mutex
state connectivity.State state connectivity.State
notifyChan chan struct{} notifyChan chan struct{}
channelzID *channelz.Identifier channelzID *channelz.Identifier
pubSub *grpcsync.PubSub
} }
// updateState updates the connectivity.State of ClientConn. // updateState updates the connectivity.State of ClientConn.
@ -562,6 +587,8 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) {
return return
} }
csm.state = state csm.state = state
csm.pubSub.Publish(state)
channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state)
if csm.notifyChan != nil { if csm.notifyChan != nil {
// There are other goroutines waiting on this channel. // There are other goroutines waiting on this channel.
@ -591,7 +618,7 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} {
type ClientConnInterface interface { type ClientConnInterface interface {
// Invoke performs a unary RPC and returns after the response is received // Invoke performs a unary RPC and returns after the response is received
// into reply. // into reply.
Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error
// NewStream begins a streaming RPC. // NewStream begins a streaming RPC.
NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error)
} }
@ -623,7 +650,7 @@ type ClientConn struct {
channelzID *channelz.Identifier // Channelz identifier for the channel. channelzID *channelz.Identifier // Channelz identifier for the channel.
resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). resolverBuilder resolver.Builder // See parseTargetAndFindResolver().
balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath.
idlenessMgr idlenessManager idlenessMgr idle.Manager
// The following provide their own synchronization, and therefore don't // The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them. // require cc.mu to be held to access them.
@ -669,6 +696,19 @@ const (
ccIdlenessStateExitingIdle ccIdlenessStateExitingIdle
) )
func (s ccIdlenessState) String() string {
switch s {
case ccIdlenessStateActive:
return "active"
case ccIdlenessStateIdle:
return "idle"
case ccIdlenessStateExitingIdle:
return "exitingIdle"
default:
return "unknown"
}
}
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in former case and false in latter. // ctx expires. A true value is returned in former case and false in latter.
// //
@ -760,6 +800,10 @@ func init() {
panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
} }
emptyServiceConfig = cfg.Config.(*ServiceConfig) emptyServiceConfig = cfg.Config.(*ServiceConfig)
internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() {
return cc.csMgr.pubSub.Subscribe(s)
}
} }
func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
@ -1047,8 +1091,8 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
ac.cancel() ac.cancel()
ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx)
// We have to defer here because GracefulClose => Close => onClose, which // We have to defer here because GracefulClose => onClose, which requires
// requires locking ac.mu. // locking ac.mu.
if ac.transport != nil { if ac.transport != nil {
defer ac.transport.GracefulClose() defer ac.transport.GracefulClose()
ac.transport = nil ac.transport = nil
@ -1153,23 +1197,13 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel
} }
var newBalancerName string var newBalancerName string
if cc.sc != nil && cc.sc.lbConfig != nil { if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) {
newBalancerName = cc.sc.lbConfig.name // No service config or no LB policy specified in config.
} else {
var isGRPCLB bool
for _, a := range addrs {
if a.Type == resolver.GRPCLB {
isGRPCLB = true
break
}
}
if isGRPCLB {
newBalancerName = grpclbName
} else if cc.sc != nil && cc.sc.LB != nil {
newBalancerName = *cc.sc.LB
} else {
newBalancerName = PickFirstBalancerName newBalancerName = PickFirstBalancerName
} } else if cc.sc.lbConfig != nil {
newBalancerName = cc.sc.lbConfig.name
} else { // cc.sc.LB != nil
newBalancerName = *cc.sc.LB
} }
cc.balancerWrapper.switchTo(newBalancerName) cc.balancerWrapper.switchTo(newBalancerName)
} }
@ -1208,7 +1242,10 @@ func (cc *ClientConn) ResetConnectBackoff() {
// Close tears down the ClientConn and all underlying connections. // Close tears down the ClientConn and all underlying connections.
func (cc *ClientConn) Close() error { func (cc *ClientConn) Close() error {
defer cc.cancel() defer func() {
cc.cancel()
<-cc.csMgr.pubSub.Done()
}()
cc.mu.Lock() cc.mu.Lock()
if cc.conns == nil { if cc.conns == nil {
@ -1242,7 +1279,7 @@ func (cc *ClientConn) Close() error {
rWrapper.close() rWrapper.close()
} }
if idlenessMgr != nil { if idlenessMgr != nil {
idlenessMgr.close() idlenessMgr.Close()
} }
for ac := range conns { for ac := range conns {
@ -1352,12 +1389,14 @@ func (ac *addrConn) resetTransport() {
if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
ac.cc.resolveNow(resolver.ResolveNowOptions{}) ac.cc.resolveNow(resolver.ResolveNowOptions{})
// After exhausting all addresses, the addrConn enters ac.mu.Lock()
// TRANSIENT_FAILURE.
if acCtx.Err() != nil { if acCtx.Err() != nil {
// addrConn was torn down.
ac.mu.Unlock()
return return
} }
ac.mu.Lock() // After exhausting all addresses, the addrConn enters
// TRANSIENT_FAILURE.
ac.updateConnectivityState(connectivity.TransientFailure, err) ac.updateConnectivityState(connectivity.TransientFailure, err)
// Backoff. // Backoff.
@ -1553,7 +1592,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
// Set up the health check helper functions. // Set up the health check helper functions.
currentTr := ac.transport currentTr := ac.transport
newStream := func(method string) (interface{}, error) { newStream := func(method string) (any, error) {
ac.mu.Lock() ac.mu.Lock()
if ac.transport != currentTr { if ac.transport != currentTr {
ac.mu.Unlock() ac.mu.Unlock()
@ -1641,16 +1680,7 @@ func (ac *addrConn) tearDown(err error) {
ac.updateConnectivityState(connectivity.Shutdown, nil) ac.updateConnectivityState(connectivity.Shutdown, nil)
ac.cancel() ac.cancel()
ac.curAddr = resolver.Address{} ac.curAddr = resolver.Address{}
if err == errConnDrain && curTr != nil {
// GracefulClose(...) may be executed multiple times when
// i) receiving multiple GoAway frames from the server; or
// ii) there are concurrent name resolver/Balancer triggered
// address removal and GoAway.
// We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu.
ac.mu.Unlock()
curTr.GracefulClose()
ac.mu.Lock()
}
channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
Desc: "Subchannel deleted", Desc: "Subchannel deleted",
Severity: channelz.CtInfo, Severity: channelz.CtInfo,
@ -1664,6 +1694,29 @@ func (ac *addrConn) tearDown(err error) {
// being deleted right away. // being deleted right away.
channelz.RemoveEntry(ac.channelzID) channelz.RemoveEntry(ac.channelzID)
ac.mu.Unlock() ac.mu.Unlock()
// We have to release the lock before the call to GracefulClose/Close here
// because both of them call onClose(), which requires locking ac.mu.
if curTr != nil {
if err == errConnDrain {
// Close the transport gracefully when the subConn is being shutdown.
//
// GracefulClose() may be executed multiple times if:
// - multiple GoAway frames are received from the server
// - there are concurrent name resolver or balancer triggered
// address removal and GoAway
curTr.GracefulClose()
} else {
// Hard close the transport when the channel is entering idle or is
// being shutdown. In the case where the channel is being shutdown,
// closing of transports is also taken care of by cancelation of cc.ctx.
// But in the case where the channel is entering idle, we need to
// explicitly close the transports here. Instead of distinguishing
// between these two cases, it is simpler to close the transport
// unconditionally here.
curTr.Close(err)
}
}
} }
func (ac *addrConn) getState() connectivity.State { func (ac *addrConn) getState() connectivity.State {

View File

@ -27,8 +27,8 @@ import (
// omits the name/string, which vary between the two and are not needed for // omits the name/string, which vary between the two and are not needed for
// anything besides the registry in the encoding package. // anything besides the registry in the encoding package.
type baseCodec interface { type baseCodec interface {
Marshal(v interface{}) ([]byte, error) Marshal(v any) ([]byte, error)
Unmarshal(data []byte, v interface{}) error Unmarshal(data []byte, v any) error
} }
var _ baseCodec = Codec(nil) var _ baseCodec = Codec(nil)
@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil)
// Deprecated: use encoding.Codec instead. // Deprecated: use encoding.Codec instead.
type Codec interface { type Codec interface {
// Marshal returns the wire format of v. // Marshal returns the wire format of v.
Marshal(v interface{}) ([]byte, error) Marshal(v any) ([]byte, error)
// Unmarshal parses the wire format into v. // Unmarshal parses the wire format into v.
Unmarshal(data []byte, v interface{}) error Unmarshal(data []byte, v any) error
// String returns the name of the Codec implementation. This is unused by // String returns the name of the Codec implementation. This is unused by
// gRPC. // gRPC.
String() string String() string

View File

@ -139,6 +139,20 @@ func newJoinDialOption(opts ...DialOption) DialOption {
return &joinDialOption{opts: opts} return &joinDialOption{opts: opts}
} }
// WithSharedWriteBuffer allows reusing per-connection transport write buffer.
// If this option is set to true every connection will release the buffer after
// flushing the data on the wire.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithSharedWriteBuffer(val bool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.SharedWriteBuffer = val
})
}
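A small, hedged usage sketch of the new dial option above; the target and credentials are placeholders:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial(
		"localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithSharedWriteBuffer(true), // release each connection's write buffer after flushing
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```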
// WithWriteBufferSize determines how much data can be batched before doing a // WithWriteBufferSize determines how much data can be batched before doing a
// write on the wire. The corresponding memory allocation for this buffer will // write on the wire. The corresponding memory allocation for this buffer will
// be twice the size to keep syscalls low. The default value for this buffer is // be twice the size to keep syscalls low. The default value for this buffer is

View File

@ -90,9 +90,9 @@ func GetCompressor(name string) Compressor {
// methods can be called from concurrent goroutines. // methods can be called from concurrent goroutines.
type Codec interface { type Codec interface {
// Marshal returns the wire format of v. // Marshal returns the wire format of v.
Marshal(v interface{}) ([]byte, error) Marshal(v any) ([]byte, error)
// Unmarshal parses the wire format into v. // Unmarshal parses the wire format into v.
Unmarshal(data []byte, v interface{}) error Unmarshal(data []byte, v any) error
// Name returns the name of the Codec implementation. The returned string // Name returns the name of the Codec implementation. The returned string
// will be used as part of content type in transmission. The result must be // will be used as part of content type in transmission. The result must be
// static; the result cannot change between calls. // static; the result cannot change between calls.
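For reference, a hypothetical codec built against the `any`-based interface above and registered with the encoding package; the JSON codec here is an illustrative assumption, not something added by this change:

```go
package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// codec implements encoding.Codec using encoding/json.
type codec struct{}

func (codec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
func (codec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
func (codec) Name() string                       { return "json" }

func init() {
	// Makes the codec selectable via the RPC's content-subtype.
	encoding.RegisterCodec(codec{})
}
```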

View File

@ -37,7 +37,7 @@ func init() {
// codec is a Codec implementation with protobuf. It is the default codec for gRPC. // codec is a Codec implementation with protobuf. It is the default codec for gRPC.
type codec struct{} type codec struct{}
func (codec) Marshal(v interface{}) ([]byte, error) { func (codec) Marshal(v any) ([]byte, error) {
vv, ok := v.(proto.Message) vv, ok := v.(proto.Message)
if !ok { if !ok {
return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
@ -45,7 +45,7 @@ func (codec) Marshal(v interface{}) ([]byte, error) {
return proto.Marshal(vv) return proto.Marshal(vv)
} }
func (codec) Unmarshal(data []byte, v interface{}) error { func (codec) Unmarshal(data []byte, v any) error {
vv, ok := v.(proto.Message) vv, ok := v.(proto.Message)
if !ok { if !ok {
return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)

View File

@ -31,71 +31,71 @@ type componentData struct {
var cache = map[string]*componentData{} var cache = map[string]*componentData{}
func (c *componentData) InfoDepth(depth int, args ...interface{}) { func (c *componentData) InfoDepth(depth int, args ...any) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...) args = append([]any{"[" + string(c.name) + "]"}, args...)
grpclog.InfoDepth(depth+1, args...) grpclog.InfoDepth(depth+1, args...)
} }
func (c *componentData) WarningDepth(depth int, args ...interface{}) { func (c *componentData) WarningDepth(depth int, args ...any) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...) args = append([]any{"[" + string(c.name) + "]"}, args...)
grpclog.WarningDepth(depth+1, args...) grpclog.WarningDepth(depth+1, args...)
} }
func (c *componentData) ErrorDepth(depth int, args ...interface{}) { func (c *componentData) ErrorDepth(depth int, args ...any) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...) args = append([]any{"[" + string(c.name) + "]"}, args...)
grpclog.ErrorDepth(depth+1, args...) grpclog.ErrorDepth(depth+1, args...)
} }
func (c *componentData) FatalDepth(depth int, args ...interface{}) { func (c *componentData) FatalDepth(depth int, args ...any) {
args = append([]interface{}{"[" + string(c.name) + "]"}, args...) args = append([]any{"[" + string(c.name) + "]"}, args...)
grpclog.FatalDepth(depth+1, args...) grpclog.FatalDepth(depth+1, args...)
} }
func (c *componentData) Info(args ...interface{}) { func (c *componentData) Info(args ...any) {
c.InfoDepth(1, args...) c.InfoDepth(1, args...)
} }
func (c *componentData) Warning(args ...interface{}) { func (c *componentData) Warning(args ...any) {
c.WarningDepth(1, args...) c.WarningDepth(1, args...)
} }
func (c *componentData) Error(args ...interface{}) { func (c *componentData) Error(args ...any) {
c.ErrorDepth(1, args...) c.ErrorDepth(1, args...)
} }
func (c *componentData) Fatal(args ...interface{}) { func (c *componentData) Fatal(args ...any) {
c.FatalDepth(1, args...) c.FatalDepth(1, args...)
} }
func (c *componentData) Infof(format string, args ...interface{}) { func (c *componentData) Infof(format string, args ...any) {
c.InfoDepth(1, fmt.Sprintf(format, args...)) c.InfoDepth(1, fmt.Sprintf(format, args...))
} }
func (c *componentData) Warningf(format string, args ...interface{}) { func (c *componentData) Warningf(format string, args ...any) {
c.WarningDepth(1, fmt.Sprintf(format, args...)) c.WarningDepth(1, fmt.Sprintf(format, args...))
} }
func (c *componentData) Errorf(format string, args ...interface{}) { func (c *componentData) Errorf(format string, args ...any) {
c.ErrorDepth(1, fmt.Sprintf(format, args...)) c.ErrorDepth(1, fmt.Sprintf(format, args...))
} }
func (c *componentData) Fatalf(format string, args ...interface{}) { func (c *componentData) Fatalf(format string, args ...any) {
c.FatalDepth(1, fmt.Sprintf(format, args...)) c.FatalDepth(1, fmt.Sprintf(format, args...))
} }
func (c *componentData) Infoln(args ...interface{}) { func (c *componentData) Infoln(args ...any) {
c.InfoDepth(1, args...) c.InfoDepth(1, args...)
} }
func (c *componentData) Warningln(args ...interface{}) { func (c *componentData) Warningln(args ...any) {
c.WarningDepth(1, args...) c.WarningDepth(1, args...)
} }
func (c *componentData) Errorln(args ...interface{}) { func (c *componentData) Errorln(args ...any) {
c.ErrorDepth(1, args...) c.ErrorDepth(1, args...)
} }
func (c *componentData) Fatalln(args ...interface{}) { func (c *componentData) Fatalln(args ...any) {
c.FatalDepth(1, args...) c.FatalDepth(1, args...)
} }

View File

@ -42,53 +42,53 @@ func V(l int) bool {
} }
// Info logs to the INFO log. // Info logs to the INFO log.
func Info(args ...interface{}) { func Info(args ...any) {
grpclog.Logger.Info(args...) grpclog.Logger.Info(args...)
} }
// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
func Infof(format string, args ...interface{}) { func Infof(format string, args ...any) {
grpclog.Logger.Infof(format, args...) grpclog.Logger.Infof(format, args...)
} }
// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
func Infoln(args ...interface{}) { func Infoln(args ...any) {
grpclog.Logger.Infoln(args...) grpclog.Logger.Infoln(args...)
} }
// Warning logs to the WARNING log. // Warning logs to the WARNING log.
func Warning(args ...interface{}) { func Warning(args ...any) {
grpclog.Logger.Warning(args...) grpclog.Logger.Warning(args...)
} }
// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
func Warningf(format string, args ...interface{}) { func Warningf(format string, args ...any) {
grpclog.Logger.Warningf(format, args...) grpclog.Logger.Warningf(format, args...)
} }
// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
func Warningln(args ...interface{}) { func Warningln(args ...any) {
grpclog.Logger.Warningln(args...) grpclog.Logger.Warningln(args...)
} }
// Error logs to the ERROR log. // Error logs to the ERROR log.
func Error(args ...interface{}) { func Error(args ...any) {
grpclog.Logger.Error(args...) grpclog.Logger.Error(args...)
} }
// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
func Errorf(format string, args ...interface{}) { func Errorf(format string, args ...any) {
grpclog.Logger.Errorf(format, args...) grpclog.Logger.Errorf(format, args...)
} }
// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
func Errorln(args ...interface{}) { func Errorln(args ...any) {
grpclog.Logger.Errorln(args...) grpclog.Logger.Errorln(args...)
} }
// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
// It calls os.Exit() with exit code 1. // It calls os.Exit() with exit code 1.
func Fatal(args ...interface{}) { func Fatal(args ...any) {
grpclog.Logger.Fatal(args...) grpclog.Logger.Fatal(args...)
// Make sure fatal logs will exit. // Make sure fatal logs will exit.
os.Exit(1) os.Exit(1)
@ -96,7 +96,7 @@ func Fatal(args ...interface{}) {
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
// It calls os.Exit() with exit code 1. // It calls os.Exit() with exit code 1.
func Fatalf(format string, args ...interface{}) { func Fatalf(format string, args ...any) {
grpclog.Logger.Fatalf(format, args...) grpclog.Logger.Fatalf(format, args...)
// Make sure fatal logs will exit. // Make sure fatal logs will exit.
os.Exit(1) os.Exit(1)
@ -104,7 +104,7 @@ func Fatalf(format string, args ...interface{}) {
// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
// It calls os.Exit() with exit code 1. // It calls os.Exit() with exit code 1.
func Fatalln(args ...interface{}) { func Fatalln(args ...any) {
grpclog.Logger.Fatalln(args...) grpclog.Logger.Fatalln(args...)
// Make sure fatal logs will exit. // Make sure fatal logs will exit.
os.Exit(1) os.Exit(1)
@ -113,20 +113,20 @@ func Fatalln(args ...interface{}) {
// Print prints to the logger. Arguments are handled in the manner of fmt.Print. // Print prints to the logger. Arguments are handled in the manner of fmt.Print.
// //
// Deprecated: use Info. // Deprecated: use Info.
func Print(args ...interface{}) { func Print(args ...any) {
grpclog.Logger.Info(args...) grpclog.Logger.Info(args...)
} }
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
// //
// Deprecated: use Infof. // Deprecated: use Infof.
func Printf(format string, args ...interface{}) { func Printf(format string, args ...any) {
grpclog.Logger.Infof(format, args...) grpclog.Logger.Infof(format, args...)
} }
// Println prints to the logger. Arguments are handled in the manner of fmt.Println. // Println prints to the logger. Arguments are handled in the manner of fmt.Println.
// //
// Deprecated: use Infoln. // Deprecated: use Infoln.
func Println(args ...interface{}) { func Println(args ...any) {
grpclog.Logger.Infoln(args...) grpclog.Logger.Infoln(args...)
} }


@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog"
// //
// Deprecated: use LoggerV2. // Deprecated: use LoggerV2.
type Logger interface { type Logger interface {
Fatal(args ...interface{}) Fatal(args ...any)
Fatalf(format string, args ...interface{}) Fatalf(format string, args ...any)
Fatalln(args ...interface{}) Fatalln(args ...any)
Print(args ...interface{}) Print(args ...any)
Printf(format string, args ...interface{}) Printf(format string, args ...any)
Println(args ...interface{}) Println(args ...any)
} }
// SetLogger sets the logger that is used in grpc. Call only from // SetLogger sets the logger that is used in grpc. Call only from
@ -45,39 +45,39 @@ type loggerWrapper struct {
Logger Logger
} }
func (g *loggerWrapper) Info(args ...interface{}) { func (g *loggerWrapper) Info(args ...any) {
g.Logger.Print(args...) g.Logger.Print(args...)
} }
func (g *loggerWrapper) Infoln(args ...interface{}) { func (g *loggerWrapper) Infoln(args ...any) {
g.Logger.Println(args...) g.Logger.Println(args...)
} }
func (g *loggerWrapper) Infof(format string, args ...interface{}) { func (g *loggerWrapper) Infof(format string, args ...any) {
g.Logger.Printf(format, args...) g.Logger.Printf(format, args...)
} }
func (g *loggerWrapper) Warning(args ...interface{}) { func (g *loggerWrapper) Warning(args ...any) {
g.Logger.Print(args...) g.Logger.Print(args...)
} }
func (g *loggerWrapper) Warningln(args ...interface{}) { func (g *loggerWrapper) Warningln(args ...any) {
g.Logger.Println(args...) g.Logger.Println(args...)
} }
func (g *loggerWrapper) Warningf(format string, args ...interface{}) { func (g *loggerWrapper) Warningf(format string, args ...any) {
g.Logger.Printf(format, args...) g.Logger.Printf(format, args...)
} }
func (g *loggerWrapper) Error(args ...interface{}) { func (g *loggerWrapper) Error(args ...any) {
g.Logger.Print(args...) g.Logger.Print(args...)
} }
func (g *loggerWrapper) Errorln(args ...interface{}) { func (g *loggerWrapper) Errorln(args ...any) {
g.Logger.Println(args...) g.Logger.Println(args...)
} }
func (g *loggerWrapper) Errorf(format string, args ...interface{}) { func (g *loggerWrapper) Errorf(format string, args ...any) {
g.Logger.Printf(format, args...) g.Logger.Printf(format, args...)
} }
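The deprecated Logger interface above is deliberately the same six-method set that the standard library's *log.Logger already provides, and loggerWrapper adapts it to the newer severity-based API. As an illustrative sketch (not part of the diff), a stdlib logger can still be installed through this deprecated entry point:

```go
package main

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// *log.Logger provides exactly the six methods of the deprecated
	// grpclog.Logger interface, so it can be installed directly;
	// loggerWrapper then maps Info/Warning/Error onto Print/Printf/Println.
	// SetLogger should only be called from init(), per its documentation.
	grpclog.SetLogger(log.New(os.Stderr, "grpc: ", log.LstdFlags))
}

func main() {
	grpclog.Infoln("hello from the wrapped logger") // ends up in log.Logger.Println
}
```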


@ -33,35 +33,35 @@ import (
// LoggerV2 does underlying logging work for grpclog. // LoggerV2 does underlying logging work for grpclog.
type LoggerV2 interface { type LoggerV2 interface {
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print. // Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
Info(args ...interface{}) Info(args ...any)
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
Infoln(args ...interface{}) Infoln(args ...any)
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
Infof(format string, args ...interface{}) Infof(format string, args ...any)
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
Warning(args ...interface{}) Warning(args ...any)
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
Warningln(args ...interface{}) Warningln(args ...any)
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
Warningf(format string, args ...interface{}) Warningf(format string, args ...any)
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
Error(args ...interface{}) Error(args ...any)
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
Errorln(args ...interface{}) Errorln(args ...any)
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
Errorf(format string, args ...interface{}) Errorf(format string, args ...any)
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
// gRPC ensures that all Fatal logs will exit with os.Exit(1). // gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code. // Implementations may also call os.Exit() with a non-zero exit code.
Fatal(args ...interface{}) Fatal(args ...any)
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
// gRPC ensures that all Fatal logs will exit with os.Exit(1). // gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code. // Implementations may also call os.Exit() with a non-zero exit code.
Fatalln(args ...interface{}) Fatalln(args ...any)
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
// gRPC ensures that all Fatal logs will exit with os.Exit(1). // gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code. // Implementations may also call os.Exit() with a non-zero exit code.
Fatalf(format string, args ...interface{}) Fatalf(format string, args ...any)
// V reports whether verbosity level l is at least the requested verbose level. // V reports whether verbosity level l is at least the requested verbose level.
V(l int) bool V(l int) bool
} }
@ -182,53 +182,53 @@ func (g *loggerT) output(severity int, s string) {
g.m[severity].Output(2, string(b)) g.m[severity].Output(2, string(b))
} }
func (g *loggerT) Info(args ...interface{}) { func (g *loggerT) Info(args ...any) {
g.output(infoLog, fmt.Sprint(args...)) g.output(infoLog, fmt.Sprint(args...))
} }
func (g *loggerT) Infoln(args ...interface{}) { func (g *loggerT) Infoln(args ...any) {
g.output(infoLog, fmt.Sprintln(args...)) g.output(infoLog, fmt.Sprintln(args...))
} }
func (g *loggerT) Infof(format string, args ...interface{}) { func (g *loggerT) Infof(format string, args ...any) {
g.output(infoLog, fmt.Sprintf(format, args...)) g.output(infoLog, fmt.Sprintf(format, args...))
} }
func (g *loggerT) Warning(args ...interface{}) { func (g *loggerT) Warning(args ...any) {
g.output(warningLog, fmt.Sprint(args...)) g.output(warningLog, fmt.Sprint(args...))
} }
func (g *loggerT) Warningln(args ...interface{}) { func (g *loggerT) Warningln(args ...any) {
g.output(warningLog, fmt.Sprintln(args...)) g.output(warningLog, fmt.Sprintln(args...))
} }
func (g *loggerT) Warningf(format string, args ...interface{}) { func (g *loggerT) Warningf(format string, args ...any) {
g.output(warningLog, fmt.Sprintf(format, args...)) g.output(warningLog, fmt.Sprintf(format, args...))
} }
func (g *loggerT) Error(args ...interface{}) { func (g *loggerT) Error(args ...any) {
g.output(errorLog, fmt.Sprint(args...)) g.output(errorLog, fmt.Sprint(args...))
} }
func (g *loggerT) Errorln(args ...interface{}) { func (g *loggerT) Errorln(args ...any) {
g.output(errorLog, fmt.Sprintln(args...)) g.output(errorLog, fmt.Sprintln(args...))
} }
func (g *loggerT) Errorf(format string, args ...interface{}) { func (g *loggerT) Errorf(format string, args ...any) {
g.output(errorLog, fmt.Sprintf(format, args...)) g.output(errorLog, fmt.Sprintf(format, args...))
} }
func (g *loggerT) Fatal(args ...interface{}) { func (g *loggerT) Fatal(args ...any) {
g.output(fatalLog, fmt.Sprint(args...)) g.output(fatalLog, fmt.Sprint(args...))
os.Exit(1) os.Exit(1)
} }
func (g *loggerT) Fatalln(args ...interface{}) { func (g *loggerT) Fatalln(args ...any) {
g.output(fatalLog, fmt.Sprintln(args...)) g.output(fatalLog, fmt.Sprintln(args...))
os.Exit(1) os.Exit(1)
} }
func (g *loggerT) Fatalf(format string, args ...interface{}) { func (g *loggerT) Fatalf(format string, args ...any) {
g.output(fatalLog, fmt.Sprintf(format, args...)) g.output(fatalLog, fmt.Sprintf(format, args...))
os.Exit(1) os.Exit(1)
} }
@ -248,11 +248,11 @@ func (g *loggerT) V(l int) bool {
type DepthLoggerV2 interface { type DepthLoggerV2 interface {
LoggerV2 LoggerV2
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
InfoDepth(depth int, args ...interface{}) InfoDepth(depth int, args ...any)
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
WarningDepth(depth int, args ...interface{}) WarningDepth(depth int, args ...any)
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
ErrorDepth(depth int, args ...interface{}) ErrorDepth(depth int, args ...any)
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
FatalDepth(depth int, args ...interface{}) FatalDepth(depth int, args ...any)
} }
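For the non-deprecated path, a LoggerV2 is usually built from three io.Writers and installed globally. A minimal sketch, assuming the exported grpclog.NewLoggerV2 and grpclog.SetLoggerV2 helpers (they exist in the package but are not shown in this diff):

```go
package main

import (
	"io"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Discard INFO, send WARNING and ERROR output to stderr.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, os.Stderr, os.Stderr))
}

func main() {
	grpclog.Warning("this goes to stderr")
	grpclog.Info("this is written to the INFO writer only, i.e. discarded")
}
```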


@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT. // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: // versions:
// protoc-gen-go v1.30.0 // protoc-gen-go v1.31.0
// protoc v4.22.0 // protoc v4.22.0
// source: grpc/health/v1/health.proto // source: grpc/health/v1/health.proto


@ -23,7 +23,7 @@ import (
) )
// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error
// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. // UnaryClientInterceptor intercepts the execution of a unary RPC on the client.
// Unary interceptors can be specified as a DialOption, using // Unary interceptors can be specified as a DialOption, using
@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{
// defaults from the ClientConn as well as per-call options. // defaults from the ClientConn as well as per-call options.
// //
// The returned error must be compatible with the status package. // The returned error must be compatible with the status package.
type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
// Streamer is called by StreamClientInterceptor to create a ClientStream. // Streamer is called by StreamClientInterceptor to create a ClientStream.
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
@ -66,7 +66,7 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli
// server side. All per-rpc information may be mutated by the interceptor. // server side. All per-rpc information may be mutated by the interceptor.
type UnaryServerInfo struct { type UnaryServerInfo struct {
// Server is the service implementation the user provides. This is read-only. // Server is the service implementation the user provides. This is read-only.
Server interface{} Server any
// FullMethod is the full RPC method string, i.e., /package.service/method. // FullMethod is the full RPC method string, i.e., /package.service/method.
FullMethod string FullMethod string
} }
@ -78,13 +78,13 @@ type UnaryServerInfo struct {
// status package, or be one of the context errors. Otherwise, gRPC will use // status package, or be one of the context errors. Otherwise, gRPC will use
// codes.Unknown as the status code and err.Error() as the status message of the // codes.Unknown as the status code and err.Error() as the status message of the
// RPC. // RPC.
type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) type UnaryHandler func(ctx context.Context, req any) (any, error)
// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper
// of the service method implementation. It is the responsibility of the interceptor to invoke handler // of the service method implementation. It is the responsibility of the interceptor to invoke handler
// to complete the RPC. // to complete the RPC.
type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error)
// StreamServerInfo consists of various information about a streaming RPC on // StreamServerInfo consists of various information about a streaming RPC on
// server side. All per-rpc information may be mutated by the interceptor. // server side. All per-rpc information may be mutated by the interceptor.
@ -101,4 +101,4 @@ type StreamServerInfo struct {
// info contains all the information of this RPC the interceptor can operate on. And handler is the // info contains all the information of this RPC the interceptor can operate on. And handler is the
// service method implementation. It is the responsibility of the interceptor to invoke handler to // service method implementation. It is the responsibility of the interceptor to invoke handler to
// complete the RPC. // complete the RPC.
type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
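The interceptor signatures above only swap `interface{}` for its alias `any`, so existing interceptors keep compiling unchanged. As a small illustrative sketch (the function name is made up for the example), a logging unary server interceptor against the new signature looks like this:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// loggingUnaryInterceptor is a hypothetical grpc.UnaryServerInterceptor using
// the `any`-based signature; it logs the method name and latency of each RPC.
func loggingUnaryInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	start := time.Now()
	resp, err := handler(ctx, req) // invoke the actual service method
	log.Printf("method=%s duration=%s err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

func main() {
	// Install the interceptor on a server; service registration and Serve()
	// are elided in this sketch.
	_ = grpc.NewServer(grpc.UnaryInterceptor(loggingUnaryInterceptor))
}
```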


@ -200,8 +200,8 @@ func (gsb *Balancer) ExitIdle() {
} }
} }
// UpdateSubConnState forwards the update to the appropriate child. // updateSubConnState forwards the update to the appropriate child.
func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) {
gsb.currentMu.Lock() gsb.currentMu.Lock()
defer gsb.currentMu.Unlock() defer gsb.currentMu.Unlock()
gsb.mu.Lock() gsb.mu.Lock()
@ -214,14 +214,27 @@ func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubC
} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
balToUpdate = gsb.balancerPending balToUpdate = gsb.balancerPending
} }
gsb.mu.Unlock()
if balToUpdate == nil { if balToUpdate == nil {
// SubConn belonged to a stale lb policy that has not yet fully closed, // SubConn belonged to a stale lb policy that has not yet fully closed,
// or the balancer was already closed. // or the balancer was already closed.
gsb.mu.Unlock()
return return
} }
if state.ConnectivityState == connectivity.Shutdown {
delete(balToUpdate.subconns, sc)
}
gsb.mu.Unlock()
if cb != nil {
cb(state)
} else {
balToUpdate.UpdateSubConnState(sc, state) balToUpdate.UpdateSubConnState(sc, state)
} }
}
// UpdateSubConnState forwards the update to the appropriate child.
func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
gsb.updateSubConnState(sc, state, nil)
}
// Close closes any active child balancers. // Close closes any active child balancers.
func (gsb *Balancer) Close() { func (gsb *Balancer) Close() {
@ -242,7 +255,7 @@ func (gsb *Balancer) Close() {
// //
// It implements the balancer.ClientConn interface and is passed down in that // It implements the balancer.ClientConn interface and is passed down in that
// capacity to the wrapped balancer. It maintains a set of subConns created by // capacity to the wrapped balancer. It maintains a set of subConns created by
// the wrapped balancer and calls from the latter to create/update/remove // the wrapped balancer and calls from the latter to create/update/shutdown
// SubConns update this set before being forwarded to the parent ClientConn. // SubConns update this set before being forwarded to the parent ClientConn.
// State updates from the wrapped balancer can result in invocation of the // State updates from the wrapped balancer can result in invocation of the
// graceful switch logic. // graceful switch logic.
@ -254,21 +267,10 @@ type balancerWrapper struct {
subconns map[balancer.SubConn]bool // subconns created by this balancer subconns map[balancer.SubConn]bool // subconns created by this balancer
} }
func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { // Close closes the underlying LB policy and shuts down the subconns it
if state.ConnectivityState == connectivity.Shutdown { // created. bw must not be referenced via balancerCurrent or balancerPending in
bw.gsb.mu.Lock() // gsb when called. gsb.mu must not be held. Does not panic with a nil
delete(bw.subconns, sc) // receiver.
bw.gsb.mu.Unlock()
}
// There is no need to protect this read with a mutex, as the write to the
// Balancer field happens in SwitchTo, which completes before this can be
// called.
bw.Balancer.UpdateSubConnState(sc, state)
}
// Close closes the underlying LB policy and removes the subconns it created. bw
// must not be referenced via balancerCurrent or balancerPending in gsb when
// called. gsb.mu must not be held. Does not panic with a nil receiver.
func (bw *balancerWrapper) Close() { func (bw *balancerWrapper) Close() {
// before Close is called. // before Close is called.
if bw == nil { if bw == nil {
@ -281,7 +283,7 @@ func (bw *balancerWrapper) Close() {
bw.Balancer.Close() bw.Balancer.Close()
bw.gsb.mu.Lock() bw.gsb.mu.Lock()
for sc := range bw.subconns { for sc := range bw.subconns {
bw.gsb.cc.RemoveSubConn(sc) sc.Shutdown()
} }
bw.gsb.mu.Unlock() bw.gsb.mu.Unlock()
} }
@ -335,13 +337,16 @@ func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.Ne
} }
bw.gsb.mu.Unlock() bw.gsb.mu.Unlock()
var sc balancer.SubConn
oldListener := opts.StateListener
opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) }
sc, err := bw.gsb.cc.NewSubConn(addrs, opts) sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
if err != nil { if err != nil {
return nil, err return nil, err
} }
bw.gsb.mu.Lock() bw.gsb.mu.Lock()
if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
bw.gsb.cc.RemoveSubConn(sc) sc.Shutdown()
bw.gsb.mu.Unlock() bw.gsb.mu.Unlock()
return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
} }
@ -360,13 +365,9 @@ func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
} }
func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
bw.gsb.mu.Lock() // Note: existing third party balancers may call this, so it must remain
if !bw.gsb.balancerCurrentOrPending(bw) { // until RemoveSubConn is fully removed.
bw.gsb.mu.Unlock() sc.Shutdown()
return
}
bw.gsb.mu.Unlock()
bw.gsb.cc.RemoveSubConn(sc)
} }
func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
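The graceful-switch wrapper above now observes SubConn state by installing a StateListener in NewSubConnOptions and shuts connections down via sc.Shutdown() instead of ClientConn.RemoveSubConn. A minimal sketch of the same pattern from a child balancer's point of view (the helper name is hypothetical; the StateListener field and SubConn methods are the ones the diff itself uses):

```go
package example

import (
	"log"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// newTrackedSubConn is a hypothetical helper showing the StateListener
// pattern: connectivity updates for this SubConn arrive via the per-SubConn
// callback rather than via the balancer's UpdateSubConnState method.
func newTrackedSubConn(cc balancer.ClientConn, addrs []resolver.Address) (balancer.SubConn, error) {
	opts := balancer.NewSubConnOptions{
		StateListener: func(state balancer.SubConnState) {
			log.Printf("subconn state: %v", state.ConnectivityState)
		},
	}
	sc, err := cc.NewSubConn(addrs, opts)
	if err != nil {
		return nil, err
	}
	sc.Connect()
	return sc, nil
}
```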


@ -25,7 +25,7 @@ import (
// Parser converts loads from metadata into a concrete type. // Parser converts loads from metadata into a concrete type.
type Parser interface { type Parser interface {
// Parse parses loads from metadata. // Parse parses loads from metadata.
Parse(md metadata.MD) interface{} Parse(md metadata.MD) any
} }
var parser Parser var parser Parser
@ -38,7 +38,7 @@ func SetParser(lr Parser) {
} }
// Parse calls parser.Read(). // Parse calls parser.Read().
func Parse(md metadata.MD) interface{} { func Parse(md metadata.MD) any {
if parser == nil { if parser == nil {
return nil return nil
} }


@ -230,7 +230,7 @@ type ClientMessage struct {
OnClientSide bool OnClientSide bool
// Message can be a proto.Message or []byte. Other messages formats are not // Message can be a proto.Message or []byte. Other messages formats are not
// supported. // supported.
Message interface{} Message any
} }
func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
@ -270,7 +270,7 @@ type ServerMessage struct {
OnClientSide bool OnClientSide bool
// Message can be a proto.Message or []byte. Other messages formats are not // Message can be a proto.Message or []byte. Other messages formats are not
// supported. // supported.
Message interface{} Message any
} }
func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {


@ -28,25 +28,25 @@ import "sync"
// the underlying mutex used for synchronization. // the underlying mutex used for synchronization.
// //
// Unbounded supports values of any type to be stored in it by using a channel // Unbounded supports values of any type to be stored in it by using a channel
// of `interface{}`. This means that a call to Put() incurs an extra memory // of `any`. This means that a call to Put() incurs an extra memory allocation,
// allocation, and also that users need a type assertion while reading. For // and also that users need a type assertion while reading. For performance
// performance critical code paths, using Unbounded is strongly discouraged and // critical code paths, using Unbounded is strongly discouraged and defining a
// defining a new type specific implementation of this buffer is preferred. See // new type specific implementation of this buffer is preferred. See
// internal/transport/transport.go for an example of this. // internal/transport/transport.go for an example of this.
type Unbounded struct { type Unbounded struct {
c chan interface{} c chan any
closed bool closed bool
mu sync.Mutex mu sync.Mutex
backlog []interface{} backlog []any
} }
// NewUnbounded returns a new instance of Unbounded. // NewUnbounded returns a new instance of Unbounded.
func NewUnbounded() *Unbounded { func NewUnbounded() *Unbounded {
return &Unbounded{c: make(chan interface{}, 1)} return &Unbounded{c: make(chan any, 1)}
} }
// Put adds t to the unbounded buffer. // Put adds t to the unbounded buffer.
func (b *Unbounded) Put(t interface{}) { func (b *Unbounded) Put(t any) {
b.mu.Lock() b.mu.Lock()
defer b.mu.Unlock() defer b.mu.Unlock()
if b.closed { if b.closed {
@ -89,7 +89,7 @@ func (b *Unbounded) Load() {
// //
// If the unbounded buffer is closed, the read channel returned by this method // If the unbounded buffer is closed, the read channel returned by this method
// is closed. // is closed.
func (b *Unbounded) Get() <-chan interface{} { func (b *Unbounded) Get() <-chan any {
return b.c return b.c
} }
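Because Unbounded stores values as `any`, readers type-assert what they receive and must call Load() after each receive to stage the next backlogged value. A small usage sketch, assuming the google.golang.org/grpc/internal/buffer import path implied by the package shown above and the Close method implied by the closed handling (internal package, so this only compiles inside the grpc-go module):

```go
package example

import (
	"fmt"

	"google.golang.org/grpc/internal/buffer" // import path assumed from the package shown above
)

func example() {
	b := buffer.NewUnbounded()
	b.Put("hello")
	b.Put("world")

	for i := 0; i < 2; i++ {
		v := <-b.Get()          // receive one value from the read channel
		b.Load()                // ask the buffer to stage the next backlogged value
		fmt.Println(v.(string)) // values are `any`, so the reader type-asserts
	}
	b.Close()
}
```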


@ -24,9 +24,7 @@
package channelz package channelz
import ( import (
"context"
"errors" "errors"
"fmt"
"sort" "sort"
"sync" "sync"
"sync/atomic" "sync/atomic"
@ -40,8 +38,11 @@ const (
) )
var ( var (
// IDGen is the global channelz entity ID generator. It should not be used
// outside this package except by tests.
IDGen IDGenerator
db dbWrapper db dbWrapper
idGen idGenerator
// EntryPerPage defines the number of channelz entries to be shown on a web page. // EntryPerPage defines the number of channelz entries to be shown on a web page.
EntryPerPage = int64(50) EntryPerPage = int64(50)
curState int32 curState int32
@ -52,14 +53,14 @@ var (
func TurnOn() { func TurnOn() {
if !IsOn() { if !IsOn() {
db.set(newChannelMap()) db.set(newChannelMap())
idGen.reset() IDGen.Reset()
atomic.StoreInt32(&curState, 1) atomic.StoreInt32(&curState, 1)
} }
} }
// IsOn returns whether channelz data collection is on. // IsOn returns whether channelz data collection is on.
func IsOn() bool { func IsOn() bool {
return atomic.CompareAndSwapInt32(&curState, 1, 1) return atomic.LoadInt32(&curState) == 1
} }
// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel).
@ -97,43 +98,6 @@ func (d *dbWrapper) get() *channelMap {
return d.DB return d.DB
} }
// NewChannelzStorageForTesting initializes channelz data storage and id
// generator for testing purposes.
//
// Returns a cleanup function to be invoked by the test, which waits for up to
// 10s for all channelz state to be reset by the grpc goroutines when those
// entities get closed. This cleanup function helps with ensuring that tests
// don't mess up each other.
func NewChannelzStorageForTesting() (cleanup func() error) {
db.set(newChannelMap())
idGen.reset()
return func() error {
cm := db.get()
if cm == nil {
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
for {
cm.mu.RLock()
topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)
cm.mu.RUnlock()
if err := ctx.Err(); err != nil {
return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets)
}
if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 {
return nil
}
<-ticker.C
}
}
}
// GetTopChannels returns a slice of top channel's ChannelMetric, along with a // GetTopChannels returns a slice of top channel's ChannelMetric, along with a
// boolean indicating whether there's more top channels to be queried for. // boolean indicating whether there's more top channels to be queried for.
// //
@ -193,7 +157,7 @@ func GetServer(id int64) *ServerMetric {
// //
// If channelz is not turned ON, the channelz database is not mutated. // If channelz is not turned ON, the channelz database is not mutated.
func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
id := idGen.genID() id := IDGen.genID()
var parent int64 var parent int64
isTopChannel := true isTopChannel := true
if pid != nil { if pid != nil {
@ -229,7 +193,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er
if pid == nil { if pid == nil {
return nil, errors.New("a SubChannel's parent id cannot be nil") return nil, errors.New("a SubChannel's parent id cannot be nil")
} }
id := idGen.genID() id := IDGen.genID()
if !IsOn() { if !IsOn() {
return newIdentifer(RefSubChannel, id, pid), nil return newIdentifer(RefSubChannel, id, pid), nil
} }
@ -251,7 +215,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er
// //
// If channelz is not turned ON, the channelz database is not mutated. // If channelz is not turned ON, the channelz database is not mutated.
func RegisterServer(s Server, ref string) *Identifier { func RegisterServer(s Server, ref string) *Identifier {
id := idGen.genID() id := IDGen.genID()
if !IsOn() { if !IsOn() {
return newIdentifer(RefServer, id, nil) return newIdentifer(RefServer, id, nil)
} }
@ -277,7 +241,7 @@ func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, e
if pid == nil { if pid == nil {
return nil, errors.New("a ListenSocket's parent id cannot be 0") return nil, errors.New("a ListenSocket's parent id cannot be 0")
} }
id := idGen.genID() id := IDGen.genID()
if !IsOn() { if !IsOn() {
return newIdentifer(RefListenSocket, id, pid), nil return newIdentifer(RefListenSocket, id, pid), nil
} }
@ -297,7 +261,7 @@ func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, e
if pid == nil { if pid == nil {
return nil, errors.New("a NormalSocket's parent id cannot be 0") return nil, errors.New("a NormalSocket's parent id cannot be 0")
} }
id := idGen.genID() id := IDGen.genID()
if !IsOn() { if !IsOn() {
return newIdentifer(RefNormalSocket, id, pid), nil return newIdentifer(RefNormalSocket, id, pid), nil
} }
@ -776,14 +740,17 @@ func (c *channelMap) GetServer(id int64) *ServerMetric {
return sm return sm
} }
type idGenerator struct { // IDGenerator is an incrementing atomic that tracks IDs for channelz entities.
type IDGenerator struct {
id int64 id int64
} }
func (i *idGenerator) reset() { // Reset resets the generated ID back to zero. Should only be used at
// initialization or by tests sensitive to the ID number.
func (i *IDGenerator) Reset() {
atomic.StoreInt64(&i.id, 0) atomic.StoreInt64(&i.id, 0)
} }
func (i *idGenerator) genID() int64 { func (i *IDGenerator) genID() int64 {
return atomic.AddInt64(&i.id, 1) return atomic.AddInt64(&i.id, 1)
} }
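IDGen above is simply a shared atomic counter, now exported so tests can reset it; note also that IsOn() was switched from a no-op compare-and-swap to a plain atomic load, which has the same semantics without attempting a write. A self-contained sketch of the same counter pattern (not the channelz code itself):

```go
package example

import "sync/atomic"

// idGenerator mirrors the pattern used by channelz.IDGenerator: a single
// atomically incremented int64, with Reset intended only for initialization
// and for tests that are sensitive to the ID sequence.
type idGenerator struct{ id int64 }

func (g *idGenerator) Reset()      { atomic.StoreInt64(&g.id, 0) }
func (g *idGenerator) next() int64 { return atomic.AddInt64(&g.id, 1) }
```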


@ -31,7 +31,7 @@ func withParens(id *Identifier) string {
} }
// Info logs and adds a trace event if channelz is on. // Info logs and adds a trace event if channelz is on.
func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
AddTraceEvent(l, id, 1, &TraceEventDesc{ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...), Desc: fmt.Sprint(args...),
Severity: CtInfo, Severity: CtInfo,
@ -39,7 +39,7 @@ func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
} }
// Infof logs and adds a trace event if channelz is on. // Infof logs and adds a trace event if channelz is on.
func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
AddTraceEvent(l, id, 1, &TraceEventDesc{ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprintf(format, args...), Desc: fmt.Sprintf(format, args...),
Severity: CtInfo, Severity: CtInfo,
@ -47,7 +47,7 @@ func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...inter
} }
// Warning logs and adds a trace event if channelz is on. // Warning logs and adds a trace event if channelz is on.
func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
AddTraceEvent(l, id, 1, &TraceEventDesc{ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...), Desc: fmt.Sprint(args...),
Severity: CtWarning, Severity: CtWarning,
@ -55,7 +55,7 @@ func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
} }
// Warningf logs and adds a trace event if channelz is on. // Warningf logs and adds a trace event if channelz is on.
func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
AddTraceEvent(l, id, 1, &TraceEventDesc{ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprintf(format, args...), Desc: fmt.Sprintf(format, args...),
Severity: CtWarning, Severity: CtWarning,
@ -63,7 +63,7 @@ func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...in
} }
// Error logs and adds a trace event if channelz is on. // Error logs and adds a trace event if channelz is on.
func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
AddTraceEvent(l, id, 1, &TraceEventDesc{ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprint(args...), Desc: fmt.Sprint(args...),
Severity: CtError, Severity: CtError,
@ -71,7 +71,7 @@ func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
} }
// Errorf logs and adds a trace event if channelz is on. // Errorf logs and adds a trace event if channelz is on.
func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
AddTraceEvent(l, id, 1, &TraceEventDesc{ AddTraceEvent(l, id, 1, &TraceEventDesc{
Desc: fmt.Sprintf(format, args...), Desc: fmt.Sprintf(format, args...),
Severity: CtError, Severity: CtError,


@ -628,6 +628,7 @@ type tracedChannel interface {
type channelTrace struct { type channelTrace struct {
cm *channelMap cm *channelMap
clearCalled bool
createdTime time.Time createdTime time.Time
eventCount int64 eventCount int64
mu sync.Mutex mu sync.Mutex
@ -656,6 +657,10 @@ func (c *channelTrace) append(e *TraceEvent) {
} }
func (c *channelTrace) clear() { func (c *channelTrace) clear() {
if c.clearCalled {
return
}
c.clearCalled = true
c.mu.Lock() c.mu.Lock()
for _, e := range c.events { for _, e := range c.events {
if e.RefID != 0 { if e.RefID != 0 {


@ -23,7 +23,7 @@ import (
) )
// GetSocketOption gets the socket option info of the conn. // GetSocketOption gets the socket option info of the conn.
func GetSocketOption(socket interface{}) *SocketOptionData { func GetSocketOption(socket any) *SocketOptionData {
c, ok := socket.(syscall.Conn) c, ok := socket.(syscall.Conn)
if !ok { if !ok {
return nil return nil


@ -22,6 +22,6 @@
package channelz package channelz
// GetSocketOption gets the socket option info of the conn. // GetSocketOption gets the socket option info of the conn.
func GetSocketOption(c interface{}) *SocketOptionData { func GetSocketOption(c any) *SocketOptionData {
return nil return nil
} }


@ -25,12 +25,12 @@ import (
type requestInfoKey struct{} type requestInfoKey struct{}
// NewRequestInfoContext creates a context with ri. // NewRequestInfoContext creates a context with ri.
func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { func NewRequestInfoContext(ctx context.Context, ri any) context.Context {
return context.WithValue(ctx, requestInfoKey{}, ri) return context.WithValue(ctx, requestInfoKey{}, ri)
} }
// RequestInfoFromContext extracts the RequestInfo from ctx. // RequestInfoFromContext extracts the RequestInfo from ctx.
func RequestInfoFromContext(ctx context.Context) interface{} { func RequestInfoFromContext(ctx context.Context) any {
return ctx.Value(requestInfoKey{}) return ctx.Value(requestInfoKey{})
} }
@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) interface{} {
type clientHandshakeInfoKey struct{} type clientHandshakeInfoKey struct{}
// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { func ClientHandshakeInfoFromContext(ctx context.Context) any {
return ctx.Value(clientHandshakeInfoKey{}) return ctx.Value(clientHandshakeInfoKey{})
} }
// NewClientHandshakeInfoContext creates a context with chi. // NewClientHandshakeInfoContext creates a context with chi.
func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context {
return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
} }


@ -37,9 +37,12 @@ var (
// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
// PickFirstLBConfig is set if we should support configuration of the // PickFirstLBConfig is set if we should support configuration of the
// pick_first LB policy, which can be enabled by setting the environment // pick_first LB policy.
// variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true)
PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) // LeastRequestLB is set if we should support the least_request_experimental
// LB policy, which can be enabled by setting the environment variable
// "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true".
LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false)
// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
// handshakes that can be performed. // handshakes that can be performed.
ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
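The env-var changes above turn pick_first LB-config support on by default and gate the new least_request balancer behind GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST. A hedged sketch of opting in on the client side; the "least_request_experimental" policy name and the blank registration import are assumptions not confirmed by this diff:

```go
package main

import (
	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/leastrequest" // assumed to register the policy when the env var is set
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Requires GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST=true in the process
	// environment; the flag is read at package-init time, so it cannot be
	// set from inside main.
	//
	// "least_request_experimental" is an assumed policy name for the new
	// balancer; adjust if the registered name differs.
	cc, err := grpc.Dial("dns:///backend.example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"least_request_experimental": {}}]}`),
	)
	if err != nil {
		panic(err)
	}
	defer cc.Close()
}
```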


@ -30,7 +30,7 @@ var Logger LoggerV2
var DepthLogger DepthLoggerV2 var DepthLogger DepthLoggerV2
// InfoDepth logs to the INFO log at the specified depth. // InfoDepth logs to the INFO log at the specified depth.
func InfoDepth(depth int, args ...interface{}) { func InfoDepth(depth int, args ...any) {
if DepthLogger != nil { if DepthLogger != nil {
DepthLogger.InfoDepth(depth, args...) DepthLogger.InfoDepth(depth, args...)
} else { } else {
@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...interface{}) {
} }
// WarningDepth logs to the WARNING log at the specified depth. // WarningDepth logs to the WARNING log at the specified depth.
func WarningDepth(depth int, args ...interface{}) { func WarningDepth(depth int, args ...any) {
if DepthLogger != nil { if DepthLogger != nil {
DepthLogger.WarningDepth(depth, args...) DepthLogger.WarningDepth(depth, args...)
} else { } else {
@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...interface{}) {
} }
// ErrorDepth logs to the ERROR log at the specified depth. // ErrorDepth logs to the ERROR log at the specified depth.
func ErrorDepth(depth int, args ...interface{}) { func ErrorDepth(depth int, args ...any) {
if DepthLogger != nil { if DepthLogger != nil {
DepthLogger.ErrorDepth(depth, args...) DepthLogger.ErrorDepth(depth, args...)
} else { } else {
@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...interface{}) {
} }
// FatalDepth logs to the FATAL log at the specified depth. // FatalDepth logs to the FATAL log at the specified depth.
func FatalDepth(depth int, args ...interface{}) { func FatalDepth(depth int, args ...any) {
if DepthLogger != nil { if DepthLogger != nil {
DepthLogger.FatalDepth(depth, args...) DepthLogger.FatalDepth(depth, args...)
} else { } else {
@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...interface{}) {
// is defined here to avoid a circular dependency. // is defined here to avoid a circular dependency.
type LoggerV2 interface { type LoggerV2 interface {
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print. // Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
Info(args ...interface{}) Info(args ...any)
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
Infoln(args ...interface{}) Infoln(args ...any)
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
Infof(format string, args ...interface{}) Infof(format string, args ...any)
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
Warning(args ...interface{}) Warning(args ...any)
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
Warningln(args ...interface{}) Warningln(args ...any)
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
Warningf(format string, args ...interface{}) Warningf(format string, args ...any)
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
Error(args ...interface{}) Error(args ...any)
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
Errorln(args ...interface{}) Errorln(args ...any)
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
Errorf(format string, args ...interface{}) Errorf(format string, args ...any)
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
// gRPC ensures that all Fatal logs will exit with os.Exit(1). // gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code. // Implementations may also call os.Exit() with a non-zero exit code.
Fatal(args ...interface{}) Fatal(args ...any)
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
// gRPC ensures that all Fatal logs will exit with os.Exit(1). // gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code. // Implementations may also call os.Exit() with a non-zero exit code.
Fatalln(args ...interface{}) Fatalln(args ...any)
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
// gRPC ensures that all Fatal logs will exit with os.Exit(1). // gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code. // Implementations may also call os.Exit() with a non-zero exit code.
Fatalf(format string, args ...interface{}) Fatalf(format string, args ...any)
// V reports whether verbosity level l is at least the requested verbose level. // V reports whether verbosity level l is at least the requested verbose level.
V(l int) bool V(l int) bool
} }
@ -116,11 +116,11 @@ type LoggerV2 interface {
// later release. // later release.
type DepthLoggerV2 interface { type DepthLoggerV2 interface {
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
InfoDepth(depth int, args ...interface{}) InfoDepth(depth int, args ...any)
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
WarningDepth(depth int, args ...interface{}) WarningDepth(depth int, args ...any)
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
ErrorDepth(depth int, args ...interface{}) ErrorDepth(depth int, args ...any)
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
FatalDepth(depth int, args ...interface{}) FatalDepth(depth int, args ...any)
} }


@ -31,7 +31,7 @@ type PrefixLogger struct {
} }
// Infof does info logging. // Infof does info logging.
func (pl *PrefixLogger) Infof(format string, args ...interface{}) { func (pl *PrefixLogger) Infof(format string, args ...any) {
if pl != nil { if pl != nil {
// Handle nil, so the tests can pass in a nil logger. // Handle nil, so the tests can pass in a nil logger.
format = pl.prefix + format format = pl.prefix + format
@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
} }
// Warningf does warning logging. // Warningf does warning logging.
func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { func (pl *PrefixLogger) Warningf(format string, args ...any) {
if pl != nil { if pl != nil {
format = pl.prefix + format format = pl.prefix + format
pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
} }
// Errorf does error logging. // Errorf does error logging.
func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { func (pl *PrefixLogger) Errorf(format string, args ...any) {
if pl != nil { if pl != nil {
format = pl.prefix + format format = pl.prefix + format
pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
@ -62,7 +62,7 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
} }
// Debugf does info logging at verbose level 2. // Debugf does info logging at verbose level 2.
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { func (pl *PrefixLogger) Debugf(format string, args ...any) {
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
// rewrite PrefixLogger a little to ensure that we don't use the global // rewrite PrefixLogger a little to ensure that we don't use the global
// `Logger` here, and instead use the `logger` field. // `Logger` here, and instead use the `logger` field.


@ -32,10 +32,10 @@ import (
// //
// This type is safe for concurrent access. // This type is safe for concurrent access.
type CallbackSerializer struct { type CallbackSerializer struct {
// Done is closed once the serializer is shut down completely, i.e all // done is closed once the serializer is shut down completely, i.e all
// scheduled callbacks are executed and the serializer has deallocated all // scheduled callbacks are executed and the serializer has deallocated all
// its resources. // its resources.
Done chan struct{} done chan struct{}
callbacks *buffer.Unbounded callbacks *buffer.Unbounded
closedMu sync.Mutex closedMu sync.Mutex
@ -48,12 +48,12 @@ type CallbackSerializer struct {
// callbacks will be added once this context is canceled, and any pending un-run // callbacks will be added once this context is canceled, and any pending un-run
// callbacks will be executed before the serializer is shut down. // callbacks will be executed before the serializer is shut down.
func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
t := &CallbackSerializer{ cs := &CallbackSerializer{
Done: make(chan struct{}), done: make(chan struct{}),
callbacks: buffer.NewUnbounded(), callbacks: buffer.NewUnbounded(),
} }
go t.run(ctx) go cs.run(ctx)
return t return cs
} }
// Schedule adds a callback to be scheduled after existing callbacks are run. // Schedule adds a callback to be scheduled after existing callbacks are run.
@ -64,56 +64,62 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
// Return value indicates if the callback was successfully added to the list of // Return value indicates if the callback was successfully added to the list of
// callbacks to be executed by the serializer. It is not possible to add // callbacks to be executed by the serializer. It is not possible to add
// callbacks once the context passed to NewCallbackSerializer is cancelled. // callbacks once the context passed to NewCallbackSerializer is cancelled.
func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
t.closedMu.Lock() cs.closedMu.Lock()
defer t.closedMu.Unlock() defer cs.closedMu.Unlock()
if t.closed { if cs.closed {
return false return false
} }
t.callbacks.Put(f) cs.callbacks.Put(f)
return true return true
} }
func (t *CallbackSerializer) run(ctx context.Context) { func (cs *CallbackSerializer) run(ctx context.Context) {
var backlog []func(context.Context) var backlog []func(context.Context)
defer close(t.Done) defer close(cs.done)
for ctx.Err() == nil { for ctx.Err() == nil {
select { select {
case <-ctx.Done(): case <-ctx.Done():
// Do nothing here. Next iteration of the for loop will not happen, // Do nothing here. Next iteration of the for loop will not happen,
// since ctx.Err() would be non-nil. // since ctx.Err() would be non-nil.
case callback, ok := <-t.callbacks.Get(): case callback, ok := <-cs.callbacks.Get():
if !ok { if !ok {
return return
} }
t.callbacks.Load() cs.callbacks.Load()
callback.(func(ctx context.Context))(ctx) callback.(func(ctx context.Context))(ctx)
} }
} }
// Fetch pending callbacks if any, and execute them before returning from // Fetch pending callbacks if any, and execute them before returning from
// this method and closing t.Done. // this method and closing cs.done.
t.closedMu.Lock() cs.closedMu.Lock()
t.closed = true cs.closed = true
backlog = t.fetchPendingCallbacks() backlog = cs.fetchPendingCallbacks()
t.callbacks.Close() cs.callbacks.Close()
t.closedMu.Unlock() cs.closedMu.Unlock()
for _, b := range backlog { for _, b := range backlog {
b(ctx) b(ctx)
} }
} }
func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) {
var backlog []func(context.Context) var backlog []func(context.Context)
for { for {
select { select {
case b := <-t.callbacks.Get(): case b := <-cs.callbacks.Get():
backlog = append(backlog, b.(func(context.Context))) backlog = append(backlog, b.(func(context.Context)))
t.callbacks.Load() cs.callbacks.Load()
default: default:
return backlog return backlog
} }
} }
} }
// Done returns a channel that is closed after the context passed to
// NewCallbackSerializer is canceled and all callbacks have been executed.
func (cs *CallbackSerializer) Done() <-chan struct{} {
return cs.done
}
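CallbackSerializer now exposes completion through a Done() accessor instead of an exported channel field. An illustrative usage sketch of the call pattern (grpcsync is an internal package, so this only compiles inside the grpc-go module):

```go
package example

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func example() {
	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Callbacks run one at a time, in the order they were scheduled.
	cs.Schedule(func(ctx context.Context) { fmt.Println("first") })
	cs.Schedule(func(ctx context.Context) { fmt.Println("second") })

	// Canceling the context shuts the serializer down after any pending
	// callbacks have run; Done() is closed once that has happened.
	cancel()
	<-cs.Done()
}
```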


@ -29,7 +29,7 @@ import (
type Subscriber interface { type Subscriber interface {
// OnMessage is invoked when a new message is published. Implementations // OnMessage is invoked when a new message is published. Implementations
// must not block in this method. // must not block in this method.
OnMessage(msg interface{}) OnMessage(msg any)
} }
// PubSub is a simple one-to-many publish-subscribe system that supports // PubSub is a simple one-to-many publish-subscribe system that supports
@ -40,25 +40,23 @@ type Subscriber interface {
// subscribers interested in receiving these messages register a callback // subscribers interested in receiving these messages register a callback
// via the Subscribe() method. // via the Subscribe() method.
// //
// Once a PubSub is stopped, no more messages can be published, and // Once a PubSub is stopped, no more messages can be published, but any pending
// it is guaranteed that no more subscriber callback will be invoked. // published messages will be delivered to the subscribers. Done may be used
// to determine when all published messages have been delivered.
type PubSub struct { type PubSub struct {
cs *CallbackSerializer cs *CallbackSerializer
cancel context.CancelFunc
// Access to the below fields are guarded by this mutex. // Access to the below fields are guarded by this mutex.
mu sync.Mutex mu sync.Mutex
msg interface{} msg any
subscribers map[Subscriber]bool subscribers map[Subscriber]bool
stopped bool
} }
// NewPubSub returns a new PubSub instance. // NewPubSub returns a new PubSub instance. Users should cancel the
func NewPubSub() *PubSub { // provided context to shutdown the PubSub.
ctx, cancel := context.WithCancel(context.Background()) func NewPubSub(ctx context.Context) *PubSub {
return &PubSub{ return &PubSub{
cs: NewCallbackSerializer(ctx), cs: NewCallbackSerializer(ctx),
cancel: cancel,
subscribers: map[Subscriber]bool{}, subscribers: map[Subscriber]bool{},
} }
} }
@ -75,10 +73,6 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
ps.mu.Lock() ps.mu.Lock()
defer ps.mu.Unlock() defer ps.mu.Unlock()
if ps.stopped {
return func() {}
}
ps.subscribers[sub] = true ps.subscribers[sub] = true
if ps.msg != nil { if ps.msg != nil {
@ -102,14 +96,10 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
// Publish publishes the provided message to the PubSub, and invokes // Publish publishes the provided message to the PubSub, and invokes
// callbacks registered by subscribers asynchronously. // callbacks registered by subscribers asynchronously.
func (ps *PubSub) Publish(msg interface{}) { func (ps *PubSub) Publish(msg any) {
ps.mu.Lock() ps.mu.Lock()
defer ps.mu.Unlock() defer ps.mu.Unlock()
if ps.stopped {
return
}
ps.msg = msg ps.msg = msg
for sub := range ps.subscribers { for sub := range ps.subscribers {
s := sub s := sub
@ -124,13 +114,8 @@ func (ps *PubSub) Publish(msg interface{}) {
} }
} }
// Stop shuts down the PubSub and releases any resources allocated by it. // Done returns a channel that is closed after the context passed to NewPubSub
// It is guaranteed that no subscriber callbacks would be invoked once this // is canceled and all updates have been sent to subscribers.
// method returns. func (ps *PubSub) Done() <-chan struct{} {
func (ps *PubSub) Stop() { return ps.cs.Done()
ps.mu.Lock()
defer ps.mu.Unlock()
ps.stopped = true
ps.cancel()
} }
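NewPubSub now takes its lifecycle context at construction, and the old Stop() method is replaced by context cancellation plus a Done() channel. An illustrative sketch of the new call pattern (internal package, shown only for the shape of the API):

```go
package example

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

// printSubscriber implements grpcsync.Subscriber; OnMessage must not block.
type printSubscriber struct{}

func (printSubscriber) OnMessage(msg any) { fmt.Println("got:", msg) }

func example() {
	ctx, cancel := context.WithCancel(context.Background())
	ps := grpcsync.NewPubSub(ctx)

	unsubscribe := ps.Subscribe(printSubscriber{})
	ps.Publish("hello") // delivered to subscribers asynchronously

	unsubscribe()
	cancel()    // shuts the PubSub down once pending deliveries finish
	<-ps.Done() // closed after all published messages have been delivered
}
```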


@ -16,7 +16,9 @@
* *
*/ */
package grpc // Package idle contains a component for managing idleness (entering and exiting)
// based on RPC activity.
package idle
import ( import (
"fmt" "fmt"
@ -24,6 +26,8 @@ import (
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"google.golang.org/grpc/grpclog"
) )
// For overriding in unit tests. // For overriding in unit tests.
@ -31,31 +35,31 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
return time.AfterFunc(d, f) return time.AfterFunc(d, f)
} }
// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter // Enforcer is the functionality provided by grpc.ClientConn to enter
// and exit from idle mode. // and exit from idle mode.
type idlenessEnforcer interface { type Enforcer interface {
exitIdleMode() error ExitIdleMode() error
enterIdleMode() error EnterIdleMode() error
} }
// idlenessManager defines the functionality required to track RPC activity on a // Manager defines the functionality required to track RPC activity on a
// channel. // channel.
type idlenessManager interface { type Manager interface {
onCallBegin() error OnCallBegin() error
onCallEnd() OnCallEnd()
close() Close()
} }
type noopIdlenessManager struct{} type noopManager struct{}
func (noopIdlenessManager) onCallBegin() error { return nil } func (noopManager) OnCallBegin() error { return nil }
func (noopIdlenessManager) onCallEnd() {} func (noopManager) OnCallEnd() {}
func (noopIdlenessManager) close() {} func (noopManager) Close() {}
// idlenessManagerImpl implements the idlenessManager interface. It uses atomic // manager implements the Manager interface. It uses atomic operations to
// operations to synchronize access to shared state and a mutex to guarantee // synchronize access to shared state and a mutex to guarantee mutual exclusion
// mutual exclusion in a critical section. // in a critical section.
type idlenessManagerImpl struct { type manager struct {
// State accessed atomically. // State accessed atomically.
lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed.
activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
@ -64,14 +68,15 @@ type idlenessManagerImpl struct {
// Can be accessed without atomics or mutex since these are set at creation // Can be accessed without atomics or mutex since these are set at creation
// time and read-only after that. // time and read-only after that.
enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. enforcer Enforcer // Functionality provided by grpc.ClientConn.
timeout int64 // Idle timeout duration nanos stored as an int64. timeout int64 // Idle timeout duration nanos stored as an int64.
logger grpclog.LoggerV2
// idleMu is used to guarantee mutual exclusion in two scenarios: // idleMu is used to guarantee mutual exclusion in two scenarios:
// - Opposing intentions: // - Opposing intentions:
// - a: Idle timeout has fired and handleIdleTimeout() is trying to put // - a: Idle timeout has fired and handleIdleTimeout() is trying to put
// the channel in idle mode because the channel has been inactive. // the channel in idle mode because the channel has been inactive.
// - b: At the same time an RPC is made on the channel, and onCallBegin() // - b: At the same time an RPC is made on the channel, and OnCallBegin()
// is trying to prevent the channel from going idle. // is trying to prevent the channel from going idle.
// - Competing intentions: // - Competing intentions:
// - The channel is in idle mode and there are multiple RPCs starting at // - The channel is in idle mode and there are multiple RPCs starting at
@ -83,28 +88,37 @@ type idlenessManagerImpl struct {
timer *time.Timer timer *time.Timer
} }
// newIdlenessManager creates a new idleness manager implementation for the // ManagerOptions is a collection of options used by
// given idle timeout. // NewManager.
func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { type ManagerOptions struct {
if idleTimeout == 0 { Enforcer Enforcer
return noopIdlenessManager{} Timeout time.Duration
Logger grpclog.LoggerV2
} }
i := &idlenessManagerImpl{ // NewManager creates a new idleness manager implementation for the
enforcer: enforcer, // given idle timeout.
timeout: int64(idleTimeout), func NewManager(opts ManagerOptions) Manager {
if opts.Timeout == 0 {
return noopManager{}
} }
i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout)
return i m := &manager{
enforcer: opts.Enforcer,
timeout: int64(opts.Timeout),
logger: opts.Logger,
}
m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout)
return m
} }
// resetIdleTimer resets the idle timer to the given duration. This method // resetIdleTimer resets the idle timer to the given duration. This method
// should only be called from the timer callback. // should only be called from the timer callback.
func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { func (m *manager) resetIdleTimer(d time.Duration) {
i.idleMu.Lock() m.idleMu.Lock()
defer i.idleMu.Unlock() defer m.idleMu.Unlock()
if i.timer == nil { if m.timer == nil {
// Only close sets timer to nil. We are done. // Only close sets timer to nil. We are done.
return return
} }
@ -112,47 +126,47 @@ func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) {
// It is safe to ignore the return value from Reset() because this method is // It is safe to ignore the return value from Reset() because this method is
// only ever called from the timer callback, which means the timer has // only ever called from the timer callback, which means the timer has
// already fired. // already fired.
i.timer.Reset(d) m.timer.Reset(d)
} }
// handleIdleTimeout is the timer callback that is invoked upon expiry of the // handleIdleTimeout is the timer callback that is invoked upon expiry of the
// configured idle timeout. The channel is considered inactive if there are no // configured idle timeout. The channel is considered inactive if there are no
// ongoing calls and no RPC activity since the last time the timer fired. // ongoing calls and no RPC activity since the last time the timer fired.
func (i *idlenessManagerImpl) handleIdleTimeout() { func (m *manager) handleIdleTimeout() {
if i.isClosed() { if m.isClosed() {
return return
} }
if atomic.LoadInt32(&i.activeCallsCount) > 0 { if atomic.LoadInt32(&m.activeCallsCount) > 0 {
i.resetIdleTimer(time.Duration(i.timeout)) m.resetIdleTimer(time.Duration(m.timeout))
return return
} }
// There has been activity on the channel since we last got here. Reset the // There has been activity on the channel since we last got here. Reset the
// timer and return. // timer and return.
if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
// Set the timer to fire after a duration of idle timeout, calculated // Set the timer to fire after a duration of idle timeout, calculated
// from the time the most recent RPC completed. // from the time the most recent RPC completed.
atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0)
i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano()))
return return
} }
// This CAS operation is extremely likely to succeed given that there has // This CAS operation is extremely likely to succeed given that there has
// been no activity since the last time we were here. Setting the // been no activity since the last time we were here. Setting the
// activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the
// channel is either in idle mode or is trying to get there. // channel is either in idle mode or is trying to get there.
if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
// This CAS operation can fail if an RPC started after we checked for // This CAS operation can fail if an RPC started after we checked for
// activity at the top of this method, or one was ongoing from before // activity at the top of this method, or one was ongoing from before
// the last time we were here. In both cases, reset the timer and return. // the last time we were here. In both cases, reset the timer and return.
i.resetIdleTimer(time.Duration(i.timeout)) m.resetIdleTimer(time.Duration(m.timeout))
return return
} }
// Now that we've set the active calls count to -math.MaxInt32, it's time to // Now that we've set the active calls count to -math.MaxInt32, it's time to
// actually move to idle mode. // actually move to idle mode.
if i.tryEnterIdleMode() { if m.tryEnterIdleMode() {
// Successfully entered idle mode. No timer needed until we exit idle. // Successfully entered idle mode. No timer needed until we exit idle.
return return
} }
@ -160,8 +174,8 @@ func (i *idlenessManagerImpl) handleIdleTimeout() {
// Failed to enter idle mode due to a concurrent RPC that kept the channel // Failed to enter idle mode due to a concurrent RPC that kept the channel
// active, or because of an error from the channel. Undo the attempt to // active, or because of an error from the channel. Undo the attempt to
// enter idle, and reset the timer to try again later. // enter idle, and reset the timer to try again later.
atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
i.resetIdleTimer(time.Duration(i.timeout)) m.resetIdleTimer(time.Duration(m.timeout))
} }
// tryEnterIdleMode instructs the channel to enter idle mode. But before // tryEnterIdleMode instructs the channel to enter idle mode. But before
@ -171,15 +185,15 @@ func (i *idlenessManagerImpl) handleIdleTimeout() {
// Return value indicates whether or not the channel moved to idle mode. // Return value indicates whether or not the channel moved to idle mode.
// //
// Holds idleMu which ensures mutual exclusion with exitIdleMode. // Holds idleMu which ensures mutual exclusion with exitIdleMode.
func (i *idlenessManagerImpl) tryEnterIdleMode() bool { func (m *manager) tryEnterIdleMode() bool {
i.idleMu.Lock() m.idleMu.Lock()
defer i.idleMu.Unlock() defer m.idleMu.Unlock()
if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 {
// We raced and lost to a new RPC. Very rare, but stop entering idle. // We raced and lost to a new RPC. Very rare, but stop entering idle.
return false return false
} }
if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
// A very short RPC could have come in (and also finished) after we // A very short RPC could have come in (and also finished) after we
// checked for calls count and activity in handleIdleTimeout(), but // checked for calls count and activity in handleIdleTimeout(), but
// before the CAS operation. So, we need to check for activity again. // before the CAS operation. So, we need to check for activity again.
@ -189,99 +203,99 @@ func (i *idlenessManagerImpl) tryEnterIdleMode() bool {
// No new RPCs have come in since we last set the active calls count value // No new RPCs have come in since we last set the active calls count value
// -math.MaxInt32 in the timer callback. And since we have the lock, it is // -math.MaxInt32 in the timer callback. And since we have the lock, it is
// safe to enter idle mode now. // safe to enter idle mode now.
if err := i.enforcer.enterIdleMode(); err != nil { if err := m.enforcer.EnterIdleMode(); err != nil {
logger.Errorf("Failed to enter idle mode: %v", err) m.logger.Errorf("Failed to enter idle mode: %v", err)
return false return false
} }
// Successfully entered idle mode. // Successfully entered idle mode.
i.actuallyIdle = true m.actuallyIdle = true
return true return true
} }
// onCallBegin is invoked at the start of every RPC. // OnCallBegin is invoked at the start of every RPC.
func (i *idlenessManagerImpl) onCallBegin() error { func (m *manager) OnCallBegin() error {
if i.isClosed() { if m.isClosed() {
return nil return nil
} }
if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { if atomic.AddInt32(&m.activeCallsCount, 1) > 0 {
// Channel is not idle now. Set the activity bit and allow the call. // Channel is not idle now. Set the activity bit and allow the call.
atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
return nil return nil
} }
// Channel is either in idle mode or is in the process of moving to idle // Channel is either in idle mode or is in the process of moving to idle
// mode. Attempt to exit idle mode to allow this RPC. // mode. Attempt to exit idle mode to allow this RPC.
if err := i.exitIdleMode(); err != nil { if err := m.exitIdleMode(); err != nil {
// Undo the increment to calls count, and return an error causing the // Undo the increment to calls count, and return an error causing the
// RPC to fail. // RPC to fail.
atomic.AddInt32(&i.activeCallsCount, -1) atomic.AddInt32(&m.activeCallsCount, -1)
return err return err
} }
atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
return nil return nil
} }
// exitIdleMode instructs the channel to exit idle mode. // exitIdleMode instructs the channel to exit idle mode.
// //
// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
func (i *idlenessManagerImpl) exitIdleMode() error { func (m *manager) exitIdleMode() error {
i.idleMu.Lock() m.idleMu.Lock()
defer i.idleMu.Unlock() defer m.idleMu.Unlock()
if !i.actuallyIdle { if !m.actuallyIdle {
// This can happen in two scenarios: // This can happen in two scenarios:
// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
// tryEnterIdleMode(). But before the latter could grab the lock, an RPC // tryEnterIdleMode(). But before the latter could grab the lock, an RPC
// came in and onCallBegin() noticed that the calls count is negative. // came in and OnCallBegin() noticed that the calls count is negative.
// - Channel is in idle mode, and multiple new RPCs come in at the same // - Channel is in idle mode, and multiple new RPCs come in at the same
// time, all of them notice a negative calls count in onCallBegin and get // time, all of them notice a negative calls count in OnCallBegin and get
// here. The first one to get the lock would get the channel to exit idle. // here. The first one to get the lock would get the channel to exit idle.
// //
// Either way, nothing to do here. // Either way, nothing to do here.
return nil return nil
} }
if err := i.enforcer.exitIdleMode(); err != nil { if err := m.enforcer.ExitIdleMode(); err != nil {
return fmt.Errorf("channel failed to exit idle mode: %v", err) return fmt.Errorf("channel failed to exit idle mode: %v", err)
} }
// Undo the idle entry process. This also respects any new RPC attempts. // Undo the idle entry process. This also respects any new RPC attempts.
atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
i.actuallyIdle = false m.actuallyIdle = false
// Start a new timer to fire after the configured idle timeout. // Start a new timer to fire after the configured idle timeout.
i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout)
return nil return nil
} }
// onCallEnd is invoked at the end of every RPC. // OnCallEnd is invoked at the end of every RPC.
func (i *idlenessManagerImpl) onCallEnd() { func (m *manager) OnCallEnd() {
if i.isClosed() { if m.isClosed() {
return return
} }
// Record the time at which the most recent call finished. // Record the time at which the most recent call finished.
atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano())
// Decrement the active calls count. This count can temporarily go negative // Decrement the active calls count. This count can temporarily go negative
// when the timer callback is in the process of moving the channel to idle // when the timer callback is in the process of moving the channel to idle
// mode, but one or more RPCs come in and complete before the timer callback // mode, but one or more RPCs come in and complete before the timer callback
// can get done with the process of moving to idle mode. // can get done with the process of moving to idle mode.
atomic.AddInt32(&i.activeCallsCount, -1) atomic.AddInt32(&m.activeCallsCount, -1)
} }
func (i *idlenessManagerImpl) isClosed() bool { func (m *manager) isClosed() bool {
return atomic.LoadInt32(&i.closed) == 1 return atomic.LoadInt32(&m.closed) == 1
} }
func (i *idlenessManagerImpl) close() { func (m *manager) Close() {
atomic.StoreInt32(&i.closed, 1) atomic.StoreInt32(&m.closed, 1)
i.idleMu.Lock() m.idleMu.Lock()
i.timer.Stop() m.timer.Stop()
i.timer = nil m.timer = nil
i.idleMu.Unlock() m.idleMu.Unlock()
} }
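A hedged sketch of how the relocated manager is driven, based only on the surface shown above (the idle package is internal, so this is illustrative; the Enforcer below is a stand-in for grpc.ClientConn, and imports of fmt, time, grpclog and the idle package are elided):

// fakeEnforcer is a hypothetical Enforcer; in gRPC the real implementation is
// the ClientConn, which tears down and re-creates resources around idleness.
type fakeEnforcer struct{}

func (fakeEnforcer) EnterIdleMode() error { fmt.Println("entering idle"); return nil }
func (fakeEnforcer) ExitIdleMode() error  { fmt.Println("exiting idle"); return nil }

func runWithIdleness() error {
	m := idle.NewManager(idle.ManagerOptions{
		Enforcer: fakeEnforcer{},
		Timeout:  30 * time.Minute,          // a Timeout of 0 returns the no-op manager
		Logger:   grpclog.Component("idle"), // assumed to satisfy grpclog.LoggerV2
	})
	defer m.Close()

	// Each RPC brackets its lifetime so the manager can track activity and
	// re-arm the idle timer; OnCallBegin also forces an exit from idle mode.
	if err := m.OnCallBegin(); err != nil {
		return err
	}
	defer m.OnCallEnd()
	// ... issue the RPC ...
	return nil
}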


@ -30,7 +30,7 @@ import (
var ( var (
// WithHealthCheckFunc is set by dialoptions.go // WithHealthCheckFunc is set by dialoptions.go
WithHealthCheckFunc interface{} // func (HealthChecker) DialOption WithHealthCheckFunc any // func (HealthChecker) DialOption
// HealthCheckFunc is used to provide client-side LB channel health checking // HealthCheckFunc is used to provide client-side LB channel health checking
HealthCheckFunc HealthChecker HealthCheckFunc HealthChecker
// BalancerUnregister is exported by package balancer to unregister a balancer. // BalancerUnregister is exported by package balancer to unregister a balancer.
@ -38,8 +38,12 @@ var (
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
// default, but tests may wish to set it lower for convenience. // default, but tests may wish to set it lower for convenience.
KeepaliveMinPingTime = 10 * time.Second KeepaliveMinPingTime = 10 * time.Second
// KeepaliveMinServerPingTime is the minimum ping interval for servers.
// This must be 1s by default, but tests may wish to set it lower for
// convenience.
KeepaliveMinServerPingTime = time.Second
// ParseServiceConfig parses a JSON representation of the service config. // ParseServiceConfig parses a JSON representation of the service config.
ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult ParseServiceConfig any // func(string) *serviceconfig.ParseResult
// EqualServiceConfigForTesting is for testing service config generation and // EqualServiceConfigForTesting is for testing service config generation and
// parsing. Both a and b should be returned by ParseServiceConfig. // parsing. Both a and b should be returned by ParseServiceConfig.
// This function compares the config without rawJSON stripped, in case the // This function compares the config without rawJSON stripped, in case the
@ -49,33 +53,33 @@ var (
// given name. This is set by package certprovider for use from xDS // given name. This is set by package certprovider for use from xDS
// bootstrap code while parsing certificate provider configs in the // bootstrap code while parsing certificate provider configs in the
// bootstrap file. // bootstrap file.
GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder GetCertificateProviderBuilder any // func(string) certprovider.Builder
// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
// stored in the passed in attributes. This is set by // stored in the passed in attributes. This is set by
// credentials/xds/xds.go. // credentials/xds/xds.go.
GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo
// GetServerCredentials returns the transport credentials configured on a // GetServerCredentials returns the transport credentials configured on a
// gRPC server. An xDS-enabled server needs to know what type of credentials // gRPC server. An xDS-enabled server needs to know what type of credentials
// is configured on the underlying gRPC server. This is set by server.go. // is configured on the underlying gRPC server. This is set by server.go.
GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials
// CanonicalString returns the canonical string of the code defined here: // CanonicalString returns the canonical string of the code defined here:
// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
// //
// This is used in the 1.0 release of gcp/observability, and thus must not be // This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed. // deleted or changed.
CanonicalString interface{} // func (codes.Code) string CanonicalString any // func (codes.Code) string
// DrainServerTransports initiates a graceful close of existing connections // DrainServerTransports initiates a graceful close of existing connections
// on a gRPC server accepted on the provided listener address. An // on a gRPC server accepted on the provided listener address. An
// xDS-enabled server invokes this method on a grpc.Server when a particular // xDS-enabled server invokes this method on a grpc.Server when a particular
// listener moves to "not-serving" mode. // listener moves to "not-serving" mode.
DrainServerTransports interface{} // func(*grpc.Server, string) DrainServerTransports any // func(*grpc.Server, string)
// AddGlobalServerOptions adds an array of ServerOption that will be // AddGlobalServerOptions adds an array of ServerOption that will be
// effective globally for newly created servers. The priority will be: 1. // effective globally for newly created servers. The priority will be: 1.
// user-provided; 2. this method; 3. default values. // user-provided; 2. this method; 3. default values.
// //
// This is used in the 1.0 release of gcp/observability, and thus must not be // This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed. // deleted or changed.
AddGlobalServerOptions interface{} // func(opt ...ServerOption) AddGlobalServerOptions any // func(opt ...ServerOption)
// ClearGlobalServerOptions clears the array of extra ServerOption. This // ClearGlobalServerOptions clears the array of extra ServerOption. This
// method is useful in testing and benchmarking. // method is useful in testing and benchmarking.
// //
@ -88,14 +92,14 @@ var (
// //
// This is used in the 1.0 release of gcp/observability, and thus must not be // This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed. // deleted or changed.
AddGlobalDialOptions interface{} // func(opt ...DialOption) AddGlobalDialOptions any // func(opt ...DialOption)
// DisableGlobalDialOptions returns a DialOption that prevents the // DisableGlobalDialOptions returns a DialOption that prevents the
// ClientConn from applying the global DialOptions (set via // ClientConn from applying the global DialOptions (set via
// AddGlobalDialOptions). // AddGlobalDialOptions).
// //
// This is used in the 1.0 release of gcp/observability, and thus must not be // This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed. // deleted or changed.
DisableGlobalDialOptions interface{} // func() grpc.DialOption DisableGlobalDialOptions any // func() grpc.DialOption
// ClearGlobalDialOptions clears the array of extra DialOption. This // ClearGlobalDialOptions clears the array of extra DialOption. This
// method is useful in testing and benchmarking. // method is useful in testing and benchmarking.
// //
@ -104,23 +108,26 @@ var (
ClearGlobalDialOptions func() ClearGlobalDialOptions func()
// JoinDialOptions combines the dial options passed as arguments into a // JoinDialOptions combines the dial options passed as arguments into a
// single dial option. // single dial option.
JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption
// JoinServerOptions combines the server options passed as arguments into a // JoinServerOptions combines the server options passed as arguments into a
// single server option. // single server option.
JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption
// WithBinaryLogger returns a DialOption that specifies the binary logger // WithBinaryLogger returns a DialOption that specifies the binary logger
// for a ClientConn. // for a ClientConn.
// //
// This is used in the 1.0 release of gcp/observability, and thus must not be // This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed. // deleted or changed.
WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption
// BinaryLogger returns a ServerOption that can set the binary logger for a // BinaryLogger returns a ServerOption that can set the binary logger for a
// server. // server.
// //
// This is used in the 1.0 release of gcp/observability, and thus must not be // This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed. // deleted or changed.
BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption BinaryLogger any // func(binarylog.Logger) grpc.ServerOption
// SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn
SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber)
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
// the provided xds bootstrap config instead of the global configuration from // the provided xds bootstrap config instead of the global configuration from
@ -131,7 +138,7 @@ var (
// //
// This function should ONLY be used for testing and may not work with some // This function should ONLY be used for testing and may not work with some
// other features, including the CSDS service. // other features, including the CSDS service.
NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error)
// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
// Specifier Plugin for testing purposes, regardless of the XDSRLS environment // Specifier Plugin for testing purposes, regardless of the XDSRLS environment
@ -163,7 +170,11 @@ var (
UnregisterRBACHTTPFilterForTesting func() UnregisterRBACHTTPFilterForTesting func()
// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions)
// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
// metadata to RPCs.
GRPCResolverSchemeExtraMetadata string = "xds"
) )
// HealthChecker defines the signature of the client-side LB channel health checking function. // HealthChecker defines the signature of the client-side LB channel health checking function.
@ -174,7 +185,7 @@ var (
// //
// The health checking protocol is defined at: // The health checking protocol is defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md // https://github.com/grpc/grpc/blob/master/doc/health-checking.md
type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error
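The any-typed variables above follow a set-and-assert convention: the grpc package assigns a concrete function at init time, and consumers type-assert it back to the signature documented in the comment. A hedged illustration using JoinDialOptions (imports elided; the consumer shown here is made up):

func combinedDialOption() grpc.DialOption {
	// The assertion target matches the comment on internal.JoinDialOptions.
	join := internal.JoinDialOptions.(func(...grpc.DialOption) grpc.DialOption)
	return join(grpc.WithUserAgent("example-agent"), grpc.WithDisableRetry())
}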
const ( const (
// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.


@ -35,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata")
type mdValue metadata.MD type mdValue metadata.MD
func (m mdValue) Equal(o interface{}) bool { func (m mdValue) Equal(o any) bool {
om, ok := o.(mdValue) om, ok := o.(mdValue)
if !ok { if !ok {
return false return false


@ -35,7 +35,7 @@ const jsonIndent = " "
// ToJSON marshals the input into a json string. // ToJSON marshals the input into a json string.
// //
// If marshal fails, it falls back to fmt.Sprintf("%+v"). // If marshal fails, it falls back to fmt.Sprintf("%+v").
func ToJSON(e interface{}) string { func ToJSON(e any) string {
switch ee := e.(type) { switch ee := e.(type) {
case protov1.Message: case protov1.Message:
mm := jsonpb.Marshaler{Indent: jsonIndent} mm := jsonpb.Marshaler{Indent: jsonIndent}


@ -92,7 +92,7 @@ type ClientStream interface {
// calling RecvMsg on the same stream at the same time, but it is not safe // calling RecvMsg on the same stream at the same time, but it is not safe
// to call SendMsg on the same stream in different goroutines. It is also // to call SendMsg on the same stream in different goroutines. It is also
// not safe to call CloseSend concurrently with SendMsg. // not safe to call CloseSend concurrently with SendMsg.
SendMsg(m interface{}) error SendMsg(m any) error
// RecvMsg blocks until it receives a message into m or the stream is // RecvMsg blocks until it receives a message into m or the stream is
// done. It returns io.EOF when the stream completes successfully. On // done. It returns io.EOF when the stream completes successfully. On
// any other error, the stream is aborted and the error contains the RPC // any other error, the stream is aborted and the error contains the RPC
@ -101,7 +101,7 @@ type ClientStream interface {
// It is safe to have a goroutine calling SendMsg and another goroutine // It is safe to have a goroutine calling SendMsg and another goroutine
// calling RecvMsg on the same stream at the same time, but it is not // calling RecvMsg on the same stream at the same time, but it is not
// safe to call RecvMsg on the same stream in different goroutines. // safe to call RecvMsg on the same stream in different goroutines.
RecvMsg(m interface{}) error RecvMsg(m any) error
} }
// ClientInterceptor is an interceptor for gRPC client streams. // ClientInterceptor is an interceptor for gRPC client streams.


@ -49,7 +49,7 @@ func New(c codes.Code, msg string) *Status {
} }
// Newf returns New(c, fmt.Sprintf(format, a...)). // Newf returns New(c, fmt.Sprintf(format, a...)).
func Newf(c codes.Code, format string, a ...interface{}) *Status { func Newf(c codes.Code, format string, a ...any) *Status {
return New(c, fmt.Sprintf(format, a...)) return New(c, fmt.Sprintf(format, a...))
} }
@ -64,7 +64,7 @@ func Err(c codes.Code, msg string) error {
} }
// Errorf returns Error(c, fmt.Sprintf(format, a...)). // Errorf returns Error(c, fmt.Sprintf(format, a...)).
func Errorf(c codes.Code, format string, a ...interface{}) error { func Errorf(c codes.Code, format string, a ...any) error {
return Err(c, fmt.Sprintf(format, a...)) return Err(c, fmt.Sprintf(format, a...))
} }
@ -120,11 +120,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
// Details returns a slice of details messages attached to the status. // Details returns a slice of details messages attached to the status.
// If a detail cannot be decoded, the error is returned in place of the detail. // If a detail cannot be decoded, the error is returned in place of the detail.
func (s *Status) Details() []interface{} { func (s *Status) Details() []any {
if s == nil || s.s == nil { if s == nil || s.s == nil {
return nil return nil
} }
details := make([]interface{}, 0, len(s.s.Details)) details := make([]any, 0, len(s.s.Details))
for _, any := range s.s.Details { for _, any := range s.s.Details {
detail := &ptypes.DynamicAny{} detail := &ptypes.DynamicAny{}
if err := ptypes.UnmarshalAny(any, detail); err != nil { if err := ptypes.UnmarshalAny(any, detail); err != nil {
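Since the variadic and Details signatures above move from interface{} to any, a short usage sketch may help; it uses the public status package and the errdetails protos from the bumped genproto module, with imports elided:

func badIDError() error {
	st := status.Newf(codes.InvalidArgument, "invalid field %q", "id")
	st, err := st.WithDetails(&errdetails.BadRequest{
		FieldViolations: []*errdetails.BadRequest_FieldViolation{
			{Field: "id", Description: "must be a non-empty UUID"},
		},
	})
	if err != nil {
		return status.Errorf(codes.Internal, "attaching details: %v", err)
	}
	return st.Err()
}

func describe(err error) {
	for _, d := range status.Convert(err).Details() { // now []any
		if br, ok := d.(*errdetails.BadRequest); ok {
			fmt.Println("bad request:", br.GetFieldViolations())
		}
	}
}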


@ -40,7 +40,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
} }
type itemNode struct { type itemNode struct {
it interface{} it any
next *itemNode next *itemNode
} }
@ -49,7 +49,7 @@ type itemList struct {
tail *itemNode tail *itemNode
} }
func (il *itemList) enqueue(i interface{}) { func (il *itemList) enqueue(i any) {
n := &itemNode{it: i} n := &itemNode{it: i}
if il.tail == nil { if il.tail == nil {
il.head, il.tail = n, n il.head, il.tail = n, n
@ -61,11 +61,11 @@ func (il *itemList) enqueue(i interface{}) {
// peek returns the first item in the list without removing it from the // peek returns the first item in the list without removing it from the
// list. // list.
func (il *itemList) peek() interface{} { func (il *itemList) peek() any {
return il.head.it return il.head.it
} }
func (il *itemList) dequeue() interface{} { func (il *itemList) dequeue() any {
if il.head == nil { if il.head == nil {
return nil return nil
} }
@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error {
return err return err
} }
func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) {
var wakeUp bool var wakeUp bool
c.mu.Lock() c.mu.Lock()
if c.err != nil { if c.err != nil {
@ -373,7 +373,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b
} }
// Note argument f should never be nil. // Note argument f should never be nil.
func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) {
c.mu.Lock() c.mu.Lock()
if c.err != nil { if c.err != nil {
c.mu.Unlock() c.mu.Unlock()
@ -387,7 +387,7 @@ func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bo
return true, nil return true, nil
} }
func (c *controlBuffer) get(block bool) (interface{}, error) { func (c *controlBuffer) get(block bool) (any, error) {
for { for {
c.mu.Lock() c.mu.Lock()
if c.err != nil { if c.err != nil {
@ -830,7 +830,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error {
return nil return nil
} }
func (l *loopyWriter) handle(i interface{}) error { func (l *loopyWriter) handle(i any) error {
switch i := i.(type) { switch i := i.(type) {
case *incomingWindowUpdate: case *incomingWindowUpdate:
l.incomingWindowUpdateHandler(i) l.incomingWindowUpdateHandler(i)


@ -330,7 +330,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
readerDone: make(chan struct{}), readerDone: make(chan struct{}),
writerDone: make(chan struct{}), writerDone: make(chan struct{}),
goAway: make(chan struct{}), goAway: make(chan struct{}),
framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
fc: &trInFlow{limit: uint32(icwz)}, fc: &trInFlow{limit: uint32(icwz)},
scheme: scheme, scheme: scheme,
activeStreams: make(map[uint32]*Stream), activeStreams: make(map[uint32]*Stream),
@ -762,7 +762,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
firstTry := true firstTry := true
var ch chan struct{} var ch chan struct{}
transportDrainRequired := false transportDrainRequired := false
checkForStreamQuota := func(it interface{}) bool { checkForStreamQuota := func(it any) bool {
if t.streamQuota <= 0 { // Can go negative if server decreases it. if t.streamQuota <= 0 { // Can go negative if server decreases it.
if firstTry { if firstTry {
t.waitingStreams++ t.waitingStreams++
@ -800,7 +800,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
return true return true
} }
var hdrListSizeErr error var hdrListSizeErr error
checkForHeaderListSize := func(it interface{}) bool { checkForHeaderListSize := func(it any) bool {
if t.maxSendHeaderListSize == nil { if t.maxSendHeaderListSize == nil {
return true return true
} }
@ -815,7 +815,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
return true return true
} }
for { for {
success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { success, err := t.controlBuf.executeAndPut(func(it any) bool {
return checkForHeaderListSize(it) && checkForStreamQuota(it) return checkForHeaderListSize(it) && checkForStreamQuota(it)
}, hdr) }, hdr)
if err != nil { if err != nil {
@ -927,7 +927,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
rst: rst, rst: rst,
rstCode: rstCode, rstCode: rstCode,
} }
addBackStreamQuota := func(interface{}) bool { addBackStreamQuota := func(any) bool {
t.streamQuota++ t.streamQuota++
if t.streamQuota > 0 && t.waitingStreams > 0 { if t.streamQuota > 0 && t.waitingStreams > 0 {
select { select {
@ -1080,7 +1080,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) {
// for the transport and the stream based on the current bdp // for the transport and the stream based on the current bdp
// estimation. // estimation.
func (t *http2Client) updateFlowControl(n uint32) { func (t *http2Client) updateFlowControl(n uint32) {
updateIWS := func(interface{}) bool { updateIWS := func(any) bool {
t.initialWindowSize = int32(n) t.initialWindowSize = int32(n)
t.mu.Lock() t.mu.Lock()
for _, s := range t.activeStreams { for _, s := range t.activeStreams {
@ -1233,7 +1233,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
} }
updateFuncs = append(updateFuncs, updateStreamQuota) updateFuncs = append(updateFuncs, updateStreamQuota)
} }
t.controlBuf.executeAndPut(func(interface{}) bool { t.controlBuf.executeAndPut(func(any) bool {
for _, f := range updateFuncs { for _, f := range updateFuncs {
f() f()
} }
@ -1505,14 +1505,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
return return
} }
isHeader := false // For headers, set them in s.header and close headerChan. For trailers or
// trailers-only, closeStream will set the trailers and close headerChan as
// If headerChan hasn't been closed yet // needed.
if !endStream {
// If headerChan hasn't been closed yet (expected, given we checked it
// above, but something else could have potentially closed the whole
// stream).
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
s.headerValid = true s.headerValid = true
if !endStream {
// HEADERS frame block carries a Response-Headers.
isHeader = true
// These values can be set without any synchronization because // These values can be set without any synchronization because
// stream goroutine will read it only after seeing a closed // stream goroutine will read it only after seeing a closed
// headerChan which we'll close after setting this. // headerChan which we'll close after setting this.
@ -1520,15 +1521,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
if len(mdata) > 0 { if len(mdata) > 0 {
s.header = mdata s.header = mdata
} }
} else {
// HEADERS frame block carries a Trailers-Only.
s.noHeaders = true
}
close(s.headerChan) close(s.headerChan)
} }
}
for _, sh := range t.statsHandlers { for _, sh := range t.statsHandlers {
if isHeader { if !endStream {
inHeader := &stats.InHeader{ inHeader := &stats.InHeader{
Client: true, Client: true,
WireLength: int(frame.Header().Length), WireLength: int(frame.Header().Length),
@ -1554,9 +1552,10 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
statusGen = status.New(rawStatusCode, grpcMessage) statusGen = status.New(rawStatusCode, grpcMessage)
} }
// if client received END_STREAM from server while stream was still active, send RST_STREAM // If client received END_STREAM from server while stream was still active,
rst := s.getState() == streamActive // send RST_STREAM.
t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) rstStream := s.getState() == streamActive
t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true)
} }
// readServerPreface reads and handles the initial settings frame from the // readServerPreface reads and handles the initial settings frame from the


@ -165,7 +165,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
if config.MaxHeaderListSize != nil { if config.MaxHeaderListSize != nil {
maxHeaderListSize = *config.MaxHeaderListSize maxHeaderListSize = *config.MaxHeaderListSize
} }
framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize)
// Send initial settings as connection preface to client. // Send initial settings as connection preface to client.
isettings := []http2.Setting{{ isettings := []http2.Setting{{
ID: http2.SettingMaxFrameSize, ID: http2.SettingMaxFrameSize,
@ -855,7 +855,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
} }
return nil return nil
}) })
t.controlBuf.executeAndPut(func(interface{}) bool { t.controlBuf.executeAndPut(func(any) bool {
for _, f := range updateFuncs { for _, f := range updateFuncs {
f() f()
} }
@ -939,7 +939,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD)
return headerFields return headerFields
} }
func (t *http2Server) checkForHeaderListSize(it interface{}) bool { func (t *http2Server) checkForHeaderListSize(it any) bool {
if t.maxSendHeaderListSize == nil { if t.maxSendHeaderListSize == nil {
return true return true
} }


@ -30,6 +30,7 @@ import (
"net/url" "net/url"
"strconv" "strconv"
"strings" "strings"
"sync"
"time" "time"
"unicode/utf8" "unicode/utf8"
@ -309,6 +310,7 @@ func decodeGrpcMessageUnchecked(msg string) string {
} }
type bufWriter struct { type bufWriter struct {
pool *sync.Pool
buf []byte buf []byte
offset int offset int
batchSize int batchSize int
@ -316,12 +318,17 @@ type bufWriter struct {
err error err error
} }
func newBufWriter(conn net.Conn, batchSize int) *bufWriter { func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
return &bufWriter{ w := &bufWriter{
buf: make([]byte, batchSize*2),
batchSize: batchSize, batchSize: batchSize,
conn: conn, conn: conn,
pool: pool,
} }
// A nil pool indicates that a dedicated (non-shared) buffer should be used.
if pool == nil {
w.buf = make([]byte, batchSize)
}
return w
} }
func (w *bufWriter) Write(b []byte) (n int, err error) { func (w *bufWriter) Write(b []byte) (n int, err error) {
@ -332,19 +339,34 @@ func (w *bufWriter) Write(b []byte) (n int, err error) {
n, err = w.conn.Write(b) n, err = w.conn.Write(b)
return n, toIOError(err) return n, toIOError(err)
} }
if w.buf == nil {
b := w.pool.Get().(*[]byte)
w.buf = *b
}
for len(b) > 0 { for len(b) > 0 {
nn := copy(w.buf[w.offset:], b) nn := copy(w.buf[w.offset:], b)
b = b[nn:] b = b[nn:]
w.offset += nn w.offset += nn
n += nn n += nn
if w.offset >= w.batchSize { if w.offset >= w.batchSize {
err = w.Flush() err = w.flushKeepBuffer()
} }
} }
return n, err return n, err
} }
func (w *bufWriter) Flush() error { func (w *bufWriter) Flush() error {
err := w.flushKeepBuffer()
// Only release the buffer if we are in a "shared" mode
if w.buf != nil && w.pool != nil {
b := w.buf
w.pool.Put(&b)
w.buf = nil
}
return err
}
func (w *bufWriter) flushKeepBuffer() error {
if w.err != nil { if w.err != nil {
return w.err return w.err
} }
@ -381,7 +403,10 @@ type framer struct {
fr *http2.Framer fr *http2.Framer
} }
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool)
var writeBufferMutex sync.Mutex
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
if writeBufferSize < 0 { if writeBufferSize < 0 {
writeBufferSize = 0 writeBufferSize = 0
} }
@ -389,7 +414,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList
if readBufferSize > 0 { if readBufferSize > 0 {
r = bufio.NewReaderSize(r, readBufferSize) r = bufio.NewReaderSize(r, readBufferSize)
} }
w := newBufWriter(conn, writeBufferSize) var pool *sync.Pool
if sharedWriteBuffer {
pool = getWriteBufferPool(writeBufferSize)
}
w := newBufWriter(conn, writeBufferSize, pool)
f := &framer{ f := &framer{
writer: w, writer: w,
fr: http2.NewFramer(w, r), fr: http2.NewFramer(w, r),
@ -403,6 +432,24 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList
return f return f
} }
func getWriteBufferPool(writeBufferSize int) *sync.Pool {
writeBufferMutex.Lock()
defer writeBufferMutex.Unlock()
size := writeBufferSize * 2
pool, ok := writeBufferPoolMap[size]
if ok {
return pool
}
pool = &sync.Pool{
New: func() any {
b := make([]byte, size)
return &b
},
}
writeBufferPoolMap[size] = pool
return pool
}
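getWriteBufferPool keeps one sync.Pool per requested size, so connections configured with the same write buffer size share allocations, and pointers to slices are pooled to avoid an extra allocation on Put. A self-contained sketch of the same per-size pooling pattern, independent of the transport types:

package main

import (
	"fmt"
	"sync"
)

var (
	poolsMu sync.Mutex
	pools   = make(map[int]*sync.Pool) // one pool per buffer size
)

// bufferPoolFor returns a pool handing out *[]byte of exactly size bytes.
func bufferPoolFor(size int) *sync.Pool {
	poolsMu.Lock()
	defer poolsMu.Unlock()
	if p, ok := pools[size]; ok {
		return p
	}
	p := &sync.Pool{New: func() any {
		b := make([]byte, size)
		return &b
	}}
	pools[size] = p
	return p
}

func main() {
	p := bufferPoolFor(32 * 1024)
	buf := p.Get().(*[]byte)
	n := copy(*buf, "payload")
	fmt.Println(n, len(*buf)) // 7 32768
	p.Put(buf)                // return the buffer once the write has been flushed
}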
// parseDialTarget returns the network and address to pass to dialer. // parseDialTarget returns the network and address to pass to dialer.
func parseDialTarget(target string) (string, string) { func parseDialTarget(target string) (string, string) {
net := "tcp" net := "tcp"


@ -43,10 +43,6 @@ import (
"google.golang.org/grpc/tap" "google.golang.org/grpc/tap"
) )
// ErrNoHeaders is used as a signal that a trailers only response was received,
// and is not a real error.
var ErrNoHeaders = errors.New("stream has no headers")
const logLevel = 2 const logLevel = 2
type bufferPool struct { type bufferPool struct {
@ -56,7 +52,7 @@ type bufferPool struct {
func newBufferPool() *bufferPool { func newBufferPool() *bufferPool {
return &bufferPool{ return &bufferPool{
pool: sync.Pool{ pool: sync.Pool{
New: func() interface{} { New: func() any {
return new(bytes.Buffer) return new(bytes.Buffer)
}, },
}, },
@ -390,14 +386,10 @@ func (s *Stream) Header() (metadata.MD, error) {
} }
s.waitOnHeader() s.waitOnHeader()
if !s.headerValid { if !s.headerValid || s.noHeaders {
return nil, s.status.Err() return nil, s.status.Err()
} }
if s.noHeaders {
return nil, ErrNoHeaders
}
return s.header.Copy(), nil return s.header.Copy(), nil
} }
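With ErrNoHeaders removed, a trailers-only response now surfaces through Header() as the RPC's status error rather than a dedicated sentinel. A hedged sketch of the resulting client-side call pattern (the stream descriptor and method name are hypothetical; imports elided):

func headerOrStatus(ctx context.Context, cc *grpc.ClientConn) (metadata.MD, error) {
	desc := &grpc.StreamDesc{StreamName: "Watch", ServerStreams: true} // hypothetical
	cs, err := cc.NewStream(ctx, desc, "/example.Service/Watch")
	if err != nil {
		return nil, err
	}
	md, err := cs.Header()
	if err != nil {
		// For a trailers-only response this is now the RPC status error; there
		// is no separate ErrNoHeaders sentinel left to check for.
		return nil, err
	}
	return md, nil
}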
@ -559,6 +551,7 @@ type ServerConfig struct {
InitialConnWindowSize int32 InitialConnWindowSize int32
WriteBufferSize int WriteBufferSize int
ReadBufferSize int ReadBufferSize int
SharedWriteBuffer bool
ChannelzParentID *channelz.Identifier ChannelzParentID *channelz.Identifier
MaxHeaderListSize *uint32 MaxHeaderListSize *uint32
HeaderTableSize *uint32 HeaderTableSize *uint32
@ -592,6 +585,8 @@ type ConnectOptions struct {
WriteBufferSize int WriteBufferSize int
// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
ReadBufferSize int ReadBufferSize int
// SharedWriteBuffer indicates whether connections should reuse write buffer
SharedWriteBuffer bool
// ChannelzParentID sets the addrConn id which initiate the creation of this client transport. // ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
ChannelzParentID *channelz.Identifier ChannelzParentID *channelz.Identifier
// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
@ -736,7 +731,7 @@ type ServerTransport interface {
} }
// connectionErrorf creates an ConnectionError with the specified error description. // connectionErrorf creates an ConnectionError with the specified error description.
func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError {
return ConnectionError{ return ConnectionError{
Desc: fmt.Sprintf(format, a...), Desc: fmt.Sprintf(format, a...),
temp: temp, temp: temp,
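The new SharedWriteBuffer fields on ServerConfig and ConnectOptions are fed by public, experimental options; the option names below are assumed from the grpc-go 1.58 API surface and should be verified against the release notes before use (imports elided):

func dialShared(target string, creds credentials.TransportCredentials) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithTransportCredentials(creds),
		grpc.WithSharedWriteBuffer(true), // assumed experimental DialOption
	)
}

func newSharedServer() *grpc.Server {
	return grpc.NewServer(
		grpc.SharedWriteBuffer(true), // assumed experimental ServerOption
	)
}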


@ -28,6 +28,7 @@ import (
"google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/channelz"
istatus "google.golang.org/grpc/internal/status" istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/transport" "google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
) )
@ -39,10 +40,14 @@ type pickerWrapper struct {
idle bool idle bool
blockingCh chan struct{} blockingCh chan struct{}
picker balancer.Picker picker balancer.Picker
statsHandlers []stats.Handler // to record blocking picker calls
} }
func newPickerWrapper() *pickerWrapper { func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
return &pickerWrapper{blockingCh: make(chan struct{})} return &pickerWrapper{
blockingCh: make(chan struct{}),
statsHandlers: statsHandlers,
}
} }
// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
@ -95,6 +100,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
var ch chan struct{} var ch chan struct{}
var lastPickErr error var lastPickErr error
for { for {
pw.mu.Lock() pw.mu.Lock()
if pw.done { if pw.done {
@ -129,6 +135,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
continue continue
} }
// If the channel is set, it means that the pick call had to wait for a
// new picker at some point. Either it's the first iteration and this
// function received the first picker, or a picker errored with
// ErrNoSubConnAvailable or errored with failfast set to false, which
// will trigger a continue to the next iteration. In the first case this
// conditional will hit if this call had to block (the channel is set).
// In the second case, the only way it will get to this conditional is
// if there is a new picker.
if ch != nil {
for _, sh := range pw.statsHandlers {
sh.HandleRPC(ctx, &stats.PickerUpdated{})
}
}
ch = pw.blockingCh ch = pw.blockingCh
p := pw.picker p := pw.picker
pw.mu.Unlock() pw.mu.Unlock()
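The block above emits a stats.PickerUpdated event whenever a pick had to block waiting for a new picker. A small sketch of a stats.Handler that counts those events; it would be registered on the client with grpc.WithStatsHandler (imports elided):

type pickWaitCounter struct {
	waits atomic.Int64
}

func (h *pickWaitCounter) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}

func (h *pickWaitCounter) HandleRPC(_ context.Context, s stats.RPCStats) {
	if _, ok := s.(*stats.PickerUpdated); ok {
		h.waits.Add(1) // this RPC waited for a picker update before being sent
	}
}

func (h *pickWaitCounter) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}

func (h *pickWaitCounter) HandleConn(context.Context, stats.ConnStats) {}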


@ -26,12 +26,18 @@ import (
"google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity" "google.golang.org/grpc/connectivity"
"google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/envconfig"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/serviceconfig"
) )
const (
// PickFirstBalancerName is the name of the pick_first balancer. // PickFirstBalancerName is the name of the pick_first balancer.
const PickFirstBalancerName = "pick_first" PickFirstBalancerName = "pick_first"
logPrefix = "[pick-first-lb %p] "
)
func newPickfirstBuilder() balancer.Builder { func newPickfirstBuilder() balancer.Builder {
return &pickfirstBuilder{} return &pickfirstBuilder{}
@ -40,7 +46,9 @@ func newPickfirstBuilder() balancer.Builder {
type pickfirstBuilder struct{} type pickfirstBuilder struct{}
func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
return &pickfirstBalancer{cc: cc} b := &pickfirstBalancer{cc: cc}
b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
return b
} }
func (*pickfirstBuilder) Name() string { func (*pickfirstBuilder) Name() string {
@ -57,23 +65,36 @@ type pfConfig struct {
} }
func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
cfg := &pfConfig{} if !envconfig.PickFirstLBConfig {
if err := json.Unmarshal(js, cfg); err != nil { // Prior to supporting loadbalancing configuration, the pick_first LB
// policy did not implement the balancer.ConfigParser interface. This
// meant that if a non-empty configuration was passed to it, the service
// config unmarshaling code would throw a warning log, but would
// continue using the pick_first LB policy. The code below ensures the
// same behavior is retained if the env var is not set.
if string(js) != "{}" {
logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js))
}
return nil, nil
}
var cfg pfConfig
if err := json.Unmarshal(js, &cfg); err != nil {
return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
} }
return cfg, nil return cfg, nil
} }
type pickfirstBalancer struct { type pickfirstBalancer struct {
logger *internalgrpclog.PrefixLogger
state connectivity.State state connectivity.State
cc balancer.ClientConn cc balancer.ClientConn
subConn balancer.SubConn subConn balancer.SubConn
cfg *pfConfig
} }
func (b *pickfirstBalancer) ResolverError(err error) { func (b *pickfirstBalancer) ResolverError(err error) {
if logger.V(2) { if b.logger.V(2) {
logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) b.logger.Infof("Received error from the name resolver: %v", err)
} }
if b.subConn == nil { if b.subConn == nil {
b.state = connectivity.TransientFailure b.state = connectivity.TransientFailure
@ -96,35 +117,44 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// The resolver reported an empty address list. Treat it like an error by // The resolver reported an empty address list. Treat it like an error by
// calling b.ResolverError. // calling b.ResolverError.
if b.subConn != nil { if b.subConn != nil {
// Remove the old subConn. All addresses were removed, so it is no longer // Shut down the old subConn. All addresses were removed, so it is
// valid. // no longer valid.
b.cc.RemoveSubConn(b.subConn) b.subConn.Shutdown()
b.subConn = nil b.subConn = nil
} }
b.ResolverError(errors.New("produced zero addresses")) b.ResolverError(errors.New("produced zero addresses"))
return balancer.ErrBadResolverState return balancer.ErrBadResolverState
} }
if state.BalancerConfig != nil { // We don't have to guard this block with the env var because ParseConfig
cfg, ok := state.BalancerConfig.(*pfConfig) // already does so.
if !ok { cfg, ok := state.BalancerConfig.(pfConfig)
return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) if state.BalancerConfig != nil && !ok {
return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
} }
b.cfg = cfg if cfg.ShuffleAddressList {
} addrs = append([]resolver.Address{}, addrs...)
if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList {
grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
} }
if b.logger.V(2) {
b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
}
if b.subConn != nil { if b.subConn != nil {
b.cc.UpdateAddresses(b.subConn, addrs) b.cc.UpdateAddresses(b.subConn, addrs)
return nil return nil
} }
subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) var subConn balancer.SubConn
subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{
StateListener: func(state balancer.SubConnState) {
b.updateSubConnState(subConn, state)
},
})
if err != nil { if err != nil {
if logger.V(2) { if b.logger.V(2) {
logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) b.logger.Infof("Failed to create new SubConn: %v", err)
} }
b.state = connectivity.TransientFailure b.state = connectivity.TransientFailure
b.cc.UpdateState(balancer.State{ b.cc.UpdateState(balancer.State{
@ -143,13 +173,19 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
return nil return nil
} }
// UpdateSubConnState is unused as a StateListener is always registered when
// creating SubConns.
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
if logger.V(2) { b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) }
func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
if b.logger.V(2) {
b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state)
} }
if b.subConn != subConn { if b.subConn != subConn {
if logger.V(2) { if b.logger.V(2) {
logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") b.logger.Infof("Ignored state change because subConn is not recognized")
} }
return return
} }
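The pickfirst changes above are part of the repo-wide move from the deprecated Balancer.UpdateSubConnState method to per-SubConn StateListener callbacks. A minimal sketch of that pattern, assuming the grpc-go v1.58 balancer API shown in this diff (exampleBalancer, connect, and handleState are hypothetical names used only for illustration):

package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

type exampleBalancer struct {
	cc      balancer.ClientConn
	subConn balancer.SubConn
}

func (b *exampleBalancer) connect(addrs []resolver.Address) error {
	var sc balancer.SubConn
	sc, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{
		// The listener closes over sc, so the callback knows which SubConn
		// reported the state change without a separate lookup.
		StateListener: func(scs balancer.SubConnState) { b.handleState(sc, scs) },
	})
	if err != nil {
		return err
	}
	b.subConn = sc
	sc.Connect()
	return nil
}

func (b *exampleBalancer) handleState(sc balancer.SubConn, scs balancer.SubConnState) {
	if sc != b.subConn {
		// Update from a stale SubConn; ignore it.
		return
	}
	// React to connectivity changes here, e.g. rebuild and publish the picker.
}

Capturing sc in the listener closure mirrors what the pickfirst diff does with updateSubConnState, so the callback can distinguish the current SubConn from a stale one.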

View File

@@ -37,7 +37,7 @@ type PreparedMsg struct {
}

// Encode marshalls and compresses the message using the codec and compressor for the stream.
-func (p *PreparedMsg) Encode(s Stream, msg interface{}) error {
+func (p *PreparedMsg) Encode(s Stream, msg any) error {
	ctx := s.Context()
	rpcInfo, ok := rpcInfoFromContext(ctx)
	if !ok {

View File

@@ -20,7 +20,7 @@ package resolver
type addressMapEntry struct {
	addr  Address
-	value interface{}
+	value any
}

// AddressMap is a map of addresses to arbitrary values taking into account
@@ -69,7 +69,7 @@ func (l addressMapEntryList) find(addr Address) int {
}

// Get returns the value for the address in the map, if present.
-func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
+func (a *AddressMap) Get(addr Address) (value any, ok bool) {
	addrKey := toMapKey(&addr)
	entryList := a.m[addrKey]
	if entry := entryList.find(addr); entry != -1 {
@@ -79,7 +79,7 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) {
}

// Set updates or adds the value to the address in the map.
-func (a *AddressMap) Set(addr Address, value interface{}) {
+func (a *AddressMap) Set(addr Address, value any) {
	addrKey := toMapKey(&addr)
	entryList := a.m[addrKey]
	if entry := entryList.find(addr); entry != -1 {
@@ -127,8 +127,8 @@ func (a *AddressMap) Keys() []Address {
}

// Values returns a slice of all current map values.
-func (a *AddressMap) Values() []interface{} {
-	ret := make([]interface{}, 0, a.Len())
+func (a *AddressMap) Values() []any {
+	ret := make([]any, 0, a.Len())
	for _, entryList := range a.m {
		for _, entry := range entryList {
			ret = append(ret, entry.value)

View File

@@ -77,25 +77,6 @@ func GetDefaultScheme() string {
	return defaultScheme
}

-// AddressType indicates the address type returned by name resolution.
-//
-// Deprecated: use Attributes in Address instead.
-type AddressType uint8
-
-const (
-	// Backend indicates the address is for a backend server.
-	//
-	// Deprecated: use Attributes in Address instead.
-	Backend AddressType = iota
-	// GRPCLB indicates the address is for a grpclb load balancer.
-	//
-	// Deprecated: to select the GRPCLB load balancing policy, use a service
-	// config with a corresponding loadBalancingConfig. To supply balancer
-	// addresses to the GRPCLB load balancing policy, set State.Attributes
-	// using balancer/grpclb/state.Set.
-	GRPCLB
-)

// Address represents a server the client connects to.
//
// # Experimental
@@ -111,9 +92,6 @@ type Address struct {
	// the address, instead of the hostname from the Dial target string. In most cases,
	// this should not be set.
	//
-	// If Type is GRPCLB, ServerName should be the name of the remote load
-	// balancer, not the name of the backend.
-	//
	// WARNING: ServerName must only be populated with trusted values. It
	// is insecure to populate it with data from untrusted inputs since untrusted
	// values could be used to bypass the authority checks performed by TLS.
@@ -126,18 +104,16 @@ type Address struct {
	// BalancerAttributes contains arbitrary data about this address intended
	// for consumption by the LB policy. These attributes do not affect SubConn
	// creation, connection establishment, handshaking, etc.
-	BalancerAttributes *attributes.Attributes
-
-	// Type is the type of this address.
	//
-	// Deprecated: use Attributes instead.
-	Type AddressType
+	// Deprecated: when an Address is inside an Endpoint, this field should not
+	// be used, and it will eventually be removed entirely.
+	BalancerAttributes *attributes.Attributes

	// Metadata is the information associated with Addr, which may be used
	// to make load balancing decision.
	//
	// Deprecated: use Attributes instead.
-	Metadata interface{}
+	Metadata any
}

// Equal returns whether a and o are identical. Metadata is compared directly,
@@ -150,7 +126,7 @@ func (a Address) Equal(o Address) bool {
	return a.Addr == o.Addr && a.ServerName == o.ServerName &&
		a.Attributes.Equal(o.Attributes) &&
		a.BalancerAttributes.Equal(o.BalancerAttributes) &&
-		a.Type == o.Type && a.Metadata == o.Metadata
+		a.Metadata == o.Metadata
}

// String returns JSON formatted string representation of the address.
@@ -194,11 +170,37 @@ type BuildOptions struct {
	Dialer func(context.Context, string) (net.Conn, error)
}

+// An Endpoint is one network endpoint, or server, which may have multiple
+// addresses with which it can be accessed.
+type Endpoint struct {
+	// Addresses contains a list of addresses used to access this endpoint.
+	Addresses []Address
+
+	// Attributes contains arbitrary data about this endpoint intended for
+	// consumption by the LB policy.
+	Attributes *attributes.Attributes
+}

// State contains the current Resolver state relevant to the ClientConn.
type State struct {
	// Addresses is the latest set of resolved addresses for the target.
+	//
+	// If a resolver sets Addresses but does not set Endpoints, one Endpoint
+	// will be created for each Address before the State is passed to the LB
+	// policy. The BalancerAttributes of each entry in Addresses will be set
+	// in Endpoints.Attributes, and be cleared in the Endpoint's Address's
+	// BalancerAttributes.
+	//
+	// Soon, Addresses will be deprecated and replaced fully by Endpoints.
	Addresses []Address

+	// Endpoints is the latest set of resolved endpoints for the target.
+	//
+	// If a resolver produces a State containing Endpoints but not Addresses,
+	// it must take care to ensure the LB policies it selects will support
+	// Endpoints.
+	Endpoints []Endpoint

	// ServiceConfig contains the result from parsing the latest service
	// config. If it is nil, it indicates no service config is present or the
	// resolver does not provide service configs.
@@ -258,15 +260,6 @@ type ClientConn interface {
// target does not contain a scheme or if the parsed scheme is not registered
// (i.e. no corresponding resolver available to resolve the endpoint), we will
// apply the default scheme, and will attempt to reparse it.
-//
-// Examples:
-//
-//   - "dns://some_authority/foo.bar"
-//     Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
-//   - "foo.bar"
-//     Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}
-//   - "unknown_scheme://authority/endpoint"
-//     Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}
type Target struct {
	// URL contains the parsed dial target with an optional default scheme added
	// to it if the original dial target contained no scheme or contained an
@@ -321,10 +314,3 @@ type Resolver interface {
	// Close closes the resolver.
	Close()
}
-
-// UnregisterForTesting removes the resolver builder with the given scheme from the
-// resolver map.
-// This function is for testing only.
-func UnregisterForTesting(scheme string) {
-	delete(m, scheme)
-}
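The Endpoint type and State.Endpoints field added above let a resolver group several addresses that reach the same server. A minimal sketch of a resolver update using them, assuming the resolver API from this diff (reportEndpoints and the example addresses are hypothetical):

package example

import "google.golang.org/grpc/resolver"

func reportEndpoints(cc resolver.ClientConn) error {
	ep := resolver.Endpoint{
		// One logical server reachable over two addresses (made-up values).
		Addresses: []resolver.Address{
			{Addr: "10.0.0.1:443"},
			{Addr: "[2001:db8::1]:443"},
		},
	}
	// Per the doc comment above, a resolver that sets only Endpoints must make
	// sure the LB policies it selects understand Endpoints.
	return cc.UpdateState(resolver.State{Endpoints: []resolver.Endpoint{ep}})
}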

View File

@@ -133,7 +133,7 @@ func (ccr *ccResolverWrapper) close() {
	ccr.mu.Unlock()

	// Give enqueued callbacks a chance to finish.
-	<-ccr.serializer.Done
+	<-ccr.serializer.Done()

	// Spawn a goroutine to close the resolver (since it may block trying to
	// cleanup all allocated resources) and return early.
@@ -152,6 +152,14 @@ func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context))
// which includes addresses and service config.
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
	errCh := make(chan error, 1)
+	if s.Endpoints == nil {
+		s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
+		for _, a := range s.Addresses {
+			ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
+			ep.Addresses[0].BalancerAttributes = nil
+			s.Endpoints = append(s.Endpoints, ep)
+		}
+	}
	ok := ccr.serializer.Schedule(func(context.Context) {
		ccr.addChannelzTraceEvent(s)
		ccr.curState = s
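The UpdateState hunk above is the compatibility shim for resolvers that still populate only Addresses: each address becomes a single-address Endpoint, with its BalancerAttributes hoisted onto the Endpoint and cleared on the copied Address. The same transformation in isolation, as a rough sketch (toEndpoints is a hypothetical helper, not part of this diff):

package example

import "google.golang.org/grpc/resolver"

// toEndpoints mirrors the shim above: wrap each legacy Address in a
// one-address Endpoint, moving BalancerAttributes up to the Endpoint and
// clearing it on the copied Address.
func toEndpoints(addrs []resolver.Address) []resolver.Endpoint {
	eps := make([]resolver.Endpoint, 0, len(addrs))
	for _, a := range addrs {
		ep := resolver.Endpoint{
			Addresses:  []resolver.Address{a},
			Attributes: a.BalancerAttributes,
		}
		// a was copied into the slice, so this only clears the copy.
		ep.Addresses[0].BalancerAttributes = nil
		eps = append(eps, ep)
	}
	return eps
}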

Some files were not shown because too many files have changed in this diff.