upgrade to latest dependencies (#835)

bumping golang.org/x/tools 0b1f1d4...c5643e9:
  > c5643e9 gopls/internal/server: fix two bugs related to dynamic configuration
  > 50b4f1b gopls/internal/golang: close open file
  > f0ef3c6 gopls: update x/telemetry dependency to fix crash
  > 8cf0a8e gopls: record that v0.15 will be the last to support go1.18
  > 730dc3c gopls/internal/settings: add a hidden option to disable zero config
  > 95f04f4 gopls/internal/golang: add resolve support for inline refactorings
  > 9619683 gopls/internal/cache: treat local replaces as workspace modules
  > a5af84e gopls/internal/cache: check views on any on-disk change to go.mod files
  > a7407fa gopls: update telemetry
  > 314368d go/analysis/passes/deepequalerrors: audit for types.Alias safety
  > f4fa7a7 go.mod: update golang.org/x dependencies
  > 5fcc627 go/analysis/passes/nilness: handle type assertions too
  > e3f1180 gopls/internal/cache: fix crash in analysis
  > 76ef6b6 internal/jsonrpc2: export WireError type
  > acf07b3 gopls/internal/utils/immutable: remove unused Map.Keys
  > c11269c gopls/semantic: elide zero-length tokens
  > 7f80389 go/analysis/passes/stringintconv: audit for types.Alias safety
  > 76795ef gopls/doc: audit for types.Alias safety
  > ab67961 internal/imports: update stdlib index for Go 1.22.0
  > d64ed6a internal/refactor/inline: audit for types.Alias safety
  > 0d87589 go/types/typeutil: audit for types.Alias safety
  > a297bfd go/callgraph/rta: audit for types.Alias safety
  > 08393e0 cmd/deadcode: audit for types.Alias safety
  > 37586e4 internal/apidiff: audit for types.Alias safety
  > 6d4ccf2 gopls/internal/cache: prime goimports cache asynchronously
  > 8b6359d gopls/internal/cache: share goimports state for GOMODCACHE
  > 2bb7f1c gopls/internal/test/integration/bench: improve completion benchmarks
  > f6dc1e9 internal/imports: merge init and newModuleResolver
  > 4f6024e internal/gopathwalk: walk directories concurrently
  > 8fcb5f0 gopls/internal/test/marker: skip on LUCI solaris builders
  > 08bd728 gopls/internal/test/integration: add a test for an inner go.work file
  > aee2e76 gopls/internal/settings: deprecate "allowModfileModifications"
  > ddb71b0 gopls/internal/cache: remove findWorkspaceModFile
  > 365517a gopls/internal/cache: detect and reinit on workspace vendor changes
  > 0be034b internal/aliases: Adds an internal alias package.
  > 8efa10c internal/imports: add a benchmark for the initial mod cache scan
  > df03d7d gopls/internal/golang: add missing check for nil funcBody
  > a1fbc78 gopls/internal/golang: move Go semantic tokens out of server
  > 51e3724 gopls: remove unused parameters
  > d6bd2d7 gopls/internal/cache: add assertions for export data inconsistency
  > 3403ce1 go/internal/gccgoimporter: recognize "any" as a builtin type
  > aecdb2d gopls/internal/cache: support workspace vendoring
  > e80085c gopls/internal/golang: downgrade bug.Reportf for missing builtin
  > 9c43803 gopls/internal/cache: allow command-line-package >1 CompileGoFiles
  > efce0f5 gopls/internal/cache/metadata: assert graph is acyclic
  > c3f60b7 gopls/internal/golang: hover: show embedded fields
  > 85146f5 gopls/internal/lsp/source: highlight returns correctly
  > e211e0f gopls: enable crashmonitor for unexpected crashes
  > d077888 gopls: update third party dependencies
  > 2e15dc7 gopls/internal/test/marker: reject "module testdata"
  > c07f927 cmd/deadcode: enable crash reporting
  > 44aed24 gopls/internal/analysis/undeclaredname: improve fix name
  > afe5265 go/packages: wait for sizes goroutine on all control paths
  > 4250783 refactor/eg: don't use cgo in Test
  > 1efbdde cmd/stringer, go/loader: use testenv.NeedsTool(t, "cgo")
  > 5f90691 go/buildutil: permit comma delimiters in build tags
  > 0c80ba3 internal/imports: remove the unused ProcessEnv.ModFile field
  > c046c5b gopls/internal/test/marker/testdata: move test to correct location
  > da7ed64 gopls/internal/cache/imports: simplify importsState invalidation
  > 7ec8ac0 gopls/internal/golang: hover: show type's methods after doc comment
  > 0e1f5be gopls: update x/telemetry dependency
  > 341c043 gopls/internal/test/integration/bench: fix broken benchmarks
  > b048cf1 cmd/callgraph: unskip TestCallgraph on windows/arm64
  > 14d7f7b gopls/internal/cache: memoize the view filter func
  > 5e16437 gopls/internal/cache: skip TestZeroConfigAlgorithm on wasm
  > dd0f88f gopls/doc/emacs.md: explain how to organize imports automatically
  > 3d49bd5 internal/lsp: publish protocol.UnmarshalJSON
  > 35d8b1b gopls/internal/golang: don't bug.Report repaired ASTs in hover
  > 8174727 gopls/internal/test: await quiescence in TestInconsistentVendoring
  > df2fa1e gopls/doc: s/source/golang/ in the SVG file
  > 238800b gopls: manual tweaks after the lsp/ -> . rename
  > 95c6ac4 gopls/internal/protocol/command: move from ../lsp
  > 6d109d1 gopls/internal/protocol: move out of lsp/
  > f872b3d gopls/internal/cache: move out of lsp/
  > 6823da4 gopls/internal/golang: move from lsp/source
  > 129144e gopls/internal/lsprpc: move out of lsp/
  > eed1997 gopls: use relative watched file patterns if supported
  > fa791ac gopls/internal/progress: simplify API
  > 816dcdf go/packages: add test for flake observed on Android
  > d5e76f1 gopls/internal/progress: move from lsp/
  > 6fe9326 gopls/internal/telemetry/cmd/stacks: include client name in report
  > 6a58b98 gopls/internal/lsp/lsprpc: radically reduce API
  > 0ffb1d0 gopls/doc: add Helix editor documentation for gopls
  > c16e222 gopls/internal/test/integration: add regtest for hover crash
  > bd547e5 gopls/internal/lsp/cache: clean up public API
  > 4c53267 gopls/internal/lsp/cache: don't report a bug for standalone test files
  > a49867f internal/imports: don't add imports that shadow predeclared names
  > e2ca594 gopls/internal/lsp/source: address problems detected by staticcheck
  > 94d99e3 gopls/internal/test/marker: re-enable some tests
  > 060c748 gopls: consolidate analyzers, eliminating "convenience" analyzers
  > 7f6ec23 go/ssa: suppress go/packages-based test on android
  > a987ef7 go/packages: publish Driver{Request,Response}
  > e1555a3 go/ssa: add Function.DomPostorder
  > cd7b510 gopls/internal/lsp/cache: allow versions of form "go1.2.3"
  > d0930db gopls/internal/lsp/cache: fix bug reports from toGobDiagnostics
  > 1871a2e gopls/internal/lsp/source: move Go CodeActions logic from server
  > afcd676 gopls/internal/lsp/source: show promoted methods in hover
  > 525acd1 go/analysis/internal/analysisflags: add RelatedInformation JSON
  > c7ccb51 gopls/doc/design: rewrite the architecture overview
  > 39a2545 gopls/internal/version: amend the year in a recent copyright header
  > 186fcf3 gopls/internal/lsp/source: factor edit conversion utils
  > 83c6c25 gopls/internal/server: analyze widest packages for open files
  > c467be3 gopls: add a main.version variable to override the version at linktime
  > 6673e7a gopls/internal/lsp/source/completion: infer parameter types
  > 3458e0c gopls/internal/lsp/source: fix Fix titles
  > 72a36a7 gopls/internal/test/marker: fix test breakage due to semantic conflict
  > 470afda gopls/internal/analysis/unusedparams: eliminate false positives
  > 592d9e1 internal/lsp: convert refactor code actions to use codeAction/resolve
  > f2d3f78 gopls/internal/lsp/protocol: fix TestURIFromPath on windows
  > d2200d1 gopls/internal/test/marker: update skip comment for addgowork.txt
  > c0db45f gopls/internal/server: simplify DiagnoseFiles to avoid a race
  > 0d1b478 gopls/internal/test/integration: fix flakiness of TestResolveImportCycle
  > d517112 gopls/internal/lsp/protocol: deliver log messages in order
  > 9164f2a gopls/internal/test/marker: skip on solaris-amd64-oraclerel
  > dbc9d3e internal/analysisinternal: TypeExpr: don't forget Variadic
  > 54cf5bc internal/debug: show new View information in the debug page
  > b37fde9 gopls: simplify gopls command configuration
  > 6a0605d gopls/internal/test/marker: regenerate golden data
  > f572b7e gopls/internal/lsp/source/stub: trim version suffix from module path
bumping golang.org/x/mod 6e58e47...fa1ba42:
  > fa1ba42 sumdb: replace globsMatchPath with module.MatchPrefixPatterns
bumping google.golang.org/grpc 8167bc3...c6e7f04:
  > c6e7f04 Change version to 1.61.1 (#6981)
  > dbd4cbc cherry-pick #6977 to 1.61.x release branch (#6980)
  > 57ed608 Change version to 1.61.1-dev (#6937)
bumping google.golang.org/genproto/googleapis/api 6c6643b...31a09d3:
  > 31a09d3 chore(all): update all (#1085)
  > 1f4bbc5 chore(all): auto-regenerate .pb.go files (#1083)
  > ef43131 chore(all): auto-regenerate .pb.go files (#1082)
bumping github.com/prometheus/common 7e44242...773d566:
  > 773d566 Merge pull request #570 from ywwg/owilliams/quoted-metric-name-02
  > a3bdb9e Support empty OAuth2 inline secrets (#547)
  > 319c62c UTF-8 support in metric and label names
  > bd0376d UTF-8 support in validation, and some parsers and formatters (#537)
bumping knative.dev/pkg 2d2e27d...9227ebb:
  > 9227ebb Bump google.golang.org/api from 0.163.0 to 0.165.0 (#2960)
  > fecb381 Bump google.golang.org/grpc from 1.61.0 to 1.61.1 (#2963)
  > d978608 Bump github.com/prometheus/common from 0.46.0 to 0.47.0 (#2962)
  > 0d88449 Bump golang.org/x/tools from 0.17.0 to 0.18.0 (#2961)
  > f094d82 Bump cloud.google.com/go/storage from 1.37.0 to 1.38.0 (#2959)
  > b8f9b22 Update community files (#2957)
  > bc60487 Update community files (#2956)
  > 405f0c4 Update community files (#2955)
bumping google.golang.org/genproto a9fa171...1f4bbc5:
  > 1f4bbc5 chore(all): auto-regenerate .pb.go files (#1083)
  > ef43131 chore(all): auto-regenerate .pb.go files (#1082)
  > 6c6643b chore(all): update all (#1078)
bumping google.golang.org/api 87aa1d0...e451477:
  > e451477 chore(main): release 0.165.0 (#2415)
  > deab77d feat(all): auto-regenerate discovery clients (#2416)
  > c702880 feat(all): auto-regenerate discovery clients (#2414)
  > 3ff4bc1 chore(main): release 0.164.0 (#2407)
  > edbe996 fix(transport): disable universe domain check if token source (#2413)
  > 05a0882 chore(all): update all (#2410)
  > 51d5068 feat(all): auto-regenerate discovery clients (#2412)
  > 246b19f feat(all): auto-regenerate discovery clients (#2409)
  > f1b37df feat(all): auto-regenerate discovery clients (#2408)
  > 1bd8304 feat(all): auto-regenerate discovery clients (#2406)
bumping google.golang.org/genproto/googleapis/rpc 1f4bbc5...31a09d3:
  > 31a09d3 chore(all): update all (#1085)
bumping knative.dev/hack f3881d9...999d7e6:
  > 999d7e6 Update community files (#367)
  > 6090613 Update community files (#366)
  > d1067f2 Update community files (#365)

Signed-off-by: Knative Automation <automation@knative.team>
commit 894aec1dd9 (parent 9f70352974)
20 changed files with 1360 additions and 443 deletions
go.mod

@ -11,8 +11,8 @@ require (
k8s.io/client-go v0.28.5
k8s.io/code-generator v0.28.5
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9
knative.dev/hack v0.0.0-20240201013652-f3881d90c189
knative.dev/pkg v0.0.0-20240212080204-2d2e27d30a44
knative.dev/hack v0.0.0-20240214131420-999d7e6b8495
knative.dev/pkg v0.0.0-20240219120257-9227ebb57a4e
)
require (
@ -50,13 +50,13 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.18.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.46.0 // indirect
github.com/prometheus/common v0.47.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/statsd_exporter v0.22.7 // indirect
github.com/spf13/pflag v1.0.5 // indirect
go.opencensus.io v0.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/mod v0.14.0 // indirect
golang.org/x/mod v0.15.0 // indirect
golang.org/x/net v0.21.0 // indirect
golang.org/x/oauth2 v0.17.0 // indirect
golang.org/x/sync v0.6.0 // indirect
@ -64,14 +64,14 @@ require (
golang.org/x/term v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.17.0 // indirect
golang.org/x/tools v0.18.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/api v0.163.0 // indirect
google.golang.org/api v0.165.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe // indirect
google.golang.org/grpc v1.61.0 // indirect
google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 // indirect
google.golang.org/grpc v1.61.1 // indirect
google.golang.org/protobuf v1.32.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect

go.sum

@ -256,8 +256,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k=
github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
@ -348,8 +348,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -518,8 +518,8 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -543,8 +543,8 @@ google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.163.0 h1:4BBDpPaSH+H28NhnX+WwjXxbRLQ7TWuEKp4BQyEjxvk=
google.golang.org/api v0.163.0/go.mod h1:6SulDkfoBIg4NFmCuZ39XeeAgSHCPecfSUuDyYlAHs0=
google.golang.org/api v0.165.0 h1:zd5d4JIIIaYYsfVy1HzoXYZ9rWCSBxxAglbczzo7Bgc=
google.golang.org/api v0.165.0/go.mod h1:2OatzO7ZDQsoS7IFf3rvsE17/TldiU3F/zxFHeqUB5o=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -584,12 +584,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg=
google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k=
google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 h1:KHBtwE+eQc3+NxpjmRFlQ3pJQ2FNnhhgB9xOV8kyBuU=
google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe h1:bQnxqljG/wqi4NTXu2+DJ3n7APcEA882QZ1JvhQAq9o=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe h1:USL2DhxfgRchafRvt/wYyyQNzwgL7ZiURcozOE/Pkvo=
google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A=
google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 h1:FSL3lRCkhaPFxqi0s9o+V4UI2WTzAVOvkgbd4kVV4Wg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -603,8 +603,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0=
google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY=
google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -666,10 +666,10 @@ k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5Ohx
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
knative.dev/hack v0.0.0-20240201013652-f3881d90c189 h1:a8htyuf5+S0NGxxdKXeQ49XOD9dEC1LHoofRQPgFKrU=
knative.dev/hack v0.0.0-20240201013652-f3881d90c189/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
knative.dev/pkg v0.0.0-20240212080204-2d2e27d30a44 h1:z+zDEtrKQNjcsOMjnQR83amedL/ugUrD046+GBqypvs=
knative.dev/pkg v0.0.0-20240212080204-2d2e27d30a44/go.mod h1:jmjP5/Vy9LamN82J3DfoJpVAw5JHpNxjUF0f/QCDX6E=
knative.dev/hack v0.0.0-20240214131420-999d7e6b8495 h1:Eh+3WsTecxutSwtpzU4Py1dNCcToxgqRDLSLjBKfdEE=
knative.dev/hack v0.0.0-20240214131420-999d7e6b8495/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
knative.dev/pkg v0.0.0-20240219120257-9227ebb57a4e h1:TD0WhK+DDFqcDrxukIUZWYwy1140eQzCYNinHZZZhHs=
knative.dev/pkg v0.0.0-20240219120257-9227ebb57a4e/go.mod h1:nCYpiIXemsTvpmuVNfJEAXcSXTGSduE2uwU0HH4BvUI=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=


@ -73,8 +73,8 @@ func ResponseFormat(h http.Header) Format {
// NewDecoder returns a new decoder based on the given input format.
// If the input format does not imply otherwise, a text format decoder is returned.
func NewDecoder(r io.Reader, format Format) Decoder {
switch format {
case FmtProtoDelim:
switch format.FormatType() {
case TypeProtoDelim:
return &protoDecoder{r: r}
}
return &textDecoder{r: r}

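A minimal sketch (not part of this diff) of driving the decoder above: the scrape response and the package/function names here are assumptions for illustration, but ResponseFormat, NewDecoder, and Decoder.Decode are the package's existing API, with NewDecoder now dispatching on format.FormatType().

package scrape

import (
    "io"
    "net/http"

    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/expfmt"
)

// decodeFamilies reads every MetricFamily from a scrape response body.
func decodeFamilies(resp *http.Response) ([]*dto.MetricFamily, error) {
    format := expfmt.ResponseFormat(resp.Header) // e.g. FmtText or FmtProtoDelim
    dec := expfmt.NewDecoder(resp.Body, format)  // dispatches on format.FormatType()
    var families []*dto.MetricFamily
    for {
        mf := &dto.MetricFamily{}
        if err := dec.Decode(mf); err != nil {
            if err == io.EOF {
                return families, nil
            }
            return families, err
        }
        families = append(families, mf)
    }
}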

@ -22,6 +22,7 @@ import (
"google.golang.org/protobuf/encoding/prototext"
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
@ -61,23 +62,32 @@ func (ec encoderCloser) Close() error {
// as the support is still experimental. To include the option to negotiate
// FmtOpenMetrics, use NegotiateOpenMetrics.
func Negotiate(h http.Header) Format {
escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
switch Format(escapeParam) {
case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
default:
// If the escaping parameter is unknown, ignore it.
}
}
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
return FmtProtoDelim
return FmtProtoDelim + escapingScheme
case "text":
return FmtProtoText
return FmtProtoText + escapingScheme
case "compact-text":
return FmtProtoCompact
return FmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
return FmtText
return FmtText + escapingScheme
}
}
return FmtText
return FmtText + escapingScheme
}
// NegotiateIncludingOpenMetrics works like Negotiate but includes
@ -85,29 +95,40 @@ func Negotiate(h http.Header) Format {
// temporary and will disappear once FmtOpenMetrics is fully supported and as
// such may be negotiated by the normal Negotiate function.
func NegotiateIncludingOpenMetrics(h http.Header) Format {
escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
switch Format(escapeParam) {
case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
default:
// If the escaping parameter is unknown, ignore it.
}
}
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
return FmtProtoDelim
return FmtProtoDelim + escapingScheme
case "text":
return FmtProtoText
return FmtProtoText + escapingScheme
case "compact-text":
return FmtProtoCompact
return FmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
return FmtText
return FmtText + escapingScheme
}
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
if ver == OpenMetricsVersion_1_0_0 {
return FmtOpenMetrics_1_0_0
switch ver {
case OpenMetricsVersion_1_0_0:
return FmtOpenMetrics_1_0_0 + escapingScheme
default:
return FmtOpenMetrics_0_0_1 + escapingScheme
}
return FmtOpenMetrics_0_0_1
}
}
return FmtText
return FmtText + escapingScheme
}
// NewEncoder returns a new encoder based on content type negotiation. All
@ -116,9 +137,13 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
// for FmtOpenMetrics, but a future (breaking) release will add the Close method
// to the Encoder interface directly. The current version of the Encoder
// interface is kept for backwards compatibility.
// In cases where the Format does not allow for UTF-8 names, the global
// NameEscapingScheme will be applied.
func NewEncoder(w io.Writer, format Format) Encoder {
switch format {
case FmtProtoDelim:
escapingScheme := format.ToEscapingScheme()
switch format.FormatType() {
case TypeProtoDelim:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := protodelim.MarshalTo(w, v)
@ -126,34 +151,34 @@ func NewEncoder(w io.Writer, format Format) Encoder {
},
close: func() error { return nil },
}
case FmtProtoCompact:
case TypeProtoCompact:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, v.String())
_, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String())
return err
},
close: func() error { return nil },
}
case FmtProtoText:
case TypeProtoText:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, prototext.Format(v))
_, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme)))
return err
},
close: func() error { return nil },
}
case FmtText:
case TypeTextPlain:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToText(w, v)
_, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme))
return err
},
close: func() error { return nil },
}
case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0:
case TypeOpenMetrics:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToOpenMetrics(w, v)
_, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme))
return err
},
close: func() error {

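A hedged sketch of the negotiation path above (not from the diff): the Format returned by NegotiateIncludingOpenMetrics now carries an escaping term taken from the Accept header, falling back to the global model.NameEscapingScheme, and NewEncoder applies it when writing; the handler shape and names are assumptions.

package scrape

import (
    "io"
    "net/http"

    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/expfmt"
)

// writeNegotiated encodes one MetricFamily in the client's preferred format,
// e.g. Accept: application/openmetrics-text; version=1.0.0; escaping=underscores.
func writeNegotiated(w io.Writer, req *http.Request, mf *dto.MetricFamily) error {
    format := expfmt.NegotiateIncludingOpenMetrics(req.Header)
    enc := expfmt.NewEncoder(w, format) // applies format.ToEscapingScheme() to names
    // For OpenMetrics output, the returned encoder also has a Close method
    // (reachable via type assertion) that writes the trailing # EOF line.
    return enc.Encode(mf)
}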

@ -14,10 +14,22 @@
// Package expfmt contains tools for reading and writing Prometheus metrics.
package expfmt
import (
"strings"
"github.com/prometheus/common/model"
)
// Format specifies the HTTP content type of the different wire protocols.
type Format string
// Constants to assemble the Content-Type values for the different wire protocols.
// Constants to assemble the Content-Type values for the different wire
// protocols. The Content-Type strings here are all for the legacy exposition
// formats, where valid characters for metric names and label names are limited.
// Support for arbitrary UTF-8 characters in those names is already partially
// implemented in this module (see model.ValidationScheme), but to actually use
// it on the wire, new content-type strings will have to be agreed upon and
// added here.
const (
TextVersion = "0.0.4"
ProtoType = `application/vnd.google.protobuf`
@ -27,7 +39,8 @@ const (
OpenMetricsVersion_0_0_1 = "0.0.1"
OpenMetricsVersion_1_0_0 = "1.0.0"
// The Content-Type values for the different wire protocols.
// The Content-Type values for the different wire protocols. Do not do direct
// comparisons to these constants, instead use the comparison functions.
FmtUnknown Format = `<unknown>`
FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
@ -41,3 +54,93 @@ const (
hdrContentType = "Content-Type"
hdrAccept = "Accept"
)
// FormatType is a Go enum representing the overall category for the given
// Format. As the number of Format permutations increases, doing basic string
// comparisons are not feasible, so this enum captures the most useful
// high-level attribute of the Format string.
type FormatType int
const (
TypeUnknown = iota
TypeProtoCompact
TypeProtoDelim
TypeProtoText
TypeTextPlain
TypeOpenMetrics
)
// FormatType deduces an overall FormatType for the given format.
func (f Format) FormatType() FormatType {
toks := strings.Split(string(f), ";")
if len(toks) < 2 {
return TypeUnknown
}
params := make(map[string]string)
for i, t := range toks {
if i == 0 {
continue
}
args := strings.Split(t, "=")
if len(args) != 2 {
continue
}
params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1])
}
switch strings.TrimSpace(toks[0]) {
case ProtoType:
if params["proto"] != ProtoProtocol {
return TypeUnknown
}
switch params["encoding"] {
case "delimited":
return TypeProtoDelim
case "text":
return TypeProtoText
case "compact-text":
return TypeProtoCompact
default:
return TypeUnknown
}
case OpenMetricsType:
if params["charset"] != "utf-8" {
return TypeUnknown
}
return TypeOpenMetrics
case "text/plain":
v, ok := params["version"]
if !ok {
return TypeTextPlain
}
if v == TextVersion {
return TypeTextPlain
}
return TypeUnknown
default:
return TypeUnknown
}
}
// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the
// Format contains an escaping=allow-utf-8 term, it will select NoEscaping. If a valid
// "escaping" term exists, that will be used. Otherwise, the global default will
// be returned.
func (format Format) ToEscapingScheme() model.EscapingScheme {
for _, p := range strings.Split(string(format), ";") {
toks := strings.Split(p, "=")
if len(toks) != 2 {
continue
}
key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
if key == model.EscapingKey {
scheme, err := model.ToEscapingScheme(value)
if err != nil {
return model.NameEscapingScheme
}
return scheme
}
}
return model.NameEscapingScheme
}

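Illustrative values for the two helpers added above (a sketch, not from the diff); the Content-Type string is FmtOpenMetrics_1_0_0 with an explicit escaping term appended.

package scrape

import (
    "fmt"

    "github.com/prometheus/common/expfmt"
)

func classifyFormat() {
    f := expfmt.Format("application/openmetrics-text; version=1.0.0; charset=utf-8; escaping=allow-utf-8")
    fmt.Println(f.FormatType() == expfmt.TypeOpenMetrics) // true
    fmt.Println(f.ToEscapingScheme())                      // "allow-utf-8", i.e. model.NoEscaping
}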

@ -35,6 +35,18 @@ import (
// sanity checks. If the input contains duplicate metrics or invalid metric or
// label names, the conversion will result in invalid text format output.
//
// If metric names conform to the legacy validation pattern, they will be placed
// outside the brackets in the traditional way, like `foo{}`. If the metric name
// fails the legacy validation check, it will be placed quoted inside the
// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
// no error will be thrown in this case.
//
// Similar to metric names, if label names conform to the legacy validation
// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
// name fails the legacy validation check, it will be quoted:
// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and
// no error will be thrown in this case.
//
// This function fulfills the type 'expfmt.encoder'.
//
// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
@ -98,7 +110,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
if err != nil {
return
}
n, err = w.WriteString(shortName)
n, err = writeName(w, shortName)
written += n
if err != nil {
return
@ -124,7 +136,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
if err != nil {
return
}
n, err = w.WriteString(shortName)
n, err = writeName(w, shortName)
written += n
if err != nil {
return
@ -303,21 +315,9 @@ func writeOpenMetricsSample(
floatValue float64, intValue uint64, useIntValue bool,
exemplar *dto.Exemplar,
) (int, error) {
var written int
n, err := w.WriteString(name)
written += n
if err != nil {
return written, err
}
if suffix != "" {
n, err = w.WriteString(suffix)
written += n
if err != nil {
return written, err
}
}
n, err = writeOpenMetricsLabelPairs(
w, metric.Label, additionalLabelName, additionalLabelValue,
written := 0
n, err := writeOpenMetricsNameAndLabelPairs(
w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
@ -365,27 +365,58 @@ func writeOpenMetricsSample(
return written, nil
}
// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float
// in OpenMetrics style.
func writeOpenMetricsLabelPairs(
// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but
// formats the float in OpenMetrics style.
func writeOpenMetricsNameAndLabelPairs(
w enhancedWriter,
name string,
in []*dto.LabelPair,
additionalLabelName string, additionalLabelValue float64,
) (int, error) {
if len(in) == 0 && additionalLabelName == "" {
return 0, nil
}
var (
written int
separator byte = '{'
written int
separator byte = '{'
metricInsideBraces = false
)
if name != "" {
// If the name does not pass the legacy validity check, we must put the
// metric name inside the braces, quoted.
if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
metricInsideBraces = true
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
separator = ','
}
n, err := writeName(w, name)
written += n
if err != nil {
return written, err
}
}
if len(in) == 0 && additionalLabelName == "" {
if metricInsideBraces {
err := w.WriteByte('}')
written++
if err != nil {
return written, err
}
}
return written, nil
}
for _, lp := range in {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
n, err := w.WriteString(lp.GetName())
n, err := writeName(w, lp.GetName())
written += n
if err != nil {
return written, err
@ -451,7 +482,7 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
if err != nil {
return written, err
}
n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0)
written += n
if err != nil {
return written, err


@ -62,6 +62,18 @@ var (
// contains duplicate metrics or invalid metric or label names, the conversion
// will result in invalid text format output.
//
// If metric names conform to the legacy validation pattern, they will be placed
// outside the brackets in the traditional way, like `foo{}`. If the metric name
// fails the legacy validation check, it will be placed quoted inside the
// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
// no error will be thrown in this case.
//
// Similar to metric names, if label names conform to the legacy validation
// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
// name fails the legacy validation check, it will be quoted:
// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and
// no error will be thrown in this case.
//
// This method fulfills the type 'prometheus.encoder'.
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
// Fail-fast checks.
@ -98,7 +110,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
if err != nil {
return
}
n, err = w.WriteString(name)
n, err = writeName(w, name)
written += n
if err != nil {
return
@ -124,7 +136,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
if err != nil {
return
}
n, err = w.WriteString(name)
n, err = writeName(w, name)
written += n
if err != nil {
return
@ -280,21 +292,9 @@ func writeSample(
additionalLabelName string, additionalLabelValue float64,
value float64,
) (int, error) {
var written int
n, err := w.WriteString(name)
written += n
if err != nil {
return written, err
}
if suffix != "" {
n, err = w.WriteString(suffix)
written += n
if err != nil {
return written, err
}
}
n, err = writeLabelPairs(
w, metric.Label, additionalLabelName, additionalLabelValue,
written := 0
n, err := writeNameAndLabelPairs(
w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
@ -330,32 +330,64 @@ func writeSample(
return written, nil
}
// writeLabelPairs converts a slice of LabelPair proto messages plus the
// explicitly given additional label pair into text formatted as required by the
// text format and writes it to 'w'. An empty slice in combination with an empty
// string 'additionalLabelName' results in nothing being written. Otherwise, the
// label pairs are written, escaped as required by the text format, and enclosed
// in '{...}'. The function returns the number of bytes written and any error
// encountered.
func writeLabelPairs(
// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the
// explicitly given metric name and additional label pair into text formatted as
// required by the text format and writes it to 'w'. An empty slice in
// combination with an empty string 'additionalLabelName' results in nothing
// being written. Otherwise, the label pairs are written, escaped as required by
// the text format, and enclosed in '{...}'. The function returns the number of
// bytes written and any error encountered. If the metric name is not
// legacy-valid, it will be put inside the brackets as well. Legacy-invalid
// label names will also be quoted.
func writeNameAndLabelPairs(
w enhancedWriter,
name string,
in []*dto.LabelPair,
additionalLabelName string, additionalLabelValue float64,
) (int, error) {
if len(in) == 0 && additionalLabelName == "" {
return 0, nil
}
var (
written int
separator byte = '{'
written int
separator byte = '{'
metricInsideBraces = false
)
if name != "" {
// If the name does not pass the legacy validity check, we must put the
// metric name inside the braces.
if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
metricInsideBraces = true
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
separator = ','
}
n, err := writeName(w, name)
written += n
if err != nil {
return written, err
}
}
if len(in) == 0 && additionalLabelName == "" {
if metricInsideBraces {
err := w.WriteByte('}')
written++
if err != nil {
return written, err
}
}
return written, nil
}
for _, lp := range in {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
n, err := w.WriteString(lp.GetName())
n, err := writeName(w, lp.GetName())
written += n
if err != nil {
return written, err
@ -462,3 +494,27 @@ func writeInt(w enhancedWriter, i int64) (int, error) {
numBufPool.Put(bp)
return written, err
}
// writeName writes a string as-is if it complies with the legacy naming
// scheme, or escapes it in double quotes if not.
func writeName(w enhancedWriter, name string) (int, error) {
if model.IsValidLegacyMetricName(model.LabelValue(name)) {
return w.WriteString(name)
}
var written int
var err error
err = w.WriteByte('"')
written++
if err != nil {
return written, err
}
var n int
n, err = writeEscapedString(w, name, true)
written += n
if err != nil {
return written, err
}
err = w.WriteByte('"')
written++
return written, err
}

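A sketch of the quoted-name output described in the doc comments above; the OpenMetrics writer earlier in this diff follows the same convention. The metric family below is hypothetical and assumed to be otherwise valid input.

package scrape

import (
    "bytes"
    "fmt"

    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/expfmt"
    "google.golang.org/protobuf/proto"
)

func printQuotedNames() {
    mf := &dto.MetricFamily{
        Name: proto.String("http.requests"), // not legacy-valid: contains a dot
        Type: dto.MetricType_UNTYPED.Enum(),
        Metric: []*dto.Metric{{
            Label:   []*dto.LabelPair{{Name: proto.String("status.code"), Value: proto.String("200")}},
            Untyped: &dto.Untyped{Value: proto.Float64(42)},
        }},
    }
    var buf bytes.Buffer
    if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil {
        panic(err)
    }
    fmt.Print(buf.String())
    // The sample line puts both quoted names inside the braces, roughly:
    //   {"http.requests","status.code"="200"} 42
}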

@ -97,17 +97,25 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
// therewith.
type LabelName string
// IsValid is true iff the label name matches the pattern of LabelNameRE. This
// method, however, does not use LabelNameRE for the check but a much faster
// hardcoded implementation.
// IsValid returns true iff name matches the pattern of LabelNameRE for legacy
// names, and iff it's valid UTF-8 if NameValidationScheme is set to
// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the
// check but a much faster hardcoded implementation.
func (ln LabelName) IsValid() bool {
if len(ln) == 0 {
return false
}
for i, b := range ln {
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
return false
switch NameValidationScheme {
case LegacyValidation:
for i, b := range ln {
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
return false
}
}
case UTF8Validation:
return utf8.ValidString(string(ln))
default:
panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
}
return true
}
@ -164,7 +172,7 @@ func (l LabelNames) String() string {
// A LabelValue is an associated value for a LabelName.
type LabelValue string
// IsValid returns true iff the string is a valid UTF8.
// IsValid returns true iff the string is a valid UTF-8.
func (lv LabelValue) IsValid() bool {
return utf8.ValidString(string(lv))
}

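A short sketch of the new validation switch (illustrative, not part of the diff): UTF-8 names are opted into once, process-wide, before any goroutines start; the package and function names are placeholders.

package scrape

import (
    "fmt"

    "github.com/prometheus/common/model"
)

func init() {
    // Every other component that sees these names must also understand UTF-8.
    model.NameValidationScheme = model.UTF8Validation
}

func checkNames() {
    fmt.Println(model.LabelName("status.code").IsValid())       // true under UTF8Validation
    fmt.Println(model.IsValidMetricName("http.requests"))       // true under UTF8Validation
    fmt.Println(model.IsValidLegacyMetricName("http.requests")) // false: '.' is never legacy-valid
}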

@ -18,6 +18,77 @@ import (
"regexp"
"sort"
"strings"
"unicode/utf8"
dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/proto"
)
var (
// NameValidationScheme determines the method of name validation to be used by
// all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode
// in isolation from other components that don't support UTF-8 may result in
// bugs or other undefined behavior. This value is intended to be set by
// UTF-8-aware binaries as part of their startup. To avoid need for locking,
// this value should be set once, ideally in an init(), before multiple
// goroutines are started.
NameValidationScheme = LegacyValidation
// NameEscapingScheme defines the default way that names will be
// escaped when presented to systems that do not support UTF-8 names. If the
// Content-Type "escaping" term is specified, that will override this value.
NameEscapingScheme = ValueEncodingEscaping
)
// ValidationScheme is a Go enum for determining how metric and label names will
// be validated by this library.
type ValidationScheme int
const (
// LegacyValidation is a setting that requires that metric and label names
// conform to the original Prometheus character requirements described by
// MetricNameRE and LabelNameRE.
LegacyValidation ValidationScheme = iota
// UTF8Validation only requires that metric and label names be valid UTF-8
// strings.
UTF8Validation
)
type EscapingScheme int
const (
// NoEscaping indicates that a name will not be escaped. Unescaped names that
// do not conform to the legacy validity check will use a new exposition
// format syntax that will be officially standardized in future versions.
NoEscaping EscapingScheme = iota
// UnderscoreEscaping replaces all legacy-invalid characters with underscores.
UnderscoreEscaping
// DotsEscaping is similar to UnderscoreEscaping, except that dots are
// converted to `_dot_` and pre-existing underscores are converted to `__`.
DotsEscaping
// ValueEncodingEscaping prepends the name with `U__` and replaces all invalid
// characters with the unicode value, surrounded by underscores. Single
// underscores are replaced with double underscores.
ValueEncodingEscaping
)
const (
// EscapingKey is the key in an Accept or Content-Type header that defines how
// metric and label names that do not conform to the legacy character
// requirements should be escaped when being scraped by a legacy prometheus
// system. If a system does not explicitly pass an escaping parameter in the
// Accept header, the default NameEscapingScheme will be used.
EscapingKey = "escaping"
// Possible values for Escaping Key:
AllowUTF8 = "allow-utf-8" // No escaping required.
EscapeUnderscores = "underscores"
EscapeDots = "dots"
EscapeValues = "values"
)
// MetricNameRE is a regular expression matching valid metric
@ -84,17 +155,302 @@ func (m Metric) FastFingerprint() Fingerprint {
return LabelSet(m).FastFingerprint()
}
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE
// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is
// selected.
func IsValidMetricName(n LabelValue) bool {
switch NameValidationScheme {
case LegacyValidation:
return IsValidLegacyMetricName(n)
case UTF8Validation:
if len(n) == 0 {
return false
}
return utf8.ValidString(string(n))
default:
panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
}
}
// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the
// legacy validation scheme regardless of the value of NameValidationScheme.
// This function, however, does not use MetricNameRE for the check but a much
// faster hardcoded implementation.
func IsValidMetricName(n LabelValue) bool {
func IsValidLegacyMetricName(n LabelValue) bool {
if len(n) == 0 {
return false
}
for i, b := range n {
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
if !isValidLegacyRune(b, i) {
return false
}
}
return true
}
// EscapeMetricFamily escapes the given metric names and labels with the given
// escaping scheme. Returns a new object that uses the same pointers to fields
// when possible and creates new escaped versions so as not to mutate the
// input.
func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily {
if v == nil {
return nil
}
if scheme == NoEscaping {
return v
}
out := &dto.MetricFamily{
Help: v.Help,
Type: v.Type,
}
// If the name is nil, copy as-is, don't try to escape.
if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) {
out.Name = v.Name
} else {
out.Name = proto.String(EscapeName(v.GetName(), scheme))
}
for _, m := range v.Metric {
if !metricNeedsEscaping(m) {
out.Metric = append(out.Metric, m)
continue
}
escaped := &dto.Metric{
Gauge: m.Gauge,
Counter: m.Counter,
Summary: m.Summary,
Untyped: m.Untyped,
Histogram: m.Histogram,
TimestampMs: m.TimestampMs,
}
for _, l := range m.Label {
if l.GetName() == MetricNameLabel {
if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) {
escaped.Label = append(escaped.Label, l)
continue
}
escaped.Label = append(escaped.Label, &dto.LabelPair{
Name: proto.String(MetricNameLabel),
Value: proto.String(EscapeName(l.GetValue(), scheme)),
})
continue
}
if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) {
escaped.Label = append(escaped.Label, l)
continue
}
escaped.Label = append(escaped.Label, &dto.LabelPair{
Name: proto.String(EscapeName(l.GetName(), scheme)),
Value: l.Value,
})
}
out.Metric = append(out.Metric, escaped)
}
return out
}
func metricNeedsEscaping(m *dto.Metric) bool {
for _, l := range m.Label {
if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) {
return true
}
if !IsValidLegacyMetricName(LabelValue(l.GetName())) {
return true
}
}
return false
}
const (
lowerhex = "0123456789abcdef"
)
// EscapeName escapes the incoming name according to the provided escaping
// scheme. Depending on the rules of escaping, this may cause no change in the
// string that is returned. (Especially NoEscaping, which by definition is a
// noop). This function does not do any validation of the name.
func EscapeName(name string, scheme EscapingScheme) string {
if len(name) == 0 {
return name
}
var escaped strings.Builder
switch scheme {
case NoEscaping:
return name
case UnderscoreEscaping:
if IsValidLegacyMetricName(LabelValue(name)) {
return name
}
for i, b := range name {
if isValidLegacyRune(b, i) {
escaped.WriteRune(b)
} else {
escaped.WriteRune('_')
}
}
return escaped.String()
case DotsEscaping:
// Do not early return for legacy valid names, we still escape underscores.
for i, b := range name {
if b == '_' {
escaped.WriteString("__")
} else if b == '.' {
escaped.WriteString("_dot_")
} else if isValidLegacyRune(b, i) {
escaped.WriteRune(b)
} else {
escaped.WriteRune('_')
}
}
return escaped.String()
case ValueEncodingEscaping:
if IsValidLegacyMetricName(LabelValue(name)) {
return name
}
escaped.WriteString("U__")
for i, b := range name {
if isValidLegacyRune(b, i) {
escaped.WriteRune(b)
} else if !utf8.ValidRune(b) {
escaped.WriteString("_FFFD_")
} else if b < 0x100 {
escaped.WriteRune('_')
for s := 4; s >= 0; s -= 4 {
escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
}
escaped.WriteRune('_')
} else if b < 0x10000 {
escaped.WriteRune('_')
for s := 12; s >= 0; s -= 4 {
escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
}
escaped.WriteRune('_')
}
}
return escaped.String()
default:
panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
}
}
// lower function taken from strconv.atoi
func lower(c byte) byte {
return c | ('x' - 'X')
}
// UnescapeName unescapes the incoming name according to the provided escaping
// scheme if possible. Some schemes are partially or totally non-roundtripable.
// If any error is encountered, returns the original input.
func UnescapeName(name string, scheme EscapingScheme) string {
if len(name) == 0 {
return name
}
switch scheme {
case NoEscaping:
return name
case UnderscoreEscaping:
// It is not possible to unescape from underscore replacement.
return name
case DotsEscaping:
name = strings.ReplaceAll(name, "_dot_", ".")
name = strings.ReplaceAll(name, "__", "_")
return name
case ValueEncodingEscaping:
escapedName, found := strings.CutPrefix(name, "U__")
if !found {
return name
}
var unescaped strings.Builder
TOP:
for i := 0; i < len(escapedName); i++ {
// All non-underscores are treated normally.
if escapedName[i] != '_' {
unescaped.WriteByte(escapedName[i])
continue
}
i++
if i >= len(escapedName) {
return name
}
// A double underscore is a single underscore.
if escapedName[i] == '_' {
unescaped.WriteByte('_')
continue
}
// We think we are in a UTF-8 code, process it.
var utf8Val uint
for j := 0; i < len(escapedName); j++ {
// This is too many characters for a utf8 value.
if j > 4 {
return name
}
// Found a closing underscore, convert to a rune, check validity, and append.
if escapedName[i] == '_' {
utf8Rune := rune(utf8Val)
if !utf8.ValidRune(utf8Rune) {
return name
}
unescaped.WriteRune(utf8Rune)
continue TOP
}
r := lower(escapedName[i])
utf8Val *= 16
if r >= '0' && r <= '9' {
utf8Val += uint(r) - '0'
} else if r >= 'a' && r <= 'f' {
utf8Val += uint(r) - 'a' + 10
} else {
return name
}
i++
}
// Didn't find closing underscore, invalid.
return name
}
return unescaped.String()
default:
panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
}
}
func isValidLegacyRune(b rune, i int) bool {
return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)
}
func (e EscapingScheme) String() string {
switch e {
case NoEscaping:
return AllowUTF8
case UnderscoreEscaping:
return EscapeUnderscores
case DotsEscaping:
return EscapeDots
case ValueEncodingEscaping:
return EscapeValues
default:
panic(fmt.Sprintf("unknown format scheme %d", e))
}
}
func ToEscapingScheme(s string) (EscapingScheme, error) {
if s == "" {
return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme")
}
switch s {
case AllowUTF8:
return NoEscaping, nil
case EscapeUnderscores:
return UnderscoreEscaping, nil
case EscapeDots:
return DotsEscaping, nil
case EscapeValues:
return ValueEncodingEscaping, nil
default:
return NoEscaping, fmt.Errorf("unknown format scheme " + s)
}
}

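Illustrative round-trips through the escaping schemes defined above (a sketch; the metric name is hypothetical):

package scrape

import (
    "fmt"

    "github.com/prometheus/common/model"
)

func escapeExamples() {
    const name = "http.requests.total"
    fmt.Println(model.EscapeName(name, model.UnderscoreEscaping)) // http_requests_total
    fmt.Println(model.EscapeName(name, model.DotsEscaping))       // http_dot_requests_dot_total

    escaped := model.EscapeName(name, model.ValueEncodingEscaping)
    fmt.Println(escaped) // U__http_2e_requests_2e_total
    fmt.Println(model.UnescapeName(escaped, model.ValueEncodingEscaping) == name) // true: value encoding round-trips
}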

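The golang.org/x/tools diff below (internal/gopathwalk) makes the walk concurrent and adds an Options.Concurrency limit. A hedged sketch of how callers inside x/tools, such as internal/imports, drive it; the package is internal, so it is not importable from other modules, and the root path and callback here are placeholders.

package imports

import (
    "log"
    "path/filepath"

    "golang.org/x/tools/internal/gopathwalk"
)

func scanModCache(gomodcache string) {
    roots := []gopathwalk.Root{{
        Path: filepath.Join(gomodcache, "cache", "download"),
        Type: gopathwalk.RootModuleCache,
    }}
    add := func(root gopathwalk.Root, dir string) {
        // Invoked from up to Concurrency goroutines at once, so it must be
        // safe for concurrent use.
        log.Println("package dir:", dir)
    }
    gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: true, Concurrency: 4})
}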
@ -9,11 +9,13 @@ package gopathwalk
import (
"bufio"
"bytes"
"io"
"io/fs"
"log"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
)
@ -21,8 +23,13 @@ import (
type Options struct {
// If Logf is non-nil, debug logging is enabled through this function.
Logf func(format string, args ...interface{})
// Search module caches. Also disables legacy goimports ignore rules.
ModulesEnabled bool
// Maximum number of concurrent calls to user-provided callbacks,
// or 0 for GOMAXPROCS.
Concurrency int
}
// RootType indicates the type of a Root.
@ -43,19 +50,28 @@ type Root struct {
Type RootType
}
// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
// Walk concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
//
// For each package found, add will be called with the absolute
// paths of the containing source directory and the package directory.
//
// Unlike filepath.WalkDir, Walk follows symbolic links
// (while guarding against cycles).
func Walk(roots []Root, add func(root Root, dir string), opts Options) {
WalkSkip(roots, add, func(Root, string) bool { return false }, opts)
}
// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
// WalkSkip concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to
// find packages.
//
// For each package found, add will be called with the absolute
// paths of the containing source directory and the package directory.
// For each directory that will be scanned, skip will be called
// with the absolute paths of the containing source directory and the directory.
// If skip returns false on a directory it will be processed.
//
// Unlike filepath.WalkDir, WalkSkip follows symbolic links
// (while guarding against cycles).
func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) {
for _, root := range roots {
walkDir(root, add, skip, opts)
@ -64,45 +80,51 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root
// walkDir creates a walker and starts fastwalk with this walker.
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
if opts.Logf == nil {
opts.Logf = func(format string, args ...interface{}) {}
}
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
if opts.Logf != nil {
opts.Logf("skipping nonexistent directory: %v", root.Path)
}
opts.Logf("skipping nonexistent directory: %v", root.Path)
return
}
start := time.Now()
if opts.Logf != nil {
opts.Logf("scanning %s", root.Path)
}
opts.Logf("scanning %s", root.Path)
concurrency := opts.Concurrency
if concurrency == 0 {
// The walk will be either CPU-bound or I/O-bound, depending on what the
// caller-supplied add function does and the details of the user's platform
// and machine. Rather than trying to fine-tune the concurrency level for a
// specific environment, we default to GOMAXPROCS: it is likely to be a good
// choice for a CPU-bound add function, and if it is instead I/O-bound, then
// dealing with I/O saturation is arguably the job of the kernel and/or
// runtime. (Oversaturating I/O seems unlikely to harm performance as badly
// as failing to saturate would.)
concurrency = runtime.GOMAXPROCS(0)
}
w := &walker{
root: root,
add: add,
skip: skip,
opts: opts,
added: make(map[string]bool),
root: root,
add: add,
skip: skip,
opts: opts,
sem: make(chan struct{}, concurrency),
}
w.init()
// Add a trailing path separator to cause filepath.WalkDir to traverse symlinks.
w.sem <- struct{}{}
path := root.Path
if len(path) == 0 {
path = "." + string(filepath.Separator)
} else if !os.IsPathSeparator(path[len(path)-1]) {
path = path + string(filepath.Separator)
if path == "" {
path = "."
}
if fi, err := os.Lstat(path); err == nil {
w.walk(path, nil, fs.FileInfoToDirEntry(fi))
} else {
w.opts.Logf("scanning directory %v: %v", root.Path, err)
}
<-w.sem
w.walking.Wait()
if err := filepath.WalkDir(path, w.walk); err != nil {
logf := opts.Logf
if logf == nil {
logf = log.Printf
}
logf("scanning directory %v: %v", root.Path, err)
}
if opts.Logf != nil {
opts.Logf("scanned %s in %v", root.Path, time.Since(start))
}
opts.Logf("scanned %s in %v", root.Path, time.Since(start))
}
// walker is the callback for fastwalk.Walk.
@ -112,10 +134,18 @@ type walker struct {
skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true.
opts Options // Options passed to Walk by the user.
pathSymlinks []os.FileInfo
ignoredDirs []string
walking sync.WaitGroup
sem chan struct{} // Channel of semaphore tokens; send to acquire, receive to release.
ignoredDirs []string
added map[string]bool
added sync.Map // map[string]bool
}
// A symlinkList is a linked list of os.FileInfos for parent directories
// reached via symlinks.
type symlinkList struct {
info os.FileInfo
prev *symlinkList
}
// init initializes the walker based on its Options
@ -132,9 +162,7 @@ func (w *walker) init() {
for _, p := range ignoredPaths {
full := filepath.Join(w.root.Path, p)
w.ignoredDirs = append(w.ignoredDirs, full)
w.opts.Logf("Directory added to ignore list: %s", full)
}
}
@ -144,12 +172,10 @@ func (w *walker) init() {
func (w *walker) getIgnoredDirs(path string) []string {
file := filepath.Join(path, ".goimportsignore")
slurp, err := os.ReadFile(file)
if err != nil {
w.opts.Logf("%v", err)
} else {
w.opts.Logf("Read %s", file)
}
if err != nil {
return nil
@ -183,63 +209,22 @@ func (w *walker) shouldSkipDir(dir string) bool {
// walk walks through the given path.
//
// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored.
func (w *walker) walk(path string, pathSymlinks *symlinkList, d fs.DirEntry) {
if d.Type()&os.ModeSymlink != 0 {
// Walk the symlink's target rather than the symlink itself.
//
// (Note that os.Stat, unlike the lower-level os.Readlink,
// follows arbitrarily many layers of symlinks, so it will eventually
// reach either a non-symlink or a nonexistent target.)
//
// TODO(bcmills): 'go list all' itself ignores symlinks within GOROOT/src
// and GOPATH/src. Do we really need to traverse them here? If so, why?
fi, err := os.Stat(path)
if err != nil {
w.opts.Logf("%v", err)
return
}
// Avoid walking symlink cycles: if we have already followed a symlink to
@ -249,83 +234,104 @@ func (w *walker) walk(path string, d fs.DirEntry, err error) error {
// the number of extra stat calls we make if we *don't* encounter a cycle.
// Since we don't actually expect to encounter symlink cycles in practice,
// this seems like the right tradeoff.
for parent := pathSymlinks; parent != nil; parent = parent.prev {
if os.SameFile(fi, parent.info) {
return
}
}
pathSymlinks = &symlinkList{
info: fi,
prev: pathSymlinks,
}
d = fs.FileInfoToDirEntry(fi)
}
if d.Type().IsRegular() {
if !strings.HasSuffix(path, ".go") {
return
}
dir := filepath.Dir(path)
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
// Doesn't make sense to have regular files
// directly in your $GOPATH/src or $GOROOT/src.
//
// TODO(bcmills): there are many levels of directory within
// RootModuleCache where this also wouldn't make sense,
// Can we generalize this to any directory without a corresponding
// import path?
return
}
if _, dup := w.added.LoadOrStore(dir, true); !dup {
w.add(w.root, dir)
}
}
if !d.IsDir() {
return
}
base := filepath.Base(path)
if base == "" || base[0] == '.' || base[0] == '_' ||
base == "testdata" ||
(w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
(!w.opts.ModulesEnabled && base == "node_modules") ||
w.shouldSkipDir(path) {
return
}
// Read the directory and walk its entries.
f, err := os.Open(path)
if err != nil {
w.opts.Logf("%v", err)
return
}
defer f.Close()
for {
// We impose an arbitrary limit on the number of ReadDir results per
// directory to limit the amount of memory consumed for stale or upcoming
// directory entries. The limit trades off CPU (number of syscalls to read
// the whole directory) against RAM (reachable directory entries other than
// the one currently being processed).
//
// Since we process the directories recursively, we will end up maintaining
// a slice of entries for each level of the directory tree.
// (Compare https://go.dev/issue/36197.)
ents, err := f.ReadDir(1024)
if err != nil {
if err != io.EOF {
w.opts.Logf("%v", err)
}
// Fall through and iterate over whatever entries we did manage to get.
break
}
for _, d := range ents {
nextPath := filepath.Join(path, d.Name())
if d.IsDir() {
select {
case w.sem <- struct{}{}:
// Got a new semaphore token, so we can traverse the directory concurrently.
d := d
w.walking.Add(1)
go func() {
defer func() {
<-w.sem
w.walking.Done()
}()
w.walk(nextPath, pathSymlinks, d)
}()
continue
default:
// No tokens available, so traverse serially.
}
}
w.walk(nextPath, pathSymlinks, d)
}
}
}


@ -13,6 +13,7 @@ import (
"go/build"
"go/parser"
"go/token"
"go/types"
"io/fs"
"io/ioutil"
"os"
@ -700,20 +701,21 @@ func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map
return result, nil
}
func PrimeCache(ctx context.Context, resolver Resolver) error {
// Fully scan the disk for directories, but don't actually read any Go files.
callback := &scanCallback{
rootFound: func(root gopathwalk.Root) bool {
// See getCandidatePkgs: walking GOROOT is apparently expensive and
// unnecessary.
return root.Type != gopathwalk.RootGOROOT
},
dirFound: func(pkg *pkg) bool {
return false
},
// packageNameLoaded and exportsLoaded must never be called.
}
return resolver.scan(ctx, callback)
}
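// Illustrative sketch (not part of the upstream file): with the new signature,
// the cache is primed through a Resolver rather than a ProcessEnv. Assuming an
// already-configured env, a caller might drive it with a hypothetical,
// in-package helper like this:
//
//	func primeImportsCache(ctx context.Context, env *ProcessEnv) error {
//		resolver, err := env.GetResolver()
//		if err != nil {
//			return err
//		}
//		// Fully scan the disk for directories, but don't read any Go files.
//		return PrimeCache(ctx, resolver)
//	}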
func candidateImportName(pkg *pkg) string {
@ -827,16 +829,45 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
return getCandidatePkgs(ctx, callback, filename, filePkg, env)
}
var requiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB", "GOWORK"}
// TODO(rfindley): we should depend on GOOS and GOARCH, to provide accurate
// imports when doing cross-platform development.
var requiredGoEnvVars = []string{
"GO111MODULE",
"GOFLAGS",
"GOINSECURE",
"GOMOD",
"GOMODCACHE",
"GONOPROXY",
"GONOSUMDB",
"GOPATH",
"GOPROXY",
"GOROOT",
"GOSUMDB",
"GOWORK",
}
// ProcessEnv contains environment variables and settings that affect the use of
// the go command, the go/build package, etc.
//
// ...a ProcessEnv *also* overwrites its Env along with derived state in the
// form of the resolver. And because it is lazily initialized, an env may just
// be broken and unusable, but there is no way for the caller to detect that:
// all queries will just fail.
//
// TODO(rfindley): refactor this package so that this type (perhaps renamed to
// just Env or Config) is an immutable configuration struct, to be exchanged
// for an initialized object via a constructor that returns an error. Perhaps
// the signature should be `func NewResolver(*Env) (*Resolver, error)`, where
// resolver is a concrete type used for resolving imports. Via this
// refactoring, we can avoid the need to call ProcessEnv.init and
// ProcessEnv.GoEnv everywhere, and implicitly fix all the places where
// these are misused. Also, we'd delegate to the caller the decision of how to
// handle a broken environment.
type ProcessEnv struct {
GocmdRunner *gocommand.Runner
BuildFlags []string
ModFlag string
ModFile string
// SkipPathInScan returns true if the path should be skipped from scans of
// the RootCurrentModule root type. The function argument is a clean,
@ -846,7 +877,7 @@ type ProcessEnv struct {
// Env overrides the OS environment, and can be used to specify
// GOPROXY, GO111MODULE, etc. PATH cannot be set here, because
// exec.Command will not honor it.
// Specifying all of requiredGoEnvVars avoids a call to `go env`.
Env map[string]string
WorkingDir string
@ -854,9 +885,17 @@ type ProcessEnv struct {
// If Logf is non-nil, debug logging is enabled through this function.
Logf func(format string, args ...interface{})
// If set, ModCache holds a shared cache of directory info to use across
// multiple ProcessEnvs.
ModCache *DirInfoCache
initialized bool // see TODO above
// resolver and resolverErr are lazily evaluated (see GetResolver).
// This is unclean, but see the big TODO in the docstring for ProcessEnv
// above: for now, we can't be sure that the ProcessEnv is fully initialized.
resolver Resolver
resolverErr error
}
func (e *ProcessEnv) goEnv() (map[string]string, error) {
@ -936,20 +975,31 @@ func (e *ProcessEnv) env() []string {
}
func (e *ProcessEnv) GetResolver() (Resolver, error) {
if err := e.init(); err != nil {
return nil, err
}
if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
e.resolver = newGopathResolver(e)
return e.resolver, nil
if e.resolver == nil && e.resolverErr == nil {
// TODO(rfindley): we should only use a gopathResolver here if the working
// directory is actually *in* GOPATH. (I seem to recall an open gopls issue
// for this behavior, but I can't find it).
//
// For gopls, we can optionally explicitly choose a resolver type, since we
// already know the view type.
if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
e.resolver = newGopathResolver(e)
} else {
e.resolver, e.resolverErr = newModuleResolver(e, e.ModCache)
}
}
return e.resolver, e.resolverErr
}
// buildContext returns the build.Context to use for matching files.
//
// TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform
// development.
func (e *ProcessEnv) buildContext() (*build.Context, error) {
ctx := build.Default
goenv, err := e.goEnv()
@ -1029,15 +1079,23 @@ func addStdlibCandidates(pass *pass, refs references) error {
type Resolver interface {
// loadPackageNames loads the package names in importPaths.
loadPackageNames(importPaths []string, srcDir string) (map[string]string, error)
// scan works with callback to search for packages. See scanCallback for details.
scan(ctx context.Context, callback *scanCallback) error
// loadExports returns the set of exported symbols in the package at dir.
// loadExports may be called concurrently.
loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error)
// scoreImportPath returns the relevance for an import path.
scoreImportPath(ctx context.Context, path string) float64
// ClearForNewScan returns a new Resolver based on the receiver that has
// cleared its internal caches of directory contents.
//
// The new resolver should be primed and then set via
// [ProcessEnv.UpdateResolver].
ClearForNewScan() Resolver
}
// A scanCallback controls a call to scan and receives its results.
@ -1120,7 +1178,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
go func(pkgName string, symbols map[string]bool) {
defer wg.Done()
found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols)
if err != nil {
firstErrOnce.Do(func() {
@ -1151,6 +1209,17 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
}()
for result := range results {
// Don't offer completions that would shadow predeclared
// names, such as github.com/coreos/etcd/error.
if types.Universe.Lookup(result.pkg.name) != nil { // predeclared
// Ideally we would skip this candidate only
// if the predeclared name is actually
// referenced by the file, but that's a lot
// trickier to compute and would still create
// an import that is likely to surprise the
// user before long.
continue
}
pass.addCandidate(result.imp, result.pkg)
}
return firstErr
@ -1193,31 +1262,22 @@ func ImportPathToAssumedName(importPath string) string {
type gopathResolver struct {
env *ProcessEnv
walked bool
cache *DirInfoCache
scanSema chan struct{} // scanSema prevents concurrent scans.
}
func newGopathResolver(env *ProcessEnv) *gopathResolver {
r := &gopathResolver{
env: env,
cache: NewDirInfoCache(),
scanSema: make(chan struct{}, 1),
}
r.scanSema <- struct{}{}
return r
}
func (r *gopathResolver) ClearForNewScan() Resolver {
return newGopathResolver(r.env)
}
func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
@ -1538,7 +1598,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
// findImport searches for a package with the given symbols.
// If no package is found, findImport returns ("", false, nil)
func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
// Sort the candidates by their import package length,
// assuming that shorter package names are better than long
// ones. Note that this sorts by the de-vendored name, so


@ -236,7 +236,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast
src = src[:len(src)-len("}\n")]
// Gofmt has also indented the function body one level.
// Remove that indent.
src = bytes.ReplaceAll(src, []byte("\n\t"), []byte("\n"))
return matchSpace(orig, src)
}
return file, adjust, nil


@ -23,49 +23,88 @@ import (
"golang.org/x/tools/internal/gopathwalk"
)
// Notes(rfindley): ModuleResolver appears to be heavily optimized for scanning
// as fast as possible, which is desirable for a call to goimports from the
// command line, but it doesn't work as well for gopls, where it suffers from
// slow startup (golang/go#44863) and intermittent hanging (golang/go#59216),
// both caused by populating the cache, albeit in slightly different ways.
//
// A high level list of TODOs:
// - Optimize the scan itself, as there is some redundancy statting and
// reading go.mod files.
// - Invert the relationship between ProcessEnv and Resolver (see the
// docstring of ProcessEnv).
// - Make it easier to use an external resolver implementation.
//
// Smaller TODOs are annotated in the code below.
// ModuleResolver implements the Resolver interface for a workspace using
// modules.
//
// A goal of the ModuleResolver is to invoke the Go command as little as
// possible. To this end, it runs the Go command only for listing module
// information (i.e. `go list -m -e -json ...`). Package scanning, the process
// of loading package information for the modules, is implemented internally
// via the scan method.
//
// It has two types of state: the state derived from the go command, which
// is populated by init, and the state derived from scans, which is populated
// via scan. A root is considered scanned if it has been walked to discover
// directories. However, if the scan did not require additional information
// from the directory (such as package name or exports), the directory
// information itself may be partially populated. It will be lazily filled in
// as needed by scans, using the scanCallback.
type ModuleResolver struct {
env *ProcessEnv
// Module state, populated during construction
dummyVendorMod *gocommand.ModuleJSON // if vendoring is enabled, a pseudo-module to represent the /vendor directory
moduleCacheDir string // GOMODCACHE, inferred from GOPATH if unset
roots []gopathwalk.Root // roots to scan, in approximate order of importance
mains []*gocommand.ModuleJSON // main modules
mainByDir map[string]*gocommand.ModuleJSON // module information by dir, to join with roots
modsByModPath []*gocommand.ModuleJSON // all modules, ordered by # of path components in their module path
modsByDir []*gocommand.ModuleJSON // ...or by the number of path components in their Dir.
// Scanning state, populated by scan
// scanSema prevents concurrent scans, and guards scannedRoots and the cache
// fields below (though the caches themselves are concurrency safe).
// Receive to acquire, send to release.
scanSema chan struct{}
scannedRoots map[gopathwalk.Root]bool // if true, root has been walked
// Caches of directory info, populated by scans and scan callbacks
//
// moduleCacheCache stores cached information about roots in the module
// cache, which are immutable and therefore do not need to be invalidated.
//
// otherCache stores information about all other roots (even GOROOT), which
// may change.
moduleCacheCache *DirInfoCache
otherCache *DirInfoCache
}
// newModuleResolver returns a new module-aware goimports resolver.
//
// Note: use caution when modifying this constructor: changes must also be
// reflected in ModuleResolver.ClearForNewScan.
func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleResolver, error) {
r := &ModuleResolver{
env: e,
scanSema: make(chan struct{}, 1),
}
r.scanSema <- struct{}{} // release
goenv, err := r.env.goEnv()
if err != nil {
return nil, err
}
// TODO(rfindley): can we refactor to share logic with r.env.invokeGo?
inv := gocommand.Invocation{
BuildFlags: r.env.BuildFlags,
ModFlag: r.env.ModFlag,
ModFile: r.env.ModFile,
Env: r.env.env(),
Logf: r.env.Logf,
WorkingDir: r.env.WorkingDir,
@ -77,9 +116,12 @@ func (r *ModuleResolver) init() error {
// Module vendor directories are ignored in workspace mode:
// https://go.googlesource.com/proposal/+/master/design/45713-workspace.md
if len(r.env.Env["GOWORK"]) == 0 {
// TODO(rfindley): VendorEnabled runs the go command to get GOFLAGS, but
// they should be available from the ProcessEnv. Can we avoid the redundant
// invocation?
vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner)
if err != nil {
return nil, err
}
}
@ -100,19 +142,14 @@ func (r *ModuleResolver) init() error {
// GO111MODULE=on. Other errors are fatal.
if err != nil {
if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") {
return nil, err
}
}
}
if gmc := r.env.Env["GOMODCACHE"]; gmc != "" {
r.moduleCacheDir = gmc
} else {
gopaths := filepath.SplitList(goenv["GOPATH"])
if len(gopaths) == 0 {
return fmt.Errorf("empty GOPATH")
}
r.moduleCacheDir = filepath.Join(gopaths[0], "/pkg/mod")
r.moduleCacheDir = gomodcacheForEnv(goenv)
if r.moduleCacheDir == "" {
return nil, fmt.Errorf("cannot resolve GOMODCACHE")
}
sort.Slice(r.modsByModPath, func(i, j int) bool {
@ -141,7 +178,11 @@ func (r *ModuleResolver) init() error {
} else {
addDep := func(mod *gocommand.ModuleJSON) {
if mod.Replace == nil {
// This is redundant with the cache, but we'll skip it cheaply enough
// when we encounter it in the module cache scan.
//
// Including it at a lower index in r.roots than the module cache dir
// helps prioritize matches from within existing dependencies.
r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache})
} else {
r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther})
@ -158,24 +199,40 @@ func (r *ModuleResolver) init() error {
addDep(mod)
}
}
// If provided, share the moduleCacheCache.
//
// TODO(rfindley): The module cache is immutable. However, the loaded
// exports do depend on GOOS and GOARCH. Fortunately, the
// ProcessEnv.buildContext does not adjust these from build.DefaultContext
// (even though it should). So for now, this is OK to share, but we need to
// add logic for handling GOOS/GOARCH.
r.moduleCacheCache = moduleCacheCache
r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache})
}
r.scannedRoots = map[gopathwalk.Root]bool{}
if r.moduleCacheCache == nil {
r.moduleCacheCache = NewDirInfoCache()
}
r.otherCache = NewDirInfoCache()
return r, nil
}
// gomodcacheForEnv returns the GOMODCACHE value to use based on the given env
// map, which must have GOMODCACHE and GOPATH populated.
//
// TODO(rfindley): this is defensive refactoring.
// 1. Is this even relevant anymore? Can't we just read GOMODCACHE?
// 2. Use this to separate module cache scanning from other scanning.
func gomodcacheForEnv(goenv map[string]string) string {
if gmc := goenv["GOMODCACHE"]; gmc != "" {
return gmc
}
gopaths := filepath.SplitList(goenv["GOPATH"])
if len(gopaths) == 0 {
return ""
}
return filepath.Join(gopaths[0], "/pkg/mod")
}
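// Illustrative sketch (not part of the upstream file): gomodcacheForEnv prefers
// an explicit GOMODCACHE and otherwise falls back to <first GOPATH entry>/pkg/mod.
// For example, on a Unix-like system (paths are hypothetical):
//
//	gomodcacheForEnv(map[string]string{"GOMODCACHE": "/tmp/modcache", "GOPATH": "/home/u/go"})
//	// returns "/tmp/modcache"
//	gomodcacheForEnv(map[string]string{"GOMODCACHE": "", "GOPATH": "/home/u/go"})
//	// returns "/home/u/go/pkg/mod"
//	gomodcacheForEnv(map[string]string{"GOMODCACHE": "", "GOPATH": ""})
//	// returns "", which newModuleResolver reports as "cannot resolve GOMODCACHE"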
func (r *ModuleResolver) initAllMods() error {
@ -206,30 +263,82 @@ func (r *ModuleResolver) initAllMods() error {
return nil
}
// ClearForNewScan invalidates the last scan.
//
// It preserves the set of roots, but forgets about the set of directories.
// Though it forgets the set of module cache directories, it remembers their
// contents, since they are assumed to be immutable.
func (r *ModuleResolver) ClearForNewScan() Resolver {
<-r.scanSema // acquire r, to guard scannedRoots
r2 := &ModuleResolver{
env: r.env,
dummyVendorMod: r.dummyVendorMod,
moduleCacheDir: r.moduleCacheDir,
roots: r.roots,
mains: r.mains,
mainByDir: r.mainByDir,
modsByModPath: r.modsByModPath,
scanSema: make(chan struct{}, 1),
scannedRoots: make(map[gopathwalk.Root]bool),
otherCache: NewDirInfoCache(),
moduleCacheCache: r.moduleCacheCache,
}
r2.scanSema <- struct{}{} // r2 must start released
// Invalidate root scans. We don't need to invalidate module cache roots,
// because they are immutable.
// (We don't support a use case where GOMODCACHE is cleaned in the middle of
// e.g. a gopls session: the user must restart gopls to get accurate
// imports.)
//
// Scanning for new directories in GOMODCACHE should be handled elsewhere,
// via a call to ScanModuleCache.
for _, root := range r.roots {
if root.Type == gopathwalk.RootModuleCache && r.scannedRoots[root] {
r2.scannedRoots[root] = true
}
}
r.scanSema <- struct{}{} // release r
return r2
}
// ClearModuleInfo invalidates resolver state that depends on go.mod file
// contents (essentially, the output of go list -m -json ...).
//
// Notably, it does not forget directory contents, which are reset
// asynchronously via ClearForNewScan.
//
// If the ProcessEnv is a GOPATH environment, ClearModuleInfo is a no op.
//
// TODO(rfindley): move this to a new env.go, consolidating ProcessEnv methods.
func (e *ProcessEnv) ClearModuleInfo() {
if r, ok := e.resolver.(*ModuleResolver); ok {
resolver, resolverErr := newModuleResolver(e, e.ModCache)
if resolverErr == nil {
<-r.scanSema // acquire (guards caches)
resolver.moduleCacheCache = r.moduleCacheCache
resolver.otherCache = r.otherCache
r.scanSema <- struct{}{} // release
}
e.resolver = resolver
e.resolverErr = resolverErr
}
}
// UpdateResolver sets the resolver for the ProcessEnv to use in imports
// operations. Only for use with the result of [Resolver.ClearForNewScan].
//
// TODO(rfindley): this awkward API is a result of the (arguably) inverted
// relationship between configuration and state described in the doc comment
// for [ProcessEnv].
func (e *ProcessEnv) UpdateResolver(r Resolver) {
e.resolver = r
e.resolverErr = nil
}
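// Illustrative sketch (not part of the upstream file): taken together,
// ClearModuleInfo, ClearForNewScan and UpdateResolver support a lifecycle like
// the one below for a long-lived ProcessEnv, shown here as a hypothetical
// in-package helper:
//
//	func refreshScan(ctx context.Context, env *ProcessEnv) error {
//		// go.mod or go.work changed: refresh module metadata, keep dir caches.
//		env.ClearModuleInfo()
//
//		// Directory contents may be stale: derive a resolver with cleared
//		// (non-module-cache) directory caches, prime it, then swap it in.
//		resolver, err := env.GetResolver()
//		if err != nil {
//			return err
//		}
//		fresh := resolver.ClearForNewScan()
//		if err := PrimeCache(ctx, fresh); err != nil {
//			return err
//		}
//		env.UpdateResolver(fresh)
//		return nil
//	}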
// findPackage returns the module and directory from within the main modules
// and their dependencies that contains the package at the given import path,
// or returns nil, "" if no module is in scope.
func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) {
// This can't find packages in the stdlib, but that's harmless for all
// the existing code paths.
@ -295,10 +404,6 @@ func (r *ModuleResolver) cacheStore(info directoryPackageInfo) {
}
}
// cachePackageName caches the package name for a dir already in the cache.
func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) {
if info.rootType == gopathwalk.RootModuleCache {
@ -367,15 +472,15 @@ func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON
return modDir != mod.Dir
}
func readModName(modFile string) string {
modBytes, err := os.ReadFile(modFile)
if err != nil {
return ""
}
return modulePath(modBytes)
}
func (r *ModuleResolver) modInfo(dir string) (modDir, modName string) {
if r.dirInModuleCache(dir) {
if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 {
index := strings.Index(dir, matches[1]+"@"+matches[2])
@ -409,11 +514,9 @@ func (r *ModuleResolver) dirInModuleCache(dir string) bool {
}
func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
names := map[string]string{}
for _, path := range importPaths {
// TODO(rfindley): shouldn't this use the dirInfoCache?
_, packageDir := r.findPackage(path)
if packageDir == "" {
continue
@ -431,10 +534,6 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
ctx, done := event.Start(ctx, "imports.ModuleResolver.scan")
defer done()
processDir := func(info directoryPackageInfo) {
// Skip this directory if we were not able to get the package information successfully.
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
@ -444,18 +543,18 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
if err != nil {
return
}
if !callback.dirFound(pkg) {
return
}
pkg.packageName, err = r.cachePackageName(info)
if err != nil {
return
}
if !callback.packageNameLoaded(pkg) {
return
}
_, exports, err := r.loadExports(ctx, pkg, false)
if err != nil {
return
@ -494,7 +593,6 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
return packageScanned
}
// Add anything new to the cache, and process it if we're still listening.
add := func(root gopathwalk.Root, dir string) {
r.cacheStore(r.scanDirForPackage(root, dir))
}
@ -509,9 +607,9 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
select {
case <-ctx.Done():
return
case <-r.scanSema: // acquire
}
defer func() { r.scanSema <- struct{}{} }() // release
// We have the lock on r.scannedRoots, and no other scans can run.
for _, root := range roots {
if ctx.Err() != nil {
@ -613,9 +711,6 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
}
func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) {
if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest {
return r.cacheExports(ctx, r.env, info)
}


@ -7,8 +7,12 @@ package imports
import (
"context"
"fmt"
"path"
"path/filepath"
"strings"
"sync"
"golang.org/x/mod/module"
"golang.org/x/tools/internal/gopathwalk"
)
@ -39,6 +43,8 @@ const (
exportsLoaded
)
// directoryPackageInfo holds (possibly incomplete) information about packages
// contained in a given directory.
type directoryPackageInfo struct {
// status indicates the extent to which this struct has been filled in.
status directoryPackageStatus
@ -63,7 +69,10 @@ type directoryPackageInfo struct {
packageName string // the package name, as declared in the source.
// Set when status >= exportsLoaded.
// TODO(rfindley): it's hard to see this, but exports depend implicitly on
// the default build context GOOS and GOARCH.
//
// We can make this explicit, and key exports by GOOS, GOARCH.
exports []string
}
@ -79,7 +88,7 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (
return true, nil
}
// DirInfoCache is a concurrency-safe map for storing information about
// directories that may contain packages.
//
// The information in this cache is built incrementally. Entries are initialized in scan.
@ -92,21 +101,26 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (
// The information in the cache is not expected to change for the cache's
// lifetime, so there is no protection against competing writes. Users should
// take care not to hold the cache across changes to the underlying files.
//
// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc)
type DirInfoCache struct {
mu sync.Mutex
// dirs stores information about packages in directories, keyed by absolute path.
dirs map[string]*directoryPackageInfo
listeners map[*int]cacheListener
}
func NewDirInfoCache() *DirInfoCache {
return &DirInfoCache{
dirs: make(map[string]*directoryPackageInfo),
listeners: make(map[*int]cacheListener),
}
}
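// Illustrative sketch (not part of the upstream file): in-package callers use
// the cache as a keyed store of (possibly partial) scan results. With a
// hypothetical module cache path:
//
//	cache := NewDirInfoCache()
//	dir := "/home/user/go/pkg/mod/example.com/m@v1.0.0/pkg"
//	cache.Store(dir, directoryPackageInfo{status: directoryScanned, dir: dir})
//	if info, ok := cache.Load(dir); ok {
//		_ = info // Load returns a copy; mutating it does not affect the cache.
//	}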
type cacheListener func(directoryPackageInfo)
// ScanAndListen calls listener on all the items in the cache, and on anything
// newly added. The returned stop function waits for all in-flight callbacks to
// finish and blocks new ones.
func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
ctx, cancel := context.WithCancel(ctx)
// Flushing out all the callbacks is tricky without knowing how many there
@ -162,8 +176,10 @@ func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener
}
// Store stores the package info for dir.
func (d *DirInfoCache) Store(dir string, info directoryPackageInfo) {
d.mu.Lock()
// TODO(rfindley, golang/go#59216): should we overwrite an existing entry?
// That seems incorrect as the cache should be idempotent.
_, old := d.dirs[dir]
d.dirs[dir] = &info
var listeners []cacheListener
@ -180,7 +196,7 @@ func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
}
// Load returns a copy of the directoryPackageInfo for absolute directory dir.
func (d *DirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
d.mu.Lock()
defer d.mu.Unlock()
info, ok := d.dirs[dir]
@ -191,7 +207,7 @@ func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
}
// Keys returns the keys currently present in d.
func (d *DirInfoCache) Keys() (keys []string) {
d.mu.Lock()
defer d.mu.Unlock()
for key := range d.dirs {
@ -200,7 +216,7 @@ func (d *dirInfoCache) Keys() (keys []string) {
return keys
}
func (d *DirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
if loaded, err := info.reachedStatus(nameLoaded); loaded {
return info.packageName, err
}
@ -213,7 +229,7 @@ func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, erro
return info.packageName, info.err
}
func (d *DirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
if reached, _ := info.reachedStatus(exportsLoaded); reached {
return info.packageName, info.exports, info.err
}
@ -234,3 +250,81 @@ func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info d
d.Store(info.dir, info)
return info.packageName, info.exports, info.err
}
// ScanModuleCache walks the given directory, which must be a GOMODCACHE value,
// for directory package information, storing the results in cache.
func ScanModuleCache(dir string, cache *DirInfoCache, logf func(string, ...any)) {
// Note(rfindley): it's hard to see, but this function attempts to implement
// just the side effects on cache of calling PrimeCache with a ProcessEnv
// that has the given dir as its GOMODCACHE.
//
// Teasing out the control flow, we see that we can avoid any handling of
// vendor/ and can infer module info entirely from the path, simplifying the
// logic here.
root := gopathwalk.Root{
Path: filepath.Clean(dir),
Type: gopathwalk.RootModuleCache,
}
directoryInfo := func(root gopathwalk.Root, dir string) directoryPackageInfo {
// This is a copy of ModuleResolver.scanDirForPackage, trimmed down to
// logic that applies to a module cache directory.
subdir := ""
if dir != root.Path {
subdir = dir[len(root.Path)+len("/"):]
}
matches := modCacheRegexp.FindStringSubmatch(subdir)
if len(matches) == 0 {
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("invalid module cache path: %v", subdir),
}
}
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
if err != nil {
if logf != nil {
logf("decoding module cache path %q: %v", subdir, err)
}
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("decoding module cache path %q: %v", subdir, err),
}
}
importPath := path.Join(modPath, filepath.ToSlash(matches[3]))
index := strings.Index(dir, matches[1]+"@"+matches[2])
modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2])
modName := readModName(filepath.Join(modDir, "go.mod"))
return directoryPackageInfo{
status: directoryScanned,
dir: dir,
rootType: root.Type,
nonCanonicalImportPath: importPath,
moduleDir: modDir,
moduleName: modName,
}
}
add := func(root gopathwalk.Root, dir string) {
info := directoryInfo(root, dir)
cache.Store(info.dir, info)
}
skip := func(_ gopathwalk.Root, dir string) bool {
// Skip directories that have already been scanned.
//
// Note that gopathwalk only adds "package" directories, which must contain
// a .go file, and all such package directories in the module cache are
// immutable. So if we can load a dir, it can be skipped.
info, ok := cache.Load(dir)
if !ok {
return false
}
packageScanned, _ := info.reachedStatus(directoryScanned)
return packageScanned
}
gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: logf, ModulesEnabled: true})
}


@ -151,6 +151,7 @@ var stdlib = map[string][]string{
"cmp": {
"Compare",
"Less",
"Or",
"Ordered",
},
"compress/bzip2": {
@ -632,6 +633,8 @@ var stdlib = map[string][]string{
"NameMismatch",
"NewCertPool",
"NotAuthorizedToSign",
"OID",
"OIDFromInts",
"PEMCipher",
"PEMCipher3DES",
"PEMCipherAES128",
@ -706,6 +709,7 @@ var stdlib = map[string][]string{
"LevelWriteCommitted",
"Named",
"NamedArg",
"Null",
"NullBool",
"NullByte",
"NullFloat64",
@ -1921,6 +1925,7 @@ var stdlib = map[string][]string{
"R_LARCH_32",
"R_LARCH_32_PCREL",
"R_LARCH_64",
"R_LARCH_64_PCREL",
"R_LARCH_ABS64_HI12",
"R_LARCH_ABS64_LO20",
"R_LARCH_ABS_HI20",
@ -1928,12 +1933,17 @@ var stdlib = map[string][]string{
"R_LARCH_ADD16",
"R_LARCH_ADD24",
"R_LARCH_ADD32",
"R_LARCH_ADD6",
"R_LARCH_ADD64",
"R_LARCH_ADD8",
"R_LARCH_ADD_ULEB128",
"R_LARCH_ALIGN",
"R_LARCH_B16",
"R_LARCH_B21",
"R_LARCH_B26",
"R_LARCH_CFA",
"R_LARCH_COPY",
"R_LARCH_DELETE",
"R_LARCH_GNU_VTENTRY",
"R_LARCH_GNU_VTINHERIT",
"R_LARCH_GOT64_HI12",
@ -1953,6 +1963,7 @@ var stdlib = map[string][]string{
"R_LARCH_PCALA64_LO20",
"R_LARCH_PCALA_HI20",
"R_LARCH_PCALA_LO12",
"R_LARCH_PCREL20_S2",
"R_LARCH_RELATIVE",
"R_LARCH_RELAX",
"R_LARCH_SOP_ADD",
@ -1983,8 +1994,10 @@ var stdlib = map[string][]string{
"R_LARCH_SUB16",
"R_LARCH_SUB24",
"R_LARCH_SUB32",
"R_LARCH_SUB6",
"R_LARCH_SUB64",
"R_LARCH_SUB8",
"R_LARCH_SUB_ULEB128",
"R_LARCH_TLS_DTPMOD32",
"R_LARCH_TLS_DTPMOD64",
"R_LARCH_TLS_DTPREL32",
@ -2035,6 +2048,7 @@ var stdlib = map[string][]string{
"R_MIPS_LO16",
"R_MIPS_NONE",
"R_MIPS_PC16",
"R_MIPS_PC32",
"R_MIPS_PJUMP",
"R_MIPS_REL16",
"R_MIPS_REL32",
@ -2952,6 +2966,8 @@ var stdlib = map[string][]string{
"RegisterName",
},
"encoding/hex": {
"AppendDecode",
"AppendEncode",
"Decode",
"DecodeString",
"DecodedLen",
@ -3233,6 +3249,7 @@ var stdlib = map[string][]string{
"TypeSpec",
"TypeSwitchStmt",
"UnaryExpr",
"Unparen",
"ValueSpec",
"Var",
"Visitor",
@ -3492,6 +3509,7 @@ var stdlib = map[string][]string{
"XOR_ASSIGN",
},
"go/types": {
"Alias",
"ArgumentError",
"Array",
"AssertableTo",
@ -3559,6 +3577,7 @@ var stdlib = map[string][]string{
"MethodVal",
"MissingMethod",
"Named",
"NewAlias",
"NewArray",
"NewChan",
"NewChecker",
@ -3627,6 +3646,7 @@ var stdlib = map[string][]string{
"Uint64",
"Uint8",
"Uintptr",
"Unalias",
"Union",
"Universe",
"Unsafe",
@ -3643,6 +3663,11 @@ var stdlib = map[string][]string{
"WriteSignature",
"WriteType",
},
"go/version": {
"Compare",
"IsValid",
"Lang",
},
"hash": {
"Hash",
"Hash32",
@ -4078,6 +4103,7 @@ var stdlib = map[string][]string{
"NewTextHandler",
"Record",
"SetDefault",
"SetLogLoggerLevel",
"Source",
"SourceKey",
"String",
@ -4367,6 +4393,35 @@ var stdlib = map[string][]string{
"Uint64",
"Zipf",
},
"math/rand/v2": {
"ChaCha8",
"ExpFloat64",
"Float32",
"Float64",
"Int",
"Int32",
"Int32N",
"Int64",
"Int64N",
"IntN",
"N",
"New",
"NewChaCha8",
"NewPCG",
"NewZipf",
"NormFloat64",
"PCG",
"Perm",
"Rand",
"Shuffle",
"Source",
"Uint32",
"Uint32N",
"Uint64",
"Uint64N",
"UintN",
"Zipf",
},
"mime": {
"AddExtensionType",
"BEncoding",
@ -4540,6 +4595,7 @@ var stdlib = map[string][]string{
"FS",
"File",
"FileServer",
"FileServerFS",
"FileSystem",
"Flusher",
"Get",
@ -4566,6 +4622,7 @@ var stdlib = map[string][]string{
"MethodPut",
"MethodTrace",
"NewFileTransport",
"NewFileTransportFS",
"NewRequest",
"NewRequestWithContext",
"NewResponseController",
@ -4599,6 +4656,7 @@ var stdlib = map[string][]string{
"Serve",
"ServeContent",
"ServeFile",
"ServeFileFS",
"ServeMux",
"ServeTLS",
"Server",
@ -5106,6 +5164,7 @@ var stdlib = map[string][]string{
"StructTag",
"Swapper",
"Type",
"TypeFor",
"TypeOf",
"Uint",
"Uint16",
@ -5342,6 +5401,7 @@ var stdlib = map[string][]string{
"CompactFunc",
"Compare",
"CompareFunc",
"Concat",
"Contains",
"ContainsFunc",
"Delete",
@ -10824,6 +10884,7 @@ var stdlib = map[string][]string{
"Value",
},
"testing/slogtest": {
"Run",
"TestHandler",
},
"text/scanner": {


@ -535,8 +535,8 @@ const minBatchSize = 1000
// size is too low to give stream goroutines a chance to fill it up.
//
// Upon exiting, if the error causing the exit is not an I/O error, run()
// flushes the underlying connection. The connection is always left open to
// allow different closing behavior on the client and server.
func (l *loopyWriter) run() (err error) {
defer func() {
if l.logger.V(logLevel) {
@ -544,7 +544,6 @@ func (l *loopyWriter) run() (err error) {
}
if !isIOError(err) {
l.framer.writer.Flush()
}
l.cbuf.finish()
}()


@ -451,7 +451,13 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
go func() {
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
if err := t.loopy.run(); !isIOError(err) {
// Immediately close the connection, as the loopy writer returns
// when there are no more active streams and we were draining (the
// server sent a GOAWAY). For I/O errors, the reader will hit it
// after draining any remaining incoming data.
t.conn.Close()
}
close(t.writerDone)
}()
return t, nil


@ -322,8 +322,24 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
go func() {
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
err := t.loopy.run()
close(t.loopyWriterDone)
if !isIOError(err) {
// Close the connection if a non-I/O error occurs (for I/O errors
// the reader will also encounter the error and close). Wait 1
// second before closing the connection, or when the reader is done
// (i.e. the client already closed the connection or a connection
// error occurred). This avoids the potential problem where there
// is unread data on the receive side of the connection, which, if
// closed, would lead to a TCP RST instead of FIN, and the client
// encountering errors. For more info:
// https://github.com/grpc/grpc-go/issues/5358
select {
case <-t.readerDone:
case <-time.After(time.Second):
}
t.conn.Close()
}
}()
go t.keepalive()
return t, nil
@ -609,8 +625,8 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
// traceCtx attaches trace to ctx and returns the new context.
func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
defer func() {
close(t.readerDone)
<-t.loopyWriterDone
}()
for {
t.controlBuf.throttle()
@ -1329,6 +1345,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
return false, err
}
t.framer.writer.Flush()
if retErr != nil {
return false, retErr
}
@ -1349,7 +1366,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
return false, err
}
go func() {
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case <-t.drainEvent.Done():


@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.61.0"
const Version = "1.61.1"

vendor/modules.txt

@ -137,7 +137,7 @@ github.com/prometheus/client_golang/prometheus/promhttp
# github.com/prometheus/client_model v0.5.0
## explicit; go 1.19
github.com/prometheus/client_model/go
# github.com/prometheus/common v0.47.0
## explicit; go 1.20
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
@ -187,7 +187,7 @@ go.uber.org/zap/internal/exit
go.uber.org/zap/internal/pool
go.uber.org/zap/internal/stacktrace
go.uber.org/zap/zapcore
# golang.org/x/mod v0.15.0
## explicit; go 1.18
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
@ -226,7 +226,7 @@ golang.org/x/text/unicode/norm
# golang.org/x/time v0.5.0
## explicit; go 1.18
golang.org/x/time/rate
# golang.org/x/tools v0.18.0
## explicit; go 1.18
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/imports
@ -241,7 +241,7 @@ golang.org/x/tools/internal/imports
# gomodules.xyz/jsonpatch/v2 v2.4.0
## explicit; go 1.20
gomodules.xyz/jsonpatch/v2
# google.golang.org/api v0.165.0
## explicit; go 1.19
google.golang.org/api/support/bundler
# google.golang.org/appengine v1.6.8
@ -253,16 +253,16 @@ google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe
## explicit; go 1.19
google.golang.org/genproto/protobuf/field_mask
# google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014
## explicit; go 1.19
google.golang.org/genproto/googleapis/api/httpbody
# google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.61.1
## explicit; go 1.19
google.golang.org/grpc
google.golang.org/grpc/attributes
@ -693,10 +693,10 @@ k8s.io/utils/net
k8s.io/utils/pointer
k8s.io/utils/strings/slices
k8s.io/utils/trace
# knative.dev/hack v0.0.0-20240214131420-999d7e6b8495
## explicit; go 1.18
knative.dev/hack
# knative.dev/pkg v0.0.0-20240219120257-9227ebb57a4e
## explicit; go 1.18
knative.dev/pkg/apis
knative.dev/pkg/apis/duck