[main] Upgrade to latest dependencies (#1910)

* upgrade to latest dependencies

bumping k8s.io/code-generator ad20935...0a2d87a:
  > 0a2d87a Merge pull request # 121545 from dims/automated-cherry-pick-of-# 121364-upstream-release-1.28
  > 4e70e93 bump golang.org/grpc to v1.56.3
  > 69e9240 Merge pull request # 121128 from MadhavJivrajani/bump-x-net-128
  > 6cf3458 .: bump golang.org/x/net to v0.17.0
  > 791c213 Merge remote-tracking branch 'origin/master' into release-1.28
  > a66bf75 .*: bump golang.org/x/net to v0.13.0
  > fee1cc9 Merge pull request # 118204 from sttts/sttts-openapi-v2-parameter-refs
  > 44fe511 Merge pull request # 119312 from pacoxu/prometheus/common-v0.44
  > 9ac045b Bump kube-openapi
  > c56ebf3 upgrade prometheus common to v0.44.0
  > 3cea41d Merge pull request # 119330 from bertinatto/fix-conn-reuse-test
  > c293887 Proactively bump golang.org/x/net to v0.12.0
  > b265455 Fix lifecycle generator to check the version correctly (# 119268)
  > b510e2c Merge pull request # 118689 from bzsuni/clean
  > 04d27a1 update prometheus/client_golang v1.14.0 to v1.16.0
  > 41a1e2d Merge pull request # 118507 from jeremyrickard/go1205
  > 0ffabab Update vendor with hack/update-vendor.sh
  > 4e0c7a3 Merge pull request # 118522 from jpbetz/remove-bad-merge-key
  > 81f56dc Remove invalid merge key
  > b92bd47 Merge pull request # 118414 from thockin/remove_codegen_debug
  > 5686b85 Fix errant debugging code in codegen
  > e60f563 Merge pull request # 118384 from Jefftree/gnostic-models-branch
  > f9f5739 Update gnostic references
  > 049b8c1 vendor
  > 7eed8e6 Merge pull request # 118269 from liggitt/genproto
  > ce9404a Update google.golang.org/genproto
  > db4dff4 Merge pull request # 118240 from Jefftree/bump-kube-openapi
  > 8f5c773 Update vendor
  > 03cdae3 Merge pull request # 117985 from howardjohn/patch-2
  > 932443c Merge pull request # 118014 from liggitt/mapstructure
  > ab3121a codegen
  > 254bead Update kube-openapi, drop mapstructure
  > 96be077 Fix comments on InformerFor
  > 04bedf1 Merge pull request # 117982 from jpbetz/sig-roles-jpbetz
  > 276d6b6 Merge pull request # 117645 from humblec/etcd-2
  > b4cd301 Add api-machinery TL owners permissions for jpbetz
  > f45259c Merge pull request # 117139 from pohly/test-integration-race-detection-update-utils
  > c60f5e6 update vendor dependencies for the change
  > 7eb0f51 dependencies: bump k8s.io/utils
  > b2afdc9 etcd depdencies are updated to v3.5.9
  > 8437e77 Merge pull request # 117946 from lavalamp/lavalamp-taking-a-break
  > 7850b0d Merge pull request # 117961 from humblec/ginkgo
  > 29a942d lavalamp is taking a long break
  > d13e2c2 Merge pull request # 116761 from iancoolidge/devel-cpuset-revendor
  > a40ed9b ginkgo update to v2.9.4 and gomega to 1.27.6
  > a2fdf24 Merge pull request # 117897 from thockin/codegen_deprecate_generate_groups_scripts
  > 5e46b7b Update k8s.io/utils version to v0.0.0-20230313181309-38a27ef9d749
  > 6027f11 Merge pull request # 117899 from thockin/codegen_purge_openapi_shell_indirection
  > 1305626 Deprecate generate*groups.sh -> kube_codegen.sh
  > 04a3650 Use the same report files as before
  > 6d32803 Simpler openapi gen - subprojects do themselves
  > 89a7a4d Merge pull request # 117262 from thockin/codegen_new_script_for_subprojects
  > c40d37c Convert code-generator/examples to new codegen
  > 0eb9f05 Add a new way for subprojects to do codegen
  > f731524 Merge pull request # 117687 from pohly/klog-update
  > 69dc822 dependencies: klog v2.100.1
  > ce06575 Merge pull request # 117705 from Jefftree/update-openapi-fix-race
  > dfd3d2e Update kube-openapi to fix race
  > 9b6e38c Merge pull request # 117253 from akhilerm/update-containerd-dependencies
  > f2fce9b chore: update cgroups and ttrpc versions
  > fcf3837 Merge pull request # 117350 from mohitsharma-in/update/google-golang-protobuf
  > 9423c3d Merge pull request # 117483 from ArkaSaha30/bump-gofuzz
  > e0c5a53 Dependencies Update google.golang.org/protobuf v1.28.1 to v1.30.0
  > d0b82bb Merge pull request # 117482 from ArkaSaha30/bump-go-logr
  > dfd550e update gofuzz dependency
  > abb7434 Merge pull request # 117352 from mohitsharma-in/update/golang_x_tools
  > de902bb Update go-logr dependencies
  > ddd36a3 Dependencies Update golang.org/x/tools v0.7.0 to v0.8.0
  > d0339d3 Merge pull request # 117399 from mohitsharma-in/update/golang_time
  > e13ba37 Dependencies Update golang.org/x/timet 90d013bbcef8  to v0.3.0
  > 473f30d Merge pull request # 117285 from humblec/azure-go-autorest
  > 68bbb45 dependencies: update gh/Azure/auto-test/{adal,validation}
  > e28c955 Merge pull request # 116282 from thockin/codegen_subprojects_nuke_old_files
  > ff97b26 Merge pull request # 116281 from thockin/codegen_subproject_chdir
  > 0ecf58f Codegen: subprojects: nuke existing files
  > fe4722c Codegen: simpler k8s.io/code-generator/examples
  > 2d17b8d Merge pull request # 116280 from thockin/codegen_no_internal_subprojects
  > 25c3b12 Codegen: new tools will not become part of 'all'
  > 8bfbf3d Codegen subprojects: consolidate generate-*groups
  > 399d412 Codegen subprojects: deprecate "all" in scripts
  > e04cc82 Codegen subprojects: regen defaulters when needed
  > 5ccd5be Codegen subprojects: reify 'all' into explicit
  > 0577864 Trivial change to reduce diffs
  > 450bfcd Merge pull request # 116948 from MadhavJivrajani/fix-verify-vendor
  > 5d100ef .*: update vendor dir and cleanup
  > 8fead9f Merge pull request # 116539 from pohly/ginkgo-gomega-update
  > e6c58e4 dependencies: ginkgo v2.9.1, gomega v1.27.4
bumping knative.dev/pkg 9386ad6...b488e7b:
  > b488e7b upgrade to latest dependencies (# 2938)
  > f95090a Bump github.com/evanphx/json-patch/v5 from 5.7.0 to 5.8.0 (# 2935)
  > 347a4b5 Bump github.com/prometheus/common from 0.45.0 to 0.46.0 (# 2937)
  > e8c79d4 Bump golang.org/x/oauth2 from 0.15.0 to 0.16.0 (# 2934)
  > ff26179 Bump golang.org/x/tools from 0.16.1 to 0.17.0 (# 2936)
  > e0d5064 Bump golang.org/x/net from 0.19.0 to 0.20.0 (# 2933)
  > bc230ae Update community files (# 2932)
  > 21d8c37 Bump K8s dependencies to v0.28.5 and set K8s min-version to 1.27 (# 2928)
  > a459076 upgrade to latest dependencies (# 2930)
  > de3e9cc Fixing all deprecated use of sets.String (# 2915)
  > a65a9e2 lower log level for AdmissionController.Admit validation  failures (# 2905)
  > 4c06610 Update community files (# 2929)
  > 697d669 Bump google.golang.org/api from 0.154.0 to 0.155.0 (# 2926)
  > 9e6a998 Bump golang.org/x/sync from 0.5.0 to 0.6.0 (# 2925)
  > dfa0a2c Bump github.com/prometheus/client_golang from 1.17.0 to 1.18.0 (# 2924)
  > 32bea20 Bump google.golang.org/protobuf from 1.31.0 to 1.32.0 (# 2923)
  > d513e48 Bump google.golang.org/grpc from 1.59.0 to 1.60.1 (# 2922)
  > 1709d20 Bump golang.org/x/crypto from 0.16.0 to 0.17.0 (# 2921)
  > 03265f7 Bump golang.org/x/tools from 0.16.0 to 0.16.1 (# 2917)
  > 155eba4 Bump google.golang.org/api from 0.153.0 to 0.154.0 (# 2920)
  > d71ca9a Bump cloud.google.com/go/storage from 1.35.1 to 1.36.0 (# 2918)
  > 5413322 Bump github.com/google/uuid from 1.4.0 to 1.5.0 (# 2916)
  > 4914c47 Bump google.golang.org/api from 0.152.0 to 0.153.0 (# 2914)
  > 9ad79ff Bump actions/setup-go from 4 to 5 (# 2913)
bumping k8s.io/apiextensions-apiserver 6090d8e...1dff568:
  > 1dff568 Update dependencies to v0.28.5 tag
  > bc1f37f Merge pull request # 121545 from dims/automated-cherry-pick-of-# 121364-upstream-release-1.28
  > baeeb81 bump golang.org/grpc to v1.56.3
  > c624896 Merge pull request # 121128 from MadhavJivrajani/bump-x-net-128
  > da001b1 .: bump golang.org/x/net to v0.17.0
  > 2d89f88 Merge pull request # 120356 from stevekuznetsov/automated-cherry-pick-of-# 120177-kubernetes-release-1.28
  > efb98fd Merge pull request # 119807 from jpbetz/automated-cherry-pick-of-# 119800-origin-release-1.28
  > 6c12248 apiextensions-apiserver: generate applyconfigurations
  > 6087aeb Merge pull request # 120141 from Jefftree/automated-cherry-pick-of-# 120109-upstream-release-1.28
  > 4d28f48 Fix CEL cost handling of zero length replacement strings
  > 3ea4ec4 Merge pull request # 120329 from liggitt/automated-cherry-pick-of-# 120327-upstream-release-1.28
  > b838126 Add wait for cache sync
  > 0404ad2 Add test coverage of result size of string operations
  > 0ede49e Revert to json-patch 4.12.0
  > 3f6c83c Bump cel-go to v0.16.1
  > eb9dafe Merge remote-tracking branch 'origin/master' into release-1.28
  > f13dc21 .*: bump golang.org/x/net to v0.13.0
  > 82eb7fc Merge remote-tracking branch 'origin/master' into release-1.28
  > 7709b76 apiextensions: fix validation error for status.storedVersions
  > b69767a Use statusReason for reason under x-kubernetes-validations (# 119544)
  > 124026f Merge pull request # 119543 from jpbetz/fix-xvalidations-flake
  > d60d893 Treat empty string as nil in fuzzer for CEL Reason field
  > ce3ed21 Merge pull request # 119510 from jpbetz/fix-mutation
  > 33bfdc8 Merge pull request # 119453 from cici37/addTest
  > b1b1775 Fix XValidations deepcopy to copy contents of nested pointers
  > d4e28b3 Address comment
  > f337e93 Refactor jsonpath parser and add tests.
  > d4d25b0 Merge pull request # 119340 from alexzielenski/apiserver/apiextensions/use-statusSchema
  > ed307c0 [KEP-2876]Add reason and fieldPath into CRD validation rules (# 118041)
  > ee459cb apiextensions: validate status updates with status schema
  > ac58466 Merge pull request # 118990 from alexzielenski/apiserver/apiextensions/crd-validation-ratcheting
  > baf073b Merge pull request # 118808 from Jefftree/updated-lazy-crd-controller-v2
  > d5f0213 update go.mod
  > 886cc80 Make CRDs built and aggregated lazily for oasv2
  > 0a5e7b0 use ratcheting schema validator when feature is enabled
  > b43ae52 Merge pull request # 118204 from sttts/sttts-openapi-v2-parameter-refs
  > 102e7eb add ratcheting schema validator using new openapi hooks
  > 80047f7 Merge pull request # 119312 from pacoxu/prometheus/common-v0.44
  > 4570537 Bump kube-openapi
  > 26778c0 add CRDValidationRatcheting feature gate
  > c1d26c5 upgrade prometheus common to v0.44.0
  > df2cf11 openapi: reference shared parameters
  > 2b42730 refactor: convert slices to []interface before inserion into VendorExtensible
  > dfc8251 Merge pull request # 119330 from bertinatto/fix-conn-reuse-test
  > 61188af refactor: add ValidateCustomResourceUpdate to support future validators for CRD Updates
  > 59f9b3d Proactively bump golang.org/x/net to v0.12.0
  > d63f8bb refactor: cleanup to NewSchemaValidator takes JSONSchemaProps
  > 6542909 Merge pull request # 117108 from pohly/test-integration-race-detection-component-base-logs
  > 0e74a9b refactor: rename apiservervalidation -> apiextensionsvalidation
  > 780eaa5 component-base/logs: improve handling of re-applying a configuration
  > 3efa7e2 Merge pull request # 118689 from bzsuni/clean
  > 6bd4851 Merge pull request # 118542 from cchapla/crd_webhook_metrics_updates
  > 70978ae update prometheus/client_golang v1.14.0 to v1.16.0
  > 37caa61 Minor fix on variable
  > a1be513 Updating names from webhookconversion to conversionwebhook
  > 1db2b07 Merge pull request # 118507 from jeremyrickard/go1205
  > 5a3e605 Update vendor with hack/update-vendor.sh
  > 0f42c58 Merge pull request # 118522 from jpbetz/remove-bad-merge-key
  > 75fa40c Merge pull request # 118292 from cchapla/crd_webhook_metrics
  > 23d6cfa Remove invalid merge key
  > 00091a3 Merge pull request # 118416 from sttts/sttts-etcd-options-complete
  > dfe6aaa Changes to buckets and comments
  > 6dca9f1 Merge pull request # 117294 from humblec/open-containers
  > 4e061f6 k8s.io/apiserver: remove skewed completion from EtcdOptions
  > f0093e4 Changes to histogram buckets
  > 2221bc5 dependencies: update github.com/dustin/go-humanize v1.0.1
  > f25167f Review comments, added metric namespace, moved utility functions, and etc
  > e81b4f1 Merge pull request # 118384 from Jefftree/gnostic-models-branch
  > 76af56a Webhook conversion metrics
  > f1f58f0 Update gnostic references
  > 35ec793 vendor
  > 7a5b05c Merge pull request # 118339 from jpbetz/bump-cel-go
  > 54ebdaa Enable optionals and add tests
  > 0a0fe1f Bump cel-go to v0.16.0
  > e0b0416 Merge pull request # 118269 from liggitt/genproto
  > 6a6001e Update google.golang.org/genproto
  > 37c0f7d Merge pull request # 118240 from Jefftree/bump-kube-openapi
  > 19cc2e6 Update vendor
  > 501bf5e Merge pull request # 117985 from howardjohn/patch-2
  > 56f7e82 Merge pull request # 118014 from liggitt/mapstructure
  > c163323 codegen
  > 8ef6b90 Update kube-openapi, drop mapstructure
  > 4cbfb5b Merge pull request # 117645 from humblec/etcd-2
  > c417362 Merge pull request # 117139 from pohly/test-integration-race-detection-update-utils
  > 0c33ac7 update vendor dependencies for the change
  > 07c89d1 dependencies: bump k8s.io/utils
  > 0a0501d etcd depdencies are updated to v3.5.9
  > 98774ff Merge pull request # 117946 from lavalamp/lavalamp-taking-a-break
  > 998aac2 Merge pull request # 117961 from humblec/ginkgo
  > 147a7f8 lavalamp is taking a long break
  > 3ef489c Merge pull request # 116761 from iancoolidge/devel-cpuset-revendor
  > b19cde5 ginkgo update to v2.9.4 and gomega to 1.27.6
  > 8b066bc Merge pull request # 117899 from thockin/codegen_purge_openapi_shell_indirection
  > 581611d Update k8s.io/utils version to v0.0.0-20230313181309-38a27ef9d749
  > f259ad6 Use the same report files as before
  > 962290e Simpler openapi gen - subprojects do themselves
  > 255d31a Codegen: subprojects openapi
  > cb87797 Merge pull request # 117262 from thockin/codegen_new_script_for_subprojects
  > 715e784 Merge pull request # 116779 from jpbetz/cel-ratcheting
  > eb41537 Convert apiextensions-apiserver to new codegen
  > eb8d569 Merge pull request # 117342 from kkkkun/update-etcd-deps-3.5.8
  > da3e785 Introduce CEL EnvSets for managing safe rollout of new CEL features, libraries and expression variables
  > 398f193 Merge pull request # 117618 from jpbetz/jpbetz-apiextensions-approver
  > 859639b upgrade etcd deps to v3.5.8
  > 4f2e374 Merge pull request # 117687 from pohly/klog-update
  > 6d99cd0 Add jpbetz to approvers of apiextensions-apiserver
  > 62e755e Merge pull request # 117328 from humblec/uber
  > 55f73a5 dependencies: klog v2.100.1
  > 612a866 Merge pull request # 117090 from jpmcb/cobra-1.7.0
  > 135a00c dependencies: go.uber.org/atomic v1.10.0
  > 7c4a384 Upgrades spf13/cobra to 1.7.0
  > 83c9eef dependencies: update go.uber.org/multierr v1.11.0
  > 9cb4c2d Merge pull request # 114998 from alexzielenski/apiserver/smd/ssa-conditionals
  > 1997a38 include apiextensions types in apiextensions generated openapi
  > 5739196 fix scale not being given gvk in CRD openapi schemas
  > 72bab68 add OpenAPIV3 config to tests and server options
  > 061320c Merge pull request # 117705 from Jefftree/update-openapi-fix-race
  > 2312078 Update kube-openapi to fix race
  > 18409ed Merge pull request # 117253 from akhilerm/update-containerd-dependencies
  > f727663 Merge pull request # 117349 from mohitsharma-in/deps-update/natefinch-lumberjack.v2
  > e5cc1de chore: update cgroups and ttrpc versions
  > f0da574 Merge pull request # 117593 from jpbetz/test-join
  > 5f8077d update Deps gopkg.in/natefinch/lumberjack.v2 v2.0.0 to v2.2.1
  > 5dcc218 Merge pull request # 117350 from mohitsharma-in/update/google-golang-protobuf
  > caad8b0 Fix bug where CEL listOfString.join() results in unexpected error
  > 96f3820 Merge pull request # 117483 from ArkaSaha30/bump-gofuzz
  > b9b41db Dependencies Update google.golang.org/protobuf v1.28.1 to v1.30.0
  > cf8773d Merge pull request # 117482 from ArkaSaha30/bump-go-logr
  > 9c80e35 update gofuzz dependency
  > 6a79604 Merge pull request # 117352 from mohitsharma-in/update/golang_x_tools
  > 5e0eadb Update go-logr dependencies
  > 699c276 Dependencies Update golang.org/x/tools v0.7.0 to v0.8.0
  > 2522eac Merge pull request # 117408 from cenkalti/backoff
  > 7dbdc67 Merge pull request # 117399 from mohitsharma-in/update/golang_time
  > 10c705f dependencies: update github.com/cenkalti/backoff/v4 to v4.2.1
  > 8c56b90 Dependencies Update golang.org/x/timet 90d013bbcef8  to v0.3.0
  > e79b4a0 Merge pull request # 117275 from akhilerm/update-coreos-deps
  > dd0f73c dependencies: update go-semver to v0.3.1
  > 98a31a3 dependencies: update go-systemd to v22.5.0
  > c91de0d Merge pull request # 117285 from humblec/azure-go-autorest
  > 1b32f81 dependencies: update gh/Azure/auto-test/{adal,validation}
  > b1c69c0 Merge pull request # 117301 from ncdc/revert-conversion-refactoring
  > 15521be Revert "Merge pull request # 113151 from ncdc/refactor-crd-conversion"
  > d5a5c83 Revert "CR conversion: protect from converter input edits"
  > 56acced Merge pull request # 116896 from thockin/apimachinery_util_diff_cleanup
  > c6d3475 Replace uses of ObjectReflectDiff with cmp.Diff
  > 7ab76f1 Replace uses of ObjectGoPrintDiff with cmp.Diff
  > 4e0e033 Replace apimachinery diff.StringDiff with cmp
  > 176c9c5 Merge pull request # 116348 from jkh52/knp-0.1.2
  > 69191a2 Merge pull request # 116280 from thockin/codegen_no_internal_subprojects
  > 1b79e64 Bump Konnectivity to 0.1.2
  > 0e0d0ca Codegen subprojects: regen defaulters when needed
  > be7cf1c Codegen subprojects: reify 'all' into explicit
  > 6961dc4 Merge pull request # 116948 from MadhavJivrajani/fix-verify-vendor
  > 70ef815 .*: update vendor dir and cleanup
  > 51f7c2f Merge pull request # 116770 from alexzielenski/agg-discovery-err-sources
  > adddae7 allow multiple sources to add/remove from discovery without clobbering each other
bumping github.com/prometheus/client_model 9a2bf30...1c92cad:
  > 1c92cad Merge pull request # 72 from prometheus/repo_sync
  > cbe84de Merge pull request # 71 from prometheus/superq/update_build
  > 58db340 Update common Prometheus files
  > baaa038 Merge pull request # 70 from prometheus/repo_sync
  > 568c466 Update Go
  > 7b38251 Merge pull request # 69 from prometheus/repo_sync
  > 185bafb Update common Prometheus files
  > cfbcccb Merge pull request # 67 from prometheus/beorn7/histogram
  > 4dbcf4e Update common Prometheus files
  > 95a0733 Merge pull request # 66 from ArthurSens/created-timestamp
  > 2a367b9 histogram: Add a doc comment about when to add a no-op span
bumping knative.dev/serving 7744087...1c46c07:
  > 1c46c07 upgrade to latest dependencies (# 14828)
  > 7509f7d Update net-contour nightly (# 14827)
  > f234d38 Update net-istio nightly (# 14826)
  > 40b0628 Update net-kourier nightly (# 14824)
  > 785b9d3 Update net-gateway-api nightly (# 14823)
  > 232cfd8 Update net-certmanager nightly (# 14825)
  > 99ea2f3 Update community files (# 14822)
  > bbabe5b upgrade to latest dependencies (# 14816)
  > 4928853 Create less load for TestActivatorChainHandlerWithFullDuplex (# 14820)
  > 4546f92 Update net-gateway-api nightly (# 14812)
  > 5d9bcf6 Update net-istio nightly (# 14817)
  > 9acc277 Update net-kourier nightly (# 14813)
  > 85efcf7 Update net-contour nightly (# 14814)
  > 1fcbcc6 Update net-certmanager nightly (# 14811)
  > 906d309 Update community files (# 14810)
  > bacd818 take 2: set appProtocol on services (# 14809)
  > 45ddf2d bump istio for gateway api tests (# 14808)
  > 9a867e1 Update net-gateway-api nightly (# 14789)
  > ad0a989 Revert "Set AppProtocol on Services (# 14757)" (# 14805)
  > 03b32e2 Update net-kourier nightly (# 14798)
  > 34da024 Support http1 full duplex per workload (# 14568)
  > e9a751f Update net-contour nightly (# 14801)
  > d96dabb Update net-istio nightly (# 14800)
  > 5ec6f8c Update net-certmanager nightly (# 14799)
  > aa3fb05 upgrade to latest dependencies (# 14796)
  > 3cea8b4 [Automated] Update net-istio nightly (# 14706)
  > 752314e Improve log output for Services with no LoadBalancer IP (# 14788)
  > 564e72b upgrade to latest dependencies (# 14794)
  > 1e2faa3 Remove /tmp and /var from reserved paths (# 14686) (# 14719)
  > 05b640b Update net-contour nightly (# 14792)
  > 9e0229c Update net-certmanager nightly (# 14791)
  > 921028c Update net-kourier nightly (# 14790)
  > 8162fe2 Add Vegeta rates / targets to SLA in performance tests (# 14429)
  > e5602d7 Set AppProtocol on Services (# 14757)
  > 52b3d0c Update net-gateway-api nightly (# 14785)
  > 45db086 Update net-contour nightly (# 14784)
  > 996d5b6 upgrade to latest dependencies (# 14783)
  > 7feb050 Add defaulting for all GRPC probe types (# 14773)
  > 85d698e Update net-kourier nightly (# 14781)
  > 7a9c068 Update net-contour nightly (# 14779)
  > 6a8bcf3 Update net-certmanager nightly (# 14780)
  > 842c0d9 upgrade to latest dependencies (# 14778)
  > d4807eb Update community files (# 14777)
  > 499dc1d Replaces all deprecated usages of sets with new generic versions (# 14765)
  > 0a291d6 Update net-certmanager nightly (# 14772)
  > b6552f9 Update net-kourier nightly (# 14768)
  > 5c02409 Update net-contour nightly (# 14769)
  > 32fde8b Update net-gateway-api nightly (# 14767)
  > 8028e2e Update net-certmanager nightly (# 14766)
  > 1962f25 upgrade to latest dependencies (# 14761)
  > cc1df09 Update community files (# 14760)
  > 328d03b Update net-kourier nightly (# 14759)
  > cc18593 Update net-certmanager nightly (# 14758)
  > 4697ed1 upgrade to latest dependencies (# 14756)
  > 8b13398 Update net-gateway-api nightly (# 14755)
  > afc93b9 Update net-contour nightly (# 14754)
  > 81149da Update net-kourier nightly (# 14743)
  > 0c6dc59 Update net-contour nightly (# 14742)
  > cc0359d Update net-certmanager nightly (# 14741)
  > 6291578 Update net-kourier nightly (# 14736)
  > cc15a56 upgrade to latest dependencies (# 14738)
  > df85272 Update net-certmanager nightly (# 14735)
  > 11c5aa9 upgrade to latest dependencies (# 14731)
  > a826aa4 Update net-certmanager nightly (# 14729)
  > 8f33f44 Update net-kourier nightly (# 14728)
  > fc479ae upgrade to latest dependencies (# 14726)
  > bab7163 Update net-kourier nightly (# 14725)
  > 3717446 upgrade to latest dependencies (# 14722)
  > 514e466 Update net-gateway-api nightly (# 14714)
  > f695a0b Update net-contour nightly (# 14716)
  > 1637bfc Update net-certmanager nightly (# 14715)
  > 2c93fee Update net-kourier nightly (# 14712)
  > 49d1226 Update net-certmanager nightly (# 14708)
  > aed404b Update net-kourier nightly (# 14707)
  > 2a873fc upgrade to latest dependencies (# 14704)
  > 4d0fdd1 Update net-kourier nightly (# 14702)
bumping k8s.io/klog/v2 d7fc505...6ded808:
  > 6ded808 Merge pull request # 375 from pohly/json-fallback
  > 77b73d5 Merge pull request # 374 from pohly/format
  > d731661 formatting: replace Sprintf("%+v") with JSON
  > 2fdc76f Merge pull request # 369 from mikedanese/stdlog
  > deffe14 add Format wrapper
  > 6bb2990 test: add more test cases for map, slice, struct
  > 4de3d37 expose logBridge via NewStandardLog()

Signed-off-by: Knative Automation <automation@knative.team>

* Fix codegen

---------

Signed-off-by: Knative Automation <automation@knative.team>
Co-authored-by: David Simansky <dsimansk@redhat.com>
Knative Automation 2024-01-24 08:51:12 -05:00, committed by GitHub (GPG Key ID: B5690EEEBB952194)
commit 543522a33e, parent 3c1b9c0412
603 changed files with 74554 additions and 52616 deletions

go.mod

@@ -12,24 +12,24 @@ require (
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.16.0
golang.org/x/mod v0.14.0
golang.org/x/term v0.15.0
golang.org/x/term v0.16.0
gotest.tools/v3 v3.3.0
k8s.io/api v0.27.6
k8s.io/apiextensions-apiserver v0.27.6
k8s.io/apimachinery v0.27.6
k8s.io/cli-runtime v0.26.5
k8s.io/client-go v0.27.6
k8s.io/code-generator v0.27.6
knative.dev/client-pkg v0.0.0-20231201015203-44c94192e5cf
knative.dev/eventing v0.39.1-0.20231201104147-3fcc78a3d716
knative.dev/hack v0.0.0-20231201014241-7030d5bf584d
knative.dev/networking v0.0.0-20231201014832-2274702ccc5f
knative.dev/pkg v0.0.0-20231204120332-9386ad6703ee
knative.dev/serving v0.39.1-0.20231204120610-774408705c3f
k8s.io/api v0.28.5
k8s.io/apiextensions-apiserver v0.28.5
k8s.io/apimachinery v0.28.5
k8s.io/cli-runtime v0.28.5
k8s.io/client-go v0.28.5
k8s.io/code-generator v0.28.5
knative.dev/client-pkg v0.0.0-20240124090003-67fca0ca8681
knative.dev/eventing v0.40.0
knative.dev/hack v0.0.0-20240123162936-f3f03ac0ab1a
knative.dev/networking v0.0.0-20240116081125-ce0738abf051
knative.dev/pkg v0.0.0-20240116073220-b488e7be5902
knative.dev/serving v0.40.0
sigs.k8s.io/yaml v1.4.0
)
require k8s.io/utils v0.0.0-20230209194617-a36077c30491
require k8s.io/utils v0.0.0-20230406110748-d93618cff8a2
require (
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect
@@ -45,24 +45,24 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.7.0 // indirect
github.com/evanphx/json-patch/v5 v5.8.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.1 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/gnostic v0.6.9 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-containerregistry v0.13.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.4.0 // indirect
github.com/google/uuid v1.5.0 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
github.com/imdario/mergo v0.3.13 // indirect
@@ -73,7 +73,6 @@ require (
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -84,10 +83,10 @@ require (
github.com/pelletier/go-toml/v2 v2.0.9 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.17.0 // indirect
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/prometheus/client_golang v1.18.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.46.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/statsd_exporter v0.22.8 // indirect
github.com/rickb777/date v1.20.0 // indirect
github.com/rickb777/plural v1.4.1 // indirect
@@ -97,36 +96,36 @@ require (
github.com/spf13/cast v1.5.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/xlab/treeprint v1.1.0 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.starlark.net v0.0.0-20220817180228-f738f5508c12 // indirect
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.10.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.26.0 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/oauth2 v0.15.0 // indirect
golang.org/x/sync v0.5.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/net v0.20.0 // indirect
golang.org/x/oauth2 v0.16.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.16.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.16.0 // indirect
golang.org/x/tools v0.17.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/api v0.152.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect
google.golang.org/grpc v1.59.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
google.golang.org/api v0.155.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect
google.golang.org/grpc v1.60.1 // indirect
google.golang.org/protobuf v1.32.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 // indirect
k8s.io/klog/v2 v2.90.1 // indirect
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect
k8s.io/klog/v2 v2.100.1 // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/api v0.12.1 // indirect
sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)

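For reference, the go.mod changes above pin the k8s.io modules to v0.28.5 and the Knative modules to their v0.40.0 releases. A bump like this is normally reproduced by re-resolving the module graph rather than editing go.mod by hand; a minimal sketch, assuming a plain Go module checkout (the Knative automation drives this through its own hack/ scripts, and the script name below is illustrative, not necessarily the one used here):

    # Point the key modules at the new versions; `go mod tidy` then
    # recomputes the indirect requirements and rewrites go.sum.
    go get k8s.io/api@v0.28.5 k8s.io/apimachinery@v0.28.5 \
           k8s.io/client-go@v0.28.5 k8s.io/code-generator@v0.28.5
    go get knative.dev/serving@v0.40.0 knative.dev/eventing@v0.40.0
    go mod tidy
    # Regenerate clients/informers after the code-generator bump
    # (the follow-up "Fix codegen" commit in this PR).
    ./hack/update-codegen.sh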
go.sum

@@ -42,7 +42,6 @@ contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -58,11 +57,9 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHfpE=
github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
@@ -78,14 +75,14 @@ github.com/cloudevents/sdk-go/v2 v2.13.0/go.mod h1:xDmKfzNjM8gBvjaF8ijFjM1VYOVUE
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo=
github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -93,13 +90,11 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc=
github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro=
github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
@@ -110,6 +105,8 @@ github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3Bop
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -123,12 +120,12 @@ github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8=
github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -174,8 +171,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0=
github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -216,17 +213,20 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-retryablehttp v0.6.7 h1:8/CAEZt/+F7kR7GevNHulKkUjLht3CPmn7egmhieNKo=
github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
@@ -275,8 +275,6 @@ github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3v
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -320,30 +318,30 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI=
github.com/prometheus/statsd_exporter v0.22.8 h1:Qo2D9ZzaQG+id9i5NYNGmbf1aa/KxKbB9aKfMS+Yib0=
github.com/prometheus/statsd_exporter v0.22.8/go.mod h1:/DzwbTEaFTE0Ojz5PqcSk6+PFHOPWGxdXVr6yC8eFOM=
@@ -355,8 +353,8 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
@@ -364,7 +362,6 @@ github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNX
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
@@ -378,7 +375,6 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc=
github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -400,15 +396,13 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk=
github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -418,17 +412,16 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.starlark.net v0.0.0-20220817180228-f738f5508c12 h1:xOBJXWGEDwU5xSDxH6macxO11Us0AH2fTa9rmsbbF7g=
go.starlark.net v0.0.0-20220817180228-f738f5508c12/go.mod h1:VZcBMdr3cT3PnBoWunTabuSEXwVAH+ZJ5zxfs3AdASk=
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY=
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ=
go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
@@ -439,7 +432,10 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -473,6 +469,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -510,14 +507,13 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -529,8 +525,8 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -542,8 +538,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -583,33 +580,33 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -667,8 +664,9 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM=
golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -695,16 +693,17 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.152.0 h1:t0r1vPnfMc260S2Ci+en7kfCZaLOPs5KI0sVV/6jZrY=
google.golang.org/api v0.152.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY=
google.golang.org/api v0.155.0 h1:vBmGhCYs0djJttDNynWo44zosHlPvHmA0XiN2zP2DtA=
google.golang.org/api v0.155.0/go.mod h1:GI5qK5f40kCpHfPn6+YzGAByIKWv8ujFnmoWm7Igduk=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -743,13 +742,12 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY=
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo=
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc=
google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg=
google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic=
google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3 h1:EWIeHfGuUf00zrVZGEgYFxok7plSAXBGcH7NNdMAWvA=
google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3/go.mod h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -763,14 +761,11 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -783,11 +778,10 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -809,7 +803,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -822,48 +815,48 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.27.6 h1:PBWu/lywJe2qQcshMjubzcBg7+XDZOo7O8JJAWuYtUo=
k8s.io/api v0.27.6/go.mod h1:AQYj0UsFCp3qJE7bOVnUuy4orCsXVkvHefnbYQiNWgk=
k8s.io/apiextensions-apiserver v0.27.6 h1:mOwSBJtThZhpJr+8gEkc3wFDIjq87E3JspR5mtZxIg8=
k8s.io/apiextensions-apiserver v0.27.6/go.mod h1:AVNlLYRrESG5Poo6ASRUhY2pvoKPcNt8y/IuZ4lx3o8=
k8s.io/apimachinery v0.27.6 h1:mGU8jmBq5o8mWBov+mLjdTBcU+etTE19waies4AQ6NE=
k8s.io/apimachinery v0.27.6/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E=
k8s.io/cli-runtime v0.26.5 h1:1YTQt6cWaiyA+6NptNMVqkGkh+BFN9cG+PESgz24//U=
k8s.io/cli-runtime v0.26.5/go.mod h1:iZMA8+AVNSownXlJ1h64s59C5/oHSA6hGBarfHjRDl8=
k8s.io/client-go v0.27.6 h1:vzI8804gpUtpMCNaFjIFyJrifH7u//LJCJPy8fQuYQg=
k8s.io/client-go v0.27.6/go.mod h1:PMsXcDKiJTW7PHJ64oEsIUJF319wm+EFlCj76oE5QXM=
k8s.io/code-generator v0.27.6 h1:1zkSDvylcA11s91aYg5U7fZ24EXMZ+KIDOj/Z3Ti4c8=
k8s.io/code-generator v0.27.6/go.mod h1:DPung1sI5vBgn4AGKtlPRQAyagj/ir/4jI55ipZHVww=
k8s.io/api v0.28.5 h1:XIPNr3nBgTEaCdEiwZ+dXaO9SB4NeTOZ2pNDRrFgfb4=
k8s.io/api v0.28.5/go.mod h1:98zkTCc60iSnqqCIyCB1GI7PYDiRDYTSfL0PRIxpM4c=
k8s.io/apiextensions-apiserver v0.28.5 h1:YKW9O9T/0Gkyl6LTFDLIhCbouSRh+pHt2vMLB38Snfc=
k8s.io/apiextensions-apiserver v0.28.5/go.mod h1:7p7TQ0X9zCJLNFlOTi5dncAi2dkPsdsrcvu5ILa7PEk=
k8s.io/apimachinery v0.28.5 h1:EEj2q1qdTcv2p5wl88KavAn3VlFRjREgRu8Sm/EuMPY=
k8s.io/apimachinery v0.28.5/go.mod h1:wI37ncBvfAoswfq626yPTe6Bz1c22L7uaJ8dho83mgg=
k8s.io/cli-runtime v0.28.5 h1:xTL2Zpx//2+mKysdDUogpY0qwYf5Qkuij3Ikmr6xh5Q=
k8s.io/cli-runtime v0.28.5/go.mod h1:FZZy7DAfum2co5rjGMM86sumPojroT3V06mP45erB/0=
k8s.io/client-go v0.28.5 h1:6UNmc33vuJhh3+SAOEKku3QnKa+DtPKGnhO2MR0IEbk=
k8s.io/client-go v0.28.5/go.mod h1:+pt086yx1i0HAlHzM9S+RZQDqdlzuXFl4hY01uhpcpA=
k8s.io/code-generator v0.28.5 h1:6LXs+I/LOMGNLVI7z8xImLjI98o9vcwiHiQY6PyqpmU=
k8s.io/code-generator v0.28.5/go.mod h1:OQAfl6bZikQ/tK6faJ18Vyzo54rUII2NmjurHyiN1g4=
k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 h1:iu3o/SxaHVI7tKPtkGzD3M9IzrE21j+CUKH98NQJ8Ms=
k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg=
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY=
k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
knative.dev/client-pkg v0.0.0-20231201015203-44c94192e5cf h1:wHLl5DdPfLjAfdyMP0n2T6o5+fPQ2cWOaUWcjNk8y54=
knative.dev/client-pkg v0.0.0-20231201015203-44c94192e5cf/go.mod h1:clbAkxi9x0pUNk0Vjd4NOF9yAHrBNGA29ZDaqwk8MrQ=
knative.dev/eventing v0.39.1-0.20231201104147-3fcc78a3d716 h1:yRbcddcU0Th2S7omozBqILDOGfefM8yAHF4K6vzOkY4=
knative.dev/eventing v0.39.1-0.20231201104147-3fcc78a3d716/go.mod h1:hibqweYk29xszfCrMZLYllRqsJJUl4K6pLBB1P/s5Bo=
knative.dev/hack v0.0.0-20231201014241-7030d5bf584d h1:IqXY770znXS9tLJDEh+OUcLMgtIFslSxqao3uplpUlY=
knative.dev/hack v0.0.0-20231201014241-7030d5bf584d/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
knative.dev/networking v0.0.0-20231201014832-2274702ccc5f h1:8NN+0CcrZiIIJIdVyHrN1bsGaGVwHnyakaFcM7ZebJo=
knative.dev/networking v0.0.0-20231201014832-2274702ccc5f/go.mod h1:ESsvanBOBBkDxO1qa9mlBhV5yoBH1svcpy/dVmBNdJw=
knative.dev/pkg v0.0.0-20231204120332-9386ad6703ee h1:O1bJlEC4pzAEyTt8+f0Qe50QqS2JJxhK269CAGZ68vg=
knative.dev/pkg v0.0.0-20231204120332-9386ad6703ee/go.mod h1:aJX49KSaKufMCwJgrCbHxXLTQ/j6LGspSZxn9VIv51w=
knative.dev/serving v0.39.1-0.20231204120610-774408705c3f h1:toTPD7gly79U02vvgi9NT7KcmKkMJAsit0sTyfMao0o=
knative.dev/serving v0.39.1-0.20231204120610-774408705c3f/go.mod h1:n2BNbdMBl00cDYFTd//rt7hExJkTHTMaBx2MJedkDG4=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
knative.dev/client-pkg v0.0.0-20240124090003-67fca0ca8681 h1:vlXHcYG/rayUB1MGUAnpu5eLzMKuqEQS2Q5m7Z9WyKs=
knative.dev/client-pkg v0.0.0-20240124090003-67fca0ca8681/go.mod h1:3+IED1Q8eP1BwUq0cTxyznhTVrROzkGZcJ68X6KS8NA=
knative.dev/eventing v0.40.0 h1:zvMeKGBdQ5Us94Hdy7jmxpzyc1fdFnO4SS21+6nDSiU=
knative.dev/eventing v0.40.0/go.mod h1:+yUUIyvX9fn9bCSH3012kc8rG7YBbjvvxwy1Kr53dRc=
knative.dev/hack v0.0.0-20240123162936-f3f03ac0ab1a h1:+4Mdk0Lt3LGAVEI6vYyhfjBlVBx7sqS4wECtTkuXoSY=
knative.dev/hack v0.0.0-20240123162936-f3f03ac0ab1a/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
knative.dev/networking v0.0.0-20240116081125-ce0738abf051 h1:bTRVfwmfu4/7U1YBcgBl1VANAwmal6zkoAI9p7PQwDY=
knative.dev/networking v0.0.0-20240116081125-ce0738abf051/go.mod h1:rdzGL1OVP6VItEiJUN/FTCrDnIzkA6ykhSvaK+0Ne6o=
knative.dev/pkg v0.0.0-20240116073220-b488e7be5902 h1:H6+JJN23fhwYWCHY1339sY6uhIyoUwDy1a8dN233fdk=
knative.dev/pkg v0.0.0-20240116073220-b488e7be5902/go.mod h1:NYk8mMYoLkO7CQWnNkti4YGGnvLxN6MIDbUvtgeo0C0=
knative.dev/serving v0.40.0 h1:feTBe+6J/woNbPu8pv5AEfaumOZSrSBuIubkPTjxCzo=
knative.dev/serving v0.40.0/go.mod h1:Ory3XczDB8b1lH757CSdeDeouY3LHzSamX8IjmStuoU=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM=
sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s=
sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk=
sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4=
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0=
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY=
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U=
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=

View File

@@ -34,7 +34,6 @@ group "Kubernetes Codegen"
"${CODEGEN_PKG}"/generate-groups.sh "deepcopy" \
knative.dev/client/pkg/apis/client/v1alpha1/generated knative.dev/client/pkg/apis \
client:v1alpha1 \
--output-base "$(dirname "${BASH_SOURCE[0]}")/../../.." \
--go-header-file "${REPO_ROOT_DIR}"/hack/boilerplate.go.txt
group "Update deps post-codegen"

View File

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1 +0,0 @@
Copyright 2012 Matt T. Proud (matt.proud@gmail.com)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,141 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"unicode/utf8"
)
const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)
// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// - S maps to s and to U+017F 'ſ' Latin small letter long s
// - k maps to K and to U+212A 'K' Kelvin sign
//
// See https://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
nonLetter := false
special := false // special letter
for _, b := range s {
if b >= utf8.RuneSelf {
return bytes.EqualFold
}
upper := b & caseMask
if upper < 'A' || upper > 'Z' {
nonLetter = true
} else if upper == 'K' || upper == 'S' {
// See above for why these letters are special.
special = true
}
}
if special {
return equalFoldRight
}
if nonLetter {
return asciiEqualFold
}
return simpleLetterEqualFold
}
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask
if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]
}
return len(t) == 0
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
}
}
return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
}
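
The fold helpers above are unexported, but the equivalence classes they optimize for are exactly those of bytes.EqualFold, which is also the general fallback foldFunc returns for non-ASCII keys. A minimal, runnable sketch (not part of the vendored file; the inputs are illustrative) of the cases the foldFunc comment describes:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Plain ASCII letters: the simpleLetterEqualFold case.
	fmt.Println(bytes.EqualFold([]byte("Replicas"), []byte("replicas"))) // true

	// ASCII with non-letters such as '_': the asciiEqualFold case.
	fmt.Println(bytes.EqualFold([]byte("api_version"), []byte("API_VERSION"))) // true

	// The two special letters: 's' also folds to U+017F 'ſ' and 'k' also
	// folds to U+212A (the Kelvin sign), which is why keys containing
	// s/S/k/K fall back to equalFoldRight.
	fmt.Println(bytes.EqualFold([]byte("s"), []byte("\u017f"))) // true
	fmt.Println(bytes.EqualFold([]byte("k"), []byte("\u212a"))) // true
}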

View File

@@ -0,0 +1,42 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gofuzz
package json
import (
"fmt"
)
func Fuzz(data []byte) (score int) {
for _, ctor := range []func() any{
func() any { return new(any) },
func() any { return new(map[string]any) },
func() any { return new([]any) },
} {
v := ctor()
err := Unmarshal(data, v)
if err != nil {
continue
}
score = 1
m, err := Marshal(v)
if err != nil {
fmt.Printf("v=%#v\n", v)
panic(err)
}
u := ctor()
err = Unmarshal(m, u)
if err != nil {
fmt.Printf("v=%#v\n", v)
fmt.Printf("m=%s\n", m)
panic(err)
}
}
return
}
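
Because of the gofuzz build tag, the Fuzz function above is only compiled for fuzzing runs. The same unmarshal/marshal/unmarshal round-trip property can be checked as an ordinary test; this is a rough sketch that assumes the fork keeps encoding/json semantics and uses the standard library so it stays self-contained:

package json_test

import (
	"encoding/json"
	"testing"
)

func TestRoundTrip(t *testing.T) {
	in := []byte(`{"a":[1,2,3],"b":{"c":null}}`)

	var v any
	if err := json.Unmarshal(in, &v); err != nil {
		t.Fatal(err)
	}
	out, err := json.Marshal(v)
	if err != nil {
		t.Fatal(err)
	}
	var u any
	if err := json.Unmarshal(out, &u); err != nil {
		t.Fatalf("re-unmarshal failed: %v (marshaled %s)", err, out)
	}
}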

View File

@@ -0,0 +1,143 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
)
// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
return compact(dst, src, false)
}
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
origLen := dst.Len()
scan := newScanner()
defer freeScanner(scan)
start := 0
for i, c := range src {
if escape && (c == '<' || c == '>' || c == '&') {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u00`)
dst.WriteByte(hex[c>>4])
dst.WriteByte(hex[c&0xF])
start = i + 1
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if escape && c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u202`)
dst.WriteByte(hex[src[i+2]&0xF])
start = i + 3
}
v := scan.step(scan, c)
if v >= scanSkipSpace {
if v == scanError {
break
}
if start < i {
dst.Write(src[start:i])
}
start = i + 1
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
if start < len(src) {
dst.Write(src[start:])
}
return nil
}
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
dst.WriteByte('\n')
dst.WriteString(prefix)
for i := 0; i < depth; i++ {
dst.WriteString(indent)
}
}
// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst does not begin with the prefix nor
// any indentation, to make it easier to embed inside other formatted JSON data.
// Although leading space characters (space, tab, carriage return, newline)
// at the beginning of src are dropped, trailing space characters
// at the end of src are preserved and copied to dst.
// For example, if src has no trailing spaces, neither will dst;
// if src ends in a trailing newline, so will dst.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
origLen := dst.Len()
scan := newScanner()
defer freeScanner(scan)
needIndent := false
depth := 0
for _, c := range src {
scan.bytes++
v := scan.step(scan, c)
if v == scanSkipSpace {
continue
}
if v == scanError {
break
}
if needIndent && v != scanEndObject && v != scanEndArray {
needIndent = false
depth++
newline(dst, prefix, indent, depth)
}
// Emit semantically uninteresting bytes
// (in particular, punctuation in strings) unmodified.
if v == scanContinue {
dst.WriteByte(c)
continue
}
// Add spacing around real punctuation.
switch c {
case '{', '[':
// delay indent so that empty object and array are formatted as {} and [].
needIndent = true
dst.WriteByte(c)
case ',':
dst.WriteByte(c)
newline(dst, prefix, indent, depth)
case ':':
dst.WriteByte(c)
dst.WriteByte(' ')
case '}', ']':
if needIndent {
// suppress indent in empty object/array
needIndent = false
} else {
depth--
newline(dst, prefix, indent, depth)
}
dst.WriteByte(c)
default:
dst.WriteByte(c)
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
return nil
}
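
Compact and Indent here share the signatures of their encoding/json counterparts, so a short usage sketch against the standard library applies unchanged (the inputs below are illustrative, not taken from this repository):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	src := []byte(`{ "kind": "Service",  "spec": { "replicas": 1 } }`)

	var compacted bytes.Buffer
	if err := json.Compact(&compacted, src); err != nil {
		panic(err)
	}
	fmt.Println(compacted.String()) // {"kind":"Service","spec":{"replicas":1}}

	var indented bytes.Buffer
	// No prefix, two-space indent; as documented, the output does not begin
	// with the prefix or any indentation.
	if err := json.Indent(&indented, compacted.Bytes(), "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(indented.String())
}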

View File

@@ -0,0 +1,610 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
// JSON value parser state machine.
// Just about at the limit of what is reasonable to write by hand.
// Some parts are a bit tedious, but overall it nicely factors out the
// otherwise common code from the multiple scanning functions
// in this package (Compact, Indent, checkValid, etc).
//
// This file starts with two simple examples using the scanner
// before diving into the scanner itself.
import (
"strconv"
"sync"
)
// Valid reports whether data is a valid JSON encoding.
func Valid(data []byte) bool {
scan := newScanner()
defer freeScanner(scan)
return checkValid(data, scan) == nil
}
// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation.
// checkValid returns nil or a SyntaxError.
func checkValid(data []byte, scan *scanner) error {
scan.reset()
for _, c := range data {
scan.bytes++
if scan.step(scan, c) == scanError {
return scan.err
}
}
if scan.eof() == scanError {
return scan.err
}
return nil
}
// A SyntaxError is a description of a JSON syntax error.
// Unmarshal will return a SyntaxError if the JSON can't be parsed.
type SyntaxError struct {
msg string // description of error
Offset int64 // error occurred after reading Offset bytes
}
func (e *SyntaxError) Error() string { return e.msg }
// A scanner is a JSON scanning state machine.
// Callers call scan.reset and then pass bytes in one at a time
// by calling scan.step(&scan, c) for each byte.
// The return value, referred to as an opcode, tells the
// caller about significant parsing events like beginning
// and ending literals, objects, and arrays, so that the
// caller can follow along if it wishes.
// The return value scanEnd indicates that a single top-level
// JSON value has been completed, *before* the byte that
// just got passed in. (The indication must be delayed in order
// to recognize the end of numbers: is 123 a whole value or
// the beginning of 12345e+6?).
type scanner struct {
// The step is a func to be called to execute the next transition.
// Also tried using an integer constant and a single func
// with a switch, but using the func directly was 10% faster
// on a 64-bit Mac Mini, and it's nicer to read.
step func(*scanner, byte) int
// Reached end of top-level value.
endTop bool
// Stack of what we're in the middle of - array values, object keys, object values.
parseState []int
// Error that happened, if any.
err error
// total bytes consumed, updated by decoder.Decode (and deliberately
// not set to zero by scan.reset)
bytes int64
}
var scannerPool = sync.Pool{
New: func() any {
return &scanner{}
},
}
func newScanner() *scanner {
scan := scannerPool.Get().(*scanner)
// scan.reset by design doesn't set bytes to zero
scan.bytes = 0
scan.reset()
return scan
}
func freeScanner(scan *scanner) {
// Avoid hanging on to too much memory in extreme cases.
if len(scan.parseState) > 1024 {
scan.parseState = nil
}
scannerPool.Put(scan)
}
// These values are returned by the state transition functions
// assigned to scanner.state and the method scanner.eof.
// They give details about the current state of the scan that
// callers might be interested to know about.
// It is okay to ignore the return value of any particular
// call to scanner.state: if one call returns scanError,
// every subsequent call will return scanError too.
const (
// Continue.
scanContinue = iota // uninteresting byte
scanBeginLiteral // end implied by next result != scanContinue
scanBeginObject // begin object
scanObjectKey // just finished object key (string)
scanObjectValue // just finished non-last object value
scanEndObject // end object (implies scanObjectValue if possible)
scanBeginArray // begin array
scanArrayValue // just finished array value
scanEndArray // end array (implies scanArrayValue if possible)
scanSkipSpace // space byte; can skip; known to be last "continue" result
// Stop.
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
scanError // hit an error, scanner.err.
)
// These values are stored in the parseState stack.
// They give the current state of a composite value
// being scanned. If the parser is inside a nested value
// the parseState describes the nested state, outermost at entry 0.
const (
parseObjectKey = iota // parsing object key (before colon)
parseObjectValue // parsing object value (after colon)
parseArrayValue // parsing array value
)
// This limits the max nesting depth to prevent stack overflow.
// This is permitted by https://tools.ietf.org/html/rfc7159#section-9
const maxNestingDepth = 10000
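// Illustrative note, not part of the upstream file: with this limit a value
// nested more than 10000 levels deep is rejected instead of risking a stack
// overflow. Recent encoding/json releases apply the same limit, so the
// behavior can be sketched with the standard library:
//
//	deep := strings.Repeat("[", 10001) + strings.Repeat("]", 10001)
//	json.Valid([]byte(deep)) // false once the depth limit is exceeded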
// reset prepares the scanner for use.
// It must be called before calling s.step.
func (s *scanner) reset() {
s.step = stateBeginValue
s.parseState = s.parseState[0:0]
s.err = nil
s.endTop = false
}
// eof tells the scanner that the end of input has been reached.
// It returns a scan status just as s.step does.
func (s *scanner) eof() int {
if s.err != nil {
return scanError
}
if s.endTop {
return scanEnd
}
s.step(s, ' ')
if s.endTop {
return scanEnd
}
if s.err == nil {
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
}
return scanError
}
// pushParseState pushes a new parse state p onto the parse stack.
// an error state is returned if maxNestingDepth was exceeded, otherwise successState is returned.
func (s *scanner) pushParseState(c byte, newParseState int, successState int) int {
s.parseState = append(s.parseState, newParseState)
if len(s.parseState) <= maxNestingDepth {
return successState
}
return s.error(c, "exceeded max depth")
}
// popParseState pops a parse state (already obtained) off the stack
// and updates s.step accordingly.
func (s *scanner) popParseState() {
n := len(s.parseState) - 1
s.parseState = s.parseState[0:n]
if n == 0 {
s.step = stateEndTop
s.endTop = true
} else {
s.step = stateEndValue
}
}
func isSpace(c byte) bool {
return c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n')
}
// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c byte) int {
if isSpace(c) {
return scanSkipSpace
}
if c == ']' {
return stateEndValue(s, c)
}
return stateBeginValue(s, c)
}
// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c byte) int {
if isSpace(c) {
return scanSkipSpace
}
switch c {
case '{':
s.step = stateBeginStringOrEmpty
return s.pushParseState(c, parseObjectKey, scanBeginObject)
case '[':
s.step = stateBeginValueOrEmpty
return s.pushParseState(c, parseArrayValue, scanBeginArray)
case '"':
s.step = stateInString
return scanBeginLiteral
case '-':
s.step = stateNeg
return scanBeginLiteral
case '0': // beginning of 0.123
s.step = state0
return scanBeginLiteral
case 't': // beginning of true
s.step = stateT
return scanBeginLiteral
case 'f': // beginning of false
s.step = stateF
return scanBeginLiteral
case 'n': // beginning of null
s.step = stateN
return scanBeginLiteral
}
if '1' <= c && c <= '9' { // beginning of 1234.5
s.step = state1
return scanBeginLiteral
}
return s.error(c, "looking for beginning of value")
}
// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c byte) int {
if isSpace(c) {
return scanSkipSpace
}
if c == '}' {
n := len(s.parseState)
s.parseState[n-1] = parseObjectValue
return stateEndValue(s, c)
}
return stateBeginString(s, c)
}
// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c byte) int {
if isSpace(c) {
return scanSkipSpace
}
if c == '"' {
s.step = stateInString
return scanBeginLiteral
}
return s.error(c, "looking for beginning of object key string")
}
// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c byte) int {
n := len(s.parseState)
if n == 0 {
// Completed top-level before the current byte.
s.step = stateEndTop
s.endTop = true
return stateEndTop(s, c)
}
if isSpace(c) {
s.step = stateEndValue
return scanSkipSpace
}
ps := s.parseState[n-1]
switch ps {
case parseObjectKey:
if c == ':' {
s.parseState[n-1] = parseObjectValue
s.step = stateBeginValue
return scanObjectKey
}
return s.error(c, "after object key")
case parseObjectValue:
if c == ',' {
s.parseState[n-1] = parseObjectKey
s.step = stateBeginString
return scanObjectValue
}
if c == '}' {
s.popParseState()
return scanEndObject
}
return s.error(c, "after object key:value pair")
case parseArrayValue:
if c == ',' {
s.step = stateBeginValue
return scanArrayValue
}
if c == ']' {
s.popParseState()
return scanEndArray
}
return s.error(c, "after array element")
}
return s.error(c, "")
}
// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c byte) int {
if !isSpace(c) {
// Complain about non-space byte on next call.
s.error(c, "after top-level value")
}
return scanEnd
}
// stateInString is the state after reading `"`.
func stateInString(s *scanner, c byte) int {
if c == '"' {
s.step = stateEndValue
return scanContinue
}
if c == '\\' {
s.step = stateInStringEsc
return scanContinue
}
if c < 0x20 {
return s.error(c, "in string literal")
}
return scanContinue
}
// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c byte) int {
switch c {
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
s.step = stateInString
return scanContinue
case 'u':
s.step = stateInStringEscU
return scanContinue
}
return s.error(c, "in string escape code")
}
// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU1
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU12
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU123
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInString
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateNeg is the state after reading `-` during a number.
func stateNeg(s *scanner, c byte) int {
if c == '0' {
s.step = state0
return scanContinue
}
if '1' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return s.error(c, "in numeric literal")
}
// state1 is the state after reading a non-zero integer during a number,
// such as after reading `1` or `100` but not `0`.
func state1(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return state0(s, c)
}
// state0 is the state after reading `0` during a number.
func state0(s *scanner, c byte) int {
if c == '.' {
s.step = stateDot
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateDot is the state after reading the integer and decimal point in a number,
// such as after reading `1.`.
func stateDot(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateDot0
return scanContinue
}
return s.error(c, "after decimal point in numeric literal")
}
// stateDot0 is the state after reading the integer, decimal point, and subsequent
// digits of a number, such as after reading `3.14`.
func stateDot0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateE is the state after reading the mantissa and e in a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *scanner, c byte) int {
if c == '+' || c == '-' {
s.step = stateESign
return scanContinue
}
return stateESign(s, c)
}
// stateESign is the state after reading the mantissa, e, and sign in a number,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateE0
return scanContinue
}
return s.error(c, "in exponent of numeric literal")
}
// stateE0 is the state after reading the mantissa, e, optional sign,
// and at least one digit of the exponent in a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
return stateEndValue(s, c)
}
// stateT is the state after reading `t`.
func stateT(s *scanner, c byte) int {
if c == 'r' {
s.step = stateTr
return scanContinue
}
return s.error(c, "in literal true (expecting 'r')")
}
// stateTr is the state after reading `tr`.
func stateTr(s *scanner, c byte) int {
if c == 'u' {
s.step = stateTru
return scanContinue
}
return s.error(c, "in literal true (expecting 'u')")
}
// stateTru is the state after reading `tru`.
func stateTru(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal true (expecting 'e')")
}
// stateF is the state after reading `f`.
func stateF(s *scanner, c byte) int {
if c == 'a' {
s.step = stateFa
return scanContinue
}
return s.error(c, "in literal false (expecting 'a')")
}
// stateFa is the state after reading `fa`.
func stateFa(s *scanner, c byte) int {
if c == 'l' {
s.step = stateFal
return scanContinue
}
return s.error(c, "in literal false (expecting 'l')")
}
// stateFal is the state after reading `fal`.
func stateFal(s *scanner, c byte) int {
if c == 's' {
s.step = stateFals
return scanContinue
}
return s.error(c, "in literal false (expecting 's')")
}
// stateFals is the state after reading `fals`.
func stateFals(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal false (expecting 'e')")
}
// stateN is the state after reading `n`.
func stateN(s *scanner, c byte) int {
if c == 'u' {
s.step = stateNu
return scanContinue
}
return s.error(c, "in literal null (expecting 'u')")
}
// stateNu is the state after reading `nu`.
func stateNu(s *scanner, c byte) int {
if c == 'l' {
s.step = stateNul
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateNul is the state after reading `nul`.
func stateNul(s *scanner, c byte) int {
if c == 'l' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
func stateError(s *scanner, c byte) int {
return scanError
}
// error records an error and switches to the error state.
func (s *scanner) error(c byte, context string) int {
s.step = stateError
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
return scanError
}
// quoteChar formats c as a quoted character literal.
func quoteChar(c byte) string {
// special cases - different from quoted strings
if c == '\'' {
return `'\''`
}
if c == '"' {
return `'"'`
}
// use quoted string with different quotation marks
s := strconv.Quote(string(c))
return "'" + s[1:len(s)-1] + "'"
}
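
Valid is the only exported entry point in this file and behaves like encoding/json.Valid, which the quick sketch below uses so it can run without the fork (the inputs are illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	fmt.Println(json.Valid([]byte(`{"kind":"Service","spec":{"replicas":2}}`))) // true
	fmt.Println(json.Valid([]byte(`{"kind":"Service"`)))                        // false: unexpected end of input
	fmt.Println(json.Valid([]byte(`[1}`)))                                      // false: the `[1}` case stateError mentions
}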

View File

@@ -0,0 +1,515 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"errors"
"io"
)
// A Decoder reads and decodes JSON values from an input stream.
type Decoder struct {
r io.Reader
buf []byte
d decodeState
scanp int // start of unread data in buf
scanned int64 // amount of data already scanned
scan scanner
err error
tokenState int
tokenStack []int
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
// DisallowUnknownFields causes the Decoder to return an error when the destination
// is a struct and the input contains object keys which do not match any
// non-ignored, exported fields in the destination.
func (dec *Decoder) DisallowUnknownFields() { dec.d.disallowUnknownFields = true }
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v any) error {
if dec.err != nil {
return dec.err
}
if err := dec.tokenPrepareForDecode(); err != nil {
return err
}
if !dec.tokenValueAllowed() {
return &SyntaxError{msg: "not at beginning of value", Offset: dec.InputOffset()}
}
// Read whole value into buffer.
n, err := dec.readValue()
if err != nil {
return err
}
dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
dec.scanp += n
// Don't save err from unmarshal into dec.err:
// the connection is still usable since we read a complete JSON
// object from it before the error happened.
err = dec.d.unmarshal(v)
// fixup token streaming state
dec.tokenValueEnd()
return err
}
// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (dec *Decoder) Buffered() io.Reader {
return bytes.NewReader(dec.buf[dec.scanp:])
}
// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
dec.scan.reset()
scanp := dec.scanp
var err error
Input:
// help the compiler see that scanp is never negative, so it can remove
// some bounds checks below.
for scanp >= 0 {
// Look in the buffer for a new value.
for ; scanp < len(dec.buf); scanp++ {
c := dec.buf[scanp]
dec.scan.bytes++
switch dec.scan.step(&dec.scan, c) {
case scanEnd:
// scanEnd is delayed one byte so we decrement
// the scanner bytes count by 1 to ensure that
// this value is correct in the next call of Decode.
dec.scan.bytes--
break Input
case scanEndObject, scanEndArray:
// scanEnd is delayed one byte.
// We might block trying to get that byte from src,
// so instead invent a space byte.
if stateEndValue(&dec.scan, ' ') == scanEnd {
scanp++
break Input
}
case scanError:
dec.err = dec.scan.err
return 0, dec.scan.err
}
}
// Did the last read have an error?
// Delayed until now to allow buffer scan.
if err != nil {
if err == io.EOF {
if dec.scan.step(&dec.scan, ' ') == scanEnd {
break Input
}
if nonSpace(dec.buf) {
err = io.ErrUnexpectedEOF
}
}
dec.err = err
return 0, err
}
n := scanp - dec.scanp
err = dec.refill()
scanp = dec.scanp + n
}
return scanp - dec.scanp, nil
}
func (dec *Decoder) refill() error {
// Make room to read more into the buffer.
// First slide down data already consumed.
if dec.scanp > 0 {
dec.scanned += int64(dec.scanp)
n := copy(dec.buf, dec.buf[dec.scanp:])
dec.buf = dec.buf[:n]
dec.scanp = 0
}
// Grow buffer if not large enough.
const minRead = 512
if cap(dec.buf)-len(dec.buf) < minRead {
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
copy(newBuf, dec.buf)
dec.buf = newBuf
}
// Read. Delay error for next iteration (after scan).
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
dec.buf = dec.buf[0 : len(dec.buf)+n]
return err
}
func nonSpace(b []byte) bool {
for _, c := range b {
if !isSpace(c) {
return true
}
}
return false
}
// An Encoder writes JSON values to an output stream.
type Encoder struct {
w io.Writer
err error
escapeHTML bool
indentBuf *bytes.Buffer
indentPrefix string
indentValue string
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w: w, escapeHTML: true}
}
// Encode writes the JSON encoding of v to the stream,
// followed by a newline character.
//
// See the documentation for Marshal for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v any) error {
if enc.err != nil {
return enc.err
}
e := newEncodeState()
defer encodeStatePool.Put(e)
err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML})
if err != nil {
return err
}
// Terminate each value with a newline.
// This makes the output look a little nicer
// when debugging, and some kind of space
// is required if the encoded value was a number,
// so that the reader knows there aren't more
// digits coming.
e.WriteByte('\n')
b := e.Bytes()
if enc.indentPrefix != "" || enc.indentValue != "" {
if enc.indentBuf == nil {
enc.indentBuf = new(bytes.Buffer)
}
enc.indentBuf.Reset()
err = Indent(enc.indentBuf, b, enc.indentPrefix, enc.indentValue)
if err != nil {
return err
}
b = enc.indentBuf.Bytes()
}
if _, err = enc.w.Write(b); err != nil {
enc.err = err
}
return err
}
// SetIndent instructs the encoder to format each subsequent encoded
// value as if indented by the package-level function Indent(dst, src, prefix, indent).
// Calling SetIndent("", "") disables indentation.
func (enc *Encoder) SetIndent(prefix, indent string) {
enc.indentPrefix = prefix
enc.indentValue = indent
}
// SetEscapeHTML specifies whether problematic HTML characters
// should be escaped inside JSON quoted strings.
// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e
// to avoid certain safety problems that can arise when embedding JSON in HTML.
//
// In non-HTML settings where the escaping interferes with the readability
// of the output, SetEscapeHTML(false) disables this behavior.
func (enc *Encoder) SetEscapeHTML(on bool) {
enc.escapeHTML = on
}
// RawMessage is a raw encoded JSON value.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte
// MarshalJSON returns m as the JSON encoding of m.
func (m RawMessage) MarshalJSON() ([]byte, error) {
if m == nil {
return []byte("null"), nil
}
return m, nil
}
// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
if m == nil {
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
}
*m = append((*m)[0:0], data...)
return nil
}
var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)
// A Token holds a value of one of these types:
//
// Delim, for the four JSON delimiters [ ] { }
// bool, for JSON booleans
// float64, for JSON numbers
// Number, for JSON numbers
// string, for JSON string literals
// nil, for JSON null
type Token any
const (
tokenTopValue = iota
tokenArrayStart
tokenArrayValue
tokenArrayComma
tokenObjectStart
tokenObjectKey
tokenObjectColon
tokenObjectValue
tokenObjectComma
)
// advance tokenstate from a separator state to a value state
func (dec *Decoder) tokenPrepareForDecode() error {
// Note: Not calling peek before switch, to avoid
// putting peek into the standard Decode path.
// peek is only called when using the Token API.
switch dec.tokenState {
case tokenArrayComma:
c, err := dec.peek()
if err != nil {
return err
}
if c != ',' {
return &SyntaxError{"expected comma after array element", dec.InputOffset()}
}
dec.scanp++
dec.tokenState = tokenArrayValue
case tokenObjectColon:
c, err := dec.peek()
if err != nil {
return err
}
if c != ':' {
return &SyntaxError{"expected colon after object key", dec.InputOffset()}
}
dec.scanp++
dec.tokenState = tokenObjectValue
}
return nil
}
func (dec *Decoder) tokenValueAllowed() bool {
switch dec.tokenState {
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
return true
}
return false
}
func (dec *Decoder) tokenValueEnd() {
switch dec.tokenState {
case tokenArrayStart, tokenArrayValue:
dec.tokenState = tokenArrayComma
case tokenObjectValue:
dec.tokenState = tokenObjectComma
}
}
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim rune
func (d Delim) String() string {
return string(d)
}
// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type Delim
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {
for {
c, err := dec.peek()
if err != nil {
return nil, err
}
switch c {
case '[':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenArrayStart
return Delim('['), nil
case ']':
if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim(']'), nil
case '{':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenObjectStart
return Delim('{'), nil
case '}':
if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim('}'), nil
case ':':
if dec.tokenState != tokenObjectColon {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = tokenObjectValue
continue
case ',':
if dec.tokenState == tokenArrayComma {
dec.scanp++
dec.tokenState = tokenArrayValue
continue
}
if dec.tokenState == tokenObjectComma {
dec.scanp++
dec.tokenState = tokenObjectKey
continue
}
return dec.tokenError(c)
case '"':
if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
var x string
old := dec.tokenState
dec.tokenState = tokenTopValue
err := dec.Decode(&x)
dec.tokenState = old
if err != nil {
return nil, err
}
dec.tokenState = tokenObjectColon
return x, nil
}
fallthrough
default:
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
var x any
if err := dec.Decode(&x); err != nil {
return nil, err
}
return x, nil
}
}
}
func (dec *Decoder) tokenError(c byte) (Token, error) {
var context string
switch dec.tokenState {
case tokenTopValue:
context = " looking for beginning of value"
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
context = " looking for beginning of value"
case tokenArrayComma:
context = " after array element"
case tokenObjectKey:
context = " looking for beginning of object key string"
case tokenObjectColon:
context = " after object key"
case tokenObjectComma:
context = " after object key:value pair"
}
return nil, &SyntaxError{"invalid character " + quoteChar(c) + context, dec.InputOffset()}
}
// More reports whether there is another element in the
// current array or object being parsed.
func (dec *Decoder) More() bool {
c, err := dec.peek()
return err == nil && c != ']' && c != '}'
}
func (dec *Decoder) peek() (byte, error) {
var err error
for {
for i := dec.scanp; i < len(dec.buf); i++ {
c := dec.buf[i]
if isSpace(c) {
continue
}
dec.scanp = i
return c, nil
}
// buffer has been scanned, now report any error
if err != nil {
return 0, err
}
err = dec.refill()
}
}
// InputOffset returns the input stream byte offset of the current decoder position.
// The offset gives the location of the end of the most recently returned token
// and the beginning of the next token.
func (dec *Decoder) InputOffset() int64 {
return dec.scanned + int64(dec.scanp)
}
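As a usage sketch of the streaming API defined above (Token, More, and Decode), here is a hedged example written against the standard library's encoding/json, whose Decoder this vendored copy mirrors; the input document is invented for illustration:
package main
import (
	"encoding/json"
	"fmt"
	"strings"
)
func main() {
	dec := json.NewDecoder(strings.NewReader(`[{"a":1},{"a":2}]`))
	// Consume the opening Delim('[').
	if _, err := dec.Token(); err != nil {
		panic(err)
	}
	// More reports whether another array element follows; Decode reads
	// one complete value and leaves the token state consistent.
	for dec.More() {
		var v map[string]int
		if err := dec.Decode(&v); err != nil {
			panic(err)
		}
		fmt.Println(v)
	}
	// Consume the closing Delim(']').
	if _, err := dec.Token(); err != nil {
		panic(err)
	}
}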


@ -0,0 +1,218 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import "unicode/utf8"
// safeSet holds the value true if the ASCII character with the given array
// position can be represented inside a JSON string without any further
// escaping.
//
// All values are true except for the ASCII control characters (0-31), the
// double quote ("), and the backslash character ("\").
var safeSet = [utf8.RuneSelf]bool{
' ': true,
'!': true,
'"': false,
'#': true,
'$': true,
'%': true,
'&': true,
'\'': true,
'(': true,
')': true,
'*': true,
'+': true,
',': true,
'-': true,
'.': true,
'/': true,
'0': true,
'1': true,
'2': true,
'3': true,
'4': true,
'5': true,
'6': true,
'7': true,
'8': true,
'9': true,
':': true,
';': true,
'<': true,
'=': true,
'>': true,
'?': true,
'@': true,
'A': true,
'B': true,
'C': true,
'D': true,
'E': true,
'F': true,
'G': true,
'H': true,
'I': true,
'J': true,
'K': true,
'L': true,
'M': true,
'N': true,
'O': true,
'P': true,
'Q': true,
'R': true,
'S': true,
'T': true,
'U': true,
'V': true,
'W': true,
'X': true,
'Y': true,
'Z': true,
'[': true,
'\\': false,
']': true,
'^': true,
'_': true,
'`': true,
'a': true,
'b': true,
'c': true,
'd': true,
'e': true,
'f': true,
'g': true,
'h': true,
'i': true,
'j': true,
'k': true,
'l': true,
'm': true,
'n': true,
'o': true,
'p': true,
'q': true,
'r': true,
's': true,
't': true,
'u': true,
'v': true,
'w': true,
'x': true,
'y': true,
'z': true,
'{': true,
'|': true,
'}': true,
'~': true,
'\u007f': true,
}
// htmlSafeSet holds the value true if the ASCII character with the given
// array position can be safely represented inside a JSON string, embedded
// inside of HTML <script> tags, without any additional escaping.
//
// All values are true except for the ASCII control characters (0-31), the
// double quote ("), the backslash character ("\"), HTML opening and closing
// tags ("<" and ">"), and the ampersand ("&").
var htmlSafeSet = [utf8.RuneSelf]bool{
' ': true,
'!': true,
'"': false,
'#': true,
'$': true,
'%': true,
'&': false,
'\'': true,
'(': true,
')': true,
'*': true,
'+': true,
',': true,
'-': true,
'.': true,
'/': true,
'0': true,
'1': true,
'2': true,
'3': true,
'4': true,
'5': true,
'6': true,
'7': true,
'8': true,
'9': true,
':': true,
';': true,
'<': false,
'=': true,
'>': false,
'?': true,
'@': true,
'A': true,
'B': true,
'C': true,
'D': true,
'E': true,
'F': true,
'G': true,
'H': true,
'I': true,
'J': true,
'K': true,
'L': true,
'M': true,
'N': true,
'O': true,
'P': true,
'Q': true,
'R': true,
'S': true,
'T': true,
'U': true,
'V': true,
'W': true,
'X': true,
'Y': true,
'Z': true,
'[': true,
'\\': false,
']': true,
'^': true,
'_': true,
'`': true,
'a': true,
'b': true,
'c': true,
'd': true,
'e': true,
'f': true,
'g': true,
'h': true,
'i': true,
'j': true,
'k': true,
'l': true,
'm': true,
'n': true,
'o': true,
'p': true,
'q': true,
'r': true,
's': true,
't': true,
'u': true,
'v': true,
'w': true,
'x': true,
'y': true,
'z': true,
'{': true,
'|': true,
'}': true,
'~': true,
'\u007f': true,
}
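htmlSafeSet above is why '&', '<', and '>' are escaped by default. A small sketch of the visible effect, written against the standard encoding/json Encoder that this table mirrors (the string literal is arbitrary):
package main
import (
	"bytes"
	"encoding/json"
	"fmt"
)
func main() {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.Encode("<b>&</b>") // default: HTML-safe escaping
	enc.SetEscapeHTML(false)
	enc.Encode("<b>&</b>") // characters written verbatim
	fmt.Print(buf.String())
	// "\u003cb\u003e\u0026\u003c/b\u003e"
	// "<b>&</b>"
}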


@ -0,0 +1,38 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"strings"
)
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
tag, opt, _ := strings.Cut(tag, ",")
return tag, tagOptions(opt)
}
// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var name string
name, s, _ = strings.Cut(s, ",")
if name == optionName {
return true
}
}
return false
}
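For reference, a hypothetical in-package sketch of how parseTag and Contains split a `json` struct tag (the tag value is invented, and the snippet assumes an fmt import in the same package):
// A hypothetical in-package check:
func tagSplitExample() {
	name, opts := parseTag("field_name,omitempty,string")
	fmt.Println(name)                       // field_name
	fmt.Println(opts.Contains("omitempty")) // true
	fmt.Println(opts.Contains("omit"))      // false
}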


@ -2,9 +2,12 @@ package jsonpatch
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"reflect"
"github.com/evanphx/json-patch/v5/internal/json"
)
func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
@ -88,14 +91,14 @@ func pruneDocNulls(doc *partialDoc) *partialDoc {
func pruneAryNulls(ary *partialArray) *partialArray {
newAry := []*lazyNode{}
for _, v := range *ary {
for _, v := range ary.nodes {
if v != nil {
pruneNulls(v)
}
newAry = append(newAry, v)
}
*ary = newAry
ary.nodes = newAry
return ary
}
@ -117,20 +120,28 @@ func MergePatch(docData, patchData []byte) ([]byte, error) {
}
func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
if !json.Valid(docData) {
return nil, errBadJSONDoc
}
if !json.Valid(patchData) {
return nil, errBadJSONPatch
}
doc := &partialDoc{}
docErr := json.Unmarshal(docData, doc)
docErr := doc.UnmarshalJSON(docData)
patch := &partialDoc{}
patchErr := json.Unmarshal(patchData, patch)
patchErr := patch.UnmarshalJSON(patchData)
if isSyntaxError(docErr) {
return nil, errBadJSONDoc
}
if isSyntaxError(patchErr) {
return nil, errBadJSONPatch
return patchData, nil
}
if docErr == nil && doc.obj == nil {
@ -138,7 +149,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
}
if patchErr == nil && patch.obj == nil {
return nil, errBadJSONPatch
return patchData, nil
}
if docErr != nil || patchErr != nil {
@ -151,15 +162,19 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
}
} else {
patchAry := &partialArray{}
patchErr = json.Unmarshal(patchData, patchAry)
patchErr = unmarshal(patchData, &patchAry.nodes)
if patchErr != nil {
// Not an array either, a literal is the result directly.
if json.Valid(patchData) {
return patchData, nil
}
return nil, errBadJSONPatch
}
pruneAryNulls(patchAry)
out, patchErr := json.Marshal(patchAry)
out, patchErr := json.Marshal(patchAry.nodes)
if patchErr != nil {
return nil, errBadJSONPatch
@ -175,6 +190,12 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
}
func isSyntaxError(err error) bool {
if errors.Is(err, io.EOF) {
return true
}
if errors.Is(err, io.ErrUnexpectedEOF) {
return true
}
if _, ok := err.(*json.SyntaxError); ok {
return true
}
@ -227,12 +248,12 @@ func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
originalDoc := map[string]interface{}{}
modifiedDoc := map[string]interface{}{}
err := json.Unmarshal(originalJSON, &originalDoc)
err := unmarshal(originalJSON, &originalDoc)
if err != nil {
return nil, errBadJSONDoc
}
err = json.Unmarshal(modifiedJSON, &modifiedDoc)
err = unmarshal(modifiedJSON, &modifiedDoc)
if err != nil {
return nil, errBadJSONDoc
}
@ -245,6 +266,10 @@ func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
return json.Marshal(dest)
}
func unmarshal(data []byte, into interface{}) error {
return json.UnmarshalValid(data, into)
}
// createArrayMergePatch will return an array of merge-patch documents capable
// of converting the original document to the modified document for each
// pair of JSON documents provided in the arrays.
@ -253,12 +278,12 @@ func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
originalDocs := []json.RawMessage{}
modifiedDocs := []json.RawMessage{}
err := json.Unmarshal(originalJSON, &originalDocs)
err := unmarshal(originalJSON, &originalDocs)
if err != nil {
return nil, errBadJSONDoc
}
err = json.Unmarshal(modifiedJSON, &modifiedDocs)
err = unmarshal(modifiedJSON, &modifiedDocs)
if err != nil {
return nil, errBadJSONDoc
}
@ -314,6 +339,11 @@ func matchesValue(av, bv interface{}) bool {
if bt == at {
return true
}
case json.Number:
bt := bv.(json.Number)
if bt == at {
return true
}
case float64:
bt := bv.(float64)
if bt == at {
@ -377,7 +407,7 @@ func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
if len(dst) > 0 {
into[key] = dst
}
case string, float64, bool:
case string, float64, bool, json.Number:
if !matchesValue(av, bv) {
into[key] = bv
}
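The doMergePatch changes above back the package's exported RFC 7386 entry point. A hedged usage sketch of github.com/evanphx/json-patch/v5 (the documents are invented):
package main
import (
	"fmt"
	jsonpatch "github.com/evanphx/json-patch/v5"
)
func main() {
	doc := []byte(`{"name":"a","labels":{"x":"1","y":"2"}}`)
	patch := []byte(`{"labels":{"y":null,"z":"3"}}`)
	// MergePatch applies an RFC 7386 merge patch; a null value in the
	// patch removes the corresponding key from the document.
	merged, err := jsonpatch.MergePatch(doc, patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged)) // y removed, z added; name and x kept
}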


@ -2,11 +2,12 @@ package jsonpatch
import (
"bytes"
"encoding/json"
"fmt"
"strconv"
"strings"
"unicode"
"github.com/evanphx/json-patch/v5/internal/json"
"github.com/pkg/errors"
)
@ -45,7 +46,7 @@ var (
type lazyNode struct {
raw *json.RawMessage
doc *partialDoc
ary partialArray
ary *partialArray
which int
}
@ -56,11 +57,15 @@ type Operation map[string]*json.RawMessage
type Patch []Operation
type partialDoc struct {
self *lazyNode
keys []string
obj map[string]*lazyNode
}
type partialArray []*lazyNode
type partialArray struct {
self *lazyNode
nodes []*lazyNode
}
type container interface {
get(key string, options *ApplyOptions) (*lazyNode, error)
@ -107,14 +112,14 @@ func newRawMessage(buf []byte) *json.RawMessage {
return &ra
}
func (n *lazyNode) MarshalJSON() ([]byte, error) {
func (n *lazyNode) RedirectMarshalJSON() (any, error) {
switch n.which {
case eRaw:
return json.Marshal(n.raw)
return n.raw, nil
case eDoc:
return json.Marshal(n.doc)
return n.doc, nil
case eAry:
return json.Marshal(n.ary)
return n.ary.nodes, nil
default:
return nil, ErrUnknownType
}
@ -128,39 +133,38 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error {
return nil
}
func (n *partialDoc) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
if _, err := buf.WriteString("{"); err != nil {
return nil, err
func (n *partialDoc) TrustMarshalJSON(buf *bytes.Buffer) error {
if err := buf.WriteByte('{'); err != nil {
return err
}
for i, k := range n.keys {
if i > 0 {
if _, err := buf.WriteString(", "); err != nil {
return nil, err
if err := buf.WriteByte(','); err != nil {
return err
}
}
key, err := json.Marshal(k)
if err != nil {
return nil, err
return err
}
if _, err := buf.Write(key); err != nil {
return nil, err
return err
}
if _, err := buf.WriteString(": "); err != nil {
return nil, err
if err := buf.WriteByte(':'); err != nil {
return err
}
value, err := json.Marshal(n.obj[k])
if err != nil {
return nil, err
return err
}
if _, err := buf.Write(value); err != nil {
return nil, err
return err
}
}
if _, err := buf.WriteString("}"); err != nil {
return nil, err
if err := buf.WriteByte('}'); err != nil {
return err
}
return buf.Bytes(), nil
return nil
}
type syntaxError struct {
@ -172,70 +176,29 @@ func (err *syntaxError) Error() string {
}
func (n *partialDoc) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &n.obj); err != nil {
keys, err := json.UnmarshalValidWithKeys(data, &n.obj)
if err != nil {
return err
}
buffer := bytes.NewBuffer(data)
d := json.NewDecoder(buffer)
if t, err := d.Token(); err != nil {
return err
} else if t != startObject {
return &syntaxError{fmt.Sprintf("unexpected JSON token in document node: %v", t)}
}
for d.More() {
k, err := d.Token()
if err != nil {
return err
}
key, ok := k.(string)
if !ok {
return &syntaxError{fmt.Sprintf("unexpected JSON token as document node key: %s", k)}
}
if err := skipValue(d); err != nil {
return err
}
n.keys = append(n.keys, key)
}
n.keys = keys
return nil
}
func skipValue(d *json.Decoder) error {
t, err := d.Token()
if err != nil {
return err
}
if t != startObject && t != startArray {
return nil
}
for d.More() {
if t == startObject {
// consume key token
if _, err := d.Token(); err != nil {
return err
}
}
if err := skipValue(d); err != nil {
return err
}
}
end, err := d.Token()
if err != nil {
return err
}
if t == startObject && end != endObject {
return &syntaxError{msg: "expected close object token"}
}
if t == startArray && end != endArray {
return &syntaxError{msg: "expected close object token"}
}
return nil
func (n *partialArray) UnmarshalJSON(data []byte) error {
return json.UnmarshalValid(data, &n.nodes)
}
func (n *partialArray) RedirectMarshalJSON() (interface{}, error) {
return n.nodes, nil
}
func deepCopy(src *lazyNode) (*lazyNode, int, error) {
if src == nil {
return nil, 0, nil
}
a, err := src.MarshalJSON()
a, err := json.Marshal(src)
if err != nil {
return nil, 0, err
}
@ -243,6 +206,16 @@ func deepCopy(src *lazyNode) (*lazyNode, int, error) {
return newLazyNode(newRawMessage(a)), sz, nil
}
func (n *lazyNode) nextByte() byte {
s := []byte(*n.raw)
for unicode.IsSpace(rune(s[0])) {
s = s[1:]
}
return s[0]
}
func (n *lazyNode) intoDoc() (*partialDoc, error) {
if n.which == eDoc {
return n.doc, nil
@ -252,7 +225,15 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) {
return nil, ErrInvalid
}
err := json.Unmarshal(*n.raw, &n.doc)
if n.nextByte() != '{' {
return nil, ErrInvalid
}
err := unmarshal(*n.raw, &n.doc)
if n.doc == nil {
return nil, ErrInvalid
}
if err != nil {
return nil, err
@ -264,21 +245,21 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) {
func (n *lazyNode) intoAry() (*partialArray, error) {
if n.which == eAry {
return &n.ary, nil
return n.ary, nil
}
if n.raw == nil {
return nil, ErrInvalid
}
err := json.Unmarshal(*n.raw, &n.ary)
err := unmarshal(*n.raw, &n.ary)
if err != nil {
return nil, err
}
n.which = eAry
return &n.ary, nil
return n.ary, nil
}
func (n *lazyNode) compact() []byte {
@ -302,12 +283,16 @@ func (n *lazyNode) tryDoc() bool {
return false
}
err := json.Unmarshal(*n.raw, &n.doc)
err := unmarshal(*n.raw, &n.doc)
if err != nil {
return false
}
if n.doc == nil {
return false
}
n.which = eDoc
return true
}
@ -317,7 +302,7 @@ func (n *lazyNode) tryAry() bool {
return false
}
err := json.Unmarshal(*n.raw, &n.ary)
err := unmarshal(*n.raw, &n.ary)
if err != nil {
return false
@ -327,6 +312,18 @@ func (n *lazyNode) tryAry() bool {
return true
}
func (n *lazyNode) isNull() bool {
if n == nil {
return true
}
if n.raw == nil {
return true
}
return bytes.Equal(n.compact(), rawJSONNull)
}
func (n *lazyNode) equal(o *lazyNode) bool {
if n.which == eRaw {
if !n.tryDoc() && !n.tryAry() {
@ -334,7 +331,27 @@ func (n *lazyNode) equal(o *lazyNode) bool {
return false
}
return bytes.Equal(n.compact(), o.compact())
nc := n.compact()
oc := o.compact()
if nc[0] == '"' && oc[0] == '"' {
// ok, 2 strings
var ns, os string
err := json.UnmarshalValid(nc, &ns)
if err != nil {
return false
}
err = json.UnmarshalValid(oc, &os)
if err != nil {
return false
}
return ns == os
}
return bytes.Equal(nc, oc)
}
}
@ -380,12 +397,12 @@ func (n *lazyNode) equal(o *lazyNode) bool {
return false
}
if len(n.ary) != len(o.ary) {
if len(n.ary.nodes) != len(o.ary.nodes) {
return false
}
for idx, val := range n.ary {
if !val.equal(o.ary[idx]) {
for idx, val := range n.ary.nodes {
if !val.equal(o.ary.nodes[idx]) {
return false
}
}
@ -398,7 +415,7 @@ func (o Operation) Kind() string {
if obj, ok := o["op"]; ok && obj != nil {
var op string
err := json.Unmarshal(*obj, &op)
err := unmarshal(*obj, &op)
if err != nil {
return "unknown"
@ -415,7 +432,7 @@ func (o Operation) Path() (string, error) {
if obj, ok := o["path"]; ok && obj != nil {
var op string
err := json.Unmarshal(*obj, &op)
err := unmarshal(*obj, &op)
if err != nil {
return "unknown", err
@ -432,7 +449,7 @@ func (o Operation) From() (string, error) {
if obj, ok := o["from"]; ok && obj != nil {
var op string
err := json.Unmarshal(*obj, &op)
err := unmarshal(*obj, &op)
if err != nil {
return "unknown", err
@ -446,6 +463,10 @@ func (o Operation) From() (string, error) {
func (o Operation) value() *lazyNode {
if obj, ok := o["value"]; ok {
// A `null` gets decoded as a nil RawMessage, so let's fix it up here.
if obj == nil {
return newLazyNode(newRawMessage(rawJSONNull))
}
return newLazyNode(obj)
}
@ -461,7 +482,7 @@ func (o Operation) ValueInterface() (interface{}, error) {
var v interface{}
err := json.Unmarshal(*obj, &v)
err := unmarshal(*obj, &v)
if err != nil {
return nil, err
@ -497,6 +518,9 @@ func findObject(pd *container, path string, options *ApplyOptions) (container, s
split := strings.Split(path, "/")
if len(split) < 2 {
if path == "" {
return doc, ""
}
return nil, ""
}
@ -552,6 +576,9 @@ func (d *partialDoc) add(key string, val *lazyNode, options *ApplyOptions) error
}
func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) {
if key == "" {
return d.self, nil
}
v, ok := d.obj[key]
if !ok {
return v, errors.Wrapf(ErrMissing, "unable to get nonexistent key: %s", key)
@ -591,19 +618,19 @@ func (d *partialArray) set(key string, val *lazyNode, options *ApplyOptions) err
if !options.SupportNegativeIndices {
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
if idx < -len(*d) {
if idx < -len(d.nodes) {
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
idx += len(*d)
idx += len(d.nodes)
}
(*d)[idx] = val
d.nodes[idx] = val
return nil
}
func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) error {
if key == "-" {
*d = append(*d, val)
d.nodes = append(d.nodes, val)
return nil
}
@ -612,11 +639,11 @@ func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) err
return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
}
sz := len(*d) + 1
sz := len(d.nodes) + 1
ary := make([]*lazyNode, sz)
cur := *d
cur := d
if idx >= len(ary) {
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
@ -632,15 +659,19 @@ func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) err
idx += len(ary)
}
copy(ary[0:idx], cur[0:idx])
copy(ary[0:idx], cur.nodes[0:idx])
ary[idx] = val
copy(ary[idx+1:], cur[idx:])
copy(ary[idx+1:], cur.nodes[idx:])
*d = ary
d.nodes = ary
return nil
}
func (d *partialArray) get(key string, options *ApplyOptions) (*lazyNode, error) {
if key == "" {
return d.self, nil
}
idx, err := strconv.Atoi(key)
if err != nil {
@ -651,17 +682,17 @@ func (d *partialArray) get(key string, options *ApplyOptions) (*lazyNode, error)
if !options.SupportNegativeIndices {
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
if idx < -len(*d) {
if idx < -len(d.nodes) {
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
idx += len(*d)
idx += len(d.nodes)
}
if idx >= len(*d) {
if idx >= len(d.nodes) {
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
return (*d)[idx], nil
return d.nodes[idx], nil
}
func (d *partialArray) remove(key string, options *ApplyOptions) error {
@ -670,9 +701,9 @@ func (d *partialArray) remove(key string, options *ApplyOptions) error {
return err
}
cur := *d
cur := d
if idx >= len(cur) {
if idx >= len(cur.nodes) {
if options.AllowMissingPathOnRemove {
return nil
}
@ -683,21 +714,21 @@ func (d *partialArray) remove(key string, options *ApplyOptions) error {
if !options.SupportNegativeIndices {
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
if idx < -len(cur) {
if idx < -len(cur.nodes) {
if options.AllowMissingPathOnRemove {
return nil
}
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
idx += len(cur)
idx += len(cur.nodes)
}
ary := make([]*lazyNode, len(cur)-1)
ary := make([]*lazyNode, len(cur.nodes)-1)
copy(ary[0:idx], cur[0:idx])
copy(ary[idx:], cur[idx+1:])
copy(ary[0:idx], cur.nodes[0:idx])
copy(ary[idx:], cur.nodes[idx+1:])
*d = ary
d.nodes = ary
return nil
}
@ -707,6 +738,32 @@ func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error {
return errors.Wrapf(ErrMissing, "add operation failed to decode path")
}
// special case, adding to empty means replacing the container with the value given
if path == "" {
val := op.value()
var pd container
if (*val.raw)[0] == '[' {
pd = &partialArray{
self: val,
}
} else {
pd = &partialDoc{
self: val,
}
}
err := json.UnmarshalValid(*val.raw, pd)
if err != nil {
return err
}
*doc = pd
return nil
}
if options.EnsurePathExistsOnAdd {
err = ensurePathExists(doc, path, options)
@ -762,9 +819,9 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
if arrIndex, err = strconv.Atoi(part); err == nil {
pa, ok := doc.(*partialArray)
if ok && arrIndex >= len(*pa)+1 {
if ok && arrIndex >= len(pa.nodes)+1 {
// Pad the array with null values up to the required index.
for i := len(*pa); i <= arrIndex-1; i++ {
for i := len(pa.nodes); i <= arrIndex-1; i++ {
doc.add(strconv.Itoa(i), newLazyNode(newRawMessage(rawJSONNull)), options)
}
}
@ -798,7 +855,10 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
newNode := newLazyNode(newRawMessage(rawJSONObject))
doc.add(part, newNode, options)
doc, _ = newNode.intoDoc()
doc, err = newNode.intoDoc()
if err != nil {
return err
}
}
} else {
if isArray(*target.raw) {
@ -899,7 +959,7 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro
switch val.which {
case eAry:
*doc = &val.ary
*doc = val.ary
case eDoc:
*doc = val.doc
case eRaw:
@ -934,6 +994,10 @@ func (p Patch) move(doc *container, op Operation, options *ApplyOptions) error {
return errors.Wrapf(err, "move operation failed to decode from")
}
if from == "" {
return errors.Wrapf(ErrInvalid, "unable to move entire document to another path")
}
con, key := findObject(doc, from, options)
if con == nil {
@ -983,7 +1047,7 @@ func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error {
self.doc = sv
self.which = eDoc
case *partialArray:
self.ary = *sv
self.ary = sv
self.which = eAry
}
@ -1005,12 +1069,14 @@ func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error {
return errors.Wrapf(err, "error in test for path: '%s'", path)
}
ov := op.value()
if val == nil {
if op.value() == nil || op.value().raw == nil {
if ov.isNull() {
return nil
}
return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
} else if op.value() == nil {
} else if ov.isNull() {
return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
}
@ -1030,7 +1096,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, op
con, key := findObject(doc, from, options)
if con == nil {
return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: \"%s\"", from)
}
val, err := con.get(key, options)
@ -1077,9 +1143,13 @@ func Equal(a, b []byte) bool {
// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
func DecodePatch(buf []byte) (Patch, error) {
if !json.Valid(buf) {
return nil, ErrInvalid
}
var p Patch
err := json.Unmarshal(buf, &p)
err := unmarshal(buf, &p)
if err != nil {
return nil, err
@ -1117,14 +1187,25 @@ func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyO
return doc, nil
}
var pd container
if doc[0] == '[' {
pd = &partialArray{}
} else {
pd = &partialDoc{}
if !json.Valid(doc) {
return nil, ErrInvalid
}
err := json.Unmarshal(doc, pd)
raw := json.RawMessage(doc)
self := newLazyNode(&raw)
var pd container
if doc[0] == '[' {
pd = &partialArray{
self: self,
}
} else {
pd = &partialDoc{
self: self,
}
}
err := unmarshal(doc, pd)
if err != nil {
return nil, err
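And a companion sketch for RFC 6902 patches, exercising the exported DecodePatch and Apply paths whose internals are modified above (the operations and document are invented; ApplyWithOptions with NewApplyOptions can be substituted when the options seen in this diff are needed):
package main
import (
	"fmt"
	jsonpatch "github.com/evanphx/json-patch/v5"
)
func main() {
	doc := []byte(`{"users":[{"name":"alice"}]}`)
	ops := []byte(`[
		{"op": "add", "path": "/users/-", "value": {"name": "bob"}},
		{"op": "replace", "path": "/users/0/name", "value": "carol"}
	]`)
	patch, err := jsonpatch.DecodePatch(ops)
	if err != nil {
		panic(err)
	}
	out, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // users now contain carol and bob
}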


@ -1,6 +1,7 @@
# A minimal logging API for Go
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
logr offers an(other) opinion on how Go programs and libraries can do logging
without becoming coupled to a particular logging implementation. This is not
@ -73,6 +74,29 @@ received:
If the Go standard library had defined an interface for logging, this project
probably would not be needed. Alas, here we are.
When the Go developers started developing such an interface with
[slog](https://github.com/golang/go/issues/56345), they adopted some of the
logr design but also left out some parts and changed others:
| Feature | logr | slog |
|---------|------|------|
| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) |
| Low-level API | `LogSink` | `Handler` |
| Stack unwinding | done by `LogSink` | done by `Logger` |
| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) |
| Generating a value for logging on demand | `Marshaler` | `LogValuer` |
| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" |
| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` |
| Passing logger via context | `NewContext`, `FromContext` | no API |
| Adding a name to a logger | `WithName` | no API |
| Modify verbosity of log entries in a call chain | `V` | no API |
| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
The high-level slog API is explicitly meant to be one of many different APIs
that can be layered on top of a shared `slog.Handler`. logr is one such
alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
package.
### Inspiration
Before you consider this package, please read [this blog post by the
@ -118,6 +142,91 @@ There are implementations for the following logging libraries:
- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
## slog interoperability
Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
slog API. `slogr` itself leaves that to the caller.
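A minimal sketch of both directions (assuming Go >= 1.21 and the `slogr` package from this module; the handler and messages are placeholders):
package main
import (
	"log/slog"
	"os"
	"github.com/go-logr/logr/slogr"
)
func main() {
	// slog.Handler -> logr.Logger
	handler := slog.NewJSONHandler(os.Stdout, nil)
	logrLogger := slogr.NewLogr(handler)
	logrLogger.Info("via the logr API", "key", "value")
	// logr.Logger -> *slog.Logger
	slogLogger := slog.New(slogr.NewSlogHandler(logrLogger))
	slogLogger.Info("via the slog API", "key", "value")
}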
### Using a `logr.LogSink` as backend for slog
Ideally, a logr sink implementation should support both logr and slog by
implementing both the normal logr interface(s) and `slogr.SlogSink`. Because
of a conflict in the parameters of the common `Enabled` method, it is [not
possible to implement both slog.Handler and logr.LogSink in the same
type](https://github.com/golang/go/issues/59110).
If both are supported, log calls can go from the high-level APIs to the backend
without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
convert back and forth without adding additional wrappers, with one exception:
when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
log calls.
Such an implementation should also support values that implement specific
interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
`slog.GroupValue`). logr does not convert those.
Not supporting slog has several drawbacks:
- Recording source code locations works correctly if the handler gets called
through `slog.Logger`, but may be wrong in other cases. That's because a
`logr.Sink` does its own stack unwinding instead of using the program counter
provided by the high-level API.
- slog levels <= 0 can be mapped to logr levels by negating the level without a
loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as
used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink
because logr does not support "more important than info" levels.
- The slog group concept is supported by prefixing each key in a key/value
pair with the group names, separated by a dot. For structured output like
JSON it would be better to group the key/value pairs inside an object.
- Special slog values and interfaces don't work as expected.
- The overhead is likely to be higher.
These drawbacks are severe enough that applications using a mixture of slog and
logr should switch to a different backend.
### Using a `slog.Handler` as backend for logr
Using a plain `slog.Handler` without support for logr works better than the
other direction:
- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
by negating them.
- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
counter is passed to the `slog.Handler`.
- Names added via `Logger.WithName` are gathered and recorded in an additional
attribute with `logger` as key and the names separated by slash as value.
- `Logger.Error` is turned into a log record with `slog.LevelError` as level
and an additional attribute with `err` as key, if an error was provided.
The main drawback is that `logr.Marshaler` will not be supported. Types should
ideally support both `logr.Marshaler` and `slog.LogValuer`. If compatibility
with logr implementations without slog support is not important, then
`slog.LogValuer` is sufficient.
### Context support for slog
Storing a logger in a `context.Context` is not supported by
slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
to fill this gap:
func HandlerFromContext(ctx context.Context) slog.Handler {
logger, err := logr.FromContext(ctx)
if err == nil {
return slogr.NewSlogHandler(logger)
}
return slog.Default().Handler()
}
func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
return logr.NewContext(ctx, slogr.NewLogr(handler))
}
The downside is that storing and retrieving a `slog.Handler` needs more
allocations compared to using a `logr.Logger`. Therefore the recommendation is
to use the `logr.Logger` API in code which uses contextual logging.
## FAQ
### Conceptual
@ -241,7 +350,9 @@ Otherwise, you can start out with `0` as "you always want to see this",
Then gradually choose levels in between as you need them, working your way
down from 10 (for debug and trace style logs) and up from 1 (for chattier
info-type logs.)
info-type logs). For reference, slog pre-defines -4 for debug logs
(corresponds to 4 in logr), which matches what is
[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
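A short sketch of what that looks like in code, using the `funcr` sink that ships with this module (the verbosity threshold and messages are arbitrary):
package main
import (
	"fmt"
	"github.com/go-logr/logr/funcr"
)
func main() {
	// Verbosity: 4 enables V(0) through V(4).
	logger := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{Verbosity: 4})
	logger.Info("always shown")            // equivalent to V(0)
	logger.V(4).Info("debug-style detail") // shown at verbosity >= 4
	logger.V(10).Info("trace-style spew")  // dropped here
}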
#### How do I choose my keys?

vendor/github.com/go-logr/logr/SECURITY.md generated vendored Normal file

@ -0,0 +1,18 @@
# Security Policy
If you have discovered a security vulnerability in this project, please report it
privately. **Do not disclose it as a public issue.** This gives us time to work with you
to fix the issue before public exposure, reducing the chance that the exploit will be
used before a patch is released.
You may submit the report in the following ways:
- send an email to go-logr-security@googlegroups.com
- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new)
Please provide the following information in your report:
- A description of the vulnerability and its impact
- How to reproduce the issue
We ask that you give us 90 days to work on a fix before public exposure.


@ -127,9 +127,9 @@ limitations under the License.
// such a value can call its methods without having to check whether the
// instance is ready for use.
//
// Calling methods with the null logger (Logger{}) as instance will crash
// because it has no LogSink. Therefore this null logger should never be passed
// around. For cases where passing a logger is optional, a pointer to Logger
// The zero logger (= Logger{}) is identical to Discard() and discards all log
// entries. Code that receives a Logger by value can simply call it, the methods
// will never crash. For cases where passing a logger is optional, a pointer to Logger
// should be used.
//
// # Key Naming Conventions
@ -258,6 +258,12 @@ type Logger struct {
// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info logs.
func (l Logger) Enabled() bool {
// Some implementations of LogSink look at the caller in Enabled (e.g.
// different verbosity levels per package or file), but we only pass one
// CallDepth in (via Init). This means that all calls from Logger to the
// LogSink's Enabled, Info, and Error methods must have the same number of
// frames. In other words, Logger methods can't call other Logger methods
// which call these LogSink methods unless we do it the same in all paths.
return l.sink != nil && l.sink.Enabled(l.level)
}
@ -267,11 +273,11 @@ func (l Logger) Enabled() bool {
// line. The key/value pairs can then be used to add additional variable
// information. The key/value pairs must alternate string keys and arbitrary
// values.
func (l Logger) Info(msg string, keysAndValues ...interface{}) {
func (l Logger) Info(msg string, keysAndValues ...any) {
if l.sink == nil {
return
}
if l.Enabled() {
if l.sink.Enabled(l.level) { // see comment in Enabled
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
}
@ -289,7 +295,7 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) {
// while the err argument should be used to attach the actual error that
// triggered this log line, if present. The err parameter is optional
// and nil may be passed instead of an error instance.
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
func (l Logger) Error(err error, msg string, keysAndValues ...any) {
if l.sink == nil {
return
}
@ -314,9 +320,16 @@ func (l Logger) V(level int) Logger {
return l
}
// GetV returns the verbosity level of the logger. If the logger's LogSink is
// nil as in the Discard logger, this will always return 0.
func (l Logger) GetV() int {
// 0 if l.sink nil because of the if check in V above.
return l.level
}
// WithValues returns a new Logger instance with additional key/value pairs.
// See Info for documentation on how key/value pairs work.
func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
func (l Logger) WithValues(keysAndValues ...any) Logger {
if l.sink == nil {
return l
}
@ -467,15 +480,15 @@ type LogSink interface {
// The level argument is provided for optional logging. This method will
// only be called when Enabled(level) is true. See Logger.Info for more
// details.
Info(level int, msg string, keysAndValues ...interface{})
Info(level int, msg string, keysAndValues ...any)
// Error logs an error, with the given message and key/value pairs as
// context. See Logger.Error for more details.
Error(err error, msg string, keysAndValues ...interface{})
Error(err error, msg string, keysAndValues ...any)
// WithValues returns a new LogSink with additional key/value pairs. See
// Logger.WithValues for more details.
WithValues(keysAndValues ...interface{}) LogSink
WithValues(keysAndValues ...any) LogSink
// WithName returns a new LogSink with the specified name appended. See
// Logger.WithName for more details.
@ -546,5 +559,5 @@ type Marshaler interface {
// with exported fields
//
// It may return any value of any type.
MarshalLog() interface{}
MarshalLog() any
}


@ -26,11 +26,16 @@ var rxDupSlashes = regexp.MustCompile(`/{2,}`)
// - FlagLowercaseHost
// - FlagRemoveDefaultPort
// - FlagRemoveDuplicateSlashes (and this was mixed in with the |)
//
// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment.
func NormalizeURL(u *url.URL) {
lowercaseScheme(u)
lowercaseHost(u)
removeDefaultPort(u)
removeDuplicateSlashes(u)
u.RawPath = ""
u.RawFragment = ""
}
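The new RawPath/RawFragment clearing is what forces the re-encoded (urlencoded) form. A rough, standalone illustration of just that effect using only net/url (the URL is invented):
package main
import (
	"fmt"
	"net/url"
)
func main() {
	u, _ := url.Parse("https://example.com/a%2Fb")
	fmt.Println(u.String()) // https://example.com/a%2Fb (RawPath preserved)
	// What NormalizeURL above now does in addition to its other steps:
	u.RawPath = ""
	u.RawFragment = ""
	fmt.Println(u.String()) // https://example.com/a/b (re-encoded from Path)
}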
func lowercaseScheme(u *url.URL) {


@ -24,7 +24,7 @@ import (
"github.com/golang/protobuf/ptypes/any"
yaml "gopkg.in/yaml.v3"
extensions "github.com/google/gnostic/extensions"
extensions "github.com/google/gnostic-models/extensions"
)
// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.


@ -22,7 +22,7 @@ import (
"gopkg.in/yaml.v3"
"github.com/google/gnostic/jsonschema"
"github.com/google/gnostic-models/jsonschema"
)
// compiler helper functions, usually called from generated code


@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.18.1
// protoc-gen-go v1.27.1
// protoc v3.19.3
// source: extensions/extension.proto
package gnostic_extension_v1


@ -1,3 +1,16 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// THIS FILE IS AUTOMATICALLY GENERATED.
@ -81,4 +94,4 @@ YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5
IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg
fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6
IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1
c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)}
c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)}


@ -46,23 +46,8 @@ func (schema *Schema) describeSchema(indent string) string {
if schema.Schema != nil {
result += indent + "$schema: " + *(schema.Schema) + "\n"
}
if schema.ReadOnly != nil && *schema.ReadOnly {
result += indent + fmt.Sprintf("readOnly: %+v\n", *(schema.ReadOnly))
}
if schema.WriteOnly != nil && *schema.WriteOnly {
result += indent + fmt.Sprintf("writeOnly: %+v\n", *(schema.WriteOnly))
}
if schema.ID != nil {
switch strings.TrimSuffix(*schema.Schema, "#") {
case "http://json-schema.org/draft-04/schema#":
fallthrough
case "#":
fallthrough
case "":
result += indent + "id: " + *(schema.ID) + "\n"
default:
result += indent + "$id: " + *(schema.ID) + "\n"
}
result += indent + "id: " + *(schema.ID) + "\n"
}
if schema.MultipleOf != nil {
result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf))


@ -23,11 +23,9 @@ import "gopkg.in/yaml.v3"
// All fields are pointers and are nil if the associated values
// are not specified.
type Schema struct {
Schema *string // $schema
ID *string // id keyword used for $ref resolution scope
Ref *string // $ref, i.e. JSON Pointers
ReadOnly *bool
WriteOnly *bool
Schema *string // $schema
ID *string // id keyword used for $ref resolution scope
Ref *string // $ref, i.e. JSON Pointers
// http://json-schema.org/latest/json-schema-validation.html
// 5.1. Validation keywords for numeric instances (number and integer)


@ -165,6 +165,7 @@ func NewSchemaFromObject(jsonData *yaml.Node) *Schema {
default:
fmt.Printf("schemaValue: unexpected node %+v\n", jsonData)
return nil
}
return nil


@ -16,7 +16,6 @@ package jsonschema
import (
"fmt"
"strings"
"gopkg.in/yaml.v3"
)
@ -34,11 +33,7 @@ func renderMappingNode(node *yaml.Node, indent string) (result string) {
value := node.Content[i+1]
switch value.Kind {
case yaml.ScalarNode:
if value.Tag == "!!bool" {
result += value.Value
} else {
result += "\"" + value.Value + "\""
}
result += "\"" + value.Value + "\""
case yaml.MappingNode:
result += renderMappingNode(value, innerIndent)
case yaml.SequenceNode:
@ -63,11 +58,7 @@ func renderSequenceNode(node *yaml.Node, indent string) (result string) {
item := node.Content[i]
switch item.Kind {
case yaml.ScalarNode:
if item.Tag == "!!bool" {
result += innerIndent + item.Value
} else {
result += innerIndent + "\"" + item.Value + "\""
}
result += innerIndent + "\"" + item.Value + "\""
case yaml.MappingNode:
result += innerIndent + renderMappingNode(item, innerIndent) + ""
default:
@ -269,26 +260,11 @@ func (schema *Schema) nodeValue() *yaml.Node {
content = appendPair(content, "title", nodeForString(*schema.Title))
}
if schema.ID != nil {
switch strings.TrimSuffix(*schema.Schema, "#") {
case "http://json-schema.org/draft-04/schema":
fallthrough
case "#":
fallthrough
case "":
content = appendPair(content, "id", nodeForString(*schema.ID))
default:
content = appendPair(content, "$id", nodeForString(*schema.ID))
}
content = appendPair(content, "id", nodeForString(*schema.ID))
}
if schema.Schema != nil {
content = appendPair(content, "$schema", nodeForString(*schema.Schema))
}
if schema.ReadOnly != nil && *schema.ReadOnly {
content = appendPair(content, "readOnly", nodeForBoolean(*schema.ReadOnly))
}
if schema.WriteOnly != nil && *schema.WriteOnly {
content = appendPair(content, "writeOnly", nodeForBoolean(*schema.WriteOnly))
}
if schema.Type != nil {
content = appendPair(content, "type", schema.Type.nodeValue())
}


@ -23,7 +23,7 @@ import (
"gopkg.in/yaml.v3"
"github.com/google/gnostic/compiler"
"github.com/google/gnostic-models/compiler"
)
// Version returns the package name (and OpenAPI version).
@ -7887,12 +7887,7 @@ func (m *Oauth2Scopes) ToRawInfo() *yaml.Node {
if m == nil {
return info
}
if m.AdditionalProperties != nil {
for _, item := range m.AdditionalProperties {
info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value))
}
}
// &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:}
return info
}


@ -16,8 +16,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.18.1
// protoc-gen-go v1.27.1
// protoc v3.19.3
// source: openapiv2/OpenAPIv2.proto
package openapi_v2


@ -17,7 +17,7 @@ package openapi_v2
import (
"gopkg.in/yaml.v3"
"github.com/google/gnostic/compiler"
"github.com/google/gnostic-models/compiler"
)
// ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation.


@ -23,7 +23,7 @@ import (
"gopkg.in/yaml.v3"
"github.com/google/gnostic/compiler"
"github.com/google/gnostic-models/compiler"
)
// Version returns the package name (and OpenAPI version).
@ -8560,12 +8560,7 @@ func (m *Strings) ToRawInfo() *yaml.Node {
if m == nil {
return info
}
if m.AdditionalProperties != nil {
for _, item := range m.AdditionalProperties {
info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value))
}
}
// &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:}
return info
}


@ -16,8 +16,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
// protoc v3.19.4
// protoc-gen-go v1.27.1
// protoc v3.19.3
// source: openapiv3/OpenAPIv3.proto
package openapi_v3
@ -6760,13 +6760,12 @@ var file_openapiv3_OpenAPIv3_proto_rawDesc = []byte{
0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61,
0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x56,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e,
0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33,
0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33,
0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70,
0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (


@ -42,7 +42,7 @@ option java_package = "org.openapi_v3";
option objc_class_prefix = "OAS";
// The Go package name.
option go_package = "github.com/google/gnostic/openapiv3;openapi_v3";
option go_package = "./openapiv3;openapi_v3";
message AdditionalPropertiesItem {
oneof oneof {


@ -19,7 +19,3 @@ for OpenAPI.
The schema-generator directory contains support code which generates
openapi-3.1.json from the OpenAPI 3.1 specification document (Markdown).
### How to rebuild
`protoc -I=. -I=third_party --go_out=. --go_opt=paths=source_relative openapiv3/*.proto`


@ -17,7 +17,7 @@ package openapi_v3
import (
"gopkg.in/yaml.v3"
"github.com/google/gnostic/compiler"
"github.com/google/gnostic-models/compiler"
)
// ParseDocument reads an OpenAPI v3 description from a YAML/JSON representation.


@ -1,183 +0,0 @@
// Copyright 2022 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.0
// protoc v3.19.4
// source: openapiv3/annotations.proto
package openapi_v3
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
descriptorpb "google.golang.org/protobuf/types/descriptorpb"
reflect "reflect"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
{
ExtendedType: (*descriptorpb.FileOptions)(nil),
ExtensionType: (*Document)(nil),
Field: 1143,
Name: "openapi.v3.document",
Tag: "bytes,1143,opt,name=document",
Filename: "openapiv3/annotations.proto",
},
{
ExtendedType: (*descriptorpb.MethodOptions)(nil),
ExtensionType: (*Operation)(nil),
Field: 1143,
Name: "openapi.v3.operation",
Tag: "bytes,1143,opt,name=operation",
Filename: "openapiv3/annotations.proto",
},
{
ExtendedType: (*descriptorpb.MessageOptions)(nil),
ExtensionType: (*Schema)(nil),
Field: 1143,
Name: "openapi.v3.schema",
Tag: "bytes,1143,opt,name=schema",
Filename: "openapiv3/annotations.proto",
},
{
ExtendedType: (*descriptorpb.FieldOptions)(nil),
ExtensionType: (*Schema)(nil),
Field: 1143,
Name: "openapi.v3.property",
Tag: "bytes,1143,opt,name=property",
Filename: "openapiv3/annotations.proto",
},
}
// Extension fields to descriptorpb.FileOptions.
var (
// optional openapi.v3.Document document = 1143;
E_Document = &file_openapiv3_annotations_proto_extTypes[0]
)
// Extension fields to descriptorpb.MethodOptions.
var (
// optional openapi.v3.Operation operation = 1143;
E_Operation = &file_openapiv3_annotations_proto_extTypes[1]
)
// Extension fields to descriptorpb.MessageOptions.
var (
// optional openapi.v3.Schema schema = 1143;
E_Schema = &file_openapiv3_annotations_proto_extTypes[2]
)
// Extension fields to descriptorpb.FieldOptions.
var (
// optional openapi.v3.Schema property = 1143;
E_Property = &file_openapiv3_annotations_proto_extTypes[3]
)
var File_openapiv3_annotations_proto protoreflect.FileDescriptor
var file_openapiv3_annotations_proto_rawDesc = []byte{
0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f,
0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61,
0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64,
0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70,
0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a,
0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68,
0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70,
0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d,
0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x5a, 0x0a, 0x0e, 0x6f,
0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41,
0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
0x01, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65,
0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76,
0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var file_openapiv3_annotations_proto_goTypes = []interface{}{
(*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions
(*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions
(*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions
(*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions
(*Document)(nil), // 4: openapi.v3.Document
(*Operation)(nil), // 5: openapi.v3.Operation
(*Schema)(nil), // 6: openapi.v3.Schema
}
var file_openapiv3_annotations_proto_depIdxs = []int32{
0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions
1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions
2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions
3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions
4, // 4: openapi.v3.document:type_name -> openapi.v3.Document
5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation
6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema
6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema
8, // [8:8] is the sub-list for method output_type
8, // [8:8] is the sub-list for method input_type
4, // [4:8] is the sub-list for extension type_name
0, // [0:4] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_openapiv3_annotations_proto_init() }
func file_openapiv3_annotations_proto_init() {
if File_openapiv3_annotations_proto != nil {
return
}
file_openapiv3_OpenAPIv3_proto_init()
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_openapiv3_annotations_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 4,
NumServices: 0,
},
GoTypes: file_openapiv3_annotations_proto_goTypes,
DependencyIndexes: file_openapiv3_annotations_proto_depIdxs,
ExtensionInfos: file_openapiv3_annotations_proto_extTypes,
}.Build()
File_openapiv3_annotations_proto = out.File
file_openapiv3_annotations_proto_rawDesc = nil
file_openapiv3_annotations_proto_goTypes = nil
file_openapiv3_annotations_proto_depIdxs = nil
}


@ -1,60 +0,0 @@
// Copyright 2022 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package openapi.v3;
import "openapiv3/OpenAPIv3.proto";
import "google/protobuf/descriptor.proto";
// This option lets the proto compiler generate Java code inside the package
// name (see below) instead of inside an outer class. It creates a simpler
// developer experience by reducing one-level of name nesting and be
// consistent with most programming languages that don't support outer classes.
option java_multiple_files = true;
// The Java outer classname should be the filename in UpperCamelCase. This
// class is only used to hold proto descriptor, so developers don't need to
// work with it directly.
option java_outer_classname = "AnnotationsProto";
// The Java package name must be proto package name with proper prefix.
option java_package = "org.openapi_v3";
// A reasonable prefix for the Objective-C symbols generated from the package.
// It should at a minimum be 3 characters long, all uppercase, and convention
// is to use an abbreviation of the package name. Something short, but
// hopefully unique enough to not conflict with things that may come along in
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
option objc_class_prefix = "OAS";
// The Go package name.
option go_package = "github.com/google/gnostic/openapiv3;openapi_v3";
extend google.protobuf.FileOptions {
Document document = 1143;
}
extend google.protobuf.MethodOptions {
Operation operation = 1143;
}
extend google.protobuf.MessageOptions {
Schema schema = 1143;
}
extend google.protobuf.FieldOptions {
Schema property = 1143;
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,5 +1,12 @@
# Changelog
## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
### Features
* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)


@ -108,12 +108,23 @@ func setClockSequence(seq int) {
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. The time is only defined for version 1 and 2 UUIDs.
// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
func (uuid UUID) Time() Time {
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
return Time(time)
var t Time
switch uuid.Version() {
case 6:
time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
t = Time(time)
case 7:
time := binary.BigEndian.Uint64(uuid[:8])
t = Time((time>>16)*10000 + g1582ns100)
default: // forward compatible
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
t = Time(time)
}
return t
}
// ClockSequence returns the clock sequence encoded in uuid.


@ -186,6 +186,59 @@ func Must(uuid UUID, err error) UUID {
return uuid
}
// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
// It returns an error if the format is invalid, otherwise nil.
func Validate(s string) error {
switch len(s) {
// Standard UUID format
case 36:
// UUID with "urn:uuid:" prefix
case 36 + 9:
if !strings.EqualFold(s[:9], "urn:uuid:") {
return fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
// UUID enclosed in braces
case 36 + 2:
if s[0] != '{' || s[len(s)-1] != '}' {
return fmt.Errorf("invalid bracketed UUID format")
}
s = s[1 : len(s)-1]
// UUID without hyphens
case 32:
for i := 0; i < len(s); i += 2 {
_, ok := xtob(s[i], s[i+1])
if !ok {
return errors.New("invalid UUID format")
}
}
default:
return invalidLengthError{len(s)}
}
// Check for standard UUID format
if len(s) == 36 {
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return errors.New("invalid UUID format")
}
for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
if _, ok := xtob(s[x], s[x+1]); !ok {
return errors.New("invalid UUID format")
}
}
}
return nil
}
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid.
func (uuid UUID) String() string {
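
For reference, a minimal sketch of the new Validate helper over the formats listed above; the inputs are illustrative:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	inputs := []string{
		"f47ac10b-58cc-4372-a567-0e02b2c3d479",          // standard form
		"urn:uuid:f47ac10b-58cc-4372-a567-0e02b2c3d479", // URN prefix
		"{f47ac10b-58cc-4372-a567-0e02b2c3d479}",        // braced
		"f47ac10b58cc4372a5670e02b2c3d479",              // no hyphens
		"not-a-uuid",
	}
	for _, s := range inputs {
		// Validate checks the format without allocating a UUID value.
		fmt.Println(s, "->", uuid.Validate(s))
	}
}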

vendor/github.com/google/uuid/version6.go generated vendored Normal file

@ -0,0 +1,56 @@
// Copyright 2023 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "encoding/binary"
// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
//
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
//
// NewV6 returns a Version 6 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set, NewV6 sets the NodeID to random bits automatically. If the clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time, NewV6 returns Nil and an error.
func NewV6() (UUID, error) {
var uuid UUID
now, seq, err := GetTime()
if err != nil {
return uuid, err
}
/*
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| time_high |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| time_mid | time_low_and_version |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|clk_seq_hi_res | clk_seq_low | node (0-1) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| node (2-5) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
binary.BigEndian.PutUint64(uuid[0:], uint64(now))
binary.BigEndian.PutUint16(uuid[8:], seq)
uuid[6] = 0x60 | (uuid[6] & 0x0F)
uuid[8] = 0x80 | (uuid[8] & 0x3F)
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
copy(uuid[10:], nodeID[:])
nodeMu.Unlock()
return uuid, nil
}
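
A small sketch of minting a v6 value with NewV6; the printed fields are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/google/uuid"
)

func main() {
	u, err := uuid.NewV6()
	if err != nil {
		log.Fatal(err) // GetTime failed; u is uuid.Nil
	}
	// The version nibble is 6 and the variant bits are RFC 4122.
	fmt.Println(u, u.Version(), u.Variant())
}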

vendor/github.com/google/uuid/version7.go generated vendored Normal file

@ -0,0 +1,75 @@
// Copyright 2023 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"io"
)
// UUID version 7 features a time-ordered value field derived from the widely
// implemented and well known Unix Epoch timestamp source,
// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded,
// as well as improved entropy characteristics over versions 1 or 6.
//
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
//
// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
//
// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch).
// Uses the randomness pool if it was enabled with EnableRandPool.
// On error, NewV7 returns Nil and an error
func NewV7() (UUID, error) {
uuid, err := NewRandom()
if err != nil {
return uuid, err
}
makeV7(uuid[:])
return uuid, nil
}
// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
// It uses NewRandomFromReader to fill the random bits.
// On error, NewV7FromReader returns Nil and an error.
func NewV7FromReader(r io.Reader) (UUID, error) {
uuid, err := NewRandomFromReader(r)
if err != nil {
return uuid, err
}
makeV7(uuid[:])
return uuid, nil
}
// makeV7 fills in the 48-bit time (uuid[0] - uuid[5]) and sets version b0111 (uuid[6]).
// uuid[8] already has the right variant bits set (10).
// See NewV7 and NewV7FromReader.
func makeV7(uuid []byte) {
/*
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms | ver | rand_a |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|var| rand_b |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| rand_b |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
_ = uuid[15] // bounds check
t := timeNow().UnixMilli()
uuid[0] = byte(t >> 40)
uuid[1] = byte(t >> 32)
uuid[2] = byte(t >> 24)
uuid[3] = byte(t >> 16)
uuid[4] = byte(t >> 8)
uuid[5] = byte(t)
uuid[6] = 0x70 | (uuid[6] & 0x0F)
// uuid[8] already has the right variant bits
}
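
A companion sketch for NewV7, also exercising the v7-aware Time accessor from the hunk further up; values are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/google/uuid"
)

func main() {
	u, err := uuid.NewV7()
	if err != nil {
		log.Fatal(err)
	}
	// Time() now understands v7 (and v6) layouts; UnixTime converts the
	// 100ns-since-1582 count back to Unix seconds and nanoseconds.
	sec, nsec := u.Time().UnixTime()
	fmt.Println(u, sec, nsec)
}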


@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1 +0,0 @@
Copyright 2012 Matt T. Proud (matt.proud@gmail.com)


@ -1 +0,0 @@
cover.dat


@ -1,7 +0,0 @@
all:
cover:
go test -cover -v -coverprofile=cover.dat ./...
go tool cover -func cover.dat
.PHONY: cover


@ -1,81 +0,0 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pbutil
import (
"encoding/binary"
"errors"
"io"
"google.golang.org/protobuf/proto"
)
// TODO: Give error package name prefix in next minor release.
var errInvalidVarint = errors.New("invalid varint32 encountered")
// ReadDelimited decodes a message from the provided length-delimited stream,
// where the length is encoded as 32-bit varint prefix to the message body.
// It returns the total number of bytes read and any applicable error. This is
// roughly equivalent to the companion Java API's
// MessageLite#parseDelimitedFrom. As per the reader contract, this function
// calls r.Read repeatedly as required until exactly one message including its
// prefix is read and decoded (or an error has occurred). The function never
// reads more bytes from the stream than required. The function never returns
// an error if a message has been read and decoded correctly, even if the end
// of the stream has been reached in doing so. In that case, any subsequent
// calls return (0, io.EOF).
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
// TODO: Consider allowing the caller to specify a decode buffer in the
// next major version.
// TODO: Consider using error wrapping to annotate error state in pass-
// through cases in the next minor version.
// Per AbstractParser#parsePartialDelimitedFrom with
// CodedInputStream#readRawVarint32.
var headerBuf [binary.MaxVarintLen32]byte
var bytesRead, varIntBytes int
var messageLength uint64
for varIntBytes == 0 { // i.e. no varint has been decoded yet.
if bytesRead >= len(headerBuf) {
return bytesRead, errInvalidVarint
}
// We have to read byte by byte here to avoid reading more bytes
// than required. Each read byte is appended to what we have
// read before.
newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
if newBytesRead == 0 {
if err != nil {
return bytesRead, err
}
// A Reader should not return (0, nil); but if it does, it should
// be treated as no-op according to the Reader contract.
continue
}
bytesRead += newBytesRead
// Now present everything read so far to the varint decoder and
// see if a varint can be decoded already.
messageLength, varIntBytes = binary.Uvarint(headerBuf[:bytesRead])
}
messageBuf := make([]byte, messageLength)
newBytesRead, err := io.ReadFull(r, messageBuf)
bytesRead += newBytesRead
if err != nil {
return bytesRead, err
}
return bytesRead, proto.Unmarshal(messageBuf, m)
}


@ -1,49 +0,0 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pbutil
import (
"encoding/binary"
"io"
"google.golang.org/protobuf/proto"
)
// WriteDelimited encodes and dumps a message to the provided writer prefixed
// with a 32-bit varint indicating the length of the encoded message, producing
// a length-delimited record stream, which can be used to chain together
// encoded messages of the same type together in a file. It returns the total
// number of bytes written and any applicable error. This is roughly
// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
// TODO: Consider allowing the caller to specify an encode buffer in the
// next major version.
buffer, err := proto.Marshal(m)
if err != nil {
return 0, err
}
var buf [binary.MaxVarintLen32]byte
encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
sync, err := w.Write(buf[:encodedLength])
if err != nil {
return sync, err
}
n, err = w.Write(buffer)
return n + sync, err
}


@ -475,6 +475,9 @@ type HistogramOpts struct {
// now is for testing purposes, by default it's time.Now.
now func() time.Time
// afterFunc is for testing purposes, by default it's time.AfterFunc.
afterFunc func(time.Duration, func()) *time.Timer
}
// HistogramVecOpts bundles the options to create a HistogramVec metric.
@ -526,7 +529,9 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
if opts.now == nil {
opts.now = time.Now
}
if opts.afterFunc == nil {
opts.afterFunc = time.AfterFunc
}
h := &histogram{
desc: desc,
upperBounds: opts.Buckets,
@ -536,6 +541,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
lastResetTime: opts.now(),
now: opts.now,
afterFunc: opts.afterFunc,
}
if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
h.upperBounds = DefBuckets
@ -716,9 +722,16 @@ type histogram struct {
nativeHistogramMinResetDuration time.Duration
// lastResetTime is protected by mtx. It is also used as created timestamp.
lastResetTime time.Time
// resetScheduled is protected by mtx. It is true if a reset is
// scheduled for a later time (when nativeHistogramMinResetDuration has
// passed).
resetScheduled bool
// now is for testing purposes, by default it's time.Now.
now func() time.Time
// afterFunc is for testing purposes, by default it's time.AfterFunc.
afterFunc func(time.Duration, func()) *time.Timer
}
func (h *histogram) Desc() *Desc {
@ -874,21 +887,31 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket
if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
return
}
// One of the other strategies will happen. To undo what they will do as
// soon as enough time has passed to satisfy
// h.nativeHistogramMinResetDuration, schedule a reset at the right time
// if we haven't done so already.
if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
h.resetScheduled = true
h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
}
if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
return
}
h.doubleBucketWidth(hotCounts, coldCounts)
}
// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration
// has been passed. It returns true if the histogram has been reset. The caller
// must have locked h.mtx.
// maybeReset resets the whole histogram if at least
// h.nativeHistogramMinResetDuration has been passed. It returns true if the
// histogram has been reset. The caller must have locked h.mtx.
func (h *histogram) maybeReset(
hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
) bool {
// We are using the possibly mocked h.now() rather than
// time.Since(h.lastResetTime) to enable testing.
if h.nativeHistogramMinResetDuration == 0 ||
if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
h.resetScheduled || // Do not interfere if a reset is already scheduled.
h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
return false
}
@ -906,6 +929,29 @@ func (h *histogram) maybeReset(
return true
}
// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
// called without having locked h.mtx.
func (h *histogram) reset() {
h.mtx.Lock()
defer h.mtx.Unlock()
n := atomic.LoadUint64(&h.countAndHotIdx)
hotIdx := n >> 63
coldIdx := (^n) >> 63
hot := h.counts[hotIdx]
cold := h.counts[coldIdx]
// Completely reset coldCounts.
h.resetCounts(cold)
// Make coldCounts the new hot counts while resetting countAndHotIdx.
n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
count := n & ((1 << 63) - 1)
waitForCooldown(count, hot)
// Finally, reset the formerly hot counts, too.
h.resetCounts(hot)
h.lastResetTime = h.now()
h.resetScheduled = false
}
// maybeWidenZeroBucket widens the zero bucket until it includes the existing
// buckets closest to the zero bucket (which could be two, if an equidistant
// negative and a positive bucket exists, but usually it's only one bucket to be
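
A brief sketch of the exported knob this scheduling serves, NativeHistogramMinResetDuration; the metric name and bucket settings are illustrative:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A native histogram that may reset itself (recovering bucket resolution)
	// at most once per hour; the reset scheduled in limitBuckets above fires
	// once this duration has elapsed.
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                            "request_duration_seconds",
		Help:                            "Request latency.",
		NativeHistogramBucketFactor:     1.1,
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: time.Hour,
	})
	prometheus.MustRegister(h)
	h.Observe(0.42)
}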


@ -165,6 +165,8 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
func validateLabelValues(vals []string, expectedNumberOfValues int) error {
if len(vals) != expectedNumberOfValues {
// The call below makes vals escape, copy them to avoid that.
vals := append([]string(nil), vals...)
return fmt.Errorf(
"%w: expected %d label values but got %d in %#v",
errInconsistentCardinality, expectedNumberOfValues,


@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows && !js
// +build !windows,!js
//go:build !windows && !js && !wasip1
// +build !windows,!js,!wasip1
package prometheus


@ -1,10 +1,9 @@
// Copyright 2013 Matt T. Proud
//
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@ -12,5 +11,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package pbutil provides record length-delimited Protocol Buffer streaming.
package pbutil
//go:build wasip1
// +build wasip1
package prometheus
func canCollectProcess() bool {
return false
}
func (*processCollector) processCollect(chan<- Metric) {
// noop on this platform
return
}


@ -474,6 +474,9 @@ type Histogram struct {
NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket.
// Positive buckets for the native histogram.
// Use a no-op span (offset 0, length 0) for a native histogram without any
// observations yet and with a zero_threshold of 0. Otherwise, it would be
// indistinguishable from a classic histogram.
PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
// Use either "positive_delta" or "positive_count", the former for
// regular histograms with integer counts, the latter for float


@ -14,6 +14,7 @@
package expfmt
import (
"bufio"
"fmt"
"io"
"math"
@ -21,8 +22,8 @@ import (
"net/http"
dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/encoding/protodelim"
"github.com/matttproud/golang_protobuf_extensions/v2/pbutil"
"github.com/prometheus/common/model"
)
@ -86,8 +87,10 @@ type protoDecoder struct {
// Decode implements the Decoder interface.
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
_, err := pbutil.ReadDelimited(d.r, v)
if err != nil {
opts := protodelim.UnmarshalOptions{
MaxSize: -1,
}
if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil {
return err
}
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {


@ -18,10 +18,11 @@ import (
"io"
"net/http"
"github.com/matttproud/golang_protobuf_extensions/v2/pbutil"
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
"google.golang.org/protobuf/encoding/protodelim"
"google.golang.org/protobuf/encoding/prototext"
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
dto "github.com/prometheus/client_model/go"
)
@ -120,7 +121,7 @@ func NewEncoder(w io.Writer, format Format) Encoder {
case FmtProtoDelim:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := pbutil.WriteDelimited(w, v)
_, err := protodelim.MarshalTo(w, v)
return err
},
close: func() error { return nil },
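
A sketch of the protodelim round trip that replaces pbutil in the two hunks above; the MetricFamily value is illustrative:

package main

import (
	"bufio"
	"bytes"
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/proto"
)

func main() {
	in := &dto.MetricFamily{Name: proto.String("example_total")}

	// Write one length-delimited message, as the encoder now does.
	var buf bytes.Buffer
	if _, err := protodelim.MarshalTo(&buf, in); err != nil {
		panic(err)
	}

	// Read it back the way the decoder now does (MaxSize: -1 disables the size limit).
	out := &dto.MetricFamily{}
	opts := protodelim.UnmarshalOptions{MaxSize: -1}
	if err := opts.UnmarshalFrom(bufio.NewReader(&buf), out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetName())
}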


@ -16,6 +16,7 @@ package expfmt
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"math"
@ -24,8 +25,9 @@ import (
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"google.golang.org/protobuf/proto"
"github.com/prometheus/common/model"
)
// A stateFn is a function that represents a state in a state machine. By
@ -112,7 +114,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
// stream. Turn this error into something nicer and more
// meaningful. (io.EOF is often used as a signal for the legitimate end
// of an input stream.)
if p.err == io.EOF {
if p.err != nil && errors.Is(p.err, io.EOF) {
p.parseError("unexpected end of input stream")
}
return p.metricFamiliesByName, p.err
@ -146,7 +148,7 @@ func (p *TextParser) startOfLine() stateFn {
// which is not an error but the signal that we are done.
// Any other error that happens to align with the start of
// a line is still an error.
if p.err == io.EOF {
if errors.Is(p.err, io.EOF) {
p.err = nil
}
return nil


@ -90,13 +90,13 @@ func (a *Alert) Validate() error {
return fmt.Errorf("start time must be before end time")
}
if err := a.Labels.Validate(); err != nil {
return fmt.Errorf("invalid label set: %s", err)
return fmt.Errorf("invalid label set: %w", err)
}
if len(a.Labels) == 0 {
return fmt.Errorf("at least one label pair required")
}
if err := a.Annotations.Validate(); err != nil {
return fmt.Errorf("invalid annotations: %s", err)
return fmt.Errorf("invalid annotations: %w", err)
}
return nil
}

vendor/github.com/prometheus/common/model/metadata.go generated vendored Normal file

@ -0,0 +1,28 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
// MetricType represents metric type values.
type MetricType string
const (
MetricTypeCounter = MetricType("counter")
MetricTypeGauge = MetricType("gauge")
MetricTypeHistogram = MetricType("histogram")
MetricTypeGaugeHistogram = MetricType("gaugehistogram")
MetricTypeSummary = MetricType("summary")
MetricTypeInfo = MetricType("info")
MetricTypeStateset = MetricType("stateset")
MetricTypeUnknown = MetricType("unknown")
)
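
A trivial sketch of consuming the new MetricType constants; the switch body is illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func describe(t model.MetricType) string {
	switch t {
	case model.MetricTypeCounter:
		return "monotonically increasing value"
	case model.MetricTypeGauge:
		return "value that can go up and down"
	default:
		return "see the OpenMetrics specification"
	}
}

func main() {
	fmt.Println(describe(model.MetricTypeCounter))
}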


@ -20,12 +20,10 @@ import (
"strings"
)
var (
// MetricNameRE is a regular expression matching valid metric
// names. Note that the IsValidMetricName function performs the same
// check but faster than a match with this regular expression.
MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
)
// MetricNameRE is a regular expression matching valid metric
// names. Note that the IsValidMetricName function performs the same
// check but faster than a match with this regular expression.
var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
// a singleton and refers to one and only one stream of samples.


@ -22,10 +22,8 @@ import (
// when calculating their combined hash value (aka signature aka fingerprint).
const SeparatorByte byte = 255
var (
// cache the signature of an empty label set.
emptyLabelSignature = hashNew()
)
// cache the signature of an empty label set.
var emptyLabelSignature = hashNew()
// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
// given label set. (Collisions are possible but unlikely if the number of label


@ -81,7 +81,7 @@ func (s *Silence) Validate() error {
}
for _, m := range s.Matchers {
if err := m.Validate(); err != nil {
return fmt.Errorf("invalid matcher: %s", err)
return fmt.Errorf("invalid matcher: %w", err)
}
}
if s.StartsAt.IsZero() {


@ -21,14 +21,12 @@ import (
"strings"
)
var (
// ZeroSample is the pseudo zero-value of Sample used to signal a
// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
// and metric nil. Note that the natural zero value of Sample has a timestamp
// of 0, which is possible to appear in a real Sample and thus not suitable
// to signal a non-existing Sample.
ZeroSample = Sample{Timestamp: Earliest}
)
// ZeroSample is the pseudo zero-value of Sample used to signal a
// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
// and metric nil. Note that the natural zero value of Sample has a timestamp
// of 0, which is possible to appear in a real Sample and thus not suitable
// to signal a non-existing Sample.
var ZeroSample = Sample{Timestamp: Earliest}
// Sample is a sample pair associated with a metric. A single sample must either
// define Value or Histogram but not both. Histogram == nil implies the Value
@ -274,7 +272,7 @@ func (s *Scalar) UnmarshalJSON(b []byte) error {
value, err := strconv.ParseFloat(f, 64)
if err != nil {
return fmt.Errorf("error parsing sample value: %s", err)
return fmt.Errorf("error parsing sample value: %w", err)
}
s.Value = SampleValue(value)
return nil


@ -20,14 +20,12 @@ import (
"strconv"
)
var (
// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
// non-existing sample pair. It is a SamplePair with timestamp Earliest and
// value 0.0. Note that the natural zero value of SamplePair has a timestamp
// of 0, which is possible to appear in a real SamplePair and thus not
// suitable to signal a non-existing SamplePair.
ZeroSamplePair = SamplePair{Timestamp: Earliest}
)
// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
// non-existing sample pair. It is a SamplePair with timestamp Earliest and
// value 0.0. Note that the natural zero value of SamplePair has a timestamp
// of 0, which is possible to appear in a real SamplePair and thus not
// suitable to signal a non-existing SamplePair.
var ZeroSamplePair = SamplePair{Timestamp: Earliest}
// A SampleValue is a representation of a value for a given sample at a given
// time.


@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.53.3
GOLANGCI_LINT_VERSION ?= v1.54.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))


@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build netbsd || openbsd || solaris || windows || nostatfs
// +build netbsd openbsd solaris windows nostatfs
//go:build !freebsd && !linux
// +build !freebsd,!linux
package procfs


@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !netbsd && !openbsd && !solaris && !windows && !nostatfs
// +build !netbsd,!openbsd,!solaris,!windows,!nostatfs
//go:build freebsd || linux
// +build freebsd linux
package procfs


@ -44,6 +44,14 @@ const (
fieldTransport11TCPLen = 13
fieldTransport11UDPLen = 10
// kernel version >= 4.14 MaxLen
// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
fieldTransport11RDMAMaxLen = 28
// kernel version <= 4.2 MinLen
// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
fieldTransport11RDMAMinLen = 20
)
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
@ -233,6 +241,33 @@ type NFSTransportStats struct {
// A running counter, incremented on each request as the current size of the
// pending queue.
CumulativePendingQueue uint64
// Stats below only available with stat version 1.1.
// Transport over RDMA
// accessed when sending a call
ReadChunkCount uint64
WriteChunkCount uint64
ReplyChunkCount uint64
TotalRdmaRequest uint64
// rarely accessed error counters
PullupCopyCount uint64
HardwayRegisterCount uint64
FailedMarshalCount uint64
BadReplyCount uint64
MrsRecovered uint64
MrsOrphaned uint64
MrsAllocated uint64
EmptySendctxQ uint64
// accessed when receiving a reply
TotalRdmaReply uint64
FixupCopyCount uint64
ReplyWaitsForSend uint64
LocalInvNeeded uint64
NomsgCallCount uint64
BcallCount uint64
}
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
@ -587,14 +622,17 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
expectedLength = fieldTransport11TCPLen
} else if protocol == "udp" {
expectedLength = fieldTransport11UDPLen
} else if protocol == "rdma" {
expectedLength = fieldTransport11RDMAMinLen
} else {
return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
}
if len(ss) != expectedLength {
return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v", ErrFileParse, ss)
if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
(protocol == "rdma" && len(ss) < expectedLength) {
return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
}
default:
return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q", ErrFileParse, statVersion)
return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
}
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
@ -604,7 +642,9 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
// Note: slice length must be set to length of v1.1 stats to avoid a panic when
// only v1.0 stats are present.
// See: https://github.com/prometheus/node_exporter/issues/571.
ns := make([]uint64, fieldTransport11TCPLen)
//
// Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen
ns := make([]uint64, fieldTransport11RDMAMaxLen+3)
for i, s := range ss {
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
@ -622,9 +662,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
// we set them to 0 here.
if protocol == "udp" {
ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
} else if protocol == "tcp" {
ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
} else if protocol == "rdma" {
ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
}
return &NFSTransportStats{
// NFS xprt over tcp or udp
Protocol: protocol,
Port: ns[0],
Bind: ns[1],
@ -636,8 +681,32 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
BadTransactionIDs: ns[7],
CumulativeActiveRequests: ns[8],
CumulativeBacklog: ns[9],
MaximumRPCSlotsUsed: ns[10],
CumulativeSendingQueue: ns[11],
CumulativePendingQueue: ns[12],
// NFS xprt over tcp or udp
// And statVersion 1.1
MaximumRPCSlotsUsed: ns[10],
CumulativeSendingQueue: ns[11],
CumulativePendingQueue: ns[12],
// NFS xprt over rdma
// And stat Version 1.1
ReadChunkCount: ns[13],
WriteChunkCount: ns[14],
ReplyChunkCount: ns[15],
TotalRdmaRequest: ns[16],
PullupCopyCount: ns[17],
HardwayRegisterCount: ns[18],
FailedMarshalCount: ns[19],
BadReplyCount: ns[20],
MrsRecovered: ns[21],
MrsOrphaned: ns[22],
MrsAllocated: ns[23],
EmptySendctxQ: ns[24],
TotalRdmaReply: ns[25],
FixupCopyCount: ns[26],
ReplyWaitsForSend: ns[27],
LocalInvNeeded: ns[28],
NomsgCallCount: ns[29],
BcallCount: ns[30],
}, nil
}
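
A short sketch of reaching these transport counters through the public MountStats accessor; the field selection is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	mounts, err := p.MountStats()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range mounts {
		// NFS mounts carry the transport stats parsed above, including the
		// new RDMA counters when the transport protocol is "rdma".
		if nfs, ok := m.Stats.(*procfs.MountStatsNFS); ok {
			fmt.Println(m.Mount, nfs.Transport.Protocol, nfs.Transport.TotalRdmaRequest)
		}
	}
}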


@ -26,6 +26,7 @@ var (
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
rIno = regexp.MustCompile(`^ino:\s+(\d+)$`)
rInotify = regexp.MustCompile(`^inotify`)
rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
)
@ -40,6 +41,8 @@ type ProcFDInfo struct {
Flags string
// Mount point ID
MntID string
// Inode number
Ino string
// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
InotifyInfos []InotifyInfo
}
@ -51,7 +54,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
return nil, err
}
var text, pos, flags, mntid string
var text, pos, flags, mntid, ino string
var inotify []InotifyInfo
scanner := bufio.NewScanner(bytes.NewReader(data))
@ -63,6 +66,8 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
flags = rFlags.FindStringSubmatch(text)[1]
} else if rMntID.MatchString(text) {
mntid = rMntID.FindStringSubmatch(text)[1]
} else if rIno.MatchString(text) {
ino = rIno.FindStringSubmatch(text)[1]
} else if rInotify.MatchString(text) {
newInotify, err := parseInotifyInfo(text)
if err != nil {
@ -77,6 +82,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
Pos: pos,
Flags: flags,
MntID: mntid,
Ino: ino,
InotifyInfos: inotify,
}


@ -63,17 +63,17 @@ type ProcMap struct {
// parseDevice parses the device token of a line and converts it to a dev_t
// (mkdev) like structure.
func parseDevice(s string) (uint64, error) {
toks := strings.Split(s, ":")
if len(toks) < 2 {
return 0, fmt.Errorf("%w: unexpected number of fields, expected: 2, got: %q", ErrFileParse, len(toks))
i := strings.Index(s, ":")
if i == -1 {
return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s)
}
major, err := strconv.ParseUint(toks[0], 16, 0)
major, err := strconv.ParseUint(s[0:i], 16, 0)
if err != nil {
return 0, err
}
minor, err := strconv.ParseUint(toks[1], 16, 0)
minor, err := strconv.ParseUint(s[i+1:], 16, 0)
if err != nil {
return 0, err
}
@ -93,17 +93,17 @@ func parseAddress(s string) (uintptr, error) {
// parseAddresses parses the start-end address.
func parseAddresses(s string) (uintptr, uintptr, error) {
toks := strings.Split(s, "-")
if len(toks) < 2 {
return 0, 0, fmt.Errorf("%w: invalid address", ErrFileParse)
idx := strings.Index(s, "-")
if idx == -1 {
return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s)
}
saddr, err := parseAddress(toks[0])
saddr, err := parseAddress(s[0:idx])
if err != nil {
return 0, 0, err
}
eaddr, err := parseAddress(toks[1])
eaddr, err := parseAddress(s[idx+1:])
if err != nil {
return 0, 0, err
}


@ -23,7 +23,7 @@ import (
)
// ProcStatus provides status information about the process,
// read from /proc/[pid]/stat.
// read from /proc/[pid]/status.
type ProcStatus struct {
// The process ID.
PID int
@ -32,6 +32,8 @@ type ProcStatus struct {
// Thread group ID.
TGID int
// List of Pid namespace.
NSpids []uint64
// Peak virtual memory size.
VmPeak uint64 // nolint:revive
@ -127,6 +129,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
copy(s.UIDs[:], strings.Split(vString, "\t"))
case "Gid":
copy(s.GIDs[:], strings.Split(vString, "\t"))
case "NSpid":
s.NSpids = calcNSPidsList(vString)
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":
@ -200,3 +204,18 @@ func calcCpusAllowedList(cpuString string) []uint64 {
sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
return g
}
func calcNSPidsList(nspidsString string) []uint64 {
s := strings.Split(nspidsString, " ")
var nspids []uint64
for _, nspid := range s {
nspid, _ := strconv.ParseUint(nspid, 10, 64)
if nspid == 0 {
continue
}
nspids = append(nspids, nspid)
}
return nspids
}
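
A short sketch of reading the new NSpids field via the public status accessor; output handling is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	status, err := p.NewStatus()
	if err != nil {
		log.Fatal(err)
	}
	// NSpids lists the process ID as seen from each nested PID namespace,
	// outermost first, as parsed by calcNSPidsList above.
	fmt.Println(status.PID, status.NSpids)
}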

vendor/github.com/xlab/treeprint/.gitignore generated vendored Normal file

@ -0,0 +1,3 @@
vendor/**
.idea
**/**.iml


@ -16,28 +16,28 @@ type Value interface{}
type MetaValue interface{}
// NodeVisitor function type for iterating over nodes
type NodeVisitor func(item *node)
type NodeVisitor func(item *Node)
// Tree represents a tree structure with leaf-nodes and branch-nodes.
type Tree interface {
// AddNode adds a new node to a branch.
// AddNode adds a new Node to a branch.
AddNode(v Value) Tree
// AddMetaNode adds a new node with meta value provided to a branch.
// AddMetaNode adds a new Node with meta value provided to a branch.
AddMetaNode(meta MetaValue, v Value) Tree
// AddBranch adds a new branch node (a level deeper).
// AddBranch adds a new branch Node (a level deeper).
AddBranch(v Value) Tree
// AddMetaBranch adds a new branch node (a level deeper) with meta value provided.
// AddMetaBranch adds a new branch Node (a level deeper) with meta value provided.
AddMetaBranch(meta MetaValue, v Value) Tree
// Branch converts a leaf-node to a branch-node,
// applying this on a branch-node does no effect.
// Branch converts a leaf-Node to a branch-Node,
// applying this on a branch-Node does no effect.
Branch() Tree
// FindByMeta finds a node whose meta value matches the provided one by reflect.DeepEqual,
// FindByMeta finds a Node whose meta value matches the provided one by reflect.DeepEqual,
// returns nil if not found.
FindByMeta(meta MetaValue) Tree
// FindByValue finds a node whose value matches the provided one by reflect.DeepEqual,
// FindByValue finds a Node whose value matches the provided one by reflect.DeepEqual,
// returns nil if not found.
FindByValue(value Value) Tree
// returns the last node of a tree
// returns the last Node of a tree
FindLastNode() Tree
// String renders the tree or subtree as a string.
String() string
@ -48,19 +48,19 @@ type Tree interface {
SetMetaValue(meta MetaValue)
// VisitAll iterates over the tree, branches and nodes.
// If need to iterate over the whole tree, use the root node.
// If need to iterate over the whole tree, use the root Node.
// Note this method uses a breadth-first approach.
VisitAll(fn NodeVisitor)
}
type node struct {
Root *node
type Node struct {
Root *Node
Meta MetaValue
Value Value
Nodes []*node
Nodes []*Node
}
func (n *node) FindLastNode() Tree {
func (n *Node) FindLastNode() Tree {
ns := n.Nodes
if len(ns) == 0 {
return nil
@ -68,16 +68,16 @@ func (n *node) FindLastNode() Tree {
return ns[len(ns)-1]
}
func (n *node) AddNode(v Value) Tree {
n.Nodes = append(n.Nodes, &node{
func (n *Node) AddNode(v Value) Tree {
n.Nodes = append(n.Nodes, &Node{
Root: n,
Value: v,
})
return n
}
func (n *node) AddMetaNode(meta MetaValue, v Value) Tree {
n.Nodes = append(n.Nodes, &node{
func (n *Node) AddMetaNode(meta MetaValue, v Value) Tree {
n.Nodes = append(n.Nodes, &Node{
Root: n,
Meta: meta,
Value: v,
@ -85,8 +85,8 @@ func (n *node) AddMetaNode(meta MetaValue, v Value) Tree {
return n
}
func (n *node) AddBranch(v Value) Tree {
branch := &node{
func (n *Node) AddBranch(v Value) Tree {
branch := &Node{
Root: n,
Value: v,
}
@ -94,8 +94,8 @@ func (n *node) AddBranch(v Value) Tree {
return branch
}
func (n *node) AddMetaBranch(meta MetaValue, v Value) Tree {
branch := &node{
func (n *Node) AddMetaBranch(meta MetaValue, v Value) Tree {
branch := &Node{
Root: n,
Meta: meta,
Value: v,
@ -104,12 +104,12 @@ func (n *node) AddMetaBranch(meta MetaValue, v Value) Tree {
return branch
}
func (n *node) Branch() Tree {
func (n *Node) Branch() Tree {
n.Root = nil
return n
}
func (n *node) FindByMeta(meta MetaValue) Tree {
func (n *Node) FindByMeta(meta MetaValue) Tree {
for _, node := range n.Nodes {
if reflect.DeepEqual(node.Meta, meta) {
return node
@ -121,7 +121,7 @@ func (n *node) FindByMeta(meta MetaValue) Tree {
return nil
}
func (n *node) FindByValue(value Value) Tree {
func (n *Node) FindByValue(value Value) Tree {
for _, node := range n.Nodes {
if reflect.DeepEqual(node.Value, value) {
return node
@ -133,7 +133,7 @@ func (n *node) FindByValue(value Value) Tree {
return nil
}
func (n *node) Bytes() []byte {
func (n *Node) Bytes() []byte {
buf := new(bytes.Buffer)
level := 0
var levelsEnded []int
@ -158,19 +158,19 @@ func (n *node) Bytes() []byte {
return buf.Bytes()
}
func (n *node) String() string {
func (n *Node) String() string {
return string(n.Bytes())
}
func (n *node) SetValue(value Value) {
func (n *Node) SetValue(value Value) {
n.Value = value
}
func (n *node) SetMetaValue(meta MetaValue) {
func (n *Node) SetMetaValue(meta MetaValue) {
n.Meta = meta
}
func (n *node) VisitAll(fn NodeVisitor) {
func (n *Node) VisitAll(fn NodeVisitor) {
for _, node := range n.Nodes {
fn(node)
@ -182,7 +182,7 @@ func (n *node) VisitAll(fn NodeVisitor) {
}
func printNodes(wr io.Writer,
level int, levelsEnded []int, nodes []*node) {
level int, levelsEnded []int, nodes []*Node) {
for i, node := range nodes {
edge := EdgeTypeMid
@ -198,7 +198,7 @@ func printNodes(wr io.Writer,
}
func printValues(wr io.Writer,
level int, levelsEnded []int, edge EdgeType, node *node) {
level int, levelsEnded []int, edge EdgeType, node *Node) {
for i := 0; i < level; i++ {
if isEnded(levelsEnded, i) {
@ -227,7 +227,7 @@ func isEnded(levelsEnded []int, level int) bool {
return false
}
func renderValue(level int, node *node) Value {
func renderValue(level int, node *Node) Value {
lines := strings.Split(fmt.Sprintf("%v", node.Value), "\n")
// If value does not contain multiple lines, return itself.
@ -248,10 +248,10 @@ func renderValue(level int, node *node) Value {
// padding returns a padding for the multiline values with correctly placed link edges.
// It is generated by traversing the tree upwards (from leaf to the root of the tree)
// and, on each level, checking if the node the last one of its siblings.
// If a node is the last one, the padding on that level should be empty (there's nothing to link to below it).
// If a node is not the last one, the padding on that level should be the link edge so the sibling below is correctly connected.
func padding(level int, node *node) string {
// and, on each level, checking if the Node the last one of its siblings.
// If a Node is the last one, the padding on that level should be empty (there's nothing to link to below it).
// If a Node is not the last one, the padding on that level should be the link edge so the sibling below is correctly connected.
func padding(level int, node *Node) string {
links := make([]string, level+1)
for node.Root != nil {
@ -267,8 +267,8 @@ func padding(level int, node *node) string {
return strings.Join(links, "")
}
// isLast checks if the node is the last one in the slice of its parent children
func isLast(n *node) bool {
// isLast checks if the Node is the last one in the slice of its parent children
func isLast(n *Node) bool {
return n == n.Root.FindLastNode()
}
@ -285,10 +285,10 @@ var IndentSize = 3
// New Generates new tree
func New() Tree {
return &node{Value: "."}
return &Node{Value: "."}
}
// NewWithRoot Generates new tree with the given root value
func NewWithRoot(root Value) Tree {
return &node{Value: root}
return &Node{Value: root}
}
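
This hunk only exports the node type as Node (and NodeVisitor now takes *Node); the Tree interface is unchanged, so existing callers keep working. A minimal usage sketch, assuming the package is imported as treeprint:

package main

import (
	"fmt"

	"github.com/xlab/treeprint"
)

func main() {
	tree := treeprint.New()            // root node with value "."
	branch := tree.AddBranch("vendor") // one level deeper
	branch.AddNode("github.com")
	branch.AddNode("go.starlark.net")
	tree.AddNode("go.mod")

	// Visitors now receive the exported *treeprint.Node.
	tree.VisitAll(func(n *treeprint.Node) {
		_ = n.Value
	})

	fmt.Print(tree.String())
}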


@ -746,7 +746,7 @@ func (r *resolver) expr(e syntax.Expr) {
}
x := binop.X.(*syntax.Ident)
if seenName[x.Name] {
r.errorf(x.NamePos, "keyword argument %s repeated", x.Name)
r.errorf(x.NamePos, "keyword argument %q is repeated", x.Name)
} else {
if seenName == nil {
seenName = make(map[string]bool)


@ -51,8 +51,13 @@ type Thread struct {
// The default behavior is to call thread.Cancel("too many steps").
OnMaxSteps func(thread *Thread)
// steps counts abstract computation steps executed by this thread.
steps, maxSteps uint64
// Steps a count of abstract computation steps executed
// by this thread. It is incremented by the interpreter. It may be used
// as a measure of the approximate cost of Starlark execution, by
// computing the difference in its value before and after a computation.
//
// The precise meaning of "step" is not specified and may change.
Steps, maxSteps uint64
// cancelReason records the reason from the first call to Cancel.
cancelReason *string
@ -65,14 +70,9 @@ type Thread struct {
proftime time.Duration
}
// ExecutionSteps returns a count of abstract computation steps executed
// by this thread. It is incremented by the interpreter. It may be used
// as a measure of the approximate cost of Starlark execution, by
// computing the difference in its value before and after a computation.
//
// The precise meaning of "step" is not specified and may change.
// ExecutionSteps returns the current value of Steps.
func (thread *Thread) ExecutionSteps() uint64 {
return thread.steps
return thread.Steps
}
// SetMaxExecutionSteps sets a limit on the number of Starlark
@ -84,12 +84,20 @@ func (thread *Thread) SetMaxExecutionSteps(max uint64) {
thread.maxSteps = max
}
// Uncancel resets the cancellation state.
//
// Unlike most methods of Thread, it is safe to call Uncancel from any
// goroutine, even if the thread is actively executing.
func (thread *Thread) Uncancel() {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&thread.cancelReason)), nil)
}
// Cancel causes execution of Starlark code in the specified thread to
// promptly fail with an EvalError that includes the specified reason.
// There may be a delay before the interpreter observes the cancellation
// if the thread is currently in a call to a built-in function.
//
// Cancellation cannot be undone.
// Call [Uncancel] to reset the cancellation state.
//
// Unlike most methods of Thread, it is safe to call Cancel from any
// goroutine, even if the thread is actively executing.
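
A sketch of how the newly exported Steps field and the cancellation hooks fit together; the file name and script below are made up and error handling is elided:

package main

import (
	"fmt"

	"go.starlark.net/starlark"
)

func main() {
	thread := &starlark.Thread{Name: "example"}
	thread.SetMaxExecutionSteps(100000)
	thread.OnMaxSteps = func(t *starlark.Thread) {
		t.Cancel("budget exhausted") // default would be Cancel("too many steps")
	}

	before := thread.Steps // previously only readable via ExecutionSteps()
	_, err := starlark.ExecFile(thread, "budget.star", `squares = [x*x for x in range(1000)]`, nil)
	fmt.Println("steps used:", thread.Steps-before, "err:", err)

	// Cancellation is no longer permanent: Uncancel resets the state so the
	// same thread can run more Starlark code.
	thread.Uncancel()
}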
@ -1062,10 +1070,10 @@ func Binary(op syntax.Token, x, y Value) (Value, error) {
if x.Len() > y.Len() {
x, y = y, x // opt: range over smaller set
}
for _, xelem := range x.elems() {
for xe := x.ht.head; xe != nil; xe = xe.next {
// Has, Insert cannot fail here.
if found, _ := y.Has(xelem); found {
set.Insert(xelem)
if found, _ := y.Has(xe.key); found {
set.Insert(xe.key)
}
}
return set, nil
@ -1081,14 +1089,14 @@ func Binary(op syntax.Token, x, y Value) (Value, error) {
case *Set: // symmetric difference
if y, ok := y.(*Set); ok {
set := new(Set)
for _, xelem := range x.elems() {
if found, _ := y.Has(xelem); !found {
set.Insert(xelem)
for xe := x.ht.head; xe != nil; xe = xe.next {
if found, _ := y.Has(xe.key); !found {
set.Insert(xe.key)
}
}
for _, yelem := range y.elems() {
if found, _ := x.Has(yelem); !found {
set.Insert(yelem)
for ye := y.ht.head; ye != nil; ye = ye.next {
if found, _ := x.Has(ye.key); !found {
set.Insert(ye.key)
}
}
return set, nil
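
The rewrite above only swaps the allocation of x.elems() for a walk of the insertion-ordered ht.head list; the observable behavior of the set operators is unchanged. A small sketch exercising them from Go, assuming starlark.NewSet and starlark.MakeInt from the same package:

package main

import (
	"fmt"

	"go.starlark.net/starlark"
	"go.starlark.net/syntax"
)

// mkset builds an unfrozen set of small ints; Insert cannot fail here.
func mkset(vals ...int) *starlark.Set {
	s := starlark.NewSet(len(vals))
	for _, v := range vals {
		s.Insert(starlark.MakeInt(v))
	}
	return s
}

func main() {
	x, y := mkset(1, 2, 3), mkset(2, 3, 4)
	union, _ := starlark.Binary(syntax.PIPE, x, y)         // set([1, 2, 3, 4])
	inter, _ := starlark.Binary(syntax.AMP, x, y)          // set([2, 3])
	symdiff, _ := starlark.Binary(syntax.CIRCUMFLEX, x, y) // set([1, 4])
	fmt.Println(union, inter, symdiff)
}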
@ -1225,8 +1233,22 @@ func Call(thread *Thread, fn Value, args Tuple, kwargs []Tuple) (Value, error) {
fr.callable = c
thread.beginProfSpan()
// Use defer to ensure that panics from built-ins
// pass through the interpreter without leaving
// it in a bad state.
defer func() {
thread.endProfSpan()
// clear out any references
// TODO(adonovan): opt: zero fr.Locals and
// reuse it if it is large enough.
*fr = frame{}
thread.stack = thread.stack[:len(thread.stack)-1] // pop
}()
result, err := c.CallInternal(thread, args, kwargs)
thread.endProfSpan()
// Sanity check: nil is not a valid Starlark value.
if result == nil && err == nil {
@ -1240,9 +1262,6 @@ func Call(thread *Thread, fn Value, args Tuple, kwargs []Tuple) (Value, error) {
}
}
*fr = frame{} // clear out any references
thread.stack = thread.stack[:len(thread.stack)-1] // pop
return result, err
}


@ -66,16 +66,9 @@ func (ht *hashtable) init(size int) {
func (ht *hashtable) freeze() {
if !ht.frozen {
ht.frozen = true
for i := range ht.table {
for p := &ht.table[i]; p != nil; p = p.next {
for i := range p.entries {
e := &p.entries[i]
if e.hash != 0 {
e.key.Freeze()
e.value.Freeze()
}
}
}
for e := ht.head; e != nil; e = e.next {
e.key.Freeze()
e.value.Freeze()
}
}
}
@ -162,13 +155,12 @@ func overloaded(elems, buckets int) bool {
func (ht *hashtable) grow() {
// Double the number of buckets and rehash.
// TODO(adonovan): opt:
// - avoid reentrant calls to ht.insert, and specialize it.
// e.g. we know the calls to Equals will return false since
// there are no duplicates among the old keys.
// - saving the entire hash in the bucket would avoid the need to
// recompute the hash.
// - save the old buckets on a free list.
//
// Even though this makes reentrant calls to ht.insert,
// calls Equals unnecessarily (since there can't be duplicate keys),
// and recomputes the hash unnecessarily, the gains from
// avoiding these steps were found to be too small to justify
// the extra logic: -2% on hashtable benchmark.
ht.table = make([]bucket, len(ht.table)<<1)
oldhead := ht.head
ht.head = nil
@ -372,6 +364,8 @@ func (it *keyIterator) Done() {
}
}
// TODO(adonovan): use go1.19's maphash.String.
// hashString computes the hash of s.
func hashString(s string) uint32 {
if len(s) >= 12 {


@ -190,20 +190,29 @@ func (i Int) Hash() (uint32, error) {
}
return 12582917 * uint32(lo+3), nil
}
func (x Int) CompareSameType(op syntax.Token, v Value, depth int) (bool, error) {
// Required by the TotallyOrdered interface
func (x Int) Cmp(v Value, depth int) (int, error) {
y := v.(Int)
xSmall, xBig := x.get()
ySmall, yBig := y.get()
if xBig != nil || yBig != nil {
return threeway(op, x.bigInt().Cmp(y.bigInt())), nil
return x.bigInt().Cmp(y.bigInt()), nil
}
return threeway(op, signum64(xSmall-ySmall)), nil
return signum64(xSmall - ySmall), nil // safe: int32 operands
}
// Float returns the float value nearest i.
func (i Int) Float() Float {
iSmall, iBig := i.get()
if iBig != nil {
// Fast path for hardware int-to-float conversions.
if iBig.IsUint64() {
return Float(iBig.Uint64())
} else if iBig.IsInt64() {
return Float(iBig.Int64())
}
f, _ := new(big.Float).SetInt(iBig).Float64()
return Float(f)
}


@ -81,14 +81,25 @@ func (fn *Function) CallInternal(thread *Thread, args Tuple, kwargs []Tuple) (Va
var iterstack []Iterator // stack of active iterators
// Use defer so that application panics can pass through
// interpreter without leaving thread in a bad state.
defer func() {
// ITERPOP the rest of the iterator stack.
for _, iter := range iterstack {
iter.Done()
}
fr.locals = nil
}()
sp := 0
var pc uint32
var result Value
code := f.Code
loop:
for {
thread.steps++
if thread.steps >= thread.maxSteps {
thread.Steps++
if thread.Steps >= thread.maxSteps {
if thread.OnMaxSteps != nil {
thread.OnMaxSteps(thread)
} else {
@ -646,14 +657,7 @@ loop:
break loop
}
}
// ITERPOP the rest of the iterator stack.
for _, iter := range iterstack {
iter.Done()
}
fr.locals = nil
// (deferred cleanup runs here)
return result, err
}


@ -300,7 +300,9 @@ func unpackOneArg(v Value, ptr interface{}) error {
// Attempt to call Value.Type method.
func() {
defer func() { recover() }()
paramType = paramVar.MethodByName("Type").Call(nil)[0].String()
if typer, _ := paramVar.Interface().(interface{ Type() string }); typer != nil {
paramType = typer.Type()
}
}()
return fmt.Errorf("got %s, want %s", v.Type(), paramType)
}


@ -7,35 +7,35 @@
// Starlark values are represented by the Value interface.
// The following built-in Value types are known to the evaluator:
//
// NoneType -- NoneType
// Bool -- bool
// Bytes -- bytes
// Int -- int
// Float -- float
// String -- string
// *List -- list
// Tuple -- tuple
// *Dict -- dict
// *Set -- set
// *Function -- function (implemented in Starlark)
// *Builtin -- builtin_function_or_method (function or method implemented in Go)
// NoneType -- NoneType
// Bool -- bool
// Bytes -- bytes
// Int -- int
// Float -- float
// String -- string
// *List -- list
// Tuple -- tuple
// *Dict -- dict
// *Set -- set
// *Function -- function (implemented in Starlark)
// *Builtin -- builtin_function_or_method (function or method implemented in Go)
//
// Client applications may define new data types that satisfy at least
// the Value interface. Such types may provide additional operations by
// implementing any of these optional interfaces:
//
// Callable -- value is callable like a function
// Comparable -- value defines its own comparison operations
// Iterable -- value is iterable using 'for' loops
// Sequence -- value is iterable sequence of known length
// Indexable -- value is sequence with efficient random access
// Mapping -- value maps from keys to values, like a dictionary
// HasBinary -- value defines binary operations such as * and +
// HasAttrs -- value has readable fields or methods x.f
// HasSetField -- value has settable fields x.f
// HasSetIndex -- value supports element update using x[i]=y
// HasSetKey -- value supports map update using x[k]=v
// HasUnary -- value defines unary operations such as + and -
// Callable -- value is callable like a function
// Comparable -- value defines its own comparison operations
// Iterable -- value is iterable using 'for' loops
// Sequence -- value is iterable sequence of known length
// Indexable -- value is sequence with efficient random access
// Mapping -- value maps from keys to values, like a dictionary
// HasBinary -- value defines binary operations such as * and +
// HasAttrs -- value has readable fields or methods x.f
// HasSetField -- value has settable fields x.f
// HasSetIndex -- value supports element update using x[i]=y
// HasSetKey -- value supports map update using x[k]=v
// HasUnary -- value defines unary operations such as + and -
//
// Client applications may also define domain-specific functions in Go
// and make them available to Starlark programs. Use NewBuiltin to
@ -63,7 +63,6 @@
// through Starlark code and into callbacks. When evaluation fails it
// returns an EvalError from which the application may obtain a
// backtrace of active Starlark calls.
//
package starlark // import "go.starlark.net/starlark"
// This file defines the data types of Starlark and their basic operations.
@ -132,15 +131,41 @@ type Comparable interface {
CompareSameType(op syntax.Token, y Value, depth int) (bool, error)
}
// A TotallyOrdered is a type whose values form a total order:
// if x and y are of the same TotallyOrdered type, then x must be less than y,
// greater than y, or equal to y.
//
// It is simpler than Comparable and should be preferred in new code,
// but if a type implements both interfaces, Comparable takes precedence.
type TotallyOrdered interface {
Value
// Cmp compares two values x and y of the same totally ordered type.
// It returns negative if x < y, positive if x > y, and zero if the values are equal.
//
// Implementations that recursively compare subcomponents of
// the value should use the CompareDepth function, not Cmp, to
// avoid infinite recursion on cyclic structures.
//
// The depth parameter is used to bound comparisons of cyclic
// data structures. Implementations should decrement depth
// before calling CompareDepth and should return an error if depth
// < 1.
//
// Client code should not call this method. Instead, use the
// standalone Compare or Equals functions, which are defined for
// all pairs of operands.
Cmp(y Value, depth int) (int, error)
}
var (
_ Comparable = Int{}
_ Comparable = False
_ Comparable = Float(0)
_ Comparable = String("")
_ Comparable = (*Dict)(nil)
_ Comparable = (*List)(nil)
_ Comparable = Tuple(nil)
_ Comparable = (*Set)(nil)
_ TotallyOrdered = Int{}
_ TotallyOrdered = Float(0)
_ Comparable = False
_ Comparable = String("")
_ Comparable = (*Dict)(nil)
_ Comparable = (*List)(nil)
_ Comparable = Tuple(nil)
_ Comparable = (*Set)(nil)
)
// A Callable value f may be the operand of a function call, f(x).
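
A sketch of what implementing the new interface looks like for a custom value; the celsius type, its toy Hash, and the comparison via CompareDepth are illustrative only:

package main

import (
	"fmt"
	"math"

	"go.starlark.net/starlark"
	"go.starlark.net/syntax"
)

// celsius is a toy Starlark value whose values form a total order.
type celsius float64

func (c celsius) String() string        { return fmt.Sprintf("%g°C", float64(c)) }
func (c celsius) Type() string          { return "celsius" }
func (c celsius) Freeze()               {} // immutable, nothing to do
func (c celsius) Truth() starlark.Bool  { return starlark.Bool(c != 0) }
func (c celsius) Hash() (uint32, error) { return uint32(math.Float64bits(float64(c))), nil } // toy hash

// Cmp returns negative, zero, or positive; depth is unused because the
// value has no subcomponents to recurse into.
func (c celsius) Cmp(y starlark.Value, depth int) (int, error) {
	o := y.(celsius)
	switch {
	case c < o:
		return -1, nil
	case c > o:
		return +1, nil
	}
	return 0, nil
}

var _ starlark.TotallyOrdered = celsius(0)

func main() {
	// Client code goes through the standalone helpers, not Cmp directly.
	lt, _ := starlark.CompareDepth(syntax.LT, celsius(20), celsius(37), 10)
	fmt.Println(lt) // true
}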
@ -229,13 +254,12 @@ var (
//
// Example usage:
//
// iter := iterable.Iterator()
// iter := iterable.Iterator()
// defer iter.Done()
// var x Value
// for iter.Next(&x) {
// ...
// }
//
type Iterator interface {
// If the iterator is exhausted, Next returns false.
// Otherwise it sets *p to the current element of the sequence,
@ -276,7 +300,7 @@ type HasSetKey interface {
var _ HasSetKey = (*Dict)(nil)
// A HasBinary value may be used as either operand of these binary operators:
// + - * / // % in not in | & ^ << >>
// + - * / // % in not in | & ^ << >>
//
// The Side argument indicates whether the receiver is the left or right operand.
//
@ -296,7 +320,7 @@ const (
)
// A HasUnary value may be used as the operand of these unary operators:
// + - ~
// + - ~
//
// An implementation may decline to handle an operation by returning (nil, nil).
// For this reason, clients should always call the standalone Unary(op, x)
@ -441,9 +465,9 @@ func isFinite(f float64) bool {
return math.Abs(f) <= math.MaxFloat64
}
func (x Float) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) {
func (x Float) Cmp(y_ Value, depth int) (int, error) {
y := y_.(Float)
return threeway(op, floatCmp(x, y)), nil
return floatCmp(x, y), nil
}
// floatCmp performs a three-valued comparison on floats,
@ -711,6 +735,34 @@ func (fn *Function) Param(i int) (string, syntax.Position) {
id := fn.funcode.Locals[i]
return id.Name, id.Pos
}
// ParamDefault returns the default value of the specified parameter
// (0 <= i < NumParams()), or nil if the parameter is not optional.
func (fn *Function) ParamDefault(i int) Value {
if i < 0 || i >= fn.NumParams() {
panic(i)
}
// fn.defaults omits all required params up to the first optional param. It
// also does not include *args or **kwargs at the end.
firstOptIdx := fn.NumParams() - len(fn.defaults)
if fn.HasVarargs() {
firstOptIdx--
}
if fn.HasKwargs() {
firstOptIdx--
}
if i < firstOptIdx || i >= firstOptIdx+len(fn.defaults) {
return nil
}
dflt := fn.defaults[i-firstOptIdx]
if _, ok := dflt.(mandatory); ok {
return nil
}
return dflt
}
func (fn *Function) HasVarargs() bool { return fn.funcode.HasVarargs }
func (fn *Function) HasKwargs() bool { return fn.funcode.HasKwargs }
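
A usage sketch for the new ParamDefault accessor; the script and names are made up, and note that *args/**kwargs count toward NumParams but never report a default:

package main

import (
	"fmt"

	"go.starlark.net/starlark"
)

func main() {
	const src = `
def greet(name, greeting="hello", *args, **kwargs):
    pass
`
	thread := &starlark.Thread{Name: "params"}
	globals, err := starlark.ExecFile(thread, "params.star", src, nil)
	if err != nil {
		panic(err)
	}
	fn := globals["greet"].(*starlark.Function)
	for i := 0; i < fn.NumParams(); i++ {
		name, _ := fn.Param(i)
		// ParamDefault returns nil for name, args, and kwargs here.
		fmt.Printf("%s default=%v\n", name, fn.ParamDefault(i))
	}
}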
@ -754,13 +806,12 @@ func NewBuiltin(name string, fn func(thread *Thread, fn *Builtin, args Tuple, kw
// In the example below, the value of f is the string.index
// built-in method bound to the receiver value "abc":
//
// f = "abc".index; f("a"); f("b")
// f = "abc".index; f("a"); f("b")
//
// In the common case, the receiver is bound only during the call,
// but this still results in the creation of a temporary method closure:
//
// "abc".index("a")
//
// "abc".index("a")
func (b *Builtin) BindReceiver(recv Value) *Builtin {
return &Builtin{name: b.name, fn: b.fn, recv: recv}
}
@ -1065,7 +1116,6 @@ func (s *Set) Len() int { return int(s.ht.len) }
func (s *Set) Iterate() Iterator { return s.ht.iterate() }
func (s *Set) String() string { return toString(s) }
func (s *Set) Type() string { return "set" }
func (s *Set) elems() []Value { return s.ht.keys() }
func (s *Set) Freeze() { s.ht.freeze() }
func (s *Set) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: set") }
func (s *Set) Truth() Bool { return s.Len() > 0 }
@ -1091,8 +1141,8 @@ func setsEqual(x, y *Set, depth int) (bool, error) {
if x.Len() != y.Len() {
return false, nil
}
for _, elem := range x.elems() {
if found, _ := y.Has(elem); !found {
for e := x.ht.head; e != nil; e = e.next {
if found, _ := y.Has(e.key); !found {
return false, nil
}
}
@ -1101,8 +1151,8 @@ func setsEqual(x, y *Set, depth int) (bool, error) {
func (s *Set) Union(iter Iterator) (Value, error) {
set := new(Set)
for _, elem := range s.elems() {
set.Insert(elem) // can't fail
for e := s.ht.head; e != nil; e = e.next {
set.Insert(e.key) // can't fail
}
var x Value
for iter.Next(&x) {
@ -1206,11 +1256,11 @@ func writeValue(out *strings.Builder, x Value, path []Value) {
case *Set:
out.WriteString("set([")
for i, elem := range x.elems() {
if i > 0 {
for e := x.ht.head; e != nil; e = e.next {
if e != x.ht.head {
out.WriteString(", ")
}
writeValue(out, elem, path)
writeValue(out, e.key, path)
}
out.WriteString("])")
@ -1275,6 +1325,14 @@ func CompareDepth(op syntax.Token, x, y Value, depth int) (bool, error) {
return xcomp.CompareSameType(op, y, depth)
}
if xcomp, ok := x.(TotallyOrdered); ok {
t, err := xcomp.Cmp(y, depth)
if err != nil {
return false, err
}
return threeway(op, t), nil
}
// use identity comparison
switch op {
case syntax.EQL:


@ -132,11 +132,12 @@ func (s *Struct) ToStringDict(d starlark.StringDict) {
func (s *Struct) String() string {
buf := new(strings.Builder)
if s.constructor == Default {
switch constructor := s.constructor.(type) {
case starlark.String:
// NB: The Java implementation always prints struct
// even for Bazel provider instances.
buf.WriteString("struct") // avoid String()'s quotation
} else {
buf.WriteString(constructor.GoString()) // avoid String()'s quotation
default:
buf.WriteString(s.constructor.String())
}
buf.WriteByte('(')


@ -119,9 +119,7 @@ func Walk(n Node, f func(Node) bool) {
case *DictExpr:
for _, entry := range n.List {
entry := entry.(*DictEntry)
Walk(entry.Key, f)
Walk(entry.Value, f)
Walk(entry, f)
}
case *UnaryExpr:

Some files were not shown because too many files have changed in this diff.