upgrade to latest dependencies (#1912)

bumping google.golang.org/genproto/googleapis/rpc 995d672...1f4bbc5:
  > 1f4bbc5 chore(all): auto-regenerate .pb.go files (# 1083)
  > ef43131 chore(all): auto-regenerate .pb.go files (# 1082)
  > 6c6643b chore(all): update all (# 1078)
  > a9fa171 fix(dataform): Remove broken aliases (# 1080)
  > 9144d31 fix(analytics): Remove broken aliases (# 1079)
  > 35c7eff chore(all): update all (# 1077)
  > 50ed04b chore(all): update all to v4 (# 1076)
  > 0d6446b chore(all): update all (# 1075)
bumping github.com/google/uuid 4d47f8e...0f11ee6:
  > 0f11ee6 chore(master): release 1.6.0 (# 151)
  > 16939da chore(tests):  add strict monotonicity test case for uuid v7. (# 154)
  > 016b199 fix: fix typo in version 7 uuid documentation (# 153)
  > 1d8b6ea ci: set token permissions to github workflows (# 143)
  > a2b2b32 fix: Monotonicity in UUIDv7 (# 150)
  > c58770e feat: add Max UUID constant (# 149)
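
The uuid bump brings in the v1.6.0 strict-monotonicity work for UUIDv7 and the new Max constant mentioned above. A minimal sketch of how a consumer might exercise both, assuming github.com/google/uuid v1.6.0 (illustrative only, not code from this repository):

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// v1.6.0 makes successive NewV7 values within one process strictly increasing.
	a, _ := uuid.NewV7()
	b, _ := uuid.NewV7()
	fmt.Println("ordered:", a.String() < b.String())

	// Max is the new all-ones UUID added in v1.6.0.
	fmt.Println("max:", uuid.Max) // ffffffff-ffff-ffff-ffff-ffffffffffff
}
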
bumping google.golang.org/genproto 989df2b...a9fa171:
  > a9fa171 fix(dataform): Remove broken aliases (# 1080)
  > 9144d31 fix(analytics): Remove broken aliases (# 1079)
  > 35c7eff chore(all): update all (# 1077)
  > 50ed04b chore(all): update all to v4 (# 1076)
  > 0d6446b chore(all): update all (# 1075)
  > 995d672 chore(all): update all (# 1073)
bumping golang.org/x/sys 0829ab1...914b96c:
  > 914b96c windows: support ill-formed UTF-16 in UTF16PtrToString
  > 511ec84 Revert "windows: support nil done parameter in ReadFile and WriteFile"
  > 628365d windows: support nil done parameter in ReadFile and WriteFile
  > bef1bd8 unix: move mksyscall regexp to package level variables
  > 5710a32 unix/linux: update Linux kernel to 6.7
  > b3ce6a3 windows: build env_windows_test.go only go Go 1.21 and above
  > c3fa2b8 windows: fix parsing of non-ASCII entries in token.Environ
  > f69d32a unix: in TestDirent, make as many ReadDirent calls as are needed
  > 0d9df52 unix: add more SECCOMP constants
bumping knative.dev/eventing fd95228...97e91c5:
  > 97e91c5 [main] Update community files (# 7675)
  > eac7384 Wait for events with poll interval after finished event received (# 7668)
  > ba77c3d [main] Update community files (# 7672)
  > 7d350ea [main] Upgrade to latest dependencies (# 7669)
  > 185fbf4 [main] Update community files (# 7671)
  > 03ed4ab wathola receiver, annotate span by step number (# 7667)
  > 6f2eaed Set default value for scale cache config values (# 7666)
  > bcb6100 e2e: nodeselector in apiserversauce (# 7627)
  > 836f4da feat: replace yaml merge tags (# 7662)
  > 5500bed Cache statefulset scale update/get requests (# 7651)
  > 7b975fc Update KinD for e2e tests to 0.21.0 (# 7656)
  > 96863ba [main] Upgrade to latest dependencies (# 7657)
  > 469d0ac [main] Upgrade to latest dependencies (# 7654)
  > 9d892bf Remove empty labels (# 7648)
  > d258e7d Create teardown script for local development (# 7642)
  > 62c74c1 [main] Update community files (# 7646)
  > 702004a Tiny trustbundle improvements (# 7644)
  > 02bde54 Add helm as one of the development requirement (# 7643)
  > 56ee9b2 Add TLS tests for Trigger and Subscription dead letter sinks (# 7636)
  > ff52881 Use filtered informer to watch OIDC service accounts (# 7527)
  > 54f3952 [main] Upgrade to latest dependencies (# 7641)
  > 3391d1e [main] Upgrade to latest dependencies (# 7639)
  > c6105d6 Update knative.dev/hack to latest main (# 7637)
  > 0340aa5 Fix mtbroker-filter to proxy response headers (# 7614)
  > e9bf641 Use projected volumes for sinkbinding trust bundles (# 7630)
  > bcba98a Move OIDC tests to test/rekt (# 7622)
  > ab47824 support: nodeselector in apiserversource (# 7584)
  > cdb8638 fix: exact -> atleast (# 7624)
  > 42efd06 [main] Upgrade to latest dependencies (# 7623)
  > 214f4b3 [main] Upgrade to latest dependencies (# 7616)
  > 9b6c7e2 Generalize description of EventType in CRD. (# 7617)
  > 95b9345 Adding documentation and linking resources for go setup issues with Linux Ubuntu distribution (removed noisy commits) (# 7612)
bumping golang.org/x/term ae94145...353276a:
  > 353276a go.mod: update golang.org/x dependencies
bumping knative.dev/hack f3f03ac...999d7e6:
  > 999d7e6 Update community files (# 367)
  > 6090613 Update community files (# 366)
  > d1067f2 Update community files (# 365)
  > f3881d9 Update community files (# 364)
  > ab9b690 Don't hardcode serving as the repo when setting highest semver (# 361)
  > 45dcf10 Update community files (# 360)
  > 40f0ac2 Update community files (# 359)
bumping github.com/go-logr/logr 8adefbe...dcdc3f2:
  > dcdc3f2 slogr: fix unintended API break in v0.8.0 (# 253)
  > 5d88f52 funcr: Add LogInfoLevel Option to skip logging level in the info log (# 240)
  > 177005d build(deps): bump actions/upload-artifact from 3.1.3 to 4.0.0
  > e7f489a build(deps): bump github/codeql-action from 2.22.9 to 3.22.11
  > cf56c3b build(deps): bump actions/setup-go from 4 to 5
  > 2ad296e build(deps): bump github/codeql-action from 2.22.8 to 2.22.9
  > d55b4e2 Merge pull request # 241 from thockin/master
  > c589653 Merge pull request # 245 from thockin/sloghandler_empty_groups
  > 98ee9d9 Clean up slog testing and restore coverage
  > 006d752 Merge pull request # 246 from thockin/slog_context_tests
  > b01bad7 sloghandler: unnamed groups should be inlined
  > b228ba8 Break examples to new file
  > 5153ab2 unit tests with full SlogSink mock implementation
  > 2665157 Add tests for context with slog
  > 6432877 Add benchmarks for slogSink
  > 58f101e Fix bug in slog support carrying the wrong sink
  > f558531 Put slog tests in a helper, move funcr test
  > 6151b2f support a slog.Logger pointer in a context
  > 83dbe72 Fix some lint
  > 1fffd07 move slogr into main package
  > b5e7d9f funcr: Be consistent about quoted
  > 41d36ee build(deps): bump github/codeql-action from 2.22.7 to 2.22.8
  > 44c6ac8 funcr: Be consistent about colons
  > f36813a build(deps): bump github/codeql-action from 2.22.5 to 2.22.7
  > 955d2aa funcr: Be consistent about commas
  > 24eb27c build(deps): bump actions/github-script from 6.4.1 to 7.0.1
  > 8221825 Add SlogSink support to funcr
  > 5b79d78 Merge pull request # 233 from go-logr/dependabot/github_actions/github/codeql-action-2.22.5
  > d95f9b6 Get rid of testSlogSink
  > 424fad7 Merge pull request # 232 from go-logr/dependabot/github_actions/ossf/scorecard-action-2.3.1
  > e602c1d build(deps): bump github/codeql-action from 2.22.4 to 2.22.5
  > cdb930d Minor cleanups in slogr_test (readability)
  > 64618df build(deps): bump ossf/scorecard-action from 2.3.0 to 2.3.1
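
go-logr 1.4.x folds the former slogr subpackage into the main package, so logr and the standard library's log/slog can front each other. A small sketch assuming logr v1.4.1's FromSlogHandler/ToSlogHandler helpers (names as published upstream; this snippet is illustrative, not taken from this repository):

package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// Use a slog handler behind a logr.Logger...
	log := logr.FromSlogHandler(slog.NewJSONHandler(os.Stdout, nil))
	log.Info("hello from logr", "key", "value")

	// ...and expose a logr.Logger to slog-based callers.
	slog.New(logr.ToSlogHandler(log)).Info("hello from slog")
}
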
bumping golang.org/x/oauth2 39adbb7...ebe81ad:
  > ebe81ad go.mod: update golang.org/x dependencies
  > adffd94 google/internal/externalaccount: update serviceAccountImpersonationRE to support universe domain
  > deefa7e google/downscope: add DownscopingConfig.UniverseDomain to support TPC
bumping google.golang.org/genproto/googleapis/api 989df2b...6c6643b:
  > 6c6643b chore(all): update all (# 1078)
  > a9fa171 fix(dataform): Remove broken aliases (# 1080)
  > 9144d31 fix(analytics): Remove broken aliases (# 1079)
  > 35c7eff chore(all): update all (# 1077)
  > 50ed04b chore(all): update all to v4 (# 1076)
  > 0d6446b chore(all): update all (# 1075)
  > 995d672 chore(all): update all (# 1073)
bumping google.golang.org/grpc dbbcf59...8167bc3:
  > 8167bc3 Change version to 1.61.0 (# 6936)
  > 52e2363 test/xds: Use different import path for gRPC Messages (# 6933)
  > 67e50be transport: Remove redundant if in handleGoAway (# 6930)
  > e96f521 alts: Extract AuthInfo after handshake in ALTS e2e test. (# 6931)
  > 987df13 metadata: move FromOutgoingContextRaw() to internal (# 6765)
  > 61eab37 server: block GracefulStop on method handlers and make blocking optional for Stop (# 6922)
  > ddd377f xds/server: fix RDS handling for non-inline route configs (# 6915)
  > 8b455de removing Roots deprecated Subjects field in tests (# 6907)
  > 953d12a alts: Forward-fix of ALTS queuing of handshake requests. (# 6906)
  > 6ce73bf internal/transport: convert `ConnectionError` to `Unavailable` status when writing headers (# 6891)
  > e7e400b deps: apply `make proto` changes (# 6916)
  > 660c394 examples: Fixed the formatting in the Authentication README.md (# 6908)
  > 3a8270f grpc: skip compression of empty messages (# 6842)
  > 7e9d319 vet: remove ignore of CloseNotifier (# 6911)
  > 5a36bb7 fix 'identitiy' typo in error message   (# 6909)
  > a233d9b build(deps): bump the github-actions group with 1 update (# 6904)
  > 6bc1906 xds: add support for mTLS Credentials in xDS bootstrap (# 6757)
  > 71cc0f1 Revert "alts: Queue ALTS handshakes once limit is reached rather than dropping. (# 6884)" (# 6903)
  > 4f03f3f removing deprecated http closenotifier function (# 6886)
  > bb0d32f xds: don't fail channel/server startup when xds creds is specified, but bootstrap is missing certificate providers (# 6848)
  > e20d5ce reflection: rename non-regenerated pb.go files to not be called '.pb.go' (# 6885)
  > adc7685 alts: Queue ALTS handshakes once limit is reached rather than dropping. (# 6884)
  > 33a60a8 internal: use OS defaults for TCP keepalive params in Windows (# 6863)
  > c109241 interop/xds: Increase go log verbosity to 99 so that EDS is logged (# 6860)
  > 02a4e93 orca: use atomic pointer instead of mutex in server metrics recorder to improve performance (# 6799)
  > df02c11 test/kokoro: Use the Kokoro shared install lib from the new repo (# 6859)
  > 444749d alts: Record network latency and pass it to the handshaker service. (# 6851)
  > 45624f0 grpc: eliminate panics in server worker implementation (# 6856)
  > 6e6914a completely delete WatchListener and WatchRouteConfig APIs (# 6849)
  > 836e5de credentials/alts: update handshaker.pb.go (# 6857)
  > 43e4461 Forbid dependabot from performing major version bumps (# 6852)
  > 686fdd8 security/advancedtls: fix test that relies on min TLS version (# 6824)
  > 52baf16 internal: use OS defaults for TCP keepalive params only on unix (# 6841)
  > d050906 build(deps): bump the github-actions group with 3 updates (# 6835)
  > 477bd62 xds/internal/resolver: switch to generic xDS API for LDS/RDS (# 6729)
  > a03c7f1 client: always enable TCP keepalives with OS defaults (# 6834)
  > c2398ce [infra] Hash-pin GitHub Actions, keep them updated with dependabot (# 6815)
  > 0866ce0 grpc: optional interface to provide channel authority (# 6752)
  > 5d7453e client: rework resolver and balancer wrappers to avoid deadlock (# 6804)
  > 93389b7 doc: fix link to the reflection protocol (# 6833)
  > 1b05500 internal/credentials/xds: Add exported comment for HandshakeInfo (# 6823)
  > 737f87b xds/internal/server: cleanup formatting directives in some logs (# 6820)
  > bc16b5f interop: support custom creds flag for stress test client (# 6809)
  > 02ea031 Bugfix for broken import (# 6816)
  > 287c473 Mark old CRL APIs as deprecated (# 6810)
  > 7935c4f resolver_wrapper: remove serializerScheduleLocked; the lock is unnecessary (# 6803)
  > 914ca65 client: further streamlining of Dial (# 6802)
  > 232054a client: remove deprecated WithServiceConfig DialOption (# 6800)
  > 42fdcc4 client: rename balancer and resolver wrapper files to be consistent (# 6801)
  > 59c0aec xDS: Atomically read and write xDS security configuration client side (# 6796)
  > ce3b538 client: simplify initialization and cleanup a bit (# 6798)
  > b98104e buffer & grpcsync: various cleanups and improvements (# 6785)
  > 424db25 credentials: if not set, restrict to TLS v1.2+ and CipherSuites per RFC7540 (# 6776)
  > 40c279a deps: update dependencies for all modules (# 6795)
  > 3cbbe29 reflection: don't serialize placeholders (# 6771)
  > 4a84ce6 Change version to 1.61.0-dev (# 6794)
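
Among the grpc-go 1.61.0 changes above, clients now enable TCP keepalives with OS defaults. For callers that want explicit control rather than the defaults, a hedged sketch (target address and durations are placeholders):

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:    30 * time.Second, // ping when the connection is idle this long
			Timeout: 10 * time.Second, // wait this long for a ping ack before closing
		}),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
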
bumping knative.dev/networking ce0738a...22eb3d0:
  > 22eb3d0 Update community files (# 924)
  > adc5e58 Update community files (# 923)
  > 1e6aa63 Update community files (# 922)
  > 1e7e42d upgrade to latest dependencies (# 921)
  > 66bdffa Update community files (# 920)
  > 060ef7a Clean up unused reconciler and certs code (# 919)
  > bdf2c51 upgrade to latest dependencies (# 918)
  > a874708 upgrade to latest dependencies (# 917)
  > ed5f0fa upgrade to latest dependencies (# 916)
  > a21edb5 Update community files (# 915)
  > 1dcd961 Update community files (# 914)
bumping google.golang.org/api b52e40b...87aa1d0:
  > 87aa1d0 chore(main): release 0.163.0 (# 2402)
  > 2271ef7 feat(all): auto-regenerate discovery clients (# 2405)
  > 47834b5 feat(all): auto-regenerate discovery clients (# 2403)
  > 62ceaad feat(all): auto-regenerate discovery clients (# 2401)
  > 26a1117 chore(main): release 0.162.0 (# 2389)
  > 6862015 fix(transport): enforce 1s timeout on requests to MDS universe_domain (# 2393)
  > a6d137b chore(all): update all (# 2396)
  > 5437df8 chore(all): update module github.com/google/go-github/v52 to v58 (# 2397)
  > eddfeb9 feat(all): auto-regenerate discovery clients (# 2398)
  > 169ead6 feat(all): auto-regenerate discovery clients (# 2395)
  > 140fb54 feat(all): auto-regenerate discovery clients (# 2394)
  > e5a7a15 feat(all): auto-regenerate discovery clients (# 2392)
  > c8e77f6 feat(all): auto-regenerate discovery clients (# 2391)
  > f2f2d22 feat(all): auto-regenerate discovery clients (# 2388)
  > befbd36 chore(main): release 0.161.0 (# 2387)
  > f29f327 fix(gen): reject repeated object query params (# 2383)
  > 54c764a fix(transport): skip s2a for now if service has direct path enabled (# 2385)
  > 03042ec feat(all): auto-regenerate discovery clients (# 2386)
  > dcb7e96 chore(main): release 0.160.0 (# 2381)
  > fee4c8f feat(all): auto-regenerate discovery clients (# 2384)
  > 5534cf2 feat(all): auto-regenerate discovery clients (# 2382)
  > 8f26f51 feat(all): auto-regenerate discovery clients (# 2380)
  > ee5c9cc chore(main): release 0.159.0 (# 2378)
  > 55b0516 fix(transport): relax universe checks (# 2376)
  > a8d9414 feat(all): auto-regenerate discovery clients (# 2377)
  > 68b1bc1 chore(main): release 0.158.0 (# 2360)
  > df17254 fix(transport/grpc): add universe domain verification (# 2375)
  > b21a1fa fix(internal): support internaloption.WithDefaultUniverseDomain (# 2373)
  > ddb3a12 chore(google-api-go-generator): replace literal with const (# 2363)
  > d266978 feat(all): auto-regenerate discovery clients (# 2374)
  > 73fc7fd fix(transport): not enable s2a when there is endpoint override (# 2368)
  > 2d69d97 feat(all): auto-regenerate discovery clients (# 2372)
  > 45c097f feat(all): auto-regenerate discovery clients (# 2369)
  > addc18c chore(all): update module github.com/google/go-github/v52 to v58 (# 2351)
  > a4e39d8 chore(all): update all to v0.47.0 (# 2367)
  > e0db6a5 feat(all): auto-regenerate discovery clients (# 2366)
  > f40db7f feat(all): auto-regenerate discovery clients (# 2365)
  > 69626e3 feat(transport): add universe domain support (# 2355)
  > 6c3b622 feat(all): auto-regenerate discovery clients (# 2361)
  > 6ef1144 feat(impersonate): add universe domain support (# 2296)
  > 6e77ef2 chore(main): release 0.157.0 (# 2346)
  > 72a8ffd feat(all): auto-regenerate discovery clients (# 2358)
  > 74a1558 docs(option): update WithDefaultEndpointTemplate docs (# 2356)
  > 135da01 chore(all): update all to a9fa171 (# 2350)
  > 0d002f9 feat(all): auto-regenerate discovery clients (# 2354)
  > da330c2 feat(all): auto-regenerate discovery clients (# 2353)
  > 5bf46ee feat(all): auto-regenerate discovery clients (# 2352)
  > 3bf8f4f feat(all): auto-regenerate discovery clients (# 2349)
  > 763c331 feat(all): auto-regenerate discovery clients (# 2348)
  > c3e43a1 feat(all): auto-regenerate discovery clients (# 2345)
  > d016573 chore(main): release 0.156.0 (# 2333)
  > 05de776 feat(all): auto-regenerate discovery clients (# 2344)
  > 811e925 feat(all): auto-regenerate discovery clients (# 2343)
  > 9e45101 feat(google-api-go-generator): add universe domain support (# 2335)
  > 3f90b98 feat(all): auto-regenerate discovery clients (# 2341)
  > 9745014 test: fix overflow (# 2342)
  > 3f8b548 chore(all): update all (# 2338)
  > c8905be chore(deps): bump github.com/cloudflare/circl from 1.3.3 to 1.3.7 in /internal/kokoro/discogen (# 2340)
  > d008b6e feat(all): auto-regenerate discovery clients (# 2339)
  > ceefb9b feat(all): auto-regenerate discovery clients (# 2337)
  > bd4dad1 feat(all): auto-regenerate discovery clients (# 2336)
  > 014a8e0 feat(all): auto-regenerate discovery clients (# 2332)
bumping golang.org/x/net cb5b10f...73d21fd:
  > 73d21fd go.mod: update golang.org/x dependencies
  > 643fd16 html: fix SOLIDUS '/' handling in attribute parsing
  > 73e4b50 dns/dnsmessage: allow name compression for SRV resource parsing
  > b2208d0 internal/quic/qlog: fix typo
  > 0d0b98c http2: avoid goroutine starvation in TestServer_Push_RejectAfterGoAway
  > 07e05fd http2: remove suspicious uint32->v conversion in frame code
  > 26b646e quic: avoid deadlock in Endpoint.Close
bumping knative.dev/client-pkg 67fca0c...2c46c44:
  > 2c46c44 Update community files (# 150)
  > adb84bd Update community files (# 149)
  > a722b38 Update community files (# 148)
  > 788b4b0 Update community files (# 147)
bumping knative.dev/pkg b488e7b...b8f9b22:
  > b8f9b22 Update community files (# 2957)
  > bc60487 Update community files (# 2956)
  > 405f0c4 Update community files (# 2955)
  > 2d2e27d Bump golang.org/x/oauth2 from 0.16.0 to 0.17.0 (# 2952)
  > 3b39834 Bump google.golang.org/api from 0.161.0 to 0.163.0 (# 2953)
  > 085f505 Bump golang.org/x/net from 0.20.0 to 0.21.0 (# 2954)
  > 4104e42 Bump google.golang.org/api from 0.159.0 to 0.161.0 (# 2951)
  > e85c3cf Update community files (# 2950)
  > b6659cc Bump cloud.google.com/go/storage from 1.36.0 to 1.37.0 (# 2948)
  > 9bdb511 Bump google.golang.org/grpc from 1.60.1 to 1.61.0 (# 2947)
  > d34ca03 Update knative.dev/hack to latest main (# 2949)
  > 91c6ec8 Bump google.golang.org/api from 0.157.0 to 0.159.0 (# 2946)
  > 6d59538 Bump github.com/evanphx/json-patch/v5 from 5.8.1 to 5.9.0 (# 2945)
  > 6eb53f5 Bump github.com/google/uuid from 1.5.0 to 1.6.0 (# 2944)
  > 2b8687a upgrade to latest dependencies (# 2943)
  > b42fab9 Update community files (# 2942)
  > 3f7ce63 Bump google.golang.org/api from 0.155.0 to 0.157.0 (# 2941)
  > 2a92e9d Bump github.com/evanphx/json-patch/v5 from 5.8.0 to 5.8.1 (# 2940)
  > 455683d Update community files (# 2939)
bumping knative.dev/serving 1c46c07...096adcc:
  > 096adcc Update net-certmanager nightly (# 14898)
  > fd7a6c8 Update net-contour nightly (# 14895)
  > ec32a4c Update net-kourier nightly (# 14894)
  > 148e001 Update net-certmanager nightly (# 14891)
  > 6283914 Fix: run ./hack/update-deps.sh (# 14888)
  > d8bc015 Update net-istio nightly (# 14893)
  > 45f9eeb Update community files (# 14896)
  > 5efa21e Update net-gateway-api nightly (# 14892)
  > dad0326 Update net-contour nightly (# 14884)
  > 4286c63 Update community files (# 14890)
  > 06eac8e Update net-kourier nightly (# 14885)
  > 3544fa0 Update net-certmanager nightly (# 14883)
  > 7da1f45 Update net-gateway-api nightly (# 14882)
  > 48ca4cd Update net-istio nightly (# 14886)
  > 280c922 Update community files (# 14880)
  > 2125772 Updating DEVELOPMENT.md to remove reference to allowedFields or preserveUnknownFields (# 14865)
  > 7882fa9 upgrade to latest dependencies (# 14878)
  > 774cca0 Update net-istio nightly (# 14873)
  > 87e6199 feat: Add ability to configure multiple wilcard domains (# 14543)
  > 52b4b09 Update net-istio nightly (# 14870)
  > 1760f08 Don't drop traffic when upgrading a deployment fails (# 14795)
  > 05d1927 upgrade to latest dependencies (# 14868)
  > 24bc968 Update net-contour nightly (# 14861)
  > f4bbb19 Update net-kourier nightly (# 14857)
  > 772dd36 Update net-gateway-api nightly (# 14856)
  > b44bea9 Update net-contour nightly (# 14851)
  > 0c2d16b Update net-certmanager nightly (# 14858)
  > 19f9f30 Update net-istio nightly (# 14859)
  > a303128 Update community files (# 14854)
  > 71a54e4 Update net-kourier nightly (# 14850)
  > 391c796 Update net-istio nightly (# 14852)
  > c58131a Update net-kourier nightly (# 14844)
  > 3a39279 upgrade to latest dependencies (# 14849)
  > 48a3a52 Update net-certmanager nightly (# 14845)
  > 61f5cad Update net-istio nightly (# 14843)
  > f4ef3c8 Update net-gateway-api nightly (# 14842)
  > 6d89c70 upgrade to latest dependencies (# 14841)
  > 51b0337 Capitalize the first letter of config() (# 14821)
  > 0bae8a2 implement cluster-local-domain-tls in serving (# 14610)
  > 5712497 Update net-kourier nightly (# 14832)
  > 87cef50 Update net-istio nightly (# 14831)
  > 9130d61 Update net-contour nightly (# 14834)
  > fb69b9a Update net-gateway-api nightly (# 14833)
  > 16c823b Update net-certmanager nightly (# 14830)

Signed-off-by: Knative Automation <automation@knative.team>
Knative Automation authored 2024-02-15 08:56:24 -05:00, committed by GitHub
parent 16ae868cac
commit dab9d4aa95
86 changed files with 1596 additions and 456 deletions

go.mod

@@ -12,7 +12,7 @@ require (
 	github.com/spf13/pflag v1.0.5
 	github.com/spf13/viper v1.16.0
 	golang.org/x/mod v0.14.0
-	golang.org/x/term v0.16.0
+	golang.org/x/term v0.17.0
 	gotest.tools/v3 v3.3.0
 	k8s.io/api v0.28.5
 	k8s.io/apiextensions-apiserver v0.28.5
@@ -20,12 +20,12 @@ require (
 	k8s.io/cli-runtime v0.28.5
 	k8s.io/client-go v0.28.5
 	k8s.io/code-generator v0.28.5
-	knative.dev/client-pkg v0.0.0-20240124090003-67fca0ca8681
-	knative.dev/eventing v0.40.0
-	knative.dev/hack v0.0.0-20240123162936-f3f03ac0ab1a
-	knative.dev/networking v0.0.0-20240116081125-ce0738abf051
-	knative.dev/pkg v0.0.0-20240116073220-b488e7be5902
-	knative.dev/serving v0.40.0
+	knative.dev/client-pkg v0.0.0-20240214132329-2c46c4434d4e
+	knative.dev/eventing v0.40.1-0.20240214130959-97e91c540b0c
+	knative.dev/hack v0.0.0-20240214131420-999d7e6b8495
+	knative.dev/networking v0.0.0-20240214132427-22eb3d0fda5c
+	knative.dev/pkg v0.0.0-20240214130941-b8f9b2204947
+	knative.dev/serving v0.40.1-0.20240215124546-096adcc220b2
 	sigs.k8s.io/yaml v1.4.0
 )
@@ -45,12 +45,12 @@ require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/emicklei/go-restful/v3 v3.9.0 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
-	github.com/evanphx/json-patch/v5 v5.8.0 // indirect
+	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/go-errors/errors v1.4.2 // indirect
 	github.com/go-kit/log v0.2.1 // indirect
 	github.com/go-logfmt/logfmt v0.5.1 // indirect
-	github.com/go-logr/logr v1.3.0 // indirect
+	github.com/go-logr/logr v1.4.1 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/swag v0.22.3 // indirect
@@ -62,7 +62,7 @@ require (
 	github.com/google/go-containerregistry v0.13.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
-	github.com/google/uuid v1.5.0 // indirect
+	github.com/google/uuid v1.6.0 // indirect
 	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
 	github.com/imdario/mergo v0.3.13 // indirect
@@ -102,20 +102,20 @@ require (
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.26.0 // indirect
-	golang.org/x/net v0.20.0 // indirect
-	golang.org/x/oauth2 v0.16.0 // indirect
+	golang.org/x/net v0.21.0 // indirect
+	golang.org/x/oauth2 v0.17.0 // indirect
 	golang.org/x/sync v0.6.0 // indirect
-	golang.org/x/sys v0.16.0 // indirect
+	golang.org/x/sys v0.17.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
 	golang.org/x/tools v0.17.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
-	google.golang.org/api v0.155.0 // indirect
+	google.golang.org/api v0.163.0 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect
-	google.golang.org/grpc v1.60.1 // indirect
+	google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe // indirect
+	google.golang.org/grpc v1.61.0 // indirect
 	google.golang.org/protobuf v1.32.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
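
One way to confirm which of the bumped module versions actually end up in a built binary is the standard library's runtime/debug package; a short illustrative check (the module paths listed are taken from the bumps above):

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info embedded")
		return
	}
	for _, dep := range info.Deps {
		switch dep.Path {
		case "google.golang.org/grpc", "github.com/google/uuid", "knative.dev/pkg":
			fmt.Println(dep.Path, dep.Version)
		}
	}
}
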

go.sum

@@ -93,8 +93,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
 github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro=
-github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
+github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
 github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
 github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
 github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
@@ -120,8 +120,8 @@ github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
-github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
 github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
 github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
@@ -213,8 +213,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
-github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
@@ -434,8 +434,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
-golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -512,8 +512,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
-golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -525,8 +525,8 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
-golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
+golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
+golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -591,13 +591,13 @@ golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
-golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
-golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
+golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -693,8 +693,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513
 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
 google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
 google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.155.0 h1:vBmGhCYs0djJttDNynWo44zosHlPvHmA0XiN2zP2DtA=
-google.golang.org/api v0.155.0/go.mod h1:GI5qK5f40kCpHfPn6+YzGAByIKWv8ujFnmoWm7Igduk=
+google.golang.org/api v0.163.0 h1:4BBDpPaSH+H28NhnX+WwjXxbRLQ7TWuEKp4BQyEjxvk=
+google.golang.org/api v0.163.0/go.mod h1:6SulDkfoBIg4NFmCuZ39XeeAgSHCPecfSUuDyYlAHs0=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -742,12 +742,12 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg=
-google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic=
-google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3 h1:EWIeHfGuUf00zrVZGEgYFxok7plSAXBGcH7NNdMAWvA=
-google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3/go.mod h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA=
+google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg=
+google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k=
+google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 h1:KHBtwE+eQc3+NxpjmRFlQ3pJQ2FNnhhgB9xOV8kyBuU=
+google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe h1:bQnxqljG/wqi4NTXu2+DJ3n7APcEA882QZ1JvhQAq9o=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -764,8 +764,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
 google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
-google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
+google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0=
+google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -836,18 +836,18 @@ k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5Ohx
 k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
 k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
 k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-knative.dev/client-pkg v0.0.0-20240124090003-67fca0ca8681 h1:vlXHcYG/rayUB1MGUAnpu5eLzMKuqEQS2Q5m7Z9WyKs=
-knative.dev/client-pkg v0.0.0-20240124090003-67fca0ca8681/go.mod h1:3+IED1Q8eP1BwUq0cTxyznhTVrROzkGZcJ68X6KS8NA=
-knative.dev/eventing v0.40.0 h1:zvMeKGBdQ5Us94Hdy7jmxpzyc1fdFnO4SS21+6nDSiU=
-knative.dev/eventing v0.40.0/go.mod h1:+yUUIyvX9fn9bCSH3012kc8rG7YBbjvvxwy1Kr53dRc=
-knative.dev/hack v0.0.0-20240123162936-f3f03ac0ab1a h1:+4Mdk0Lt3LGAVEI6vYyhfjBlVBx7sqS4wECtTkuXoSY=
-knative.dev/hack v0.0.0-20240123162936-f3f03ac0ab1a/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
-knative.dev/networking v0.0.0-20240116081125-ce0738abf051 h1:bTRVfwmfu4/7U1YBcgBl1VANAwmal6zkoAI9p7PQwDY=
-knative.dev/networking v0.0.0-20240116081125-ce0738abf051/go.mod h1:rdzGL1OVP6VItEiJUN/FTCrDnIzkA6ykhSvaK+0Ne6o=
-knative.dev/pkg v0.0.0-20240116073220-b488e7be5902 h1:H6+JJN23fhwYWCHY1339sY6uhIyoUwDy1a8dN233fdk=
-knative.dev/pkg v0.0.0-20240116073220-b488e7be5902/go.mod h1:NYk8mMYoLkO7CQWnNkti4YGGnvLxN6MIDbUvtgeo0C0=
-knative.dev/serving v0.40.0 h1:feTBe+6J/woNbPu8pv5AEfaumOZSrSBuIubkPTjxCzo=
-knative.dev/serving v0.40.0/go.mod h1:Ory3XczDB8b1lH757CSdeDeouY3LHzSamX8IjmStuoU=
+knative.dev/client-pkg v0.0.0-20240214132329-2c46c4434d4e h1:kbQ6OAF+MnxJgMqy340BUEab8bjTNuCSp0QVhqfKQjI=
+knative.dev/client-pkg v0.0.0-20240214132329-2c46c4434d4e/go.mod h1:3+IED1Q8eP1BwUq0cTxyznhTVrROzkGZcJ68X6KS8NA=
+knative.dev/eventing v0.40.1-0.20240214130959-97e91c540b0c h1:eWYpGLsxhQSHTTn4IuohxA7VKBLG8AgbyAD5CEt4AV0=
+knative.dev/eventing v0.40.1-0.20240214130959-97e91c540b0c/go.mod h1:P8wzbDpfUlEqpCVI4gBhaZOoDlGJuDPiK0cpMVy3DnY=
+knative.dev/hack v0.0.0-20240214131420-999d7e6b8495 h1:Eh+3WsTecxutSwtpzU4Py1dNCcToxgqRDLSLjBKfdEE=
+knative.dev/hack v0.0.0-20240214131420-999d7e6b8495/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
+knative.dev/networking v0.0.0-20240214132427-22eb3d0fda5c h1:nfA2dENoBNgJS1y0ftH9tMCVD9ZVhskGiGVuiMnGNdA=
+knative.dev/networking v0.0.0-20240214132427-22eb3d0fda5c/go.mod h1:2BcnPqes9Ov7NX/QzIJfIILtcIK8XN7ruR7NsrMJa7k=
+knative.dev/pkg v0.0.0-20240214130941-b8f9b2204947 h1:ktYSfwHummr0ZNCOk5JfJY0RPl3dSOmJAPwVT+SQfjA=
+knative.dev/pkg v0.0.0-20240214130941-b8f9b2204947/go.mod h1:jmjP5/Vy9LamN82J3DfoJpVAw5JHpNxjUF0f/QCDX6E=
+knative.dev/serving v0.40.1-0.20240215124546-096adcc220b2 h1:g4TIAY6Jas3eUlR4J20nzBqHcyDFTWz/uwsQZ5gHZ4w=
+knative.dev/serving v0.40.1-0.20240215124546-096adcc220b2/go.mod h1:HK9wXTSW1kIGWvxHDMuKawIsBzCDYgZK35DN2N32Wng=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=


@@ -167,6 +167,19 @@ func Marshal(v any) ([]byte, error) {
 	return buf, nil
 }
+func MarshalEscaped(v any, escape bool) ([]byte, error) {
+	e := newEncodeState()
+	defer encodeStatePool.Put(e)
+	err := e.marshal(v, encOpts{escapeHTML: escape})
+	if err != nil {
+		return nil, err
+	}
+	buf := append([]byte(nil), e.Bytes()...)
+	return buf, nil
+}
 // MarshalIndent is like Marshal but applies Indent to format the output.
 // Each JSON element in the output will begin on a new line beginning with prefix
 // followed by one or more copies of indent according to the indentation nesting.
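
The vendored MarshalEscaped helper above lets json-patch decide per call whether to HTML-escape output. The behaviour it toggles is the same one the standard library exposes via Encoder.SetEscapeHTML; a stdlib-only sketch of the difference:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	v := map[string]string{"html": "<a href='x'>&</a>"}

	var escaped, raw bytes.Buffer

	enc := json.NewEncoder(&escaped) // default: HTML escaping on
	_ = enc.Encode(v)

	enc = json.NewEncoder(&raw)
	enc.SetEscapeHTML(false) // what escape=false corresponds to above
	_ = enc.Encode(v)

	fmt.Print(escaped.String()) // {"html":"\u003ca href='x'\u003e\u0026\u003c/a\u003e"}
	fmt.Print(raw.String())     // {"html":"<a href='x'>&</a>"}
}
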


@@ -6,7 +6,7 @@ package json
 import (
 	"bytes"
-	"errors"
+	"encoding/json"
 	"io"
 )
@@ -259,27 +259,7 @@ func (enc *Encoder) SetEscapeHTML(on bool) {
 // RawMessage is a raw encoded JSON value.
 // It implements Marshaler and Unmarshaler and can
 // be used to delay JSON decoding or precompute a JSON encoding.
-type RawMessage []byte
+type RawMessage = json.RawMessage
-// MarshalJSON returns m as the JSON encoding of m.
-func (m RawMessage) MarshalJSON() ([]byte, error) {
-	if m == nil {
-		return []byte("null"), nil
-	}
-	return m, nil
-}
-// UnmarshalJSON sets *m to a copy of data.
-func (m *RawMessage) UnmarshalJSON(data []byte) error {
-	if m == nil {
-		return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
-	}
-	*m = append((*m)[0:0], data...)
-	return nil
-}
-var _ Marshaler = (*RawMessage)(nil)
-var _ Unmarshaler = (*RawMessage)(nil)
 // A Token holds a value of one of these types:
 //
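
Turning RawMessage into an alias for encoding/json.RawMessage makes values of the two types interchangeable. As a reminder of what RawMessage is for, a stdlib-only sketch of deferred decoding (the field names are invented for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Keep "spec" as raw bytes and decode it later, once the kind is known.
	var envelope struct {
		Kind string          `json:"kind"`
		Spec json.RawMessage `json:"spec"`
	}
	data := []byte(`{"kind":"Trigger","spec":{"broker":"default"}}`)
	if err := json.Unmarshal(data, &envelope); err != nil {
		panic(err)
	}

	var spec struct {
		Broker string `json:"broker"`
	}
	_ = json.Unmarshal(envelope.Spec, &spec)
	fmt.Println(envelope.Kind, spec.Broker) // Trigger default
}
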


@@ -10,26 +10,26 @@ import (
 	"github.com/evanphx/json-patch/v5/internal/json"
 )
-func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
-	curDoc, err := cur.intoDoc()
+func merge(cur, patch *lazyNode, mergeMerge bool, options *ApplyOptions) *lazyNode {
+	curDoc, err := cur.intoDoc(options)
 	if err != nil {
-		pruneNulls(patch)
+		pruneNulls(patch, options)
 		return patch
 	}
-	patchDoc, err := patch.intoDoc()
+	patchDoc, err := patch.intoDoc(options)
 	if err != nil {
 		return patch
 	}
-	mergeDocs(curDoc, patchDoc, mergeMerge)
+	mergeDocs(curDoc, patchDoc, mergeMerge, options)
 	return cur
 }
-func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
+func mergeDocs(doc, patch *partialDoc, mergeMerge bool, options *ApplyOptions) {
 	for k, v := range patch.obj {
 		if v == nil {
 			if mergeMerge {
@@ -45,55 +45,55 @@ func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
 				}
 				doc.obj[k] = nil
 			} else {
-				_ = doc.remove(k, &ApplyOptions{})
+				_ = doc.remove(k, options)
 			}
 		} else {
 			cur, ok := doc.obj[k]
 			if !ok || cur == nil {
 				if !mergeMerge {
-					pruneNulls(v)
+					pruneNulls(v, options)
 				}
-				_ = doc.set(k, v, &ApplyOptions{})
+				_ = doc.set(k, v, options)
 			} else {
-				_ = doc.set(k, merge(cur, v, mergeMerge), &ApplyOptions{})
+				_ = doc.set(k, merge(cur, v, mergeMerge, options), options)
 			}
 		}
 	}
 }
-func pruneNulls(n *lazyNode) {
-	sub, err := n.intoDoc()
+func pruneNulls(n *lazyNode, options *ApplyOptions) {
+	sub, err := n.intoDoc(options)
 	if err == nil {
-		pruneDocNulls(sub)
+		pruneDocNulls(sub, options)
 	} else {
 		ary, err := n.intoAry()
 		if err == nil {
-			pruneAryNulls(ary)
+			pruneAryNulls(ary, options)
 		}
 	}
 }
-func pruneDocNulls(doc *partialDoc) *partialDoc {
+func pruneDocNulls(doc *partialDoc, options *ApplyOptions) *partialDoc {
 	for k, v := range doc.obj {
 		if v == nil {
 			_ = doc.remove(k, &ApplyOptions{})
 		} else {
-			pruneNulls(v)
+			pruneNulls(v, options)
 		}
 	}
 	return doc
 }
-func pruneAryNulls(ary *partialArray) *partialArray {
+func pruneAryNulls(ary *partialArray, options *ApplyOptions) *partialArray {
 	newAry := []*lazyNode{}
 	for _, v := range ary.nodes {
 		if v != nil {
-			pruneNulls(v)
+			pruneNulls(v, options)
 		}
 		newAry = append(newAry, v)
 	}
@@ -128,11 +128,17 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 		return nil, errBadJSONPatch
 	}
-	doc := &partialDoc{}
+	options := NewApplyOptions()
+	doc := &partialDoc{
+		opts: options,
+	}
 	docErr := doc.UnmarshalJSON(docData)
-	patch := &partialDoc{}
+	patch := &partialDoc{
+		opts: options,
+	}
 	patchErr := patch.UnmarshalJSON(patchData)
@@ -158,7 +164,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 		if mergeMerge {
 			doc = patch
 		} else {
-			doc = pruneDocNulls(patch)
+			doc = pruneDocNulls(patch, options)
 		}
 	} else {
 		patchAry := &partialArray{}
@@ -172,7 +178,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 			return nil, errBadJSONPatch
 		}
-		pruneAryNulls(patchAry)
+		pruneAryNulls(patchAry, options)
 		out, patchErr := json.Marshal(patchAry.nodes)
@@ -183,7 +189,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
 			return out, nil
 		}
 	} else {
-		mergeDocs(doc, patch, mergeMerge)
+		mergeDocs(doc, patch, mergeMerge, options)
 	}
 	return json.Marshal(doc)
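
The merge.go changes above thread a single *ApplyOptions value through the merge-patch path instead of allocating fresh options at each call site. The exported entry point is unchanged from the caller's side; a minimal usage sketch against github.com/evanphx/json-patch/v5 (document contents are invented for illustration):

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{"name":"svc","labels":{"team":"a","tier":"web"}}`)
	patch := []byte(`{"labels":{"team":"b","tier":null}}`)

	// RFC 7386 merge patch: "team" is replaced, "tier" is removed (null prunes keys).
	merged, err := jsonpatch.MergePatch(doc, patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged))
}
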


@@ -38,6 +38,8 @@ var (
 	ErrInvalid = errors.New("invalid state detected")
 	ErrInvalidIndex = errors.New("invalid index referenced")
+	ErrExpectedObject = errors.New("invalid value, expected object")
 	rawJSONArray = []byte("[]")
 	rawJSONObject = []byte("{}")
 	rawJSONNull = []byte("null")
@@ -60,6 +62,8 @@ type partialDoc struct {
 	self *lazyNode
 	keys []string
 	obj map[string]*lazyNode
+	opts *ApplyOptions
 }
 type partialArray struct {
@@ -90,6 +94,8 @@ type ApplyOptions struct {
 	// EnsurePathExistsOnAdd instructs json-patch to recursively create the missing parts of path on "add" operation.
 	// Default to false.
 	EnsurePathExistsOnAdd bool
+	EscapeHTML bool
 }
 // NewApplyOptions creates a default set of options for calls to ApplyWithOptions.
@@ -99,6 +105,7 @@ func NewApplyOptions() *ApplyOptions {
 		AccumulatedCopySizeLimit: AccumulatedCopySizeLimit,
 		AllowMissingPathOnRemove: false,
 		EnsurePathExistsOnAdd: false,
+		EscapeHTML: true,
 	}
 }
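
The new EscapeHTML field defaults to true in NewApplyOptions, so existing callers keep the old escaping behaviour. A hedged sketch of how an RFC 6902 patch might be applied with explicit options, assuming the v5.9.0 API shown in this diff (the patch content is invented, and whether EscapeHTML affects a given call path depends on code not shown here):

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{"link":"old"}`)
	ops := []byte(`[{"op":"replace","path":"/link","value":"<a href='x'>&</a>"}]`)

	patch, err := jsonpatch.DecodePatch(ops)
	if err != nil {
		panic(err)
	}

	opts := jsonpatch.NewApplyOptions()
	opts.EscapeHTML = false // keep <, > and & literal in the patched output

	out, err := patch.ApplyWithOptions(doc, opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
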
@@ -134,16 +141,28 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error {
 }
 func (n *partialDoc) TrustMarshalJSON(buf *bytes.Buffer) error {
+	if n.obj == nil {
+		return ErrExpectedObject
+	}
 	if err := buf.WriteByte('{'); err != nil {
 		return err
 	}
+	escaped := true
+	// n.opts should always be set, but in case we missed a case,
+	// guard.
+	if n.opts != nil {
+		escaped = n.opts.EscapeHTML
+	}
 	for i, k := range n.keys {
 		if i > 0 {
 			if err := buf.WriteByte(','); err != nil {
 				return err
 			}
 		}
-		key, err := json.Marshal(k)
+		key, err := json.MarshalEscaped(k, escaped)
 		if err != nil {
 			return err
 		}
@@ -153,7 +172,7 @@ func (n *partialDoc) TrustMarshalJSON(buf *bytes.Buffer) error {
 		if err := buf.WriteByte(':'); err != nil {
 			return err
 		}
-		value, err := json.Marshal(n.obj[k])
+		value, err := json.MarshalEscaped(n.obj[k], escaped)
 		if err != nil {
 			return err
 		}
@@ -194,11 +213,11 @@ func (n *partialArray) RedirectMarshalJSON() (interface{}, error) {
 	return n.nodes, nil
 }
-func deepCopy(src *lazyNode) (*lazyNode, int, error) {
+func deepCopy(src *lazyNode, options *ApplyOptions) (*lazyNode, int, error) {
 	if src == nil {
 		return nil, 0, nil
 	}
-	a, err := json.Marshal(src)
+	a, err := json.MarshalEscaped(src, options.EscapeHTML)
 	if err != nil {
 		return nil, 0, err
 	}
@@ -216,7 +235,7 @@ func (n *lazyNode) nextByte() byte {
 	return s[0]
 }
-func (n *lazyNode) intoDoc() (*partialDoc, error) {
+func (n *lazyNode) intoDoc(options *ApplyOptions) (*partialDoc, error) {
 	if n.which == eDoc {
 		return n.doc, nil
 	}
@@ -235,6 +254,7 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) {
 		return nil, ErrInvalid
 	}
+	n.doc.opts = options
 	if err != nil {
 		return nil, err
 	}
@@ -545,7 +565,7 @@ func findObject(pd *container, path string, options *ApplyOptions) (container, s
 			return nil, ""
 		}
 	} else {
-		doc, err = next.intoDoc()
+		doc, err = next.intoDoc(options)
 		if err != nil {
 			return nil, ""
@@ -557,6 +577,10 @@
 }
 func (d *partialDoc) set(key string, val *lazyNode, options *ApplyOptions) error {
+	if d.obj == nil {
+		return ErrExpectedObject
+	}
 	found := false
 	for _, k := range d.keys {
 		if k == key {
@@ -579,6 +603,11 @@ func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) {
 	if key == "" {
 		return d.self, nil
 	}
+	if d.obj == nil {
+		return nil, ErrExpectedObject
+	}
 	v, ok := d.obj[key]
 	if !ok {
 		return v, errors.Wrapf(ErrMissing, "unable to get nonexistent key: %s", key)
@@ -587,6 +616,10 @@ func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) {
 }
 func (d *partialDoc) remove(key string, options *ApplyOptions) error {
+	if d.obj == nil {
+		return ErrExpectedObject
+	}
 	_, ok := d.obj[key]
 	if !ok {
 		if options.AllowMissingPathOnRemove {
@@ -750,6 +783,7 @@ func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error {
 	} else {
 		pd = &partialDoc{
 			self: val,
+			opts: options,
 		}
 	}
@@ -855,7 +889,7 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
 			newNode := newLazyNode(newRawMessage(rawJSONObject))
 			doc.add(part, newNode, options)
-			doc, err = newNode.intoDoc()
+			doc, err = newNode.intoDoc(options)
 			if err != nil {
 				return err
 			}
@@ -868,7 +902,7 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
 			return err
 		}
 	} else {
doc, err = target.intoDoc() doc, err = target.intoDoc(options)
if err != nil { if err != nil {
return err return err
@ -954,6 +988,8 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro
if !val.tryAry() { if !val.tryAry() {
return errors.Wrapf(err, "replace operation value must be object or array") return errors.Wrapf(err, "replace operation value must be object or array")
} }
} else {
val.doc.opts = options
} }
} }
@ -1115,7 +1151,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, op
return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
} }
valCopy, sz, err := deepCopy(val) valCopy, sz, err := deepCopy(val, options)
if err != nil { if err != nil {
return errors.Wrapf(err, "error while performing deep copy") return errors.Wrapf(err, "error while performing deep copy")
} }
@ -1202,6 +1238,7 @@ func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyO
} else { } else {
pd = &partialDoc{ pd = &partialDoc{
self: self, self: self,
opts: options,
} }
} }
@ -1238,11 +1275,18 @@ func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyO
} }
} }
if indent != "" { data, err := json.MarshalEscaped(pd, options.EscapeHTML)
return json.MarshalIndent(pd, "", indent) if err != nil {
return nil, err
} }
return json.Marshal(pd) if indent == "" {
return data, nil
}
var buf bytes.Buffer
json.Indent(&buf, data, "", indent)
return buf.Bytes(), nil
} }
// From http://tools.ietf.org/html/rfc6901#section-4 : // From http://tools.ietf.org/html/rfc6901#section-4 :


@ -91,11 +91,12 @@ logr design but also left out some parts and changed others:
| Adding a name to a logger | `WithName` | no API | | Adding a name to a logger | `WithName` | no API |
| Modify verbosity of log entries in a call chain | `V` | no API | | Modify verbosity of log entries in a call chain | `V` | no API |
| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | | Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
The high-level slog API is explicitly meant to be one of many different APIs The high-level slog API is explicitly meant to be one of many different APIs
that can be layered on top of a shared `slog.Handler`. logr is one such that can be layered on top of a shared `slog.Handler`. logr is one such
alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr) alternative API, with [interoperability](#slog-interoperability) provided by
package. some conversion functions.
### Inspiration ### Inspiration
@ -145,24 +146,24 @@ There are implementations for the following logging libraries:
## slog interoperability ## slog interoperability
Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`. `ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
slog API. `slogr` itself leaves that to the caller. slog API.
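A minimal sketch of both conversion directions; `FromSlogHandler` and `ToSlogHandler` are the functions added in this release, while the JSON handler wiring is just an assumed example:

    package main

    import (
        "log/slog"
        "os"

        "github.com/go-logr/logr"
    )

    func main() {
        // slog.Handler -> logr.Logger
        handler := slog.NewJSONHandler(os.Stderr, nil)
        logger := logr.FromSlogHandler(handler)
        logger.Info("hello from logr", "answer", 42)

        // logr.Logger -> slog.Handler, wrapped again in the high-level slog API
        slogger := slog.New(logr.ToSlogHandler(logger))
        slogger.Info("hello from slog", "answer", 42)
    }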
## Using a `logr.Sink` as backend for slog ### Using a `logr.LogSink` as backend for slog
Ideally, a logr sink implementation should support both logr and slog by Ideally, a logr sink implementation should support both logr and slog by
implementing both the normal logr interface(s) and `slogr.SlogSink`. Because implementing both the normal logr interface(s) and `SlogSink`. Because
of a conflict in the parameters of the common `Enabled` method, it is [not of a conflict in the parameters of the common `Enabled` method, it is [not
possible to implement both slog.Handler and logr.Sink in the same possible to implement both slog.Handler and logr.Sink in the same
type](https://github.com/golang/go/issues/59110). type](https://github.com/golang/go/issues/59110).
If both are supported, log calls can go from the high-level APIs to the backend If both are supported, log calls can go from the high-level APIs to the backend
without the need to convert parameters. `NewLogr` and `NewSlogHandler` can without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
convert back and forth without adding additional wrappers, with one exception: convert back and forth without adding additional wrappers, with one exception:
when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future `ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
log calls. log calls.
Such an implementation should also support values that implement specific Such an implementation should also support values that implement specific
@ -187,13 +188,13 @@ Not supporting slog has several drawbacks:
These drawbacks are severe enough that applications using a mixture of slog and These drawbacks are severe enough that applications using a mixture of slog and
logr should switch to a different backend. logr should switch to a different backend.
## Using a `slog.Handler` as backend for logr ### Using a `slog.Handler` as backend for logr
Using a plain `slog.Handler` without support for logr works better than the Using a plain `slog.Handler` without support for logr works better than the
other direction: other direction:
- All logr verbosity levels can be mapped 1:1 to their corresponding slog level - All logr verbosity levels can be mapped 1:1 to their corresponding slog level
by negating them. by negating them.
- Stack unwinding is done by the `slogr.SlogSink` and the resulting program - Stack unwinding is done by the `SlogSink` and the resulting program
counter is passed to the `slog.Handler`. counter is passed to the `slog.Handler`.
- Names added via `Logger.WithName` are gathered and recorded in an additional - Names added via `Logger.WithName` are gathered and recorded in an additional
attribute with `logger` as key and the names separated by slash as value. attribute with `logger` as key and the names separated by slash as value.
@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
with logr implementations without slog support is not important, then with logr implementations without slog support is not important, then
`slog.Valuer` is sufficient. `slog.Valuer` is sufficient.
## Context support for slog ### Context support for slog
Storing a logger in a `context.Context` is not supported by

(removed:)
slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
to fill this gap:

    func HandlerFromContext(ctx context.Context) slog.Handler {
        logger, err := logr.FromContext(ctx)
        if err == nil {
            return slogr.NewSlogHandler(logger)
        }
        return slog.Default().Handler()
    }

    func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
        return logr.NewContext(ctx, slogr.NewLogr(handler))
    }

The downside is that storing and retrieving a `slog.Handler` needs more
allocations compared to using a `logr.Logger`. Therefore the recommendation is
to use the `logr.Logger` API in code which uses contextual logging.

(added:)
slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
used to fill this gap. They store and retrieve a `slog.Logger` pointer
under the same context key that is also used by `NewContext` and
`FromContext` for `logr.Logger` value.

When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
automatically convert the `slog.Logger` to a
`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.

With this approach, binaries which use either slog or logr are as efficient as
possible with no unnecessary allocations. This is also why the API stores a
`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
on retrieval would need to allocate one.

The downside is that switching back and forth needs more allocations. Because
logr is the API that is already in use by different packages, in particular
Kubernetes, the recommendation is to use the `logr.Logger` API in code which
uses contextual logging.
An alternative to adding values to a logger and storing that logger in the
context is to store the values in the context and to configure a logging
backend to extract those values when emitting log entries. This only works when
log calls are passed the context, which is not supported by the logr API.
With the slog API, it is possible, but not
required. https://github.com/veqryn/slog-context is a package for slog which
provides additional support code for this approach. It also contains wrappers
for the context functions in logr, so developers who prefer to not use the logr
APIs directly can use those instead and the resulting code will still be
interoperable with logr.
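A small sketch of the context round-trip described above, using only the functions added in this release plus an assumed text handler:

    package main

    import (
        "context"
        "log/slog"
        "os"

        "github.com/go-logr/logr"
    )

    func main() {
        sl := slog.New(slog.NewTextHandler(os.Stderr, nil))
        ctx := logr.NewContextWithSlogLogger(context.Background(), sl)

        // A logr-based callee retrieves it as a logr.Logger ...
        if l, err := logr.FromContext(ctx); err == nil {
            l.Info("retrieved as logr.Logger")
        }

        // ... and a slog-based callee gets the original *slog.Logger back.
        if got := logr.FromContextAsSlogLogger(ctx); got != nil {
            got.Info("retrieved as *slog.Logger")
        }
    }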
## FAQ ## FAQ

vendor/github.com/go-logr/logr/context.go generated vendored Normal file

@ -0,0 +1,33 @@
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
// the value is always a Logger value. With Go >= 1.21, the value can be a
// Logger value or a slog.Logger pointer.
type contextKey struct{}
// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}
func (notFoundError) Error() string {
return "no logr.Logger was present"
}
func (notFoundError) IsNotFound() bool {
return true
}

vendor/github.com/go-logr/logr/context_noslog.go generated vendored Normal file

@ -0,0 +1,49 @@
//go:build !go1.21
// +build !go1.21
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
)
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v, nil
}
return Logger{}, notFoundError{}
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}

vendor/github.com/go-logr/logr/context_slog.go generated vendored Normal file

@ -0,0 +1,83 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"fmt"
"log/slog"
)
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
v := ctx.Value(contextKey{})
if v == nil {
return Logger{}, notFoundError{}
}
switch v := v.(type) {
case Logger:
return v, nil
case *slog.Logger:
return FromSlogHandler(v.Handler()), nil
default:
// Not reached.
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
}
}
// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
v := ctx.Value(contextKey{})
if v == nil {
return nil
}
switch v := v.(type) {
case Logger:
return slog.New(ToSlogHandler(v))
case *slog.Logger:
return v
default:
// Not reached.
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
}
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if logger, err := FromContext(ctx); err == nil {
return logger
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
// provided slog.Logger.
func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}


@ -207,10 +207,6 @@ limitations under the License.
// those. // those.
package logr package logr
import (
"context"
)
// New returns a new Logger instance. This is primarily used by libraries // New returns a new Logger instance. This is primarily used by libraries
// implementing LogSink, rather than end users. Passing a nil sink will create // implementing LogSink, rather than end users. Passing a nil sink will create
// a Logger which discards all log lines. // a Logger which discards all log lines.
@ -410,45 +406,6 @@ func (l Logger) IsZero() bool {
return l.sink == nil return l.sink == nil
} }
// contextKey is how we find Loggers in a context.Context.
type contextKey struct{}
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v, nil
}
return Logger{}, notFoundError{}
}
// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}
func (notFoundError) Error() string {
return "no logr.Logger was present"
}
func (notFoundError) IsNotFound() bool {
return true
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
// RuntimeInfo holds information that the logr "core" library knows which // RuntimeInfo holds information that the logr "core" library knows which
// LogSinks might want to know. // LogSinks might want to know.
type RuntimeInfo struct { type RuntimeInfo struct {

vendor/github.com/go-logr/logr/sloghandler.go generated vendored Normal file

@ -0,0 +1,192 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
)
type slogHandler struct {
// May be nil, in which case all logs get discarded.
sink LogSink
// Non-nil if sink is non-nil and implements SlogSink.
slogSink SlogSink
// groupPrefix collects values from WithGroup calls. It gets added as
// prefix to value keys when handling a log record.
groupPrefix string
// levelBias can be set when constructing the handler to influence the
// slog.Level of log records. A positive levelBias reduces the
// slog.Level value. slog has no API to influence this value after the
// handler got created, so it can only be set indirectly through
// Logger.V.
levelBias slog.Level
}
var _ slog.Handler = &slogHandler{}
// groupSeparator is used to concatenate WithGroup names and attribute keys.
const groupSeparator = "."
// GetLevel is used for black box unit testing.
func (l *slogHandler) GetLevel() slog.Level {
return l.levelBias
}
func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
}
func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
if l.slogSink != nil {
// Only adjust verbosity level of log entries < slog.LevelError.
if record.Level < slog.LevelError {
record.Level -= l.levelBias
}
return l.slogSink.Handle(ctx, record)
}
// No need to check for nil sink here because Handle will only be called
// when Enabled returned true.
kvList := make([]any, 0, 2*record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
kvList = attrToKVs(attr, l.groupPrefix, kvList)
return true
})
if record.Level >= slog.LevelError {
l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
} else {
level := l.levelFromSlog(record.Level)
l.sinkWithCallDepth().Info(level, record.Message, kvList...)
}
return nil
}
// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
// are called by Handle, code in slog gets skipped.
//
// This offset currently (Go 1.21.0) works for calls through
// slog.New(ToSlogHandler(...)). There's no guarantee that the call
// chain won't change. Wrapping the handler will also break unwinding. It's
// still better than not adjusting at all....
//
// This cannot be done when constructing the handler because FromSlogHandler needs
// access to the original sink without this adjustment. A second copy would
// work, but then WithAttrs would have to be called for both of them.
func (l *slogHandler) sinkWithCallDepth() LogSink {
if sink, ok := l.sink.(CallDepthLogSink); ok {
return sink.WithCallDepth(2)
}
return l.sink
}
func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
if l.sink == nil || len(attrs) == 0 {
return l
}
clone := *l
if l.slogSink != nil {
clone.slogSink = l.slogSink.WithAttrs(attrs)
clone.sink = clone.slogSink
} else {
kvList := make([]any, 0, 2*len(attrs))
for _, attr := range attrs {
kvList = attrToKVs(attr, l.groupPrefix, kvList)
}
clone.sink = l.sink.WithValues(kvList...)
}
return &clone
}
func (l *slogHandler) WithGroup(name string) slog.Handler {
if l.sink == nil {
return l
}
if name == "" {
// slog says to inline empty groups
return l
}
clone := *l
if l.slogSink != nil {
clone.slogSink = l.slogSink.WithGroup(name)
clone.sink = clone.slogSink
} else {
clone.groupPrefix = addPrefix(clone.groupPrefix, name)
}
return &clone
}
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
// and other details of slog.
func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
attrVal := attr.Value.Resolve()
if attrVal.Kind() == slog.KindGroup {
groupVal := attrVal.Group()
grpKVs := make([]any, 0, 2*len(groupVal))
prefix := groupPrefix
if attr.Key != "" {
prefix = addPrefix(groupPrefix, attr.Key)
}
for _, attr := range groupVal {
grpKVs = attrToKVs(attr, prefix, grpKVs)
}
kvList = append(kvList, grpKVs...)
} else if attr.Key != "" {
kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
}
return kvList
}
func addPrefix(prefix, name string) string {
if prefix == "" {
return name
}
if name == "" {
return prefix
}
return prefix + groupSeparator + name
}
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
// It ensures that the result is >= 0. This is necessary because the result is
// passed to a LogSink and that API did not historically document whether
// levels could be negative or what that meant.
//
// Some example usage:
//
// logrV0 := getMyLogger()
// logrV2 := logrV0.V(2)
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
func (l *slogHandler) levelFromSlog(level slog.Level) int {
result := -level
result += l.levelBias // in case the original Logger had a V level
if result < 0 {
result = 0 // because LogSink doesn't expect negative V levels
}
return int(result)
}

vendor/github.com/go-logr/logr/slogr.go generated vendored Normal file

@ -0,0 +1,100 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
)
// FromSlogHandler returns a Logger which writes to the slog.Handler.
//
// The logr verbosity level is mapped to slog levels such that V(0) becomes
// slog.LevelInfo and V(4) becomes slog.LevelDebug.
func FromSlogHandler(handler slog.Handler) Logger {
if handler, ok := handler.(*slogHandler); ok {
if handler.sink == nil {
return Discard()
}
return New(handler.sink).V(int(handler.levelBias))
}
return New(&slogSink{handler: handler})
}
// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
//
// The returned logger writes all records with level >= slog.LevelError as
// error log entries with LogSink.Error, regardless of the verbosity level of
// the Logger:
//
// logger := <some Logger with 0 as verbosity level>
// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
//
// The level of all other records gets reduced by the verbosity
// level of the Logger and the result is negated. If it happens
// to be negative, then it gets replaced by zero because a LogSink
// is not expected to handle negative levels:
//
// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
func ToSlogHandler(logger Logger) slog.Handler {
if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
return sink.handler
}
handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
if slogSink, ok := handler.sink.(SlogSink); ok {
handler.slogSink = slogSink
}
return handler
}
// SlogSink is an optional interface that a LogSink can implement to support
// logging through the slog.Logger or slog.Handler APIs better. It then should
// also support special slog values like slog.Group. When used as a
// slog.Handler, the advantages are:
//
// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
// as intended by slog
// - proper grouping of key/value pairs via WithGroup
// - verbosity levels > slog.LevelInfo can be recorded
// - less overhead
//
// Both APIs (Logger and slog.Logger/Handler) then are supported equally
// well. Developers can pick whatever API suits them better and/or mix
// packages which use either API in the same binary with a common logging
// implementation.
//
// This interface is necessary because the type implementing the LogSink
// interface cannot also implement the slog.Handler interface due to the
// different prototype of the common Enabled method.
//
// An implementation could support both interfaces in two different types, but then
// additional interfaces would be needed to convert between those types in FromSlogHandler
// and ToSlogHandler.
type SlogSink interface {
LogSink
Handle(ctx context.Context, record slog.Record) error
WithAttrs(attrs []slog.Attr) SlogSink
WithGroup(name string) SlogSink
}

vendor/github.com/go-logr/logr/slogsink.go generated vendored Normal file

@ -0,0 +1,120 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
"runtime"
"time"
)
var (
_ LogSink = &slogSink{}
_ CallDepthLogSink = &slogSink{}
_ Underlier = &slogSink{}
)
// Underlier is implemented by the LogSink returned by NewFromLogHandler.
type Underlier interface {
// GetUnderlying returns the Handler used by the LogSink.
GetUnderlying() slog.Handler
}
const (
// nameKey is used to log the `WithName` values as an additional attribute.
nameKey = "logger"
// errKey is used to log the error parameter of Error as an additional attribute.
errKey = "err"
)
type slogSink struct {
callDepth int
name string
handler slog.Handler
}
func (l *slogSink) Init(info RuntimeInfo) {
l.callDepth = info.CallDepth
}
func (l *slogSink) GetUnderlying() slog.Handler {
return l.handler
}
func (l *slogSink) WithCallDepth(depth int) LogSink {
newLogger := *l
newLogger.callDepth += depth
return &newLogger
}
func (l *slogSink) Enabled(level int) bool {
return l.handler.Enabled(context.Background(), slog.Level(-level))
}
func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
l.log(nil, msg, slog.Level(-level), kvList...)
}
func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
l.log(err, msg, slog.LevelError, kvList...)
}
func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
var pcs [1]uintptr
// skip runtime.Callers, this function, Info/Error, and all helper functions above that.
runtime.Callers(3+l.callDepth, pcs[:])
record := slog.NewRecord(time.Now(), level, msg, pcs[0])
if l.name != "" {
record.AddAttrs(slog.String(nameKey, l.name))
}
if err != nil {
record.AddAttrs(slog.Any(errKey, err))
}
record.Add(kvList...)
_ = l.handler.Handle(context.Background(), record)
}
func (l slogSink) WithName(name string) LogSink {
if l.name != "" {
l.name += "/"
}
l.name += name
return &l
}
func (l slogSink) WithValues(kvList ...interface{}) LogSink {
l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
return &l
}
func kvListToAttrs(kvList ...interface{}) []slog.Attr {
// We don't need the record itself, only its Add method.
record := slog.NewRecord(time.Time{}, 0, "", 0)
record.Add(kvList...)
attrs := make([]slog.Attr, 0, record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
attrs = append(attrs, attr)
return true
})
return attrs
}


@ -1,5 +1,18 @@
# Changelog # Changelog
## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
### Features
* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
### Bug Fixes
* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) ## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)


@ -17,6 +17,12 @@ var (
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros Nil UUID // empty UUID, all zeros
// The Max UUID is special form of UUID that is specified to have all 128 bits set to 1.
Max = UUID{
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
}
) )
// NewHash returns a new UUID derived from the hash of space concatenated with // NewHash returns a new UUID derived from the hash of space concatenated with
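A quick sketch using the new constant; the string form follows from the all-ones bytes above, and `Nil` is the existing all-zeros counterpart:

    package main

    import (
        "fmt"

        "github.com/google/uuid"
    )

    func main() {
        fmt.Println(uuid.Max)             // ffffffff-ffff-ffff-ffff-ffffffffffff
        fmt.Println(uuid.Max == uuid.Nil) // false
    }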


@ -44,7 +44,7 @@ func NewV7FromReader(r io.Reader) (UUID, error) {
// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) // makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6])
// uuid[8] already has the right version number (Variant is 10) // uuid[8] already has the right version number (Variant is 10)
// see function NewV7 and NewV7FromReader // see function NewV7 and NewV7FromReader
func makeV7(uuid []byte) { func makeV7(uuid []byte) {
/* /*
0 1 2 3 0 1 2 3
@ -52,7 +52,7 @@ func makeV7(uuid []byte) {
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms | | unix_ts_ms |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms | ver | rand_a | | unix_ts_ms | ver | rand_a (12 bit seq) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|var| rand_b | |var| rand_b |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@ -61,7 +61,7 @@ func makeV7(uuid []byte) {
*/ */
_ = uuid[15] // bounds check _ = uuid[15] // bounds check
t := timeNow().UnixMilli() t, s := getV7Time()
uuid[0] = byte(t >> 40) uuid[0] = byte(t >> 40)
uuid[1] = byte(t >> 32) uuid[1] = byte(t >> 32)
@ -70,6 +70,35 @@ func makeV7(uuid []byte) {
uuid[4] = byte(t >> 8) uuid[4] = byte(t >> 8)
uuid[5] = byte(t) uuid[5] = byte(t)
uuid[6] = 0x70 | (uuid[6] & 0x0F) uuid[6] = 0x70 | (0x0F & byte(s>>8))
// uuid[8] has already has right version uuid[7] = byte(s)
}
// lastV7time is the last time we returned stored as:
//
// 52 bits of time in milliseconds since epoch
// 12 bits of (fractional nanoseconds) >> 8
var lastV7time int64
const nanoPerMilli = 1000000
// getV7Time returns the time in milliseconds and nanoseconds / 256.
// The returned (milli << 12 + seq) is guaranteed to be greater than
// (milli << 12 + seq) returned by any previous call to getV7Time.
func getV7Time() (milli, seq int64) {
timeMu.Lock()
defer timeMu.Unlock()
nano := timeNow().UnixNano()
milli = nano / nanoPerMilli
// Sequence number is between 0 and 3906 (nanoPerMilli>>8)
seq = (nano - milli*nanoPerMilli) >> 8
now := milli<<12 + seq
if now <= lastV7time {
now = lastV7time + 1
milli = now >> 12
seq = now & 0xfff
}
lastV7time = now
return milli, seq
} }
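With the new `getV7Time`, successive v7 UUIDs from one process are strictly increasing even within the same millisecond. A small check of that property, a sketch assuming the standard `NewV7` constructor referenced above:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/google/uuid"
    )

    func main() {
        prev, _ := uuid.NewV7()
        for i := 0; i < 100000; i++ {
            next, _ := uuid.NewV7()
            // Byte-wise order matches chronological order for v7 UUIDs.
            if bytes.Compare(next[:], prev[:]) <= 0 {
                fmt.Println("ordering violated:", prev, next)
                return
            }
            prev = next
        }
        fmt.Println("100000 UUIDs generated in strictly increasing order")
    }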


@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error {
} }
func (fr *Framer) maxHeaderStringLen() int { func (fr *Framer) maxHeaderStringLen() int {
v := fr.maxHeaderListSize() v := int(fr.maxHeaderListSize())
if uint32(int(v)) == v { if v < 0 {
return int(v) // If maxHeaderListSize overflows an int, use no limit (0).
return 0
} }
// They had a crazy big number for MaxHeaderBytes anyway, return v
// so give them unlimited header lengths:
return 0
} }
// readMetaFrame returns 0 or more CONTINUATION frames from fr and // readMetaFrame returns 0 or more CONTINUATION frames from fr and
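The rewrite above replaces the `uint32(int(v)) == v` round-trip check with a sign test: converting the peer's advertised header-list size to `int` can only go negative on 32-bit platforms, and that case is treated as "no limit". A tiny illustration of the pattern, not the actual http2 code:

    package limits

    // clampToInt converts an untrusted uint32 limit to int. On 32-bit platforms a
    // value above MaxInt32 wraps negative, which is mapped to 0 ("no limit"),
    // mirroring the Framer change above.
    func clampToInt(u uint32) int {
        v := int(u)
        if v < 0 {
            return 0
        }
        return v
    }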


@ -584,7 +584,7 @@ ccflags="$@"
$2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ ||
$2 ~ /^KEYCTL_/ || $2 ~ /^KEYCTL_/ ||
$2 ~ /^PERF_/ || $2 ~ /^PERF_/ ||
$2 ~ /^SECCOMP_MODE_/ || $2 ~ /^SECCOMP_/ ||
$2 ~ /^SEEK_/ || $2 ~ /^SEEK_/ ||
$2 ~ /^SCHED_/ || $2 ~ /^SCHED_/ ||
$2 ~ /^SPLICE_/ || $2 ~ /^SPLICE_/ ||


@ -1785,6 +1785,8 @@ const (
LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20
LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000
LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2
LANDLOCK_ACCESS_NET_BIND_TCP = 0x1
LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2
LANDLOCK_CREATE_RULESET_VERSION = 0x1 LANDLOCK_CREATE_RULESET_VERSION = 0x1
LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_OFF = 0x0
LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef
@ -2465,6 +2467,7 @@ const (
PR_MCE_KILL_GET = 0x22 PR_MCE_KILL_GET = 0x22
PR_MCE_KILL_LATE = 0x0 PR_MCE_KILL_LATE = 0x0
PR_MCE_KILL_SET = 0x1 PR_MCE_KILL_SET = 0x1
PR_MDWE_NO_INHERIT = 0x2
PR_MDWE_REFUSE_EXEC_GAIN = 0x1 PR_MDWE_REFUSE_EXEC_GAIN = 0x1
PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_DISABLE_MANAGEMENT = 0x2c
PR_MPX_ENABLE_MANAGEMENT = 0x2b PR_MPX_ENABLE_MANAGEMENT = 0x2b
@ -2669,8 +2672,9 @@ const (
RTAX_FEATURES = 0xc RTAX_FEATURES = 0xc
RTAX_FEATURE_ALLFRAG = 0x8 RTAX_FEATURE_ALLFRAG = 0x8
RTAX_FEATURE_ECN = 0x1 RTAX_FEATURE_ECN = 0x1
RTAX_FEATURE_MASK = 0xf RTAX_FEATURE_MASK = 0x1f
RTAX_FEATURE_SACK = 0x2 RTAX_FEATURE_SACK = 0x2
RTAX_FEATURE_TCP_USEC_TS = 0x10
RTAX_FEATURE_TIMESTAMP = 0x4 RTAX_FEATURE_TIMESTAMP = 0x4
RTAX_HOPLIMIT = 0xa RTAX_HOPLIMIT = 0xa
RTAX_INITCWND = 0xb RTAX_INITCWND = 0xb
@ -2913,9 +2917,38 @@ const (
SCM_RIGHTS = 0x1 SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x1d SCM_TIMESTAMP = 0x1d
SC_LOG_FLUSH = 0x100000 SC_LOG_FLUSH = 0x100000
SECCOMP_ADDFD_FLAG_SEND = 0x2
SECCOMP_ADDFD_FLAG_SETFD = 0x1
SECCOMP_FILTER_FLAG_LOG = 0x2
SECCOMP_FILTER_FLAG_NEW_LISTENER = 0x8
SECCOMP_FILTER_FLAG_SPEC_ALLOW = 0x4
SECCOMP_FILTER_FLAG_TSYNC = 0x1
SECCOMP_FILTER_FLAG_TSYNC_ESRCH = 0x10
SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV = 0x20
SECCOMP_GET_ACTION_AVAIL = 0x2
SECCOMP_GET_NOTIF_SIZES = 0x3
SECCOMP_IOCTL_NOTIF_RECV = 0xc0502100
SECCOMP_IOCTL_NOTIF_SEND = 0xc0182101
SECCOMP_IOC_MAGIC = '!'
SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_DISABLED = 0x0
SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_FILTER = 0x2
SECCOMP_MODE_STRICT = 0x1 SECCOMP_MODE_STRICT = 0x1
SECCOMP_RET_ACTION = 0x7fff0000
SECCOMP_RET_ACTION_FULL = 0xffff0000
SECCOMP_RET_ALLOW = 0x7fff0000
SECCOMP_RET_DATA = 0xffff
SECCOMP_RET_ERRNO = 0x50000
SECCOMP_RET_KILL = 0x0
SECCOMP_RET_KILL_PROCESS = 0x80000000
SECCOMP_RET_KILL_THREAD = 0x0
SECCOMP_RET_LOG = 0x7ffc0000
SECCOMP_RET_TRACE = 0x7ff00000
SECCOMP_RET_TRAP = 0x30000
SECCOMP_RET_USER_NOTIF = 0x7fc00000
SECCOMP_SET_MODE_FILTER = 0x1
SECCOMP_SET_MODE_STRICT = 0x0
SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP = 0x1
SECCOMP_USER_NOTIF_FLAG_CONTINUE = 0x1
SECRETMEM_MAGIC = 0x5345434d SECRETMEM_MAGIC = 0x5345434d
SECURITYFS_MAGIC = 0x73636673 SECURITYFS_MAGIC = 0x73636673
SEEK_CUR = 0x1 SEEK_CUR = 0x1
@ -3075,6 +3108,7 @@ const (
SOL_TIPC = 0x10f SOL_TIPC = 0x10f
SOL_TLS = 0x11a SOL_TLS = 0x11a
SOL_UDP = 0x11 SOL_UDP = 0x11
SOL_VSOCK = 0x11f
SOL_X25 = 0x106 SOL_X25 = 0x106
SOL_XDP = 0x11b SOL_XDP = 0x11b
SOMAXCONN = 0x1000 SOMAXCONN = 0x1000


@ -281,6 +281,9 @@ const (
SCM_TIMESTAMPNS = 0x23 SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29 SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104
SFD_CLOEXEC = 0x80000 SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800 SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905 SIOCATMARK = 0x8905


@ -282,6 +282,9 @@ const (
SCM_TIMESTAMPNS = 0x23 SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29 SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104
SFD_CLOEXEC = 0x80000 SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800 SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905 SIOCATMARK = 0x8905


@ -288,6 +288,9 @@ const (
SCM_TIMESTAMPNS = 0x23 SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29 SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104
SFD_CLOEXEC = 0x80000 SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800 SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905 SIOCATMARK = 0x8905


@ -278,6 +278,9 @@ const (
SCM_TIMESTAMPNS = 0x23 SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29 SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104
SFD_CLOEXEC = 0x80000 SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800 SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905 SIOCATMARK = 0x8905


@ -275,6 +275,9 @@ const (
SCM_TIMESTAMPNS = 0x23 SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29 SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104
SFD_CLOEXEC = 0x80000 SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800 SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905 SIOCATMARK = 0x8905


@ -281,6 +281,9 @@ const (
SCM_TIMESTAMPNS = 0x23 SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29 SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x80000 SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x80 SFD_NONBLOCK = 0x80
SIOCATMARK = 0x40047307 SIOCATMARK = 0x40047307


@ -281,6 +281,9 @@ const (
SCM_TIMESTAMPNS = 0x23 SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29 SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x80000 SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x80 SFD_NONBLOCK = 0x80
SIOCATMARK = 0x40047307 SIOCATMARK = 0x40047307


@ -281,6 +281,9 @@ const (
SCM_TIMESTAMPNS = 0x23 SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29 SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x80000 SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x80 SFD_NONBLOCK = 0x80
SIOCATMARK = 0x40047307 SIOCATMARK = 0x40047307


@ -281,6 +281,9 @@ const (
SCM_TIMESTAMPNS = 0x23 SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29 SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x80000 SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x80 SFD_NONBLOCK = 0x80
SIOCATMARK = 0x40047307 SIOCATMARK = 0x40047307


@ -336,6 +336,9 @@ const (
SCM_TIMESTAMPNS = 0x23 SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29 SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x80000 SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800 SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905 SIOCATMARK = 0x8905


@ -340,6 +340,9 @@ const (
SCM_TIMESTAMPNS = 0x23 SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29 SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x80000 SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800 SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905 SIOCATMARK = 0x8905


@ -340,6 +340,9 @@ const (
SCM_TIMESTAMPNS = 0x23 SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29 SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x80000 SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800 SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905 SIOCATMARK = 0x8905


@ -272,6 +272,9 @@ const (
SCM_TIMESTAMPNS = 0x23 SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29 SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104
SFD_CLOEXEC = 0x80000 SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800 SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905 SIOCATMARK = 0x8905


@ -344,6 +344,9 @@ const (
SCM_TIMESTAMPNS = 0x23 SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29 SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
SECCOMP_IOCTL_NOTIF_ID_VALID = 0x40082102
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x40082104
SFD_CLOEXEC = 0x80000 SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800 SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905 SIOCATMARK = 0x8905


@ -335,6 +335,9 @@ const (
SCM_TIMESTAMPNS = 0x21 SCM_TIMESTAMPNS = 0x21
SCM_TXTIME = 0x3f SCM_TXTIME = 0x3f
SCM_WIFI_STATUS = 0x25 SCM_WIFI_STATUS = 0x25
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
SECCOMP_IOCTL_NOTIF_ID_VALID = 0x80082102
SECCOMP_IOCTL_NOTIF_SET_FLAGS = 0x80082104
SFD_CLOEXEC = 0x400000 SFD_CLOEXEC = 0x400000
SFD_NONBLOCK = 0x4000 SFD_NONBLOCK = 0x4000
SF_FP = 0x38 SF_FP = 0x38


@ -448,4 +448,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451 SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452 SYS_FCHMODAT2 = 452
SYS_MAP_SHADOW_STACK = 453
SYS_FUTEX_WAKE = 454
SYS_FUTEX_WAIT = 455
SYS_FUTEX_REQUEUE = 456
) )


@ -371,4 +371,7 @@ const (
SYS_CACHESTAT = 451 SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452 SYS_FCHMODAT2 = 452
SYS_MAP_SHADOW_STACK = 453 SYS_MAP_SHADOW_STACK = 453
SYS_FUTEX_WAKE = 454
SYS_FUTEX_WAIT = 455
SYS_FUTEX_REQUEUE = 456
) )


@ -412,4 +412,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451 SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452 SYS_FCHMODAT2 = 452
SYS_MAP_SHADOW_STACK = 453
SYS_FUTEX_WAKE = 454
SYS_FUTEX_WAIT = 455
SYS_FUTEX_REQUEUE = 456
) )


@ -315,4 +315,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451 SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452 SYS_FCHMODAT2 = 452
SYS_MAP_SHADOW_STACK = 453
SYS_FUTEX_WAKE = 454
SYS_FUTEX_WAIT = 455
SYS_FUTEX_REQUEUE = 456
) )


@ -309,4 +309,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451 SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452 SYS_FCHMODAT2 = 452
SYS_MAP_SHADOW_STACK = 453
SYS_FUTEX_WAKE = 454
SYS_FUTEX_WAIT = 455
SYS_FUTEX_REQUEUE = 456
) )


@ -432,4 +432,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_SET_MEMPOLICY_HOME_NODE = 4450
SYS_CACHESTAT = 4451 SYS_CACHESTAT = 4451
SYS_FCHMODAT2 = 4452 SYS_FCHMODAT2 = 4452
SYS_MAP_SHADOW_STACK = 4453
SYS_FUTEX_WAKE = 4454
SYS_FUTEX_WAIT = 4455
SYS_FUTEX_REQUEUE = 4456
) )


@ -362,4 +362,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_SET_MEMPOLICY_HOME_NODE = 5450
SYS_CACHESTAT = 5451 SYS_CACHESTAT = 5451
SYS_FCHMODAT2 = 5452 SYS_FCHMODAT2 = 5452
SYS_MAP_SHADOW_STACK = 5453
SYS_FUTEX_WAKE = 5454
SYS_FUTEX_WAIT = 5455
SYS_FUTEX_REQUEUE = 5456
) )


@ -362,4 +362,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_SET_MEMPOLICY_HOME_NODE = 5450
SYS_CACHESTAT = 5451 SYS_CACHESTAT = 5451
SYS_FCHMODAT2 = 5452 SYS_FCHMODAT2 = 5452
SYS_MAP_SHADOW_STACK = 5453
SYS_FUTEX_WAKE = 5454
SYS_FUTEX_WAIT = 5455
SYS_FUTEX_REQUEUE = 5456
) )


@ -432,4 +432,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_SET_MEMPOLICY_HOME_NODE = 4450
SYS_CACHESTAT = 4451 SYS_CACHESTAT = 4451
SYS_FCHMODAT2 = 4452 SYS_FCHMODAT2 = 4452
SYS_MAP_SHADOW_STACK = 4453
SYS_FUTEX_WAKE = 4454
SYS_FUTEX_WAIT = 4455
SYS_FUTEX_REQUEUE = 4456
) )


@ -439,4 +439,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451 SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452 SYS_FCHMODAT2 = 452
SYS_MAP_SHADOW_STACK = 453
SYS_FUTEX_WAKE = 454
SYS_FUTEX_WAIT = 455
SYS_FUTEX_REQUEUE = 456
) )


@ -411,4 +411,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451 SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452 SYS_FCHMODAT2 = 452
SYS_MAP_SHADOW_STACK = 453
SYS_FUTEX_WAKE = 454
SYS_FUTEX_WAIT = 455
SYS_FUTEX_REQUEUE = 456
) )


@ -411,4 +411,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451 SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452 SYS_FCHMODAT2 = 452
SYS_MAP_SHADOW_STACK = 453
SYS_FUTEX_WAKE = 454
SYS_FUTEX_WAIT = 455
SYS_FUTEX_REQUEUE = 456
) )


@ -316,4 +316,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451 SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452 SYS_FCHMODAT2 = 452
SYS_MAP_SHADOW_STACK = 453
SYS_FUTEX_WAKE = 454
SYS_FUTEX_WAIT = 455
SYS_FUTEX_REQUEUE = 456
) )


@ -377,4 +377,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451 SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452 SYS_FCHMODAT2 = 452
SYS_MAP_SHADOW_STACK = 453
SYS_FUTEX_WAKE = 454
SYS_FUTEX_WAIT = 455
SYS_FUTEX_REQUEUE = 456
) )


@ -390,4 +390,8 @@ const (
SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_SET_MEMPOLICY_HOME_NODE = 450
SYS_CACHESTAT = 451 SYS_CACHESTAT = 451
SYS_FCHMODAT2 = 452 SYS_FCHMODAT2 = 452
SYS_MAP_SHADOW_STACK = 453
SYS_FUTEX_WAKE = 454
SYS_FUTEX_WAIT = 455
SYS_FUTEX_REQUEUE = 456
) )


@ -174,7 +174,8 @@ type FscryptPolicyV2 struct {
Contents_encryption_mode uint8 Contents_encryption_mode uint8
Filenames_encryption_mode uint8 Filenames_encryption_mode uint8
Flags uint8 Flags uint8
_ [4]uint8 Log2_data_unit_size uint8
_ [3]uint8
Master_key_identifier [16]uint8 Master_key_identifier [16]uint8
} }
@ -455,60 +456,63 @@ type Ucred struct {
} }
type TCPInfo struct { type TCPInfo struct {
State uint8 State uint8
Ca_state uint8 Ca_state uint8
Retransmits uint8 Retransmits uint8
Probes uint8 Probes uint8
Backoff uint8 Backoff uint8
Options uint8 Options uint8
Rto uint32 Rto uint32
Ato uint32 Ato uint32
Snd_mss uint32 Snd_mss uint32
Rcv_mss uint32 Rcv_mss uint32
Unacked uint32 Unacked uint32
Sacked uint32 Sacked uint32
Lost uint32 Lost uint32
Retrans uint32 Retrans uint32
Fackets uint32 Fackets uint32
Last_data_sent uint32 Last_data_sent uint32
Last_ack_sent uint32 Last_ack_sent uint32
Last_data_recv uint32 Last_data_recv uint32
Last_ack_recv uint32 Last_ack_recv uint32
Pmtu uint32 Pmtu uint32
Rcv_ssthresh uint32 Rcv_ssthresh uint32
Rtt uint32 Rtt uint32
Rttvar uint32 Rttvar uint32
Snd_ssthresh uint32 Snd_ssthresh uint32
Snd_cwnd uint32 Snd_cwnd uint32
Advmss uint32 Advmss uint32
Reordering uint32 Reordering uint32
Rcv_rtt uint32 Rcv_rtt uint32
Rcv_space uint32 Rcv_space uint32
Total_retrans uint32 Total_retrans uint32
Pacing_rate uint64 Pacing_rate uint64
Max_pacing_rate uint64 Max_pacing_rate uint64
Bytes_acked uint64 Bytes_acked uint64
Bytes_received uint64 Bytes_received uint64
Segs_out uint32 Segs_out uint32
Segs_in uint32 Segs_in uint32
Notsent_bytes uint32 Notsent_bytes uint32
Min_rtt uint32 Min_rtt uint32
Data_segs_in uint32 Data_segs_in uint32
Data_segs_out uint32 Data_segs_out uint32
Delivery_rate uint64 Delivery_rate uint64
Busy_time uint64 Busy_time uint64
Rwnd_limited uint64 Rwnd_limited uint64
Sndbuf_limited uint64 Sndbuf_limited uint64
Delivered uint32 Delivered uint32
Delivered_ce uint32 Delivered_ce uint32
Bytes_sent uint64 Bytes_sent uint64
Bytes_retrans uint64 Bytes_retrans uint64
Dsack_dups uint32 Dsack_dups uint32
Reord_seen uint32 Reord_seen uint32
Rcv_ooopack uint32 Rcv_ooopack uint32
Snd_wnd uint32 Snd_wnd uint32
Rcv_wnd uint32 Rcv_wnd uint32
Rehash uint32 Rehash uint32
Total_rto uint16
Total_rto_recoveries uint16
Total_rto_time uint32
} }
type CanFilter struct { type CanFilter struct {
@ -551,7 +555,7 @@ const (
SizeofIPv6MTUInfo = 0x20 SizeofIPv6MTUInfo = 0x20
SizeofICMPv6Filter = 0x20 SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc SizeofUcred = 0xc
SizeofTCPInfo = 0xf0 SizeofTCPInfo = 0xf8
SizeofCanFilter = 0x8 SizeofCanFilter = 0x8
SizeofTCPRepairOpt = 0x8 SizeofTCPRepairOpt = 0x8
) )
@ -3399,7 +3403,7 @@ const (
DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_STATE = 0x2
DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3
DEVLINK_PORT_FN_ATTR_CAPS = 0x4 DEVLINK_PORT_FN_ATTR_CAPS = 0x4
DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5
) )
type FsverityDigest struct { type FsverityDigest struct {
@ -4183,7 +4187,8 @@ const (
) )
type LandlockRulesetAttr struct { type LandlockRulesetAttr struct {
Access_fs uint64 Access_fs uint64
Access_net uint64
} }
type LandlockPathBeneathAttr struct { type LandlockPathBeneathAttr struct {
@ -5134,7 +5139,7 @@ const (
NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe
NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_MAX = 0x1b NL80211_FREQUENCY_ATTR_MAX = 0x1c
NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc
@ -5547,7 +5552,7 @@ const (
NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2 NL80211_REGDOM_TYPE_CUSTOM_WORLD = 0x2
NL80211_REGDOM_TYPE_INTERSECTION = 0x3 NL80211_REGDOM_TYPE_INTERSECTION = 0x3
NL80211_REGDOM_TYPE_WORLD = 0x1 NL80211_REGDOM_TYPE_WORLD = 0x1
NL80211_REG_RULE_ATTR_MAX = 0x7 NL80211_REG_RULE_ATTR_MAX = 0x8
NL80211_REKEY_DATA_AKM = 0x4 NL80211_REKEY_DATA_AKM = 0x4
NL80211_REKEY_DATA_KCK = 0x2 NL80211_REKEY_DATA_KCK = 0x2
NL80211_REKEY_DATA_KEK = 0x1 NL80211_REKEY_DATA_KEK = 0x1


@ -37,14 +37,17 @@ func (token Token) Environ(inheritExisting bool) (env []string, err error) {
return nil, err return nil, err
} }
defer DestroyEnvironmentBlock(block) defer DestroyEnvironmentBlock(block)
blockp := unsafe.Pointer(block) size := unsafe.Sizeof(*block)
for { for *block != 0 {
entry := UTF16PtrToString((*uint16)(blockp)) // find NUL terminator
if len(entry) == 0 { end := unsafe.Pointer(block)
break for *(*uint16)(end) != 0 {
end = unsafe.Add(end, size)
} }
env = append(env, entry)
blockp = unsafe.Add(blockp, 2*(len(entry)+1)) entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size)
env = append(env, UTF16ToString(entry))
block = (*uint16)(unsafe.Add(end, size))
} }
return env, nil return env, nil
} }
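The old loop advanced by the UTF-8 length of each entry, which mis-stepped over non-ASCII environment entries; the new loop walks to each NUL terminator in UTF-16 code units and converts the slice with `UTF16ToString`. A hedged usage sketch of the exported method (Windows-only; acquiring the token via `OpenCurrentProcessToken` is an assumption about the package's existing API):

    //go:build windows

    package main

    import (
        "fmt"

        "golang.org/x/sys/windows"
    )

    func main() {
        token, err := windows.OpenCurrentProcessToken()
        if err != nil {
            panic(err)
        }
        defer token.Close()

        env, err := token.Environ(true) // true: merge with the existing environment
        if err != nil {
            panic(err)
        }
        for _, kv := range env {
            fmt.Println(kv)
        }
    }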


@ -125,8 +125,7 @@ func UTF16PtrToString(p *uint16) string {
for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ {
ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p))
} }
return UTF16ToString(unsafe.Slice(p, n))
return string(utf16.Decode(unsafe.Slice(p, n)))
} }
func Getpagesize() int { return 4096 } func Getpagesize() int { return 4096 }
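`UTF16PtrToString` now delegates to `UTF16ToString`, which per the commit log in this bump also copes with ill-formed UTF-16 instead of assuming well-formed input. A tiny round-trip sketch; `UTF16PtrFromString` is assumed from the package's existing API:

    //go:build windows

    package main

    import (
        "fmt"

        "golang.org/x/sys/windows"
    )

    func main() {
        p, err := windows.UTF16PtrFromString("héllo, wörld")
        if err != nil {
            panic(err)
        }
        fmt.Println(windows.UTF16PtrToString(p)) // héllo, wörld
    }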


@ -430,7 +430,7 @@ type ClientHeader struct {
MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
// A single process may be used to run multiple virtual // A single process may be used to run multiple virtual
// servers with different identities. // servers with different identities.
// The authority is the name of such a server identitiy. // The authority is the name of such a server identity.
// It is typically a portion of the URI in the form of // It is typically a portion of the URI in the form of
// <host> or <host>:<port> . // <host> or <host>:<port> .
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`


@ -1860,27 +1860,15 @@ func (cc *ClientConn) determineAuthority() error {
} }
endpoint := cc.parsedTarget.Endpoint() endpoint := cc.parsedTarget.Endpoint()
target := cc.target if authorityFromDialOption != "" {
switch {
case authorityFromDialOption != "":
cc.authority = authorityFromDialOption cc.authority = authorityFromDialOption
case authorityFromCreds != "": } else if authorityFromCreds != "" {
cc.authority = authorityFromCreds cc.authority = authorityFromCreds
case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok {
// TODO: remove when the unix resolver implements optional interface to cc.authority = auth.OverrideAuthority(cc.parsedTarget)
// return channel authority. } else if strings.HasPrefix(endpoint, ":") {
cc.authority = "localhost"
case strings.HasPrefix(endpoint, ":"):
cc.authority = "localhost" + endpoint cc.authority = "localhost" + endpoint
default: } else {
// TODO: Define an optional interface on the resolver builder to return
// the channel authority given the user's dial target. For resolvers
// which don't implement this interface, we will use the endpoint from
// "scheme://authority/endpoint" as the default authority.
// Escape the endpoint to handle use cases where the endpoint
// might not be a valid authority by default.
// For example an endpoint which has multiple paths like
// 'a/b/c', which is not a valid authority by default.
cc.authority = encodeAuthority(endpoint) cc.authority = encodeAuthority(endpoint)
} }
channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)


@ -57,7 +57,7 @@ var (
// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
// stored in the passed in attributes. This is set by // stored in the passed in attributes. This is set by
// credentials/xds/xds.go. // credentials/xds/xds.go.
GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer
// GetServerCredentials returns the transport credentials configured on a // GetServerCredentials returns the transport credentials configured on a
// gRPC server. An xDS-enabled server needs to know what type of credentials // gRPC server. An xDS-enabled server needs to know what type of credentials
// is configured on the underlying gRPC server. This is set by server.go. // is configured on the underlying gRPC server. This is set by server.go.
@ -68,11 +68,6 @@ var (
// This is used in the 1.0 release of gcp/observability, and thus must not be // This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed. // deleted or changed.
CanonicalString any // func (codes.Code) string CanonicalString any // func (codes.Code) string
// DrainServerTransports initiates a graceful close of existing connections
// on a gRPC server accepted on the provided listener address. An
// xDS-enabled server invokes this method on a grpc.Server when a particular
// listener moves to "not-serving" mode.
DrainServerTransports any // func(*grpc.Server, string)
// IsRegisteredMethod returns whether the passed in method is registered as // IsRegisteredMethod returns whether the passed in method is registered as
// a method on the server. // a method on the server.
IsRegisteredMethod any // func(*grpc.Server, string) bool IsRegisteredMethod any // func(*grpc.Server, string) bool
@ -188,6 +183,19 @@ var (
ExitIdleModeForTesting any // func(*grpc.ClientConn) error ExitIdleModeForTesting any // func(*grpc.ClientConn) error
ChannelzTurnOffForTesting func() ChannelzTurnOffForTesting func()
// TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found
// error for a given resource type and name. This is usually triggered when
// the associated watch timer fires. For testing purposes, having this
// function makes events more predictable than relying on timer events.
TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error
// TriggerXDSResourceNameNotFoundClient invokes the testing xDS Client singleton
// to invoke resource not found for a resource type name and resource name.
TriggerXDSResourceNameNotFoundClient any // func(string, string) error
// FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD.
FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool)
) )
// HealthChecker defines the signature of the client-side LB channel health checking function. // HealthChecker defines the signature of the client-side LB channel health checking function.


@ -61,6 +61,10 @@ func (b *builder) Scheme() string {
return b.scheme return b.scheme
} }
func (b *builder) OverrideAuthority(resolver.Target) string {
return "localhost"
}
type nopResolver struct { type nopResolver struct {
} }


@ -1,4 +1,4 @@
//go:build !unix //go:build !unix && !windows
/* /*
* Copyright 2023 gRPC authors. * Copyright 2023 gRPC authors.


@ -0,0 +1,54 @@
//go:build windows
/*
* Copyright 2023 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package internal
import (
"net"
"syscall"
"time"
"golang.org/x/sys/windows"
)
// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
// the underlying connection with OS default values for keepalive parameters.
//
// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
// appropriate Go version becomes less than our least supported Go version, we
// should look into using the new API to make things more straightforward.
func NetDialerWithTCPKeepalive() *net.Dialer {
return &net.Dialer{
// Setting a negative value here prevents the Go stdlib from overriding
// the values of TCP keepalive time and interval. It also prevents the
// Go stdlib from enabling TCP keepalives by default.
KeepAlive: time.Duration(-1),
// This method is called after the underlying network socket is created,
// but before dialing the socket (or calling its connect() method). The
// combination of unconditionally enabling TCP keepalives here, and
// disabling the overriding of TCP keepalive parameters by setting the
// KeepAlive field to a negative value above, results in OS defaults for
// the TCP keepalive interval and time parameters.
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
})
},
}
}
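The Windows-only file above combines a negative KeepAlive, which keeps the Go stdlib from touching keepalive settings, with a Control hook that switches SO_KEEPALIVE on so the OS defaults apply. A rough sketch of the same idea on Linux, written against the plain syscall package rather than gRPC's internal helpers:

```go
//go:build linux

package main

import (
	"net"
	"syscall"
	"time"
)

// keepaliveDialer mirrors the pattern above: a negative KeepAlive disables the
// stdlib's own keepalive tuning, and the Control hook enables SO_KEEPALIVE so
// the kernel's default interval and time take effect.
func keepaliveDialer() *net.Dialer {
	return &net.Dialer{
		KeepAlive: time.Duration(-1),
		Control: func(_, _ string, c syscall.RawConn) error {
			return c.Control(func(fd uintptr) {
				// Error ignored for brevity; a real dialer should report it.
				_ = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_KEEPALIVE, 1)
			})
		},
	}
}

func main() {
	if conn, err := keepaliveDialer().Dial("tcp", "example.com:80"); err == nil {
		conn.Close()
	}
}
```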


@ -59,6 +59,8 @@ import (
// atomically. // atomically.
var clientConnectionCounter uint64 var clientConnectionCounter uint64
var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
// http2Client implements the ClientTransport interface with HTTP2. // http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct { type http2Client struct {
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
@ -568,7 +570,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
} }
if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
var k string var k string
for k, vv := range md { for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
@ -1323,10 +1325,8 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
for streamID, stream := range t.activeStreams { for streamID, stream := range t.activeStreams {
if streamID > id && streamID <= upperLimit { if streamID > id && streamID <= upperLimit {
// The stream was unprocessed by the server. // The stream was unprocessed by the server.
if streamID > id && streamID <= upperLimit { atomic.StoreUint32(&stream.unprocessed, 1)
atomic.StoreUint32(&stream.unprocessed, 1) streamsToClose = append(streamsToClose, stream)
streamsToClose = append(streamsToClose, stream)
}
} }
} }
t.mu.Unlock() t.mu.Unlock()


@ -960,7 +960,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
} }
} }
if err := t.writeHeaderLocked(s); err != nil { if err := t.writeHeaderLocked(s); err != nil {
return status.Convert(err).Err() switch e := err.(type) {
case ConnectionError:
return status.Error(codes.Unavailable, e.Desc)
default:
return status.Convert(err).Err()
}
} }
return nil return nil
} }
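The WriteHeader change above stops collapsing connection-level failures: a ConnectionError from the transport now surfaces as codes.Unavailable, where previously status.Convert turned it into codes.Unknown. The same mapping pattern, sketched with a stand-in error type (ConnectionError itself is internal to the transport package):

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// connError stands in for the transport's ConnectionError type.
type connError struct{ desc string }

func (e connError) Error() string { return "connection error: " + e.desc }

// toStatusErr maps connection-level failures to codes.Unavailable and lets
// everything else fall back to status.Convert, mirroring the switch above.
func toStatusErr(err error) error {
	var ce connError
	if errors.As(err, &ce) {
		return status.Error(codes.Unavailable, ce.desc)
	}
	return status.Convert(err).Err()
}

func main() {
	err := toStatusErr(connError{desc: "transport is closing"})
	fmt.Println(status.Code(err)) // Unavailable
}
```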


@ -25,8 +25,14 @@ import (
"context" "context"
"fmt" "fmt"
"strings" "strings"
"google.golang.org/grpc/internal"
) )
func init() {
internal.FromOutgoingContextRaw = fromOutgoingContextRaw
}
// DecodeKeyValue returns k, v, nil. // DecodeKeyValue returns k, v, nil.
// //
// Deprecated: use k and v directly instead. // Deprecated: use k and v directly instead.
@ -238,16 +244,13 @@ func copyOf(v []string) []string {
return vals return vals
} }
// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
// //
// Remember to perform strings.ToLower on the keys, for both the returned MD (MD // Remember to perform strings.ToLower on the keys, for both the returned MD (MD
// is a map, there's no guarantee it's created using our helper functions) and // is a map, there's no guarantee it's created using our helper functions) and
// the extra kv pairs (AppendToOutgoingContext doesn't turn them into // the extra kv pairs (AppendToOutgoingContext doesn't turn them into
// lowercase). // lowercase).
// func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
// This is intended for gRPC-internal use ONLY. Users should use
// FromOutgoingContext instead.
func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
if !ok { if !ok {
return nil, nil, false return nil, nil, false


@ -314,3 +314,13 @@ type Resolver interface {
// Close closes the resolver. // Close closes the resolver.
Close() Close()
} }
// AuthorityOverrider is implemented by Builders that wish to override the
// default authority for the ClientConn.
// By default, the authority used is target.Endpoint().
type AuthorityOverrider interface {
// OverrideAuthority returns the authority to use for a ClientConn with the
// given target. The implementation must generate it without blocking,
// typically in line, and must keep it unchanged.
OverrideAuthority(Target) string
}
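With the new optional interface above, a resolver builder can supply the channel authority itself instead of having it derived from the dial target. A hedged sketch of a builder implementing it (package name, scheme, address, and authority below are made up):

```go
package staticresolver

import "google.golang.org/grpc/resolver"

type staticBuilder struct{}

// Build hands the ClientConn a single fixed address and never updates it.
func (staticBuilder) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	err := cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "10.0.0.1:50051"}}})
	return nopResolver{}, err
}

func (staticBuilder) Scheme() string { return "static" }

// OverrideAuthority implements resolver.AuthorityOverrider, so connections
// created for "static:///..." targets use this authority.
func (staticBuilder) OverrideAuthority(resolver.Target) string {
	return "my-service.internal"
}

type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close()                                {}

func init() { resolver.Register(staticBuilder{}) }
```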


@ -640,14 +640,18 @@ func encode(c baseCodec, msg any) ([]byte, error) {
return b, nil return b, nil
} }
// compress returns the input bytes compressed by compressor or cp. If both // compress returns the input bytes compressed by compressor or cp.
// compressors are nil, returns nil. // If both compressors are nil, or if the message has zero length, returns nil,
// indicating no compression was done.
// //
// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
if compressor == nil && cp == nil { if compressor == nil && cp == nil {
return nil, nil return nil, nil
} }
if len(in) == 0 {
return nil, nil
}
wrapErr := func(err error) error { wrapErr := func(err error) error {
return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
} }
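The extra guard above keeps zero-length messages uncompressed. One reason this matters: even an empty payload produces a non-empty compressed frame, as a quick gzip check shows:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

func main() {
	// Compressing nothing still emits gzip header and trailer bytes, so an
	// "empty" message would no longer be empty on the wire.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write(nil)
	zw.Close()
	fmt.Println(buf.Len() > 0) // true
}
```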


@ -74,9 +74,6 @@ func init() {
return srv.isRegisteredMethod(method) return srv.isRegisteredMethod(method)
} }
internal.ServerFromContext = serverFromContext internal.ServerFromContext = serverFromContext
internal.DrainServerTransports = func(srv *Server, addr string) {
srv.drainServerTransports(addr)
}
internal.AddGlobalServerOptions = func(opt ...ServerOption) { internal.AddGlobalServerOptions = func(opt ...ServerOption) {
globalServerOptions = append(globalServerOptions, opt...) globalServerOptions = append(globalServerOptions, opt...)
} }
@ -139,7 +136,8 @@ type Server struct {
quit *grpcsync.Event quit *grpcsync.Event
done *grpcsync.Event done *grpcsync.Event
channelzRemoveOnce sync.Once channelzRemoveOnce sync.Once
serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop
handlersWG sync.WaitGroup // counts active method handler goroutines
channelzID *channelz.Identifier channelzID *channelz.Identifier
czData *channelzData czData *channelzData
@ -176,6 +174,7 @@ type serverOptions struct {
headerTableSize *uint32 headerTableSize *uint32
numServerWorkers uint32 numServerWorkers uint32
recvBufferPool SharedBufferPool recvBufferPool SharedBufferPool
waitForHandlers bool
} }
var defaultServerOptions = serverOptions{ var defaultServerOptions = serverOptions{
@ -573,6 +572,21 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
}) })
} }
// WaitForHandlers causes Stop to wait until all outstanding method handlers have
// exited before returning. If false, Stop will return as soon as all
// connections have closed, but method handlers may still be running. By
// default, Stop does not wait for method handlers to return.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WaitForHandlers(w bool) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.waitForHandlers = w
})
}
// RecvBufferPool returns a ServerOption that configures the server // RecvBufferPool returns a ServerOption that configures the server
// to use the provided shared buffer pool for parsing incoming messages. Depending // to use the provided shared buffer pool for parsing incoming messages. Depending
// on the application's workload, this could result in reduced memory allocation. // on the application's workload, this could result in reduced memory allocation.
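The new WaitForHandlers option above is opt-in: Stop keeps its old behaviour unless the option is set, while GracefulStop always waits for handlers. A minimal usage sketch:

```go
package main

import (
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		panic(err)
	}

	// With WaitForHandlers(true), Stop blocks until in-flight method handlers
	// return instead of only waiting for connections to close.
	s := grpc.NewServer(grpc.WaitForHandlers(true))
	// ... register services here ...
	go s.Serve(lis)

	// ... later, during shutdown ...
	s.Stop()
}
```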
@ -932,6 +946,12 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
return return
} }
if cc, ok := rawConn.(interface {
PassServerTransport(transport.ServerTransport)
}); ok {
cc.PassServerTransport(st)
}
if !s.addConn(lisAddr, st) { if !s.addConn(lisAddr, st) {
return return
} }
@ -941,15 +961,6 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
}() }()
} }
func (s *Server) drainServerTransports(addr string) {
s.mu.Lock()
conns := s.conns[addr]
for st := range conns {
st.Drain("")
}
s.mu.Unlock()
}
// newHTTP2Transport sets up a http/2 transport (using the // newHTTP2Transport sets up a http/2 transport (using the
// gRPC http2 server transport in transport/http2_server.go). // gRPC http2 server transport in transport/http2_server.go).
func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
@ -1010,9 +1021,11 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport,
streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
st.HandleStreams(ctx, func(stream *transport.Stream) { st.HandleStreams(ctx, func(stream *transport.Stream) {
s.handlersWG.Add(1)
streamQuota.acquire() streamQuota.acquire()
f := func() { f := func() {
defer streamQuota.release() defer streamQuota.release()
defer s.handlersWG.Done()
s.handleStream(st, stream) s.handleStream(st, stream)
} }
@ -1911,6 +1924,10 @@ func (s *Server) stop(graceful bool) {
s.serverWorkerChannelClose() s.serverWorkerChannelClose()
} }
if graceful || s.opts.waitForHandlers {
s.handlersWG.Wait()
}
if s.events != nil { if s.events != nil {
s.events.Finish() s.events.Finish()
s.events = nil s.events = nil


@ -48,6 +48,8 @@ import (
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
) )
var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
// StreamHandler defines the handler called by gRPC server to complete the // StreamHandler defines the handler called by gRPC server to complete the
// execution of a streaming RPC. // execution of a streaming RPC.
// //
@ -184,7 +186,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
// when the RPC completes. // when the RPC completes.
opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
// validate md // validate md
if err := imetadata.Validate(md); err != nil { if err := imetadata.Validate(md); err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())


@ -19,4 +19,4 @@
package grpc package grpc
// Version is the current grpc version. // Version is the current grpc version.
const Version = "1.60.1" const Version = "1.61.0"


@ -88,7 +88,7 @@ not git grep -l 'x/net/context' -- "*.go"
git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test'
# - Do not use "interface{}"; use "any" instead. # - Do not use "interface{}"; use "any" instead.
git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate'
# - Do not call grpclog directly. Use grpclog.Component instead. # - Do not call grpclog directly. Use grpclog.Component instead.
git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go'
@ -127,7 +127,7 @@ staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true
grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)"
# Exclude underscore checks for generated code. # Exclude underscore checks for generated code.
grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)' grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)'
# Error for duplicate imports not including grpc protos. # Error for duplicate imports not including grpc protos.
grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused
@ -152,6 +152,7 @@ grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused
XXXXX Protobuf related deprecation errors: XXXXX Protobuf related deprecation errors:
"github.com/golang/protobuf "github.com/golang/protobuf
.pb.go: .pb.go:
grpc_testing_not_regenerate
: ptypes. : ptypes.
proto.RegisterType proto.RegisterType
XXXXX gRPC internal usage deprecation errors: XXXXX gRPC internal usage deprecation errors:
@ -184,9 +185,6 @@ GetSafeRegexMatch
GetSuffixMatch GetSuffixMatch
GetTlsCertificateCertificateProviderInstance GetTlsCertificateCertificateProviderInstance
GetValidationContextCertificateProviderInstance GetValidationContextCertificateProviderInstance
XXXXX TODO: Remove the below deprecation usages:
CloseNotifier
Roots.Subjects
XXXXX PleaseIgnoreUnused' XXXXX PleaseIgnoreUnused'
echo SUCCESS echo SUCCESS


@ -102,6 +102,25 @@ func (e Flags) String() string {
return fmt.Sprintf("%+v", map[string]Flag(e)) return fmt.Sprintf("%+v", map[string]Flag(e))
} }
func (e Flags) NodeSelector() map[string]string {
// Check if NodeSelector is not nil
if e == nil {
return map[string]string{}
}
nodeSelectorMap := make(map[string]string)
for k, v := range e {
if strings.Contains(k, NodeSelectorLabel) {
key := strings.TrimPrefix(k, NodeSelectorLabel)
value := strings.TrimSpace(string(v))
nodeSelectorMap[key] = value
}
}
return nodeSelectorMap
}
// NewFlagsConfigFromMap creates a Flags from the supplied Map // NewFlagsConfigFromMap creates a Flags from the supplied Map
func NewFlagsConfigFromMap(data map[string]string) (Flags, error) { func NewFlagsConfigFromMap(data map[string]string) (Flags, error) {
flags := newDefaults() flags := newDefaults()
@ -122,6 +141,8 @@ func NewFlagsConfigFromMap(data map[string]string) (Flags, error) {
flags[sanitizedKey] = Permissive flags[sanitizedKey] = Permissive
} else if k == TransportEncryption && strings.EqualFold(v, string(Strict)) { } else if k == TransportEncryption && strings.EqualFold(v, string(Strict)) {
flags[sanitizedKey] = Strict flags[sanitizedKey] = Strict
} else if strings.Contains(k, NodeSelectorLabel) {
flags[sanitizedKey] = Flag(v)
} else { } else {
return flags, fmt.Errorf("cannot parse the feature flag '%s' = '%s'", k, v) return flags, fmt.Errorf("cannot parse the feature flag '%s' = '%s'", k, v)
} }


@ -25,4 +25,5 @@ const (
TransportEncryption = "transport-encryption" TransportEncryption = "transport-encryption"
EvenTypeAutoCreate = "eventtype-auto-create" EvenTypeAutoCreate = "eventtype-auto-create"
OIDCAuthentication = "authentication-oidc" OIDCAuthentication = "authentication-oidc"
NodeSelectorLabel = "apiserversources.nodeselector."
) )
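Together, the two hunks above let config-features carry per-source node selectors: any key containing the apiserversources.nodeselector. prefix is accepted as a flag and later flattened into a node-selector map. A standalone sketch of that key convention (keys and values are made up):

```go
package main

import (
	"fmt"
	"strings"
)

const nodeSelectorLabel = "apiserversources.nodeselector."

// nodeSelectorFromFlags mirrors Flags.NodeSelector above: strip the prefix
// from matching keys and collect the remainder as node-selector labels.
func nodeSelectorFromFlags(flags map[string]string) map[string]string {
	out := map[string]string{}
	for k, v := range flags {
		if strings.Contains(k, nodeSelectorLabel) {
			out[strings.TrimPrefix(k, nodeSelectorLabel)] = strings.TrimSpace(v)
		}
	}
	return out
}

func main() {
	flags := map[string]string{
		"apiserversources.nodeselector.kubernetes.io/hostname": "node-1",
		"transport-encryption":                                 "strict",
	}
	fmt.Println(nodeSelectorFromFlags(flags))
	// map[kubernetes.io/hostname:node-1]
}
```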


@ -32,12 +32,6 @@ const (
// SourceDuckLabelValue is the label value to indicate // SourceDuckLabelValue is the label value to indicate
// the CRD is a Source duck type. // the CRD is a Source duck type.
SourceDuckLabelValue = "true" SourceDuckLabelValue = "true"
//OIDCLabelKey is used to filter out all the informers that related to OIDC work
OIDCLabelKey = "oidc"
// OIDCTokenRoleLabelSelector is the label selector for the OIDC token creator role and rolebinding informers
OIDCTokenRoleLabelSelector = OIDCLabelKey
) )
var ( var (


@ -203,7 +203,7 @@ func loadCertPool(config ClientConfig) (*x509.CertPool, error) {
return nil, err return nil, err
} }
_ = filepath.WalkDir(fmt.Sprintf("/%s", TrustBundleMountPath), func(path string, d fs.DirEntry, err error) error { _ = filepath.WalkDir(TrustBundleMountPath, func(path string, d fs.DirEntry, err error) error {
if err != nil || d.IsDir() { if err != nil || d.IsDir() {
return nil return nil
} }


@ -37,7 +37,7 @@ const (
// TrustBundleLabelSelector is the ConfigMap label selector for trust bundles. // TrustBundleLabelSelector is the ConfigMap label selector for trust bundles.
TrustBundleLabelSelector = "networking.knative.dev/trust-bundle=true" TrustBundleLabelSelector = "networking.knative.dev/trust-bundle=true"
TrustBundleMountPath = "knative-custom-certs" TrustBundleMountPath = "/knative-custom-certs"
TrustBundleVolumeNamePrefix = "kne-bundle-" TrustBundleVolumeNamePrefix = "kne-bundle-"
) )
@ -122,7 +122,7 @@ func PropagateTrustBundles(ctx context.Context, k8s kubernetes.Interface, trustB
// Update owner references // Update owner references
expected.OwnerReferences = withOwnerReferences(obj, gvk, []metav1.OwnerReference{}) expected.OwnerReferences = withOwnerReferences(obj, gvk, []metav1.OwnerReference{})
if err := createConfigMap(ctx, k8s, obj, expected); err != nil { if err := createConfigMap(ctx, k8s, expected); err != nil {
return err return err
} }
continue continue
@ -132,7 +132,7 @@ func PropagateTrustBundles(ctx context.Context, k8s kubernetes.Interface, trustB
expected.OwnerReferences = withOwnerReferences(obj, gvk, p.userCm.OwnerReferences) expected.OwnerReferences = withOwnerReferences(obj, gvk, p.userCm.OwnerReferences)
if !equality.Semantic.DeepDerivative(expected, p.userCm) { if !equality.Semantic.DeepDerivative(expected, p.userCm) {
if err := updateConfigMap(ctx, k8s, obj, expected); err != nil { if err := updateConfigMap(ctx, k8s, expected); err != nil {
return err return err
} }
} }
@ -147,31 +147,72 @@ func AddTrustBundleVolumes(trustBundleLister corev1listers.ConfigMapLister, obj
} }
pt = pt.DeepCopy() pt = pt.DeepCopy()
sources := make([]corev1.VolumeProjection, 0, len(cms))
for _, cm := range cms { for _, cm := range cms {
volumeName := kmeta.ChildName(TrustBundleVolumeNamePrefix, cm.Name) sources = append(sources, corev1.VolumeProjection{
pt.Volumes = append(pt.Volumes, corev1.Volume{ ConfigMap: &corev1.ConfigMapProjection{
Name: volumeName, LocalObjectReference: corev1.LocalObjectReference{
VolumeSource: corev1.VolumeSource{ Name: cm.Name,
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: cm.Name,
},
}, },
}, },
}) })
}
if len(sources) == 0 {
return pt, nil
}
for i := range pt.Containers { volumeName := fmt.Sprintf("%s%s", TrustBundleVolumeNamePrefix, "volume")
vs := corev1.VolumeSource{
Projected: &corev1.ProjectedVolumeSource{
Sources: sources,
},
}
found := false
for i, v := range pt.Volumes {
if v.Name == volumeName {
found = true
pt.Volumes[i].VolumeSource = vs
break
}
}
if !found {
pt.Volumes = append(pt.Volumes, corev1.Volume{
Name: volumeName,
VolumeSource: vs,
})
}
for i := range pt.Containers {
found = false
for _, v := range pt.Containers[i].VolumeMounts {
if v.Name == volumeName {
found = true
break
}
}
if !found {
pt.Containers[i].VolumeMounts = append(pt.Containers[i].VolumeMounts, corev1.VolumeMount{ pt.Containers[i].VolumeMounts = append(pt.Containers[i].VolumeMounts, corev1.VolumeMount{
Name: volumeName, Name: volumeName,
ReadOnly: true, ReadOnly: true,
MountPath: fmt.Sprintf("/%s/%s", TrustBundleMountPath, cm.Name), MountPath: TrustBundleMountPath,
}) })
} }
for i := range pt.InitContainers { }
for i := range pt.InitContainers {
found = false
for _, v := range pt.InitContainers[i].VolumeMounts {
if v.Name == volumeName {
found = true
break
}
}
if !found {
pt.InitContainers[i].VolumeMounts = append(pt.InitContainers[i].VolumeMounts, corev1.VolumeMount{ pt.InitContainers[i].VolumeMounts = append(pt.InitContainers[i].VolumeMounts, corev1.VolumeMount{
Name: volumeName, Name: volumeName,
ReadOnly: true, ReadOnly: true,
MountPath: fmt.Sprintf("/%s/%s", TrustBundleMountPath, cm.Name), MountPath: TrustBundleMountPath,
}) })
} }
} }
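Instead of one volume per trust-bundle ConfigMap, the rewritten AddTrustBundleVolumes above collects every bundle into a single projected volume and mounts it once per container. A sketch of the volume shape it now produces (the ConfigMap names are illustrative):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// projectedTrustBundleVolume bundles several trust-bundle ConfigMaps into one
// projected volume, matching the shape built by the code above.
func projectedTrustBundleVolume(cmNames []string) corev1.Volume {
	sources := make([]corev1.VolumeProjection, 0, len(cmNames))
	for _, name := range cmNames {
		sources = append(sources, corev1.VolumeProjection{
			ConfigMap: &corev1.ConfigMapProjection{
				LocalObjectReference: corev1.LocalObjectReference{Name: name},
			},
		})
	}
	return corev1.Volume{
		Name: "kne-bundle-volume",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{Sources: sources},
		},
	}
}

func main() {
	v := projectedTrustBundleVolume([]string{"knative-ca-bundle", "corp-ca-bundle"})
	fmt.Println(v.Name, len(v.VolumeSource.Projected.Sources))
}
```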
@ -228,18 +269,18 @@ func deleteConfigMap(ctx context.Context, k8s kubernetes.Interface, sb kmeta.Acc
return nil return nil
} }
func updateConfigMap(ctx context.Context, k8s kubernetes.Interface, sb kmeta.Accessor, expected *corev1.ConfigMap) error { func updateConfigMap(ctx context.Context, k8s kubernetes.Interface, expected *corev1.ConfigMap) error {
_, err := k8s.CoreV1().ConfigMaps(sb.GetNamespace()).Update(ctx, expected, metav1.UpdateOptions{}) _, err := k8s.CoreV1().ConfigMaps(expected.Namespace).Update(ctx, expected, metav1.UpdateOptions{})
if err != nil { if err != nil {
return fmt.Errorf("failed to update ConfigMap %s/%s: %w", sb.GetNamespace(), expected.Name, err) return fmt.Errorf("failed to update ConfigMap %s/%s: %w", expected.Namespace, expected.Name, err)
} }
return nil return nil
} }
func createConfigMap(ctx context.Context, k8s kubernetes.Interface, sb kmeta.Accessor, expected *corev1.ConfigMap) error { func createConfigMap(ctx context.Context, k8s kubernetes.Interface, expected *corev1.ConfigMap) error {
_, err := k8s.CoreV1().ConfigMaps(sb.GetNamespace()).Create(ctx, expected, metav1.CreateOptions{}) _, err := k8s.CoreV1().ConfigMaps(expected.Namespace).Create(ctx, expected, metav1.CreateOptions{})
if err != nil { if err != nil {
return fmt.Errorf("failed to create ConfigMap %s/%s: %w", sb.GetNamespace(), expected.Name, err) return fmt.Errorf("failed to create ConfigMap %s/%s: %w", expected.Namespace, expected.Name, err)
} }
return nil return nil
} }


@ -0,0 +1,19 @@
/*
Copyright 2019 The Knative Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package ingress holds utilities related to the implementation of ingress
// controllers.
package ingress


@ -0,0 +1,131 @@
/*
Copyright 2019 The Knative Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ingress
import (
"crypto/sha256"
"encoding/json"
"fmt"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
"knative.dev/networking/pkg/apis/networking/v1alpha1"
"knative.dev/networking/pkg/http/header"
"knative.dev/pkg/network"
)
// ComputeHash computes a hash of the Ingress Spec, Namespace and Name
func ComputeHash(ing *v1alpha1.Ingress) ([sha256.Size]byte, error) {
bytes, err := json.Marshal(ing.Spec)
if err != nil {
return [sha256.Size]byte{}, fmt.Errorf("failed to serialize Ingress: %w", err)
}
bytes = append(bytes, []byte(ing.GetNamespace())...)
bytes = append(bytes, []byte(ing.GetName())...)
return sha256.Sum256(bytes), nil
}
// InsertProbe adds a AppendHeader rule so that any request going through a Gateway is tagged with
// the version of the Ingress currently deployed on the Gateway.
func InsertProbe(ing *v1alpha1.Ingress) (string, error) {
bytes, err := ComputeHash(ing)
if err != nil {
return "", fmt.Errorf("failed to compute the hash of the Ingress: %w", err)
}
hash := fmt.Sprintf("%x", bytes)
for _, rule := range ing.Spec.Rules {
if rule.HTTP == nil {
return "", fmt.Errorf("rule is missing HTTP block: %+v", rule)
}
probePaths := make([]v1alpha1.HTTPIngressPath, 0, len(rule.HTTP.Paths))
for i := range rule.HTTP.Paths {
elt := rule.HTTP.Paths[i].DeepCopy()
if elt.AppendHeaders == nil {
elt.AppendHeaders = make(map[string]string, 1)
}
if elt.Headers == nil {
elt.Headers = make(map[string]v1alpha1.HeaderMatch, 1)
}
elt.Headers[header.HashKey] = v1alpha1.HeaderMatch{Exact: header.HashValueOverride}
elt.AppendHeaders[header.HashKey] = hash
probePaths = append(probePaths, *elt)
}
rule.HTTP.Paths = append(probePaths, rule.HTTP.Paths...)
}
return hash, nil
}
// HostsPerVisibility takes an Ingress and a map from visibility levels to a set of string keys,
// it then returns a map from that key space to the hosts under that visibility.
func HostsPerVisibility(ing *v1alpha1.Ingress, visibilityToKey map[v1alpha1.IngressVisibility]sets.Set[string]) map[string]sets.Set[string] {
output := make(map[string]sets.Set[string], 2) // We currently have public and internal.
for _, rule := range ing.Spec.Rules {
for host := range ExpandedHosts(sets.New(rule.Hosts...)) {
for key := range visibilityToKey[rule.Visibility] {
if _, ok := output[key]; !ok {
output[key] = make(sets.Set[string], len(rule.Hosts))
}
output[key].Insert(host)
}
}
}
return output
}
// ExpandedHosts sets up hosts for the short-names for cluster DNS names.
func ExpandedHosts(hosts sets.Set[string]) sets.Set[string] {
allowedSuffixes := []string{
"",
"." + network.GetClusterDomainName(),
".svc." + network.GetClusterDomainName(),
}
// Optimistically pre-alloc.
expanded := make(sets.Set[string], len(hosts)*len(allowedSuffixes))
for _, h := range sets.List(hosts) {
for _, suffix := range allowedSuffixes {
if th := strings.TrimSuffix(h, suffix); suffix == "" || len(th) < len(h) {
if isValidTopLevelDomain(th) {
expanded.Insert(th)
}
}
}
}
return expanded
}
// Validate that the Top Level Domain of a given hostname is valid.
// Current checks:
// - not all digits
// - len < 64
//
// Example: '1234' is an invalid TLD
func isValidTopLevelDomain(domain string) bool {
parts := strings.Split(domain, ".")
tld := parts[len(parts)-1]
if len(tld) > 63 {
return false
}
for _, c := range []byte(tld) {
if c == '-' || c > '9' {
return true
}
}
// Every char was a digit.
return false
}
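ExpandedHosts in the new helper file above turns a fully qualified cluster-local host into its short names as well; it is used below by GetDomainsForVisibility when listing cluster-local domains. A small usage sketch, assuming the default cluster domain cluster.local:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
	"knative.dev/networking/pkg/ingress"
)

func main() {
	// With the default cluster domain "cluster.local", a fully qualified
	// cluster-local host also expands to its shorter forms.
	hosts := ingress.ExpandedHosts(sets.New("myroute.default.svc.cluster.local"))
	fmt.Println(sets.List(hosts))
	// [myroute.default myroute.default.svc myroute.default.svc.cluster.local]
}
```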


@ -50,29 +50,42 @@ func TransformDeploymentStatus(ds *appsv1.DeploymentStatus) *duckv1.Status {
// The absence of this condition means no failure has occurred. If we find it // The absence of this condition means no failure has occurred. If we find it
// below, we'll overwrite this. // below, we'll overwrite this.
depCondSet.Manage(s).MarkTrue(DeploymentConditionReplicaSetReady) depCondSet.Manage(s).MarkTrue(DeploymentConditionReplicaSetReady)
depCondSet.Manage(s).MarkUnknown(DeploymentConditionProgressing, "Deploying", "")
for _, cond := range ds.Conditions { conds := []appsv1.DeploymentConditionType{
// TODO(jonjohnsonjr): Should we care about appsv1.DeploymentAvailable here? appsv1.DeploymentProgressing,
switch cond.Type { appsv1.DeploymentReplicaFailure,
case appsv1.DeploymentProgressing: }
switch cond.Status {
case corev1.ConditionUnknown: for _, wantType := range conds {
depCondSet.Manage(s).MarkUnknown(DeploymentConditionProgressing, cond.Reason, cond.Message) for _, cond := range ds.Conditions {
case corev1.ConditionTrue: if wantType != cond.Type {
depCondSet.Manage(s).MarkTrue(DeploymentConditionProgressing) continue
case corev1.ConditionFalse:
depCondSet.Manage(s).MarkFalse(DeploymentConditionProgressing, cond.Reason, cond.Message)
} }
case appsv1.DeploymentReplicaFailure:
switch cond.Status { switch cond.Type {
case corev1.ConditionUnknown: case appsv1.DeploymentProgressing:
depCondSet.Manage(s).MarkUnknown(DeploymentConditionReplicaSetReady, cond.Reason, cond.Message) switch cond.Status {
case corev1.ConditionTrue: case corev1.ConditionUnknown:
depCondSet.Manage(s).MarkFalse(DeploymentConditionReplicaSetReady, cond.Reason, cond.Message) depCondSet.Manage(s).MarkUnknown(DeploymentConditionProgressing, cond.Reason, cond.Message)
case corev1.ConditionFalse: case corev1.ConditionTrue:
depCondSet.Manage(s).MarkTrue(DeploymentConditionReplicaSetReady) depCondSet.Manage(s).MarkTrue(DeploymentConditionProgressing)
case corev1.ConditionFalse:
depCondSet.Manage(s).MarkFalse(DeploymentConditionProgressing, cond.Reason, cond.Message)
}
case appsv1.DeploymentReplicaFailure:
switch cond.Status {
case corev1.ConditionUnknown:
depCondSet.Manage(s).MarkUnknown(DeploymentConditionReplicaSetReady, cond.Reason, cond.Message)
case corev1.ConditionTrue:
depCondSet.Manage(s).MarkFalse(DeploymentConditionReplicaSetReady, cond.Reason, cond.Message)
case corev1.ConditionFalse:
depCondSet.Manage(s).MarkTrue(DeploymentConditionReplicaSetReady)
}
} }
} }
} }
return s return s
} }


@ -19,7 +19,6 @@ package v1
import ( import (
"time" "time"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
net "knative.dev/networking/pkg/apis/networking" net "knative.dev/networking/pkg/apis/networking"
"knative.dev/pkg/kmeta" "knative.dev/pkg/kmeta"
@ -144,9 +143,3 @@ func (rs *RevisionStatus) IsActivationRequired() bool {
c := revisionCondSet.Manage(rs).GetCondition(RevisionConditionActive) c := revisionCondSet.Manage(rs).GetCondition(RevisionConditionActive)
return c != nil && c.Status != corev1.ConditionTrue return c != nil && c.Status != corev1.ConditionTrue
} }
// IsReplicaSetFailure returns true if the deployment replicaset failed to create
func (rs *RevisionStatus) IsReplicaSetFailure(deploymentStatus *appsv1.DeploymentStatus) bool {
ds := serving.TransformDeploymentStatus(deploymentStatus)
return ds != nil && ds.GetCondition(serving.DeploymentConditionReplicaSetReady).IsFalse()
}


@ -170,6 +170,8 @@ func (rs *RevisionStatus) PropagateDeploymentStatus(original *appsv1.DeploymentS
// PropagateAutoscalerStatus propagates autoscaler's status to the revision's status. // PropagateAutoscalerStatus propagates autoscaler's status to the revision's status.
func (rs *RevisionStatus) PropagateAutoscalerStatus(ps *autoscalingv1alpha1.PodAutoscalerStatus) { func (rs *RevisionStatus) PropagateAutoscalerStatus(ps *autoscalingv1alpha1.PodAutoscalerStatus) {
resUnavailable := rs.GetCondition(RevisionConditionResourcesAvailable).IsFalse()
// Reflect the PA status in our own. // Reflect the PA status in our own.
cond := ps.GetCondition(autoscalingv1alpha1.PodAutoscalerConditionReady) cond := ps.GetCondition(autoscalingv1alpha1.PodAutoscalerConditionReady)
rs.ActualReplicas = nil rs.ActualReplicas = nil
@ -183,13 +185,16 @@ func (rs *RevisionStatus) PropagateAutoscalerStatus(ps *autoscalingv1alpha1.PodA
} }
if cond == nil { if cond == nil {
rs.MarkActiveUnknown("Deploying", "") rs.MarkActiveUnknown(ReasonDeploying, "")
if !resUnavailable {
rs.MarkResourcesAvailableUnknown(ReasonDeploying, "")
}
return return
} }
// Don't mark the resources available, if deployment status already determined // Don't mark the resources available, if deployment status already determined
// it isn't so. // it isn't so.
resUnavailable := rs.GetCondition(RevisionConditionResourcesAvailable).IsFalse()
if ps.IsScaleTargetInitialized() && !resUnavailable { if ps.IsScaleTargetInitialized() && !resUnavailable {
// Precondition for PA being initialized is SKS being active and // Precondition for PA being initialized is SKS being active and
// that implies that |service.endpoints| > 0. // that implies that |service.endpoints| > 0.
@ -197,6 +202,12 @@ func (rs *RevisionStatus) PropagateAutoscalerStatus(ps *autoscalingv1alpha1.PodA
rs.MarkContainerHealthyTrue() rs.MarkContainerHealthyTrue()
} }
// Mark resource unavailable if we don't have a Service Name and the deployment is ready
// This can happen when we have initial scale set to 0
if rs.GetCondition(RevisionConditionResourcesAvailable).IsTrue() && ps.ServiceName == "" {
rs.MarkResourcesAvailableUnknown(ReasonDeploying, "")
}
switch cond.Status { switch cond.Status {
case corev1.ConditionUnknown: case corev1.ConditionUnknown:
rs.MarkActiveUnknown(cond.Reason, cond.Message) rs.MarkActiveUnknown(cond.Reason, cond.Message)
@ -222,14 +233,6 @@ func (rs *RevisionStatus) PropagateAutoscalerStatus(ps *autoscalingv1alpha1.PodA
rs.MarkActiveFalse(cond.Reason, cond.Message) rs.MarkActiveFalse(cond.Reason, cond.Message)
case corev1.ConditionTrue: case corev1.ConditionTrue:
rs.MarkActiveTrue() rs.MarkActiveTrue()
// Precondition for PA being active is SKS being active and
// that implies that |service.endpoints| > 0.
//
// Note: This is needed for backwards compatibility as we're adding the new
// ScaleTargetInitialized condition to gate readiness.
rs.MarkResourcesAvailableTrue()
rs.MarkContainerHealthyTrue()
} }
} }


@ -195,11 +195,6 @@ const (
// RouteConditionCertificateProvisioned condition when it is set to True // RouteConditionCertificateProvisioned condition when it is set to True
// because external-domain-tls was not enabled. // because external-domain-tls was not enabled.
ExternalDomainTLSNotEnabledMessage = "external-domain-tls is not enabled" ExternalDomainTLSNotEnabledMessage = "external-domain-tls is not enabled"
// TLSNotEnabledForClusterLocalMessage is the message which is set on the
// RouteConditionCertificateProvisioned condition when it is set to True
// because the domain is cluster-local.
TLSNotEnabledForClusterLocalMessage = "TLS is not enabled for cluster-local"
) )
// MarkTLSNotEnabled sets RouteConditionCertificateProvisioned to true when // MarkTLSNotEnabled sets RouteConditionCertificateProvisioned to true when


@ -31,6 +31,8 @@ import (
const ( const (
// DomainConfigName is the config map name for the domain configuration. // DomainConfigName is the config map name for the domain configuration.
DomainConfigName = "config-domain" DomainConfigName = "config-domain"
// Type for domains to generate namespace wildcard certs
DomainTypeWildcard = "wildcard"
) )
var ( var (
@ -46,11 +48,17 @@ type LabelSelector struct {
} }
func (s *LabelSelector) specificity() int { func (s *LabelSelector) specificity() int {
if s == nil {
return 0
}
return len(s.Selector) return len(s.Selector)
} }
// Matches returns whether the given labels meet the requirement of the selector. // Matches returns whether the given labels meet the requirement of the selector.
func (s *LabelSelector) Matches(labels map[string]string) bool { func (s *LabelSelector) Matches(labels map[string]string) bool {
if s == nil {
return true
}
for label, expectedValue := range s.Selector { for label, expectedValue := range s.Selector {
value, ok := labels[label] value, ok := labels[label]
if !ok || expectedValue != value { if !ok || expectedValue != value {
@ -63,33 +71,50 @@ func (s *LabelSelector) Matches(labels map[string]string) bool {
// Domain maps domains to routes by matching the domain's // Domain maps domains to routes by matching the domain's
// label selectors to the route's labels. // label selectors to the route's labels.
type Domain struct { type Domain struct {
// Domains map from domain to label selector. If a route has // Domains map from domain to a set of options including a label selector.
// labels matching a particular selector, it will use the Domains map[string]DomainConfig
// corresponding domain. If multiple selectors match, we choose }
// the most specific selector.
Domains map[string]*LabelSelector // The configuration of one domain
type DomainConfig struct {
// The label selector for the domain. If a route has labels matching a particular selector, it
// will use the corresponding domain. If multiple selectors match, we choose the most specific
// selector.
Selector *LabelSelector
// The type of domain, currently only supports wildcard or unset
Type string
}
// Internal only representation of domain config for unmarshalling, allows backwards compatibility
type domainInternalConfig struct {
Selector map[string]string `json:"selector,omitempty"`
Type string `json:"type"`
} }
// NewDomainFromConfigMap creates a Domain from the supplied ConfigMap // NewDomainFromConfigMap creates a Domain from the supplied ConfigMap
func NewDomainFromConfigMap(configMap *corev1.ConfigMap) (*Domain, error) { func NewDomainFromConfigMap(configMap *corev1.ConfigMap) (*Domain, error) {
c := Domain{Domains: map[string]*LabelSelector{}} c := Domain{Domains: map[string]DomainConfig{}}
hasDefault := false hasDefault := false
for k, v := range configMap.Data { for k, v := range configMap.Data {
if k == configmap.ExampleKey { if k == configmap.ExampleKey {
continue continue
} }
labelSelector := &LabelSelector{} internalConfig := domainInternalConfig{}
err := yaml.Unmarshal([]byte(v), labelSelector) err := yaml.Unmarshal([]byte(v), &internalConfig)
if err != nil { if err != nil {
return nil, err return nil, err
} }
c.Domains[k] = labelSelector if len(internalConfig.Selector) == 0 {
if len(labelSelector.Selector) == 0 {
hasDefault = true hasDefault = true
internalConfig.Type = DomainTypeWildcard
}
c.Domains[k] = DomainConfig{
Selector: &LabelSelector{Selector: internalConfig.Selector},
Type: internalConfig.Type,
} }
} }
if !hasDefault { if !hasDefault {
c.Domains[DefaultDomain] = &LabelSelector{} c.Domains[DefaultDomain] = DomainConfig{Selector: &LabelSelector{}, Type: DomainTypeWildcard}
} }
return &c, nil return &c, nil
} }
@ -105,15 +130,15 @@ func (c *Domain) LookupDomainForLabels(labels map[string]string) string {
if labels[networking.VisibilityLabelKey] == serving.VisibilityClusterLocal { if labels[networking.VisibilityLabelKey] == serving.VisibilityClusterLocal {
return "svc." + network.GetClusterDomainName() return "svc." + network.GetClusterDomainName()
} }
for k, selector := range c.Domains { for k, v := range c.Domains {
// Ignore if selector doesn't match, or decrease the specificity. // Ignore if selector doesn't match, or decrease the specificity.
if !selector.Matches(labels) || selector.specificity() < specificity { if !v.Selector.Matches(labels) || v.Selector.specificity() < specificity {
continue continue
} }
if selector.specificity() > specificity || strings.Compare(k, domain) < 0 { if v.Selector.specificity() > specificity || strings.Compare(k, domain) < 0 {
domain = k domain = k
specificity = selector.specificity() specificity = v.Selector.specificity()
} }
} }
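The reworked config-domain parsing above accepts a richer per-domain value: a selector plus an optional type, with plain selector-only values still unmarshalling as before. A hedged sketch of the value format using the same field layout as the internal struct (struct name, yaml package choice, and sample data are illustrative):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// domainEntry copies the field layout of domainInternalConfig above.
type domainEntry struct {
	Selector map[string]string `json:"selector,omitempty"`
	Type     string            `json:"type"`
}

func main() {
	// One value from the config-domain ConfigMap, e.g. under the key
	// "example.com".
	value := `
selector:
  app: prod
type: wildcard
`
	var e domainEntry
	if err := yaml.Unmarshal([]byte(value), &e); err != nil {
		panic(err)
	}
	fmt.Println(e.Selector["app"], e.Type) // prod wildcard
}
```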


@ -26,17 +26,9 @@ func (in *Domain) DeepCopyInto(out *Domain) {
*out = *in *out = *in
if in.Domains != nil { if in.Domains != nil {
in, out := &in.Domains, &out.Domains in, out := &in.Domains, &out.Domains
*out = make(map[string]*LabelSelector, len(*in)) *out = make(map[string]DomainConfig, len(*in))
for key, val := range *in { for key, val := range *in {
var outVal *LabelSelector (*out)[key] = *val.DeepCopy()
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = new(LabelSelector)
(*in).DeepCopyInto(*out)
}
(*out)[key] = outVal
} }
} }
return return
@ -52,6 +44,27 @@ func (in *Domain) DeepCopy() *Domain {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DomainConfig) DeepCopyInto(out *DomainConfig) {
*out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainConfig.
func (in *DomainConfig) DeepCopy() *DomainConfig {
if in == nil {
return nil
}
out := new(DomainConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LabelSelector) DeepCopyInto(out *LabelSelector) { func (in *LabelSelector) DeepCopyInto(out *LabelSelector) {
*out = *in *out = *in


@ -24,11 +24,13 @@ import (
"text/template" "text/template"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/validation/field"
netapi "knative.dev/networking/pkg/apis/networking" netapi "knative.dev/networking/pkg/apis/networking"
netv1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1" netv1alpha1 "knative.dev/networking/pkg/apis/networking/v1alpha1"
netcfg "knative.dev/networking/pkg/config" netcfg "knative.dev/networking/pkg/config"
"knative.dev/networking/pkg/ingress"
"knative.dev/pkg/apis" "knative.dev/pkg/apis"
pkgnet "knative.dev/pkg/network" pkgnet "knative.dev/pkg/network"
"knative.dev/serving/pkg/apis/serving" "knative.dev/serving/pkg/apis/serving"
@ -63,6 +65,28 @@ func GetAllDomainsAndTags(ctx context.Context, r *v1.Route, names []string, visi
return domainTagMap, nil return domainTagMap, nil
} }
// GetDomainsForVisibility return all domains for the specified visibility.
func GetDomainsForVisibility(ctx context.Context, targetName string, r *v1.Route, visibility netv1alpha1.IngressVisibility) (sets.Set[string], error) {
hostname, err := HostnameFromTemplate(ctx, r.Name, targetName)
if err != nil {
return nil, err
}
meta := r.ObjectMeta.DeepCopy()
isClusterLocal := visibility == netv1alpha1.IngressVisibilityClusterLocal
labels.SetVisibility(meta, isClusterLocal)
domain, err := DomainNameFromTemplate(ctx, *meta, hostname)
if err != nil {
return nil, err
}
domains := []string{domain}
if isClusterLocal {
domains = sets.List(ingress.ExpandedHosts(sets.New(domains...)))
}
return sets.New(domains...), err
}
// DomainNameFromTemplate generates domain name base on the template specified in the `config-network` ConfigMap. // DomainNameFromTemplate generates domain name base on the template specified in the `config-network` ConfigMap.
// name is the "subdomain" which will be referred as the "name" in the template // name is the "subdomain" which will be referred as the "name" in the template
func DomainNameFromTemplate(ctx context.Context, r metav1.ObjectMeta, name string) (string, error) { func DomainNameFromTemplate(ctx context.Context, r metav1.ObjectMeta, name string) (string, error) {


@ -180,14 +180,6 @@ func WithRouteConditionsExternalDomainTLSDisabled(rt *v1.Route) {
rt.Status.MarkTLSNotEnabled(v1.ExternalDomainTLSNotEnabledMessage) rt.Status.MarkTLSNotEnabled(v1.ExternalDomainTLSNotEnabledMessage)
} }
// WithRouteConditionsTLSNotEnabledForClusterLocalMessage calls
// MarkTLSNotEnabled with TLSNotEnabledForClusterLocalMessage after initialized
// the Service's conditions.
func WithRouteConditionsTLSNotEnabledForClusterLocalMessage(rt *v1.Route) {
rt.Status.InitializeConditions()
rt.Status.MarkTLSNotEnabled(v1.TLSNotEnabledForClusterLocalMessage)
}
// WithRouteConditionsHTTPDowngrade calls MarkHTTPDowngrade after initialized the Service's conditions. // WithRouteConditionsHTTPDowngrade calls MarkHTTPDowngrade after initialized the Service's conditions.
func WithRouteConditionsHTTPDowngrade(rt *v1.Route) { func WithRouteConditionsHTTPDowngrade(rt *v1.Route) {
rt.Status.InitializeConditions() rt.Status.InitializeConditions()


@ -291,6 +291,7 @@ function install() {
YTT_FILES+=("${REPO_ROOT_DIR}/test/config/ytt/ingress/${ingress}") YTT_FILES+=("${REPO_ROOT_DIR}/test/config/ytt/ingress/${ingress}")
YTT_FILES+=("${REPO_ROOT_DIR}/test/config/ytt/certmanager/kapp-order.yaml") YTT_FILES+=("${REPO_ROOT_DIR}/test/config/ytt/certmanager/kapp-order.yaml")
YTT_FILES+=("${REPO_ROOT_DIR}/test/config/ytt/certmanager/kapp-secret-upgrade.yaml")
YTT_FILES+=("${REPO_ROOT_DIR}/third_party/cert-manager-${CERT_MANAGER_VERSION}/cert-manager.yaml") YTT_FILES+=("${REPO_ROOT_DIR}/third_party/cert-manager-${CERT_MANAGER_VERSION}/cert-manager.yaml")
YTT_FILES+=("${REPO_ROOT_DIR}/third_party/cert-manager-${CERT_MANAGER_VERSION}/net-certmanager.yaml") YTT_FILES+=("${REPO_ROOT_DIR}/third_party/cert-manager-${CERT_MANAGER_VERSION}/net-certmanager.yaml")
@ -319,10 +320,6 @@ function install() {
YTT_FILES+=("${REPO_ROOT_DIR}/test/config/resource-quota/resource-quota.yaml") YTT_FILES+=("${REPO_ROOT_DIR}/test/config/resource-quota/resource-quota.yaml")
fi fi
if (( ENABLE_TLS )); then
YTT_FILES+=("${REPO_ROOT_DIR}/test/config/tls/cert-secret.yaml")
fi
local ytt_result=$(mktemp) local ytt_result=$(mktemp)
local ytt_post_install_result=$(mktemp) local ytt_post_install_result=$(mktemp)
local ytt_flags="" local ytt_flags=""
@ -375,16 +372,14 @@ function install() {
fi fi
if (( ENABLE_TLS )); then if (( ENABLE_TLS )); then
echo "Patch to config-network to enable internal encryption" echo "Patch config-network to enable encryption features"
toggle_feature system-internal-tls Enabled config-network toggle_feature system-internal-tls enabled config-network
# This is currently only supported by kourier
if [[ "$INGRESS_CLASS" == "kourier.ingress.networking.knative.dev" ]]; then if [[ "$INGRESS_CLASS" == "kourier.ingress.networking.knative.dev" ]]; then
echo "Point Kourier local gateway to custom server certificates" toggle_feature cluster-local-domain-tls enabled config-network
toggle_feature cluster-cert-secret server-certs config-kourier
# This needs to match the name of Secret in test/config/tls/cert-secret.yaml
export CA_CERT=ca-cert
# This needs to match $san from test/config/tls/generate.sh
export SERVER_NAME=knative.dev
fi fi
echo "Restart activator to mount the certificates" echo "Restart activator to mount the certificates"
kubectl delete pod -n ${SYSTEM_NAMESPACE} -l app=activator kubectl delete pod -n ${SYSTEM_NAMESPACE} -l app=activator
kubectl wait --timeout=60s --for=condition=Available deployment -n ${SYSTEM_NAMESPACE} activator kubectl wait --timeout=60s --for=condition=Available deployment -n ${SYSTEM_NAMESPACE} activator


@ -81,6 +81,10 @@ toggle_feature allow-zero-initial-scale false config-autoscaler || fail_test
go_test_e2e -timeout=2m ./test/e2e/domainmapping ${E2E_TEST_FLAGS} || failed=1 go_test_e2e -timeout=2m ./test/e2e/domainmapping ${E2E_TEST_FLAGS} || failed=1
toggle_feature cluster-local-domain-tls enabled config-network || fail_test
go_test_e2e -timeout=2m ./test/e2e/clusterlocaldomaintls ${E2E_TEST_FLAGS} || failed=1
toggle_feature cluster-local-domain-tls disabled config-network || fail_test
toggle_feature system-internal-tls enabled config-network || fail_test toggle_feature system-internal-tls enabled config-network || fail_test
toggle_feature "logging.enable-request-log" true config-observability || fail_test toggle_feature "logging.enable-request-log" true config-observability || fail_test
toggle_feature "logging.request-log-template" "TLS: {{.Request.TLS}}" config-observability || fail_test toggle_feature "logging.request-log-template" "TLS: {{.Request.TLS}}" config-observability || fail_test

vendor/modules.txt

@@ -62,7 +62,7 @@ github.com/emicklei/go-restful/v3/log
# github.com/evanphx/json-patch v5.6.0+incompatible
## explicit
github.com/evanphx/json-patch
-# github.com/evanphx/json-patch/v5 v5.8.0
+# github.com/evanphx/json-patch/v5 v5.9.0
## explicit; go 1.18
github.com/evanphx/json-patch/v5
github.com/evanphx/json-patch/v5/internal/json
@@ -79,7 +79,7 @@ github.com/go-kit/log/level
# github.com/go-logfmt/logfmt v0.5.1
## explicit; go 1.17
github.com/go-logfmt/logfmt
-# github.com/go-logr/logr v1.3.0
+# github.com/go-logr/logr v1.4.1
## explicit; go 1.18
github.com/go-logr/logr
# github.com/go-openapi/jsonpointer v0.19.6
@@ -135,7 +135,7 @@ github.com/google/gofuzz/bytesource
# github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
## explicit; go 1.13
github.com/google/shlex
-# github.com/google/uuid v1.5.0
+# github.com/google/uuid v1.6.0
## explicit
github.com/google/uuid
# github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79
@@ -348,7 +348,7 @@ go.uber.org/zap/zapcore
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
golang.org/x/mod/semver
-# golang.org/x/net v0.20.0
+# golang.org/x/net v0.21.0
## explicit; go 1.18
golang.org/x/net/http/httpguts
golang.org/x/net/http2
@@ -357,7 +357,7 @@ golang.org/x/net/http2/hpack
golang.org/x/net/idna
golang.org/x/net/internal/timeseries
golang.org/x/net/trace
-# golang.org/x/oauth2 v0.16.0
+# golang.org/x/oauth2 v0.17.0
## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/internal
@@ -365,12 +365,12 @@ golang.org/x/oauth2/internal
## explicit; go 1.18
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.16.0
+# golang.org/x/sys v0.17.0
## explicit; go 1.18
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
-# golang.org/x/term v0.16.0
+# golang.org/x/term v0.17.0
## explicit; go 1.18
golang.org/x/term
# golang.org/x/text v0.14.0
@@ -403,7 +403,7 @@ golang.org/x/tools/internal/imports
# gomodules.xyz/jsonpatch/v2 v2.4.0
## explicit; go 1.20
gomodules.xyz/jsonpatch/v2
-# google.golang.org/api v0.155.0
+# google.golang.org/api v0.163.0
## explicit; go 1.19
google.golang.org/api/support/bundler
# google.golang.org/appengine v1.6.8
@@ -415,16 +415,16 @@ google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3
+# google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac
## explicit; go 1.19
google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/genproto/googleapis/api v0.0.0-20231211222908-989df2bf70f3
+# google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457
## explicit; go 1.19
google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.60.1
+# google.golang.org/grpc v1.61.0
## explicit; go 1.19
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -953,10 +953,10 @@ k8s.io/utils/net
k8s.io/utils/pointer
k8s.io/utils/strings/slices
k8s.io/utils/trace
-# knative.dev/client-pkg v0.0.0-20240124090003-67fca0ca8681
+# knative.dev/client-pkg v0.0.0-20240214132329-2c46c4434d4e
## explicit; go 1.21
knative.dev/client-pkg/pkg/kn/plugin
-# knative.dev/eventing v0.40.0
+# knative.dev/eventing v0.40.1-0.20240214130959-97e91c540b0c
## explicit; go 1.21
knative.dev/eventing/pkg/apis/config
knative.dev/eventing/pkg/apis/duck
@@ -987,10 +987,10 @@ knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/fake
knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2
knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/fake
knative.dev/eventing/pkg/eventingtls
-# knative.dev/hack v0.0.0-20240123162936-f3f03ac0ab1a
+# knative.dev/hack v0.0.0-20240214131420-999d7e6b8495
## explicit; go 1.18
knative.dev/hack
-# knative.dev/networking v0.0.0-20240116081125-ce0738abf051
+# knative.dev/networking v0.0.0-20240214132427-22eb3d0fda5c
## explicit; go 1.18
knative.dev/networking/pkg
knative.dev/networking/pkg/apis/networking
@@ -1004,8 +1004,9 @@ knative.dev/networking/pkg/http/header
knative.dev/networking/pkg/http/probe
knative.dev/networking/pkg/http/proxy
knative.dev/networking/pkg/http/stats
+knative.dev/networking/pkg/ingress
knative.dev/networking/pkg/k8s
-# knative.dev/pkg v0.0.0-20240116073220-b488e7be5902
+# knative.dev/pkg v0.0.0-20240214130941-b8f9b2204947
## explicit; go 1.18
knative.dev/pkg/apis
knative.dev/pkg/apis/duck
@@ -1055,7 +1056,7 @@ knative.dev/pkg/tracing/config
knative.dev/pkg/tracing/propagation
knative.dev/pkg/tracing/propagation/tracecontextb3
knative.dev/pkg/tracker
-# knative.dev/serving v0.40.0
+# knative.dev/serving v0.40.1-0.20240215124546-096adcc220b2
## explicit; go 1.21
knative.dev/serving/pkg/apis/autoscaling
knative.dev/serving/pkg/apis/autoscaling/v1alpha1