upgrade to latest dependencies (#1823)

bumping knative.dev/serving 0462ce6...4c76e3b
bumping go.opencensus.io 49838f2...b1a01ee
bumping github.com/census-instrumentation/opencensus-proto 4aa53e1...e53624a
bumping github.com/golang/protobuf ae97035...5d5e8c0
bumping golang.org/x/oauth2 e48dfd9...839de22
bumping golang.org/x/text 9db913a...e503480
bumping golang.org/x/term 0edf009...edd9fb7
bumping google.golang.org/api a039966...63d06ab
bumping google.golang.org/protobuf 6875c3d...68463f0
bumping google.golang.org/grpc 1c29e07...5b67e5e
bumping golang.org/x/net dfa2b5d...c73c09c

Signed-off-by: Knative Automation <automation@knative.team>
Knative Automation 2023-07-11 12:07:12 +01:00 committed by GitHub
parent b6c06e0602
commit 9ddff04d26
329 changed files with 28949 additions and 7905 deletions
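
With the repository cloned locally, the same commit metadata and diffstat can be inspected directly with git, using the abbreviated hashes shown above (a minimal sketch; it assumes your checkout already contains this commit):

    git show --stat 9ddff04d26                  # show this commit with its per-file diffstat
    git log --oneline b6c06e0602..9ddff04d26    # list it relative to its parent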

go.mod (48 changed lines)

@@ -11,8 +11,8 @@ require (
github.com/spf13/cobra v1.6.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.13.0
- golang.org/x/mod v0.8.0
- golang.org/x/term v0.6.0
+ golang.org/x/mod v0.9.0
+ golang.org/x/term v0.10.0
gotest.tools/v3 v3.3.0
k8s.io/api v0.26.5
k8s.io/apiextensions-apiserver v0.26.5
@@ -20,11 +20,11 @@ require (
k8s.io/cli-runtime v0.25.2
k8s.io/client-go v0.26.5
k8s.io/code-generator v0.26.5
- knative.dev/eventing v0.37.1-0.20230627143052-8f7409444808
- knative.dev/hack v0.0.0-20230615155948-d7586a218601
- knative.dev/networking v0.0.0-20230622190036-3a75df5dd93d
- knative.dev/pkg v0.0.0-20230616134650-eb63a40adfb0
- knative.dev/serving v0.37.1-0.20230626132048-0462ce6dd994
+ knative.dev/eventing v0.37.1-0.20230710092738-ed05a353ea4d
+ knative.dev/hack v0.0.0-20230710131339-12cd6a1f789c
+ knative.dev/networking v0.0.0-20230710015341-b2cce924227a
+ knative.dev/pkg v0.0.0-20230710013638-5ef4812a4fe9
+ knative.dev/serving v0.37.1-0.20230711005812-4c76e3b3033d
sigs.k8s.io/yaml v1.3.0
)
@@ -34,8 +34,8 @@ require (
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blendle/zapdriver v1.3.1 // indirect
- github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
- github.com/cespare/xxhash/v2 v2.1.2 // indirect
+ github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cloudevents/sdk-go/sql/v2 v2.13.0 // indirect
github.com/cloudevents/sdk-go/v2 v2.13.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@@ -53,7 +53,7 @@ require (
github.com/go-openapi/swag v0.22.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/golang/protobuf v1.5.2 // indirect
+ github.com/golang/protobuf v1.5.3 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/gnostic v0.6.9 // indirect
github.com/google/go-containerregistry v0.13.0 // indirect
@@ -61,7 +61,7 @@ require (
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
- github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/josharian/intern v1.0.0 // indirect
@@ -95,24 +95,26 @@ require (
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/subosito/gotenv v1.4.1 // indirect
github.com/xlab/treeprint v1.1.0 // indirect
- go.opencensus.io v0.23.0 // indirect
+ go.opencensus.io v0.24.0 // indirect
go.starlark.net v0.0.0-20220817180228-f738f5508c12 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.23.0 // indirect
- golang.org/x/net v0.8.0 // indirect
- golang.org/x/oauth2 v0.1.0 // indirect
- golang.org/x/sync v0.1.0 // indirect
- golang.org/x/sys v0.6.0 // indirect
- golang.org/x/text v0.8.0 // indirect
- golang.org/x/time v0.1.0 // indirect
- golang.org/x/tools v0.6.0 // indirect
+ golang.org/x/net v0.12.0 // indirect
+ golang.org/x/oauth2 v0.8.0 // indirect
+ golang.org/x/sync v0.2.0 // indirect
+ golang.org/x/sys v0.10.0 // indirect
+ golang.org/x/text v0.11.0 // indirect
+ golang.org/x/time v0.3.0 // indirect
+ golang.org/x/tools v0.7.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
- google.golang.org/api v0.96.0 // indirect
+ google.golang.org/api v0.124.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51 // indirect
- google.golang.org/grpc v1.49.0 // indirect
- google.golang.org/protobuf v1.28.1 // indirect
+ google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529 // indirect
+ google.golang.org/grpc v1.56.1 // indirect
+ google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect

go.sum

@@ -17,34 +17,14 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -55,7 +35,6 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d h1:LblfooH1lKOpp1hIhukktmSAxFkqMPFk9KR6iZ0MJNI=
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY=
contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg=
@@ -84,12 +63,13 @@ github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHf
github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -101,12 +81,8 @@ github.com/cloudevents/sdk-go/v2 v2.13.0/go.mod h1:xDmKfzNjM8gBvjaF8ijFjM1VYOVUE
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
@@ -125,10 +101,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
@@ -178,6 +152,7 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -190,8 +165,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -207,10 +180,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -229,8 +201,6 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -243,7 +213,6 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -254,27 +223,14 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
@@ -282,8 +238,9 @@ github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/z
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -434,7 +391,7 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -461,8 +418,9 @@ github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+z
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -470,8 +428,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc=
github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
@@ -489,15 +448,15 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.starlark.net v0.0.0-20220817180228-f738f5508c12 h1:xOBJXWGEDwU5xSDxH6macxO11Us0AH2fTa9rmsbbF7g=
go.starlark.net v0.0.0-20220817180228-f738f5508c12/go.mod h1:VZcBMdr3cT3PnBoWunTabuSEXwVAH+ZJ5zxfs3AdASk=
@@ -545,7 +504,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
@@ -556,9 +514,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -595,25 +552,16 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -623,20 +571,10 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y=
golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=
golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8=
golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -647,10 +585,9 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -695,47 +632,25 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -745,13 +660,13 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -803,20 +718,12 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY=
gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
@@ -839,28 +746,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
google.golang.org/api v0.96.0 h1:F60cuQPJq7K7FzsxMYHAUJSiXh2oKctHxBMbDygxhfM=
google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
google.golang.org/api v0.124.0 h1:dP6Ef1VgOGqQ8eiv4GiY8RhmeyqzovcXBYPDUYG8Syo=
google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -906,54 +793,14 @@ google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51 h1:ucpgjuzWqWrj0NEwjUpsGTf2IGxyLtmuSk0oGgifjec=
google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
google.golang.org/genproto v0.0.0-20230525234025-438c736192d0 h1:x1vNwUhVOcsYoKyEGCZBH694SBmmBjA2EfauFVEI2+M=
google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY=
google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a h1:HiYVD+FGJkTo+9zj1gqz0anapsa1JxjiSrN+BJKyUmE=
google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529 h1:DEH99RbiLZhMxrpEJCZ0A+wdTe0EOgou/poSLx9vWf4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -972,23 +819,10 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ=
google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1003,8 +837,9 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1063,16 +898,16 @@ k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+O
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 h1:GfD9OzL11kvZN5iArC6oTS7RTj7oJOIfnislxYlqTj8=
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
knative.dev/eventing v0.37.1-0.20230627143052-8f7409444808 h1:9xzp250Vqqj+3K/306/th9v5u+n1X+cRdNPgD5Ffyn4=
knative.dev/eventing v0.37.1-0.20230627143052-8f7409444808/go.mod h1:MRpJ0pRqodV9mlPvRTLXnUOo0ExPDHyEk04Zxjn3bA0=
knative.dev/hack v0.0.0-20230615155948-d7586a218601 h1:yMe29SMHrAIt3+J+APvf4WVP6cW7ZDtUhh5uxD5ERdA=
knative.dev/hack v0.0.0-20230615155948-d7586a218601/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
knative.dev/networking v0.0.0-20230622190036-3a75df5dd93d h1:jrODmEPU3SYcmaec2pErrcH5AvXVDdK70XJvblTj7RE=
knative.dev/networking v0.0.0-20230622190036-3a75df5dd93d/go.mod h1:/CM22Ocewr85rfI84I+WUMoOVZSczpsif4fScImHdfU=
knative.dev/pkg v0.0.0-20230616134650-eb63a40adfb0 h1:weQWWxEEbNOPuL4qtGiBZuMSFhcjF/Cu163uktd/xFE=
knative.dev/pkg v0.0.0-20230616134650-eb63a40adfb0/go.mod h1:dqC6IrvyBE7E+oZocs5PkVhq1G59pDTA7r8U17EAKMk=
knative.dev/serving v0.37.1-0.20230626132048-0462ce6dd994 h1:pGlaiue/Ux4Gzlb2eTIPmQksmmrNv5xeXsg9GRW4mWE=
knative.dev/serving v0.37.1-0.20230626132048-0462ce6dd994/go.mod h1:G5sRe4RpjxCaNdRI/EQagvtCioMtjOIdfoboauNMpSk=
knative.dev/eventing v0.37.1-0.20230710092738-ed05a353ea4d h1:kmrAAdim+/5YZeZBDDbaj9XlAtgw2tHm5ToZC81tueM=
knative.dev/eventing v0.37.1-0.20230710092738-ed05a353ea4d/go.mod h1:qdgFteB1E2/K0TfweX9zDX3An6PkzsQ/a4xEWkitusk=
knative.dev/hack v0.0.0-20230710131339-12cd6a1f789c h1:jd+ogsbIZTtBVFf3IdQq0uC6+COpFzQjCAWkctuITeY=
knative.dev/hack v0.0.0-20230710131339-12cd6a1f789c/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
knative.dev/networking v0.0.0-20230710015341-b2cce924227a h1:l3yIUSoxxU+y1OBYRDirwM3XY9zturWvjiKkFha+BWk=
knative.dev/networking v0.0.0-20230710015341-b2cce924227a/go.mod h1:cSrqxaHRb/SSNd19YS3tJq6YHoQIqlRpWv4v9OXmhDg=
knative.dev/pkg v0.0.0-20230710013638-5ef4812a4fe9 h1:2T60dQFvSz7MS0M2pgLNMbxtv4Rn2zskjQH8kB/wTkg=
knative.dev/pkg v0.0.0-20230710013638-5ef4812a4fe9/go.mod h1:eXobTqst4aI7CNa6W7sG73VhEsHGWPSrkefeMTb++a0=
knative.dev/serving v0.37.1-0.20230711005812-4c76e3b3033d h1:uuU7ZeYAwJIjk4N5VBhP7f2x8+PcaxlzZyGPWsAtYmo=
knative.dev/serving v0.37.1-0.20230711005812-4c76e3b3033d/go.mod h1:KzM8JVJDX87Mx4ACwG3R3TypDYiZgz6PZOdnyIcPwi8=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=


@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.12.3
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: opencensus/proto/agent/common/v1/common.proto
// NOTE: This proto is experimental and is subject to change at this point.
@ -24,10 +24,9 @@
package v1
import (
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)
@ -39,10 +38,6 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type LibraryInfo_Language int32
const (
@ -207,7 +202,7 @@ type ProcessIdentifier struct {
// Process id.
Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
// Start time of this ProcessIdentifier. Represented in epoch time.
StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
StartTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
}
func (x *ProcessIdentifier) Reset() {
@ -256,7 +251,7 @@ func (x *ProcessIdentifier) GetPid() uint32 {
return 0
}
func (x *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp {
func (x *ProcessIdentifier) GetStartTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.StartTimestamp
}
@ -447,7 +442,7 @@ var file_opencensus_proto_agent_common_v1_common_proto_rawDesc = []byte{
0x10, 0x09, 0x12, 0x0a, 0x0a, 0x06, 0x57, 0x45, 0x42, 0x5f, 0x4a, 0x53, 0x10, 0x0a, 0x22, 0x21,
0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
0x65, 0x42, 0xa2, 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e,
0x65, 0x42, 0xa6, 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e,
0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e,
0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x49, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
@ -455,9 +450,10 @@ var file_opencensus_proto_agent_common_v1_common_proto_rawDesc = []byte{
0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e,
0x2d, 0x67, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
0x2f, 0x76, 0x31, 0xea, 0x02, 0x20, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73,
0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x6d,
0x6d, 0x6f, 0x6e, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x2f, 0x76, 0x31, 0xea, 0x02, 0x24, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73,
0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x3a, 0x3a,
0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
}
var (
@ -475,13 +471,13 @@ func file_opencensus_proto_agent_common_v1_common_proto_rawDescGZIP() []byte {
var file_opencensus_proto_agent_common_v1_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_opencensus_proto_agent_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_opencensus_proto_agent_common_v1_common_proto_goTypes = []interface{}{
(LibraryInfo_Language)(0), // 0: opencensus.proto.agent.common.v1.LibraryInfo.Language
(*Node)(nil), // 1: opencensus.proto.agent.common.v1.Node
(*ProcessIdentifier)(nil), // 2: opencensus.proto.agent.common.v1.ProcessIdentifier
(*LibraryInfo)(nil), // 3: opencensus.proto.agent.common.v1.LibraryInfo
(*ServiceInfo)(nil), // 4: opencensus.proto.agent.common.v1.ServiceInfo
nil, // 5: opencensus.proto.agent.common.v1.Node.AttributesEntry
(*timestamp.Timestamp)(nil), // 6: google.protobuf.Timestamp
(LibraryInfo_Language)(0), // 0: opencensus.proto.agent.common.v1.LibraryInfo.Language
(*Node)(nil), // 1: opencensus.proto.agent.common.v1.Node
(*ProcessIdentifier)(nil), // 2: opencensus.proto.agent.common.v1.ProcessIdentifier
(*LibraryInfo)(nil), // 3: opencensus.proto.agent.common.v1.LibraryInfo
(*ServiceInfo)(nil), // 4: opencensus.proto.agent.common.v1.ServiceInfo
nil, // 5: opencensus.proto.agent.common.v1.Node.AttributesEntry
(*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp
}
var file_opencensus_proto_agent_common_v1_common_proto_depIdxs = []int32{
2, // 0: opencensus.proto.agent.common.v1.Node.identifier:type_name -> opencensus.proto.agent.common.v1.ProcessIdentifier
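For reference, a minimal sketch (not part of the diff) of constructing a ProcessIdentifier with the new timestamppb type that replaces the legacy github.com/golang/protobuf/ptypes/timestamp import; the pid value is a placeholder:

package main

import (
    "fmt"
    "time"

    commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
    "google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
    id := &commonpb.ProcessIdentifier{
        Pid: 1234, // placeholder
        // StartTimestamp is now *timestamppb.Timestamp instead of the
        // legacy *timestamp.Timestamp.
        StartTimestamp: timestamppb.New(time.Now()),
    }
    fmt.Println(id.GetPid(), id.GetStartTimestamp().AsTime())
}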

@ -14,17 +14,20 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.12.3
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: opencensus/proto/agent/metrics/v1/metrics_service.proto
package v1
import (
context "context"
v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
v11 "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
@ -38,10 +41,6 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type ExportMetricsServiceRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -195,7 +194,7 @@ var file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDesc = []byt
0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63,
0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69,
0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0xad, 0x01, 0x0a, 0x24, 0x69, 0x6f, 0x2e, 0x6f,
0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0xb1, 0x01, 0x0a, 0x24, 0x69, 0x6f, 0x2e, 0x6f,
0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31,
0x42, 0x13, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
@ -204,9 +203,10 @@ var file_opencensus_proto_agent_metrics_v1_metrics_service_proto_rawDesc = []byt
0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x63,
0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x2d,
0x67, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
0x2f, 0x76, 0x31, 0xea, 0x02, 0x21, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73,
0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74,
0x72, 0x69, 0x63, 0x73, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x2f, 0x76, 0x31, 0xea, 0x02, 0x25, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73,
0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x3a, 0x3a,
0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
}
var (
@ -292,3 +292,119 @@ func file_opencensus_proto_agent_metrics_v1_metrics_service_proto_init() {
file_opencensus_proto_agent_metrics_v1_metrics_service_proto_goTypes = nil
file_opencensus_proto_agent_metrics_v1_metrics_service_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// MetricsServiceClient is the client API for MetricsService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type MetricsServiceClient interface {
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error)
}
type metricsServiceClient struct {
cc grpc.ClientConnInterface
}
func NewMetricsServiceClient(cc grpc.ClientConnInterface) MetricsServiceClient {
return &metricsServiceClient{cc}
}
func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) {
stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...)
if err != nil {
return nil, err
}
x := &metricsServiceExportClient{stream}
return x, nil
}
type MetricsService_ExportClient interface {
Send(*ExportMetricsServiceRequest) error
Recv() (*ExportMetricsServiceResponse, error)
grpc.ClientStream
}
type metricsServiceExportClient struct {
grpc.ClientStream
}
func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) {
m := new(ExportMetricsServiceResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// MetricsServiceServer is the server API for MetricsService service.
type MetricsServiceServer interface {
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(MetricsService_ExportServer) error
}
// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations.
type UnimplementedMetricsServiceServer struct {
}
func (*UnimplementedMetricsServiceServer) Export(MetricsService_ExportServer) error {
return status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) {
s.RegisterService(&_MetricsService_serviceDesc, srv)
}
func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream})
}
type MetricsService_ExportServer interface {
Send(*ExportMetricsServiceResponse) error
Recv() (*ExportMetricsServiceRequest, error)
grpc.ServerStream
}
type metricsServiceExportServer struct {
grpc.ServerStream
}
func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) {
m := new(ExportMetricsServiceRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _MetricsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService",
HandlerType: (*MetricsServiceServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "Export",
Handler: _MetricsService_Export_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto",
}
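A minimal usage sketch (not part of the diff) for a client driving the regenerated bidirectional Export stream; the agent address is a placeholder and the request is left empty for brevity:

package main

import (
    "context"
    "log"

    agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    conn, err := grpc.Dial("localhost:55678", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := agentmetricspb.NewMetricsServiceClient(conn)
    // Per the generated comment, this stream is meant to stay open for the
    // life of the application.
    stream, err := client.Export(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    if err := stream.Send(&agentmetricspb.ExportMetricsServiceRequest{}); err != nil {
        log.Fatal(err)
    }
    if err := stream.CloseSend(); err != nil {
        log.Fatal(err)
    }
}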

@ -13,14 +13,14 @@ import (
"io"
"net/http"
"github.com/golang/protobuf/descriptor"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// Suppress "imported and not used" errors
@ -29,7 +29,7 @@ var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = descriptor.ForMessage
var _ = metadata.Join
func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client MetricsServiceClient, req *http.Request, pathParams map[string]string) (MetricsService_ExportClient, runtime.ServerMetadata, error) {
var metadata runtime.ServerMetadata
@ -55,15 +55,6 @@ func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Mars
}
return nil
}
if err := handleSend(); err != nil {
if cerr := stream.CloseSend(); cerr != nil {
grpclog.Infof("Failed to terminate client stream: %v", cerr)
}
if err == io.EOF {
return stream, metadata, nil
}
return nil, metadata, err
}
go func() {
for {
if err := handleSend(); err != nil {
@ -86,6 +77,7 @@ func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Mars
// RegisterMetricsServiceHandlerServer registers the http handlers for service MetricsService to "mux".
// UnaryRPC :call MetricsServiceServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterMetricsServiceHandlerFromEndpoint instead.
func RegisterMetricsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MetricsServiceServer) error {
mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
@ -140,7 +132,7 @@ func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.Serve
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/opencensus.proto.agent.metrics.v1.MetricsService/Export", runtime.WithHTTPPathPattern("/v1/metrics"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@ -160,7 +152,7 @@ func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.Serve
}
var (
pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, "", runtime.AssumeColonVerbOpt(true)))
pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, ""))
)
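An illustrative sketch (not part of the diff) of wiring the regenerated gateway code into an HTTP server with the grpc-gateway v2 runtime, using the RegisterMetricsServiceHandlerFromEndpoint variant recommended in the comment above; the endpoint and listen address are placeholders:

package main

import (
    "context"
    "log"
    "net/http"

    gw "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1"
    "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    mux := runtime.NewServeMux()
    opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
    if err := gw.RegisterMetricsServiceHandlerFromEndpoint(context.Background(), mux, "localhost:55678", opts); err != nil {
        log.Fatal(err)
    }
    // The pattern registered above maps POST /v1/metrics to MetricsService.Export.
    log.Fatal(http.ListenAndServe(":8081", mux))
}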
var (

@ -1,126 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
package v1
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// MetricsServiceClient is the client API for MetricsService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type MetricsServiceClient interface {
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error)
}
type metricsServiceClient struct {
cc grpc.ClientConnInterface
}
func NewMetricsServiceClient(cc grpc.ClientConnInterface) MetricsServiceClient {
return &metricsServiceClient{cc}
}
func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) {
stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...)
if err != nil {
return nil, err
}
x := &metricsServiceExportClient{stream}
return x, nil
}
type MetricsService_ExportClient interface {
Send(*ExportMetricsServiceRequest) error
Recv() (*ExportMetricsServiceResponse, error)
grpc.ClientStream
}
type metricsServiceExportClient struct {
grpc.ClientStream
}
func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) {
m := new(ExportMetricsServiceResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// MetricsServiceServer is the server API for MetricsService service.
// All implementations must embed UnimplementedMetricsServiceServer
// for forward compatibility
type MetricsServiceServer interface {
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(MetricsService_ExportServer) error
mustEmbedUnimplementedMetricsServiceServer()
}
// UnimplementedMetricsServiceServer must be embedded to have forward compatible implementations.
type UnimplementedMetricsServiceServer struct {
}
func (*UnimplementedMetricsServiceServer) Export(MetricsService_ExportServer) error {
return status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func (*UnimplementedMetricsServiceServer) mustEmbedUnimplementedMetricsServiceServer() {}
func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) {
s.RegisterService(&_MetricsService_serviceDesc, srv)
}
func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream})
}
type MetricsService_ExportServer interface {
Send(*ExportMetricsServiceResponse) error
Recv() (*ExportMetricsServiceRequest, error)
grpc.ServerStream
}
type metricsServiceExportServer struct {
grpc.ServerStream
}
func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) {
m := new(ExportMetricsServiceRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _MetricsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService",
HandlerType: (*MetricsServiceServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "Export",
Handler: _MetricsService_Export_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto",
}

@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.12.3
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: opencensus/proto/agent/trace/v1/trace_service.proto
// NOTE: This proto is experimental and is subject to change at this point.
@ -24,10 +24,13 @@
package v1
import (
context "context"
v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
@ -41,10 +44,6 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
type CurrentLibraryConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -342,7 +341,7 @@ var file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDesc = []byte{
0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67,
0x65, 0x6e, 0x74, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70,
0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0xa5, 0x01,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0xa9, 0x01,
0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x74, 0x72, 0x61, 0x63,
0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69,
@ -351,9 +350,10 @@ var file_opencensus_proto_agent_trace_v1_trace_service_proto_rawDesc = []byte{
0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65,
0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65,
0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65,
0x2f, 0x76, 0x31, 0xea, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73,
0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x72, 0x61,
0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x2f, 0x76, 0x31, 0xea, 0x02, 0x23, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73,
0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x3a, 0x3a,
0x54, 0x72, 0x61, 0x63, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var (
@ -472,3 +472,193 @@ func file_opencensus_proto_agent_trace_v1_trace_service_proto_init() {
file_opencensus_proto_agent_trace_v1_trace_service_proto_goTypes = nil
file_opencensus_proto_agent_trace_v1_trace_service_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// TraceServiceClient is the client API for TraceService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type TraceServiceClient interface {
// After initialization, this RPC must be kept alive for the entire life of
// the application. The agent pushes configs down to applications via a
// stream.
Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error)
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error)
}
type traceServiceClient struct {
cc grpc.ClientConnInterface
}
func NewTraceServiceClient(cc grpc.ClientConnInterface) TraceServiceClient {
return &traceServiceClient{cc}
}
func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) {
stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...)
if err != nil {
return nil, err
}
x := &traceServiceConfigClient{stream}
return x, nil
}
type TraceService_ConfigClient interface {
Send(*CurrentLibraryConfig) error
Recv() (*UpdatedLibraryConfig, error)
grpc.ClientStream
}
type traceServiceConfigClient struct {
grpc.ClientStream
}
func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error {
return x.ClientStream.SendMsg(m)
}
func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) {
m := new(UpdatedLibraryConfig)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) {
stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...)
if err != nil {
return nil, err
}
x := &traceServiceExportClient{stream}
return x, nil
}
type TraceService_ExportClient interface {
Send(*ExportTraceServiceRequest) error
Recv() (*ExportTraceServiceResponse, error)
grpc.ClientStream
}
type traceServiceExportClient struct {
grpc.ClientStream
}
func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) {
m := new(ExportTraceServiceResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// TraceServiceServer is the server API for TraceService service.
type TraceServiceServer interface {
// After initialization, this RPC must be kept alive for the entire life of
// the application. The agent pushes configs down to applications via a
// stream.
Config(TraceService_ConfigServer) error
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(TraceService_ExportServer) error
}
// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations.
type UnimplementedTraceServiceServer struct {
}
func (*UnimplementedTraceServiceServer) Config(TraceService_ConfigServer) error {
return status.Errorf(codes.Unimplemented, "method Config not implemented")
}
func (*UnimplementedTraceServiceServer) Export(TraceService_ExportServer) error {
return status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
s.RegisterService(&_TraceService_serviceDesc, srv)
}
func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream})
}
type TraceService_ConfigServer interface {
Send(*UpdatedLibraryConfig) error
Recv() (*CurrentLibraryConfig, error)
grpc.ServerStream
}
type traceServiceConfigServer struct {
grpc.ServerStream
}
func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error {
return x.ServerStream.SendMsg(m)
}
func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) {
m := new(CurrentLibraryConfig)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream})
}
type TraceService_ExportServer interface {
Send(*ExportTraceServiceResponse) error
Recv() (*ExportTraceServiceRequest, error)
grpc.ServerStream
}
type traceServiceExportServer struct {
grpc.ServerStream
}
func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) {
m := new(ExportTraceServiceRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _TraceService_serviceDesc = grpc.ServiceDesc{
ServiceName: "opencensus.proto.agent.trace.v1.TraceService",
HandlerType: (*TraceServiceServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "Config",
Handler: _TraceService_Config_Handler,
ServerStreams: true,
ClientStreams: true,
},
{
StreamName: "Export",
Handler: _TraceService_Export_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto",
}
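A small server-side sketch (not part of the diff) that embeds the regenerated UnimplementedTraceServiceServer (the mustEmbed requirement was dropped) and overrides only Export; the listen address is a placeholder and GetSpans is assumed from the trace proto:

package main

import (
    "log"
    "net"

    agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1"
    "google.golang.org/grpc"
)

type traceAgent struct {
    // Config falls back to the Unimplemented stub.
    agenttracepb.UnimplementedTraceServiceServer
}

func (a *traceAgent) Export(stream agenttracepb.TraceService_ExportServer) error {
    for {
        req, err := stream.Recv()
        if err != nil {
            return err
        }
        log.Printf("received %d spans", len(req.GetSpans()))
    }
}

func main() {
    lis, err := net.Listen("tcp", "localhost:55678") // placeholder address
    if err != nil {
        log.Fatal(err)
    }
    srv := grpc.NewServer()
    agenttracepb.RegisterTraceServiceServer(srv, &traceAgent{})
    log.Fatal(srv.Serve(lis))
}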

@ -13,14 +13,14 @@ import (
"io"
"net/http"
"github.com/golang/protobuf/descriptor"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// Suppress "imported and not used" errors
@ -29,7 +29,7 @@ var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = descriptor.ForMessage
var _ = metadata.Join
func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (TraceService_ExportClient, runtime.ServerMetadata, error) {
var metadata runtime.ServerMetadata
@ -55,15 +55,6 @@ func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marsha
}
return nil
}
if err := handleSend(); err != nil {
if cerr := stream.CloseSend(); cerr != nil {
grpclog.Infof("Failed to terminate client stream: %v", cerr)
}
if err == io.EOF {
return stream, metadata, nil
}
return nil, metadata, err
}
go func() {
for {
if err := handleSend(); err != nil {
@ -86,6 +77,7 @@ func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marsha
// RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux".
// UnaryRPC :call TraceServiceServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterTraceServiceHandlerFromEndpoint instead.
func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error {
mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
@ -140,7 +132,7 @@ func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/opencensus.proto.agent.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@ -160,7 +152,7 @@ func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu
}
var (
pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "", runtime.AssumeColonVerbOpt(true)))
pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, ""))
)
var (

@ -1,200 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
package v1
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// TraceServiceClient is the client API for TraceService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type TraceServiceClient interface {
// After initialization, this RPC must be kept alive for the entire life of
// the application. The agent pushes configs down to applications via a
// stream.
Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error)
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error)
}
type traceServiceClient struct {
cc grpc.ClientConnInterface
}
func NewTraceServiceClient(cc grpc.ClientConnInterface) TraceServiceClient {
return &traceServiceClient{cc}
}
func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) {
stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...)
if err != nil {
return nil, err
}
x := &traceServiceConfigClient{stream}
return x, nil
}
type TraceService_ConfigClient interface {
Send(*CurrentLibraryConfig) error
Recv() (*UpdatedLibraryConfig, error)
grpc.ClientStream
}
type traceServiceConfigClient struct {
grpc.ClientStream
}
func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error {
return x.ClientStream.SendMsg(m)
}
func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) {
m := new(UpdatedLibraryConfig)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) {
stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...)
if err != nil {
return nil, err
}
x := &traceServiceExportClient{stream}
return x, nil
}
type TraceService_ExportClient interface {
Send(*ExportTraceServiceRequest) error
Recv() (*ExportTraceServiceResponse, error)
grpc.ClientStream
}
type traceServiceExportClient struct {
grpc.ClientStream
}
func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) {
m := new(ExportTraceServiceResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// TraceServiceServer is the server API for TraceService service.
// All implementations must embed UnimplementedTraceServiceServer
// for forward compatibility
type TraceServiceServer interface {
// After initialization, this RPC must be kept alive for the entire life of
// the application. The agent pushes configs down to applications via a
// stream.
Config(TraceService_ConfigServer) error
// For performance reasons, it is recommended to keep this RPC
// alive for the entire life of the application.
Export(TraceService_ExportServer) error
mustEmbedUnimplementedTraceServiceServer()
}
// UnimplementedTraceServiceServer must be embedded to have forward compatible implementations.
type UnimplementedTraceServiceServer struct {
}
func (*UnimplementedTraceServiceServer) Config(TraceService_ConfigServer) error {
return status.Errorf(codes.Unimplemented, "method Config not implemented")
}
func (*UnimplementedTraceServiceServer) Export(TraceService_ExportServer) error {
return status.Errorf(codes.Unimplemented, "method Export not implemented")
}
func (*UnimplementedTraceServiceServer) mustEmbedUnimplementedTraceServiceServer() {}
func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
s.RegisterService(&_TraceService_serviceDesc, srv)
}
func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream})
}
type TraceService_ConfigServer interface {
Send(*UpdatedLibraryConfig) error
Recv() (*CurrentLibraryConfig, error)
grpc.ServerStream
}
type traceServiceConfigServer struct {
grpc.ServerStream
}
func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error {
return x.ServerStream.SendMsg(m)
}
func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) {
m := new(CurrentLibraryConfig)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream})
}
type TraceService_ExportServer interface {
Send(*ExportTraceServiceResponse) error
Recv() (*ExportTraceServiceRequest, error)
grpc.ServerStream
}
type traceServiceExportServer struct {
grpc.ServerStream
}
func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) {
m := new(ExportTraceServiceRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _TraceService_serviceDesc = grpc.ServiceDesc{
ServiceName: "opencensus.proto.agent.trace.v1.TraceService",
HandlerType: (*TraceServiceServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "Config",
Handler: _TraceService_Config_Handler,
ServerStreams: true,
ClientStreams: true,
},
{
StreamName: "Export",
Handler: _TraceService_Export_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto",
}

@ -19,19 +19,18 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.12.3
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: opencensus/proto/metrics/v1/metrics.proto
package v1
import (
v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
wrappers "github.com/golang/protobuf/ptypes/wrappers"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
reflect "reflect"
sync "sync"
)
@ -43,10 +42,6 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// The kind of metric. It describes how the data is reported.
//
// A gauge is an instantaneous measurement of a value.
@ -364,7 +359,7 @@ type TimeSeries struct {
// was reset to zero. Exclusive. The cumulative value is over the time interval
// (start_timestamp, timestamp]. If not specified, the backend can use the
// previous recorded value.
StartTimestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
StartTimestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
// The set of label values that uniquely identify this timeseries. Applies to
// all points. The order of label values must match that of label keys in the
// metric descriptor.
@ -406,7 +401,7 @@ func (*TimeSeries) Descriptor() ([]byte, []int) {
return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{3}
}
func (x *TimeSeries) GetStartTimestamp() *timestamp.Timestamp {
func (x *TimeSeries) GetStartTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.StartTimestamp
}
@ -493,7 +488,7 @@ type Point struct {
// The moment when this point was recorded. Inclusive.
// If not specified, the timestamp will be decided by the backend.
Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
// The actual point value.
//
// Types that are assignable to Value:
@ -536,7 +531,7 @@ func (*Point) Descriptor() ([]byte, []int) {
return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{5}
}
func (x *Point) GetTimestamp() *timestamp.Timestamp {
func (x *Point) GetTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.Timestamp
}
@ -721,11 +716,11 @@ type SummaryValue struct {
// The total number of recorded values since start_time. Optional since
// some systems don't expose this.
Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"`
Count *wrapperspb.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"`
// The total sum of recorded values since start_time. Optional since some
// systems don't expose this. If count is zero then this field must be zero.
// This field must be unset if the sum is not available.
Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"`
Sum *wrapperspb.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"`
// Values calculated over an arbitrary time window.
Snapshot *SummaryValue_Snapshot `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"`
}
@ -762,14 +757,14 @@ func (*SummaryValue) Descriptor() ([]byte, []int) {
return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{7}
}
func (x *SummaryValue) GetCount() *wrappers.Int64Value {
func (x *SummaryValue) GetCount() *wrapperspb.Int64Value {
if x != nil {
return x.Count
}
return nil
}
func (x *SummaryValue) GetSum() *wrappers.DoubleValue {
func (x *SummaryValue) GetSum() *wrapperspb.DoubleValue {
if x != nil {
return x.Sum
}
@ -926,7 +921,7 @@ type DistributionValue_Exemplar struct {
// belongs to.
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
// The observation (sampling) time of the above value.
Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
Timestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
// Contextual information about the example value.
Attachments map[string]string `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
@ -970,7 +965,7 @@ func (x *DistributionValue_Exemplar) GetValue() float64 {
return 0
}
func (x *DistributionValue_Exemplar) GetTimestamp() *timestamp.Timestamp {
func (x *DistributionValue_Exemplar) GetTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.Timestamp
}
@ -1048,11 +1043,11 @@ type SummaryValue_Snapshot struct {
// The number of values in the snapshot. Optional since some systems don't
// expose this.
Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"`
Count *wrapperspb.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"`
// The sum of values in the snapshot. Optional since some systems don't
// expose this. If count is zero then this field must be zero or not set
// (if not supported).
Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"`
Sum *wrapperspb.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"`
// A list of values at different percentiles of the distribution calculated
// from the current snapshot. The percentiles must be strictly increasing.
PercentileValues []*SummaryValue_Snapshot_ValueAtPercentile `protobuf:"bytes,3,rep,name=percentile_values,json=percentileValues,proto3" json:"percentile_values,omitempty"`
@ -1090,14 +1085,14 @@ func (*SummaryValue_Snapshot) Descriptor() ([]byte, []int) {
return file_opencensus_proto_metrics_v1_metrics_proto_rawDescGZIP(), []int{7, 0}
}
func (x *SummaryValue_Snapshot) GetCount() *wrappers.Int64Value {
func (x *SummaryValue_Snapshot) GetCount() *wrapperspb.Int64Value {
if x != nil {
return x.Count
}
return nil
}
func (x *SummaryValue_Snapshot) GetSum() *wrappers.DoubleValue {
func (x *SummaryValue_Snapshot) GetSum() *wrapperspb.DoubleValue {
if x != nil {
return x.Sum
}
@ -1351,7 +1346,7 @@ var file_opencensus_proto_metrics_v1_metrics_proto_rawDesc = []byte{
0x6c, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x69, 0x6c, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x69,
0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x94, 0x01, 0x0a, 0x1e, 0x69, 0x6f, 0x2e,
0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x97, 0x01, 0x0a, 0x1e, 0x69, 0x6f, 0x2e,
0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x4d, 0x65, 0x74,
0x72, 0x69, 0x63, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74,
@ -1359,9 +1354,9 @@ var file_opencensus_proto_metrics_v1_metrics_proto_rawDesc = []byte{
0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f,
0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76,
0x31, 0xea, 0x02, 0x1b, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x50,
0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x56, 0x31, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x31, 0xea, 0x02, 0x1e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x3a, 0x3a,
0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x3a, 0x3a,
0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -1396,9 +1391,9 @@ var file_opencensus_proto_metrics_v1_metrics_proto_goTypes = []interface{}{
(*SummaryValue_Snapshot)(nil), // 14: opencensus.proto.metrics.v1.SummaryValue.Snapshot
(*SummaryValue_Snapshot_ValueAtPercentile)(nil), // 15: opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile
(*v1.Resource)(nil), // 16: opencensus.proto.resource.v1.Resource
(*timestamp.Timestamp)(nil), // 17: google.protobuf.Timestamp
(*wrappers.Int64Value)(nil), // 18: google.protobuf.Int64Value
(*wrappers.DoubleValue)(nil), // 19: google.protobuf.DoubleValue
(*timestamppb.Timestamp)(nil), // 17: google.protobuf.Timestamp
(*wrapperspb.Int64Value)(nil), // 18: google.protobuf.Int64Value
(*wrapperspb.DoubleValue)(nil), // 19: google.protobuf.DoubleValue
}
var file_opencensus_proto_metrics_v1_metrics_proto_depIdxs = []int32{
2, // 0: opencensus.proto.metrics.v1.Metric.metric_descriptor:type_name -> opencensus.proto.metrics.v1.MetricDescriptor

View File

@ -14,14 +14,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.12.3
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: opencensus/proto/resource/v1/resource.proto
package v1
import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
@ -35,10 +34,6 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// Resource information.
type Resource struct {
state protoimpl.MessageState
@ -115,7 +110,7 @@ var file_opencensus_proto_resource_v1_resource_proto_rawDesc = []byte{
0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
0x02, 0x38, 0x01, 0x42, 0x98, 0x01, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63,
0x02, 0x38, 0x01, 0x42, 0x9b, 0x01, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63,
0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
@ -123,9 +118,9 @@ var file_opencensus_proto_resource_v1_resource_proto_rawDesc = []byte{
0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e,
0x2d, 0x67, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xea,
0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x50, 0x72, 0x6f,
0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x3a, 0x3a, 0x50, 0x72,
0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x3a, 0x56,
0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (

View File

@ -14,19 +14,18 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.12.3
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: opencensus/proto/trace/v1/trace.proto
package v1
import (
v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
wrappers "github.com/golang/protobuf/ptypes/wrappers"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
reflect "reflect"
sync "sync"
)
@ -38,10 +37,6 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// Type of span. Can be used to specify additional relationships between spans
// in addition to a parent/child relationship.
type Span_SpanKind int32
@ -272,7 +267,7 @@ type Span struct {
// keep end_time > start_time for consistency.
//
// This field is required.
StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
StartTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// The end time of the span. On the client side, this is the time kept by
// the local machine where the span execution ends. On the server side, this
// is the time when the server application handler stops running.
@ -282,7 +277,7 @@ type Span struct {
// keep end_time > start_time for consistency.
//
// This field is required.
EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
EndTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
// A set of attributes on the span.
Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"`
// A stack trace captured at the start of the span.
@ -304,10 +299,10 @@ type Span struct {
// to the same process as the current span. This flag is most commonly
// used to indicate the need to adjust time as clocks in different
// processes may not be synchronized.
SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"`
SameProcessAsParentSpan *wrapperspb.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"`
// An optional number of child spans that were generated while this span
// was active. If set, allows an implementation to detect missing child spans.
ChildSpanCount *wrappers.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"`
ChildSpanCount *wrapperspb.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"`
}
func (x *Span) Reset() {
@ -384,14 +379,14 @@ func (x *Span) GetKind() Span_SpanKind {
return Span_SPAN_KIND_UNSPECIFIED
}
func (x *Span) GetStartTime() *timestamp.Timestamp {
func (x *Span) GetStartTime() *timestamppb.Timestamp {
if x != nil {
return x.StartTime
}
return nil
}
func (x *Span) GetEndTime() *timestamp.Timestamp {
func (x *Span) GetEndTime() *timestamppb.Timestamp {
if x != nil {
return x.EndTime
}
@ -440,14 +435,14 @@ func (x *Span) GetResource() *v1.Resource {
return nil
}
func (x *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue {
func (x *Span) GetSameProcessAsParentSpan() *wrapperspb.BoolValue {
if x != nil {
return x.SameProcessAsParentSpan
}
return nil
}
func (x *Span) GetChildSpanCount() *wrappers.UInt32Value {
func (x *Span) GetChildSpanCount() *wrapperspb.UInt32Value {
if x != nil {
return x.ChildSpanCount
}
@ -952,7 +947,7 @@ type Span_TimeEvent struct {
unknownFields protoimpl.UnknownFields
// The time the event occurred.
Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
Time *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
// A `TimeEvent` can contain either an `Annotation` object or a
// `MessageEvent` object, but not both.
//
@ -994,7 +989,7 @@ func (*Span_TimeEvent) Descriptor() ([]byte, []int) {
return file_opencensus_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0, 2}
}
func (x *Span_TimeEvent) GetTime() *timestamp.Timestamp {
func (x *Span_TimeEvent) GetTime() *timestamppb.Timestamp {
if x != nil {
return x.Time
}
@ -1899,16 +1894,16 @@ var file_opencensus_proto_trace_v1_trace_proto_rawDesc = []byte{
0x6c, 0x75, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64,
0x5f, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
0x05, 0x52, 0x12, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65,
0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x8c, 0x01, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65,
0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x8f, 0x01, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65,
0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f,
0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65,
0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73,
0x75, 0x73, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f,
0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x19, 0x4f, 0x70, 0x65, 0x6e, 0x43,
0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x63,
0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x43,
0x65, 0x6e, 0x73, 0x75, 0x73, 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x54, 0x72,
0x61, 0x63, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -1947,10 +1942,10 @@ var file_opencensus_proto_trace_v1_trace_proto_goTypes = []interface{}{
(*Span_TimeEvent_MessageEvent)(nil), // 18: opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent
(*StackTrace_StackFrame)(nil), // 19: opencensus.proto.trace.v1.StackTrace.StackFrame
(*StackTrace_StackFrames)(nil), // 20: opencensus.proto.trace.v1.StackTrace.StackFrames
(*timestamp.Timestamp)(nil), // 21: google.protobuf.Timestamp
(*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp
(*v1.Resource)(nil), // 22: opencensus.proto.resource.v1.Resource
(*wrappers.BoolValue)(nil), // 23: google.protobuf.BoolValue
(*wrappers.UInt32Value)(nil), // 24: google.protobuf.UInt32Value
(*wrapperspb.BoolValue)(nil), // 23: google.protobuf.BoolValue
(*wrapperspb.UInt32Value)(nil), // 24: google.protobuf.UInt32Value
}
var file_opencensus_proto_trace_v1_trace_proto_depIdxs = []int32{
9, // 0: opencensus.proto.trace.v1.Span.tracestate:type_name -> opencensus.proto.trace.v1.Span.Tracestate

View File

@ -14,14 +14,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc v3.12.3
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: opencensus/proto/trace/v1/trace_config.proto
package v1
import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
@ -35,10 +34,6 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4
// How spans should be sampled:
// - Always off
// - Always on
@ -432,7 +427,7 @@ var file_opencensus_proto_trace_v1_trace_config_proto_rawDesc = []byte{
0x12, 0x11, 0x0a, 0x0d, 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x5f, 0x50, 0x41, 0x52, 0x45, 0x4e,
0x54, 0x10, 0x02, 0x22, 0x27, 0x0a, 0x13, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74,
0x69, 0x6e, 0x67, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x71, 0x70,
0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x71, 0x70, 0x73, 0x42, 0x92, 0x01, 0x0a,
0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x71, 0x70, 0x73, 0x42, 0x95, 0x01, 0x0a,
0x1c, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x54,
0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
@ -440,9 +435,9 @@ var file_opencensus_proto_trace_v1_trace_config_proto_rawDesc = []byte{
0x6e, 0x73, 0x75, 0x73, 0x2d, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2d,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x74, 0x72, 0x61,
0x63, 0x65, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x19, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73,
0x75, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56,
0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x63, 0x65, 0x2f, 0x76, 0x31, 0xea, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73,
0x75, 0x73, 0x3a, 0x3a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3a, 0x54, 0x72, 0x61, 0x63, 0x65,
0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (

View File

@ -3,8 +3,7 @@
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.
@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
This implementation provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.
The package is written with optimized pure Go and also contains even faster
assembly implementations for amd64 and arm64. If desired, the `purego` build tag
opts into using the Go code even on those architectures.
[xxHash]: http://cyan4973.github.io/xxHash/
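For reference, a minimal usage sketch of the API described above (the strings hashed here are arbitrary; building with `-tags purego` selects the pure-Go code paths instead of the assembly ones):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a string.
	fmt.Printf("%016x\n", xxhash.Sum64String("hello, world"))

	// Streaming hashing via the Digest, which implements hash.Hash64.
	// Both calls print the same 64-bit hash since the input is identical.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.WriteString("world")
	fmt.Printf("%016x\n", d.Sum64())
}
```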
## Compatibility
@ -45,19 +47,20 @@ I recommend using the latest release of Go.
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.
| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
| input size | purego | asm |
| ---------- | --------- | --------- |
| 4 B | 1.3 GB/s | 1.2 GB/s |
| 16 B | 2.9 GB/s | 3.5 GB/s |
| 100 B | 6.9 GB/s | 8.1 GB/s |
| 4 KB | 11.7 GB/s | 16.7 GB/s |
| 10 MB | 12.0 GB/s | 17.3 GB/s |
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:
These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
CPU using the following commands under Go 1.19.2:
```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```
## Projects using this package


vendor/github.com/cespare/xxhash/v2/testall.sh generated vendored Normal file
View File

@ -0,0 +1,10 @@
#!/bin/bash
set -eu -o pipefail
# Small convenience script for running the tests with various combinations of
# arch/tags. This assumes we're running on amd64 and have qemu available.
go test ./...
go test -tags purego ./...
GOARCH=arm64 go test
GOARCH=arm64 go test -tags purego

View File

@ -16,19 +16,11 @@ const (
prime5 uint64 = 2870177450012600261
)
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
prime1v = prime1
prime2v = prime2
prime3v = prime3
prime4v = prime4
prime5v = prime5
)
// Store the primes in an array as well.
//
// The consts are used when possible in Go code to avoid MOVs but we need a
// contiguous array for the assembly code.
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
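To illustrate the point the comments above make about constant versus variable arithmetic, a tiny sketch (using the standard XXH64 prime values; the variable copy mirrors the old prime1v / new primes[0] trick): arithmetic on typed constants must stay representable, so `prime1 + prime2` is rejected at compile time, while the same arithmetic on variables simply wraps modulo 2^64.

```go
package main

import "fmt"

const (
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
)

// A variable copy of prime1, only for demonstration.
var prime1v = prime1

func main() {
	// v1 := prime1 + prime2 // compile error: constant overflows uint64
	v1 := prime1v + prime2 // variable arithmetic wraps around, as intended
	fmt.Println(v1)
}
```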
// Digest implements hash.Hash64.
type Digest struct {
@ -50,10 +42,10 @@ func New() *Digest {
// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
d.v1 = prime1v + prime2
d.v1 = primes[0] + prime2
d.v2 = prime2
d.v3 = 0
d.v4 = -prime1v
d.v4 = -primes[0]
d.total = 0
d.n = 0
}
@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b)
d.total += uint64(n)
memleft := d.mem[d.n&(len(d.mem)-1):]
if d.n+n < 32 {
// This new data doesn't even fill the current block.
copy(d.mem[d.n:], b)
copy(memleft, b)
d.n += n
return
}
if d.n > 0 {
// Finish off the partial block.
copy(d.mem[d.n:], b)
c := copy(memleft, b)
d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32]))
b = b[32-d.n:]
b = b[c:]
d.n = 0
}
@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {
h += d.total
i, end := 0, d.n
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(d.mem[i:i+8]))
b := d.mem[:d.n&(len(d.mem)-1)]
for ; len(b) >= 8; b = b[8:] {
k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(d.mem[i:i+4])) * prime1
if len(b) >= 4 {
h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
b = b[4:]
}
for i < end {
h ^= uint64(d.mem[i]) * prime5
for ; len(b) > 0; b = b[1:] {
h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
i++
}
h ^= h >> 33

View File

@ -1,215 +1,209 @@
//go:build !appengine && gc && !purego
// +build !appengine
// +build gc
// +build !purego
#include "textflag.h"
// Register allocation:
// AX h
// SI pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
// R9 v2
// R10 v3
// R11 v4
// R12 tmp
// R13 prime1v
// R14 prime2v
// DI prime4v
// Registers:
#define h AX
#define d AX
#define p SI // pointer to advance through b
#define n DX
#define end BX // loop end
#define v1 R8
#define v2 R9
#define v3 R10
#define v4 R11
#define x R12
#define prime1 R13
#define prime2 R14
#define prime4 DI
// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
MOVQ (SI), R12 \
ADDQ $8, SI \
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
#define round(acc, x) \
IMULQ prime2, x \
ADDQ x, acc \
ROLQ $31, acc \
IMULQ prime1, acc
// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
IMULQ R14, val \
ROLQ $31, val \
IMULQ R13, val \
XORQ val, acc \
IMULQ R13, acc \
ADDQ DI, acc
// round0 performs the operation x = round(0, x).
#define round0(x) \
IMULQ prime2, x \
ROLQ $31, x \
IMULQ prime1, x
// mergeRound applies a merge round on the two registers acc and x.
// It assumes that prime1, prime2, and prime4 have been loaded.
#define mergeRound(acc, x) \
round0(x) \
XORQ x, acc \
IMULQ prime1, acc \
ADDQ prime4, acc
// blockLoop processes as many 32-byte blocks as possible,
// updating v1, v2, v3, and v4. It assumes that there is at least one block
// to process.
#define blockLoop() \
loop: \
MOVQ +0(p), x \
round(v1, x) \
MOVQ +8(p), x \
round(v2, x) \
MOVQ +16(p), x \
round(v3, x) \
MOVQ +24(p), x \
round(v4, x) \
ADDQ $32, p \
CMPQ p, end \
JLE loop
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
// Load fixed primes.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
MOVQ ·prime4v(SB), DI
MOVQ ·primes+0(SB), prime1
MOVQ ·primes+8(SB), prime2
MOVQ ·primes+24(SB), prime4
// Load slice.
MOVQ b_base+0(FP), SI
MOVQ b_len+8(FP), DX
LEAQ (SI)(DX*1), BX
MOVQ b_base+0(FP), p
MOVQ b_len+8(FP), n
LEAQ (p)(n*1), end
// The first loop limit will be len(b)-32.
SUBQ $32, BX
SUBQ $32, end
// Check whether we have at least one block.
CMPQ DX, $32
CMPQ n, $32
JLT noBlocks
// Set up initial state (v1, v2, v3, v4).
MOVQ R13, R8
ADDQ R14, R8
MOVQ R14, R9
XORQ R10, R10
XORQ R11, R11
SUBQ R13, R11
MOVQ prime1, v1
ADDQ prime2, v1
MOVQ prime2, v2
XORQ v3, v3
XORQ v4, v4
SUBQ prime1, v4
// Loop until SI > BX.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
blockLoop()
CMPQ SI, BX
JLE blockLoop
MOVQ v1, h
ROLQ $1, h
MOVQ v2, x
ROLQ $7, x
ADDQ x, h
MOVQ v3, x
ROLQ $12, x
ADDQ x, h
MOVQ v4, x
ROLQ $18, x
ADDQ x, h
MOVQ R8, AX
ROLQ $1, AX
MOVQ R9, R12
ROLQ $7, R12
ADDQ R12, AX
MOVQ R10, R12
ROLQ $12, R12
ADDQ R12, AX
MOVQ R11, R12
ROLQ $18, R12
ADDQ R12, AX
mergeRound(AX, R8)
mergeRound(AX, R9)
mergeRound(AX, R10)
mergeRound(AX, R11)
mergeRound(h, v1)
mergeRound(h, v2)
mergeRound(h, v3)
mergeRound(h, v4)
JMP afterBlocks
noBlocks:
MOVQ ·prime5v(SB), AX
MOVQ ·primes+32(SB), h
afterBlocks:
ADDQ DX, AX
ADDQ n, h
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
ADDQ $24, BX
ADDQ $24, end
CMPQ p, end
JG try4
CMPQ SI, BX
JG fourByte
loop8:
MOVQ (p), x
ADDQ $8, p
round0(x)
XORQ x, h
ROLQ $27, h
IMULQ prime1, h
ADDQ prime4, h
wordLoop:
// Calculate k1.
MOVQ (SI), R8
ADDQ $8, SI
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
CMPQ p, end
JLE loop8
XORQ R8, AX
ROLQ $27, AX
IMULQ R13, AX
ADDQ DI, AX
try4:
ADDQ $4, end
CMPQ p, end
JG try1
CMPQ SI, BX
JLE wordLoop
MOVL (p), x
ADDQ $4, p
IMULQ prime1, x
XORQ x, h
fourByte:
ADDQ $4, BX
CMPQ SI, BX
JG singles
ROLQ $23, h
IMULQ prime2, h
ADDQ ·primes+16(SB), h
MOVL (SI), R8
ADDQ $4, SI
IMULQ R13, R8
XORQ R8, AX
ROLQ $23, AX
IMULQ R14, AX
ADDQ ·prime3v(SB), AX
singles:
ADDQ $4, BX
CMPQ SI, BX
try1:
ADDQ $4, end
CMPQ p, end
JGE finalize
singlesLoop:
MOVBQZX (SI), R12
ADDQ $1, SI
IMULQ ·prime5v(SB), R12
XORQ R12, AX
loop1:
MOVBQZX (p), x
ADDQ $1, p
IMULQ ·primes+32(SB), x
XORQ x, h
ROLQ $11, h
IMULQ prime1, h
ROLQ $11, AX
IMULQ R13, AX
CMPQ SI, BX
JL singlesLoop
CMPQ p, end
JL loop1
finalize:
MOVQ AX, R12
SHRQ $33, R12
XORQ R12, AX
IMULQ R14, AX
MOVQ AX, R12
SHRQ $29, R12
XORQ R12, AX
IMULQ ·prime3v(SB), AX
MOVQ AX, R12
SHRQ $32, R12
XORQ R12, AX
MOVQ h, x
SHRQ $33, x
XORQ x, h
IMULQ prime2, h
MOVQ h, x
SHRQ $29, x
XORQ x, h
IMULQ ·primes+16(SB), h
MOVQ h, x
SHRQ $32, x
XORQ x, h
MOVQ AX, ret+24(FP)
MOVQ h, ret+24(FP)
RET
// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.
// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
// Load fixed primes needed for round.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
MOVQ ·primes+0(SB), prime1
MOVQ ·primes+8(SB), prime2
// Load slice.
MOVQ b_base+8(FP), SI
MOVQ b_len+16(FP), DX
LEAQ (SI)(DX*1), BX
SUBQ $32, BX
MOVQ b_base+8(FP), p
MOVQ b_len+16(FP), n
LEAQ (p)(n*1), end
SUBQ $32, end
// Load vN from d.
MOVQ d+0(FP), AX
MOVQ 0(AX), R8 // v1
MOVQ 8(AX), R9 // v2
MOVQ 16(AX), R10 // v3
MOVQ 24(AX), R11 // v4
MOVQ s+0(FP), d
MOVQ 0(d), v1
MOVQ 8(d), v2
MOVQ 16(d), v3
MOVQ 24(d), v4
// We don't need to check the loop condition here; this function is
// always called with at least one block of data to process.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ SI, BX
JLE blockLoop
blockLoop()
// Copy vN back to d.
MOVQ R8, 0(AX)
MOVQ R9, 8(AX)
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
MOVQ v1, 0(d)
MOVQ v2, 8(d)
MOVQ v3, 16(d)
MOVQ v4, 24(d)
// The number of bytes written is SI minus the old base pointer.
SUBQ b_base+8(FP), SI
MOVQ SI, ret+32(FP)
// The number of bytes written is p minus the old base pointer.
SUBQ b_base+8(FP), p
MOVQ p, ret+32(FP)
RET

vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s generated vendored Normal file
View File

@ -0,0 +1,183 @@
//go:build !appengine && gc && !purego
// +build !appengine
// +build gc
// +build !purego
#include "textflag.h"
// Registers:
#define digest R1
#define h R2 // return value
#define p R3 // input pointer
#define n R4 // input length
#define nblocks R5 // n / 32
#define prime1 R7
#define prime2 R8
#define prime3 R9
#define prime4 R10
#define prime5 R11
#define v1 R12
#define v2 R13
#define v3 R14
#define v4 R15
#define x1 R20
#define x2 R21
#define x3 R22
#define x4 R23
#define round(acc, x) \
MADD prime2, acc, x, acc \
ROR $64-31, acc \
MUL prime1, acc
// round0 performs the operation x = round(0, x).
#define round0(x) \
MUL prime2, x \
ROR $64-31, x \
MUL prime1, x
#define mergeRound(acc, x) \
round0(x) \
EOR x, acc \
MADD acc, prime4, prime1, acc
// blockLoop processes as many 32-byte blocks as possible,
// updating v1, v2, v3, and v4. It assumes that n >= 32.
#define blockLoop() \
LSR $5, n, nblocks \
PCALIGN $16 \
loop: \
LDP.P 16(p), (x1, x2) \
LDP.P 16(p), (x3, x4) \
round(v1, x1) \
round(v2, x2) \
round(v3, x3) \
round(v4, x4) \
SUB $1, nblocks \
CBNZ nblocks, loop
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
LDP b_base+0(FP), (p, n)
LDP ·primes+0(SB), (prime1, prime2)
LDP ·primes+16(SB), (prime3, prime4)
MOVD ·primes+32(SB), prime5
CMP $32, n
CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
BLT afterLoop
ADD prime1, prime2, v1
MOVD prime2, v2
MOVD $0, v3
NEG prime1, v4
blockLoop()
ROR $64-1, v1, x1
ROR $64-7, v2, x2
ADD x1, x2
ROR $64-12, v3, x3
ROR $64-18, v4, x4
ADD x3, x4
ADD x2, x4, h
mergeRound(h, v1)
mergeRound(h, v2)
mergeRound(h, v3)
mergeRound(h, v4)
afterLoop:
ADD n, h
TBZ $4, n, try8
LDP.P 16(p), (x1, x2)
round0(x1)
// NOTE: here and below, sequencing the EOR after the ROR (using a
// rotated register) is worth a small but measurable speedup for small
// inputs.
ROR $64-27, h
EOR x1 @> 64-27, h, h
MADD h, prime4, prime1, h
round0(x2)
ROR $64-27, h
EOR x2 @> 64-27, h, h
MADD h, prime4, prime1, h
try8:
TBZ $3, n, try4
MOVD.P 8(p), x1
round0(x1)
ROR $64-27, h
EOR x1 @> 64-27, h, h
MADD h, prime4, prime1, h
try4:
TBZ $2, n, try2
MOVWU.P 4(p), x2
MUL prime1, x2
ROR $64-23, h
EOR x2 @> 64-23, h, h
MADD h, prime3, prime2, h
try2:
TBZ $1, n, try1
MOVHU.P 2(p), x3
AND $255, x3, x1
LSR $8, x3, x2
MUL prime5, x1
ROR $64-11, h
EOR x1 @> 64-11, h, h
MUL prime1, h
MUL prime5, x2
ROR $64-11, h
EOR x2 @> 64-11, h, h
MUL prime1, h
try1:
TBZ $0, n, finalize
MOVBU (p), x4
MUL prime5, x4
ROR $64-11, h
EOR x4 @> 64-11, h, h
MUL prime1, h
finalize:
EOR h >> 33, h
MUL prime2, h
EOR h >> 29, h
MUL prime3, h
EOR h >> 32, h
MOVD h, ret+24(FP)
RET
// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
LDP ·primes+0(SB), (prime1, prime2)
// Load state. Assume v[1-4] are stored contiguously.
MOVD d+0(FP), digest
LDP 0(digest), (v1, v2)
LDP 16(digest), (v3, v4)
LDP b_base+8(FP), (p, n)
blockLoop()
// Store updated state.
STP (v1, v2), 0(digest)
STP (v3, v4), 16(digest)
BIC $31, n
MOVD n, ret+32(FP)
RET

View File

@ -1,3 +1,5 @@
//go:build (amd64 || arm64) && !appengine && gc && !purego
// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego

View File

@ -1,4 +1,5 @@
// +build !amd64 appengine !gc purego
//go:build (!amd64 && !arm64) || appengine || !gc || purego
// +build !amd64,!arm64 appengine !gc purego
package xxhash
@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
var h uint64
if n >= 32 {
v1 := prime1v + prime2
v1 := primes[0] + prime2
v2 := prime2
v3 := uint64(0)
v4 := -prime1v
v4 := -primes[0]
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {
h += uint64(n)
i, end := 0, len(b)
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(b[i:i+8:len(b)]))
for ; len(b) >= 8; b = b[8:] {
k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
if len(b) >= 4 {
h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
b = b[4:]
}
for ; i < end; i++ {
h ^= uint64(b[i]) * prime5
for ; len(b) > 0; b = b[1:] {
h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
}

View File

@ -1,3 +1,4 @@
//go:build appengine
// +build appengine
// This file contains the safe implementations of otherwise unsafe-using code.

View File

@ -1,3 +1,4 @@
//go:build !appengine
// +build !appengine
// This file encapsulates usage of unsafe.
@ -11,7 +12,7 @@ import (
// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.

View File

@ -1,180 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package descriptor provides functions for obtaining the protocol buffer
// descriptors of generated Go types.
//
// Deprecated: See the "google.golang.org/protobuf/reflect/protoreflect" package
// for how to obtain an EnumDescriptor or MessageDescriptor in order to
// programmatically interact with the protobuf type system.
package descriptor
import (
"bytes"
"compress/gzip"
"io/ioutil"
"sync"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/reflect/protodesc"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoimpl"
descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)
// Message is proto.Message with a method to return its descriptor.
//
// Deprecated: The Descriptor method may not be generated by future
// versions of protoc-gen-go, meaning that this interface may not
// be implemented by many concrete message types.
type Message interface {
proto.Message
Descriptor() ([]byte, []int)
}
// ForMessage returns the file descriptor proto containing
// the message and the message descriptor proto for the message itself.
// The returned proto messages must not be mutated.
//
// Deprecated: Not all concrete message types satisfy the Message interface.
// Use MessageDescriptorProto instead. If possible, the calling code should
// be rewritten to use protobuf reflection instead.
// See package "google.golang.org/protobuf/reflect/protoreflect" for details.
func ForMessage(m Message) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) {
return MessageDescriptorProto(m)
}
type rawDesc struct {
fileDesc []byte
indexes []int
}
var rawDescCache sync.Map // map[protoreflect.Descriptor]*rawDesc
func deriveRawDescriptor(d protoreflect.Descriptor) ([]byte, []int) {
// Fast-path: check whether raw descriptors are already cached.
origDesc := d
if v, ok := rawDescCache.Load(origDesc); ok {
return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes
}
// Slow-path: derive the raw descriptor from the v2 descriptor.
// Start with the leaf (a given enum or message declaration) and
// ascend upwards until we hit the parent file descriptor.
var idxs []int
for {
idxs = append(idxs, d.Index())
d = d.Parent()
if d == nil {
// TODO: We could construct a FileDescriptor stub for standalone
// descriptors to satisfy the API.
return nil, nil
}
if _, ok := d.(protoreflect.FileDescriptor); ok {
break
}
}
// Obtain the raw file descriptor.
fd := d.(protoreflect.FileDescriptor)
b, _ := proto.Marshal(protodesc.ToFileDescriptorProto(fd))
file := protoimpl.X.CompressGZIP(b)
// Reverse the indexes, since we populated it in reverse.
for i, j := 0, len(idxs)-1; i < j; i, j = i+1, j-1 {
idxs[i], idxs[j] = idxs[j], idxs[i]
}
if v, ok := rawDescCache.LoadOrStore(origDesc, &rawDesc{file, idxs}); ok {
return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes
}
return file, idxs
}
// EnumRawDescriptor returns the GZIP'd raw file descriptor representing
// the enum and the index path to reach the enum declaration.
// The returned slices must not be mutated.
func EnumRawDescriptor(e proto.GeneratedEnum) ([]byte, []int) {
if ev, ok := e.(interface{ EnumDescriptor() ([]byte, []int) }); ok {
return ev.EnumDescriptor()
}
ed := protoimpl.X.EnumTypeOf(e)
return deriveRawDescriptor(ed.Descriptor())
}
// MessageRawDescriptor returns the GZIP'd raw file descriptor representing
// the message and the index path to reach the message declaration.
// The returned slices must not be mutated.
func MessageRawDescriptor(m proto.GeneratedMessage) ([]byte, []int) {
if mv, ok := m.(interface{ Descriptor() ([]byte, []int) }); ok {
return mv.Descriptor()
}
md := protoimpl.X.MessageTypeOf(m)
return deriveRawDescriptor(md.Descriptor())
}
var fileDescCache sync.Map // map[*byte]*descriptorpb.FileDescriptorProto
func deriveFileDescriptor(rawDesc []byte) *descriptorpb.FileDescriptorProto {
// Fast-path: check whether descriptor protos are already cached.
if v, ok := fileDescCache.Load(&rawDesc[0]); ok {
return v.(*descriptorpb.FileDescriptorProto)
}
// Slow-path: derive the descriptor proto from the GZIP'd message.
zr, err := gzip.NewReader(bytes.NewReader(rawDesc))
if err != nil {
panic(err)
}
b, err := ioutil.ReadAll(zr)
if err != nil {
panic(err)
}
fd := new(descriptorpb.FileDescriptorProto)
if err := proto.Unmarshal(b, fd); err != nil {
panic(err)
}
if v, ok := fileDescCache.LoadOrStore(&rawDesc[0], fd); ok {
return v.(*descriptorpb.FileDescriptorProto)
}
return fd
}
// EnumDescriptorProto returns the file descriptor proto representing
// the enum and the enum descriptor proto for the enum itself.
// The returned proto messages must not be mutated.
func EnumDescriptorProto(e proto.GeneratedEnum) (*descriptorpb.FileDescriptorProto, *descriptorpb.EnumDescriptorProto) {
rawDesc, idxs := EnumRawDescriptor(e)
if rawDesc == nil || idxs == nil {
return nil, nil
}
fd := deriveFileDescriptor(rawDesc)
if len(idxs) == 1 {
return fd, fd.EnumType[idxs[0]]
}
md := fd.MessageType[idxs[0]]
for _, i := range idxs[1 : len(idxs)-1] {
md = md.NestedType[i]
}
ed := md.EnumType[idxs[len(idxs)-1]]
return fd, ed
}
// MessageDescriptorProto returns the file descriptor proto representing
// the message and the message descriptor proto for the message itself.
// The returned proto messages must not be mutated.
func MessageDescriptorProto(m proto.GeneratedMessage) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) {
rawDesc, idxs := MessageRawDescriptor(m)
if rawDesc == nil || idxs == nil {
return nil, nil
}
fd := deriveFileDescriptor(rawDesc)
md := fd.MessageType[idxs[0]]
for _, i := range idxs[1:] {
md = md.NestedType[i]
}
return fd, md
}

View File

@ -386,8 +386,14 @@ func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error
}
func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
if fd.Cardinality() == protoreflect.Repeated {
return false
}
if md := fd.Message(); md != nil {
return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated
return md.FullName() == "google.protobuf.Value"
}
if ed := fd.Enum(); ed != nil {
return ed.FullName() == "google.protobuf.NullValue"
}
return false
}

View File

@ -1,200 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
package descriptor
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
descriptorpb "google.golang.org/protobuf/types/descriptorpb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/descriptor.proto.
type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type
const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE
const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT
const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64
const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64
const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32
const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64
const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32
const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL
const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING
const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP
const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE
const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES
const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32
const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM
const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32
const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64
const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32
const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64
var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name
var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value
type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label
const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL
const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED
const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED
var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name
var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value
type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode
const FileOptions_SPEED = descriptorpb.FileOptions_SPEED
const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE
const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME
var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name
var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value
type FieldOptions_CType = descriptorpb.FieldOptions_CType
const FieldOptions_STRING = descriptorpb.FieldOptions_STRING
const FieldOptions_CORD = descriptorpb.FieldOptions_CORD
const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE
var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name
var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value
type FieldOptions_JSType = descriptorpb.FieldOptions_JSType
const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL
const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING
const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER
var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name
var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value
type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel
const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN
const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS
const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT
var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name
var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value
type FileDescriptorSet = descriptorpb.FileDescriptorSet
type FileDescriptorProto = descriptorpb.FileDescriptorProto
type DescriptorProto = descriptorpb.DescriptorProto
type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions
type FieldDescriptorProto = descriptorpb.FieldDescriptorProto
type OneofDescriptorProto = descriptorpb.OneofDescriptorProto
type EnumDescriptorProto = descriptorpb.EnumDescriptorProto
type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto
type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto
type MethodDescriptorProto = descriptorpb.MethodDescriptorProto
const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming
const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming
type FileOptions = descriptorpb.FileOptions
const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles
const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8
const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor
const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices
const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices
const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices
const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices
const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated
const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas
type MessageOptions = descriptorpb.MessageOptions
const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat
const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor
const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated
type FieldOptions = descriptorpb.FieldOptions
const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype
const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype
const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy
const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated
const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak
type OneofOptions = descriptorpb.OneofOptions
type EnumOptions = descriptorpb.EnumOptions
const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated
type EnumValueOptions = descriptorpb.EnumValueOptions
const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated
type ServiceOptions = descriptorpb.ServiceOptions
const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated
type MethodOptions = descriptorpb.MethodOptions
const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated
const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel
type UninterpretedOption = descriptorpb.UninterpretedOption
type SourceCodeInfo = descriptorpb.SourceCodeInfo
type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo
type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange
type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange
type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange
type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart
type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location
type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation
var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{
0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65,
0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b,
0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x32,
}
var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() }
func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() {
if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil
}

View File

@ -1,71 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
package wrappers
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/wrappers.proto.
type DoubleValue = wrapperspb.DoubleValue
type FloatValue = wrapperspb.FloatValue
type Int64Value = wrapperspb.Int64Value
type UInt64Value = wrapperspb.UInt64Value
type Int32Value = wrapperspb.Int32Value
type UInt32Value = wrapperspb.UInt32Value
type BoolValue = wrapperspb.BoolValue
type StringValue = wrapperspb.StringValue
type BytesValue = wrapperspb.BytesValue
var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61,
0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61,
0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() }
func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() {
if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil
}


@ -1,189 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: internal/errors.proto
package internal
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
any "github.com/golang/protobuf/ptypes/any"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Error is the generic error returned from unary RPCs.
type Error struct {
Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
// This is to make the error more compatible with users that expect errors to be Status objects:
// https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
// It should be the exact same message as the Error field.
Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
Details []*any.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Error) Reset() { *m = Error{} }
func (m *Error) String() string { return proto.CompactTextString(m) }
func (*Error) ProtoMessage() {}
func (*Error) Descriptor() ([]byte, []int) {
return fileDescriptor_9b093362ca6d1e03, []int{0}
}
func (m *Error) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Error.Unmarshal(m, b)
}
func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Error.Marshal(b, m, deterministic)
}
func (m *Error) XXX_Merge(src proto.Message) {
xxx_messageInfo_Error.Merge(m, src)
}
func (m *Error) XXX_Size() int {
return xxx_messageInfo_Error.Size(m)
}
func (m *Error) XXX_DiscardUnknown() {
xxx_messageInfo_Error.DiscardUnknown(m)
}
var xxx_messageInfo_Error proto.InternalMessageInfo
func (m *Error) GetError() string {
if m != nil {
return m.Error
}
return ""
}
func (m *Error) GetCode() int32 {
if m != nil {
return m.Code
}
return 0
}
func (m *Error) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *Error) GetDetails() []*any.Any {
if m != nil {
return m.Details
}
return nil
}
// StreamError is a response type which is returned when a
// streaming RPC returns an error.
type StreamError struct {
GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"`
HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"`
Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"`
Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *StreamError) Reset() { *m = StreamError{} }
func (m *StreamError) String() string { return proto.CompactTextString(m) }
func (*StreamError) ProtoMessage() {}
func (*StreamError) Descriptor() ([]byte, []int) {
return fileDescriptor_9b093362ca6d1e03, []int{1}
}
func (m *StreamError) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StreamError.Unmarshal(m, b)
}
func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StreamError.Marshal(b, m, deterministic)
}
func (m *StreamError) XXX_Merge(src proto.Message) {
xxx_messageInfo_StreamError.Merge(m, src)
}
func (m *StreamError) XXX_Size() int {
return xxx_messageInfo_StreamError.Size(m)
}
func (m *StreamError) XXX_DiscardUnknown() {
xxx_messageInfo_StreamError.DiscardUnknown(m)
}
var xxx_messageInfo_StreamError proto.InternalMessageInfo
func (m *StreamError) GetGrpcCode() int32 {
if m != nil {
return m.GrpcCode
}
return 0
}
func (m *StreamError) GetHttpCode() int32 {
if m != nil {
return m.HttpCode
}
return 0
}
func (m *StreamError) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func (m *StreamError) GetHttpStatus() string {
if m != nil {
return m.HttpStatus
}
return ""
}
func (m *StreamError) GetDetails() []*any.Any {
if m != nil {
return m.Details
}
return nil
}
func init() {
proto.RegisterType((*Error)(nil), "grpc.gateway.runtime.Error")
proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError")
}
func init() { proto.RegisterFile("internal/errors.proto", fileDescriptor_9b093362ca6d1e03) }
var fileDescriptor_9b093362ca6d1e03 = []byte{
// 252 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xc1, 0x4a, 0xc4, 0x30,
0x10, 0x86, 0x89, 0xbb, 0x75, 0xdb, 0xe9, 0x2d, 0x54, 0x88, 0xee, 0xc1, 0xb2, 0xa7, 0x9e, 0x52,
0xd0, 0x27, 0xd0, 0xc5, 0x17, 0xe8, 0xde, 0xbc, 0x2c, 0xd9, 0xdd, 0x31, 0x16, 0xda, 0xa4, 0x24,
0x53, 0xa4, 0xf8, 0x56, 0x3e, 0xa1, 0x24, 0xa5, 0xb0, 0x27, 0xf1, 0xd6, 0xf9, 0xfb, 0xcf, 0x7c,
0x1f, 0x81, 0xbb, 0xd6, 0x10, 0x3a, 0xa3, 0xba, 0x1a, 0x9d, 0xb3, 0xce, 0xcb, 0xc1, 0x59, 0xb2,
0xbc, 0xd0, 0x6e, 0x38, 0x4b, 0xad, 0x08, 0xbf, 0xd4, 0x24, 0xdd, 0x68, 0xa8, 0xed, 0xf1, 0xe1,
0x5e, 0x5b, 0xab, 0x3b, 0xac, 0x63, 0xe7, 0x34, 0x7e, 0xd4, 0xca, 0x4c, 0xf3, 0xc2, 0xee, 0x1b,
0x92, 0xb7, 0x70, 0x80, 0x17, 0x90, 0xc4, 0x4b, 0x82, 0x95, 0xac, 0xca, 0x9a, 0x79, 0xe0, 0x1c,
0xd6, 0x67, 0x7b, 0x41, 0x71, 0x53, 0xb2, 0x2a, 0x69, 0xe2, 0x37, 0x17, 0xb0, 0xe9, 0xd1, 0x7b,
0xa5, 0x51, 0xac, 0x62, 0x77, 0x19, 0xb9, 0x84, 0xcd, 0x05, 0x49, 0xb5, 0x9d, 0x17, 0xeb, 0x72,
0x55, 0xe5, 0x4f, 0x85, 0x9c, 0xc9, 0x72, 0x21, 0xcb, 0x17, 0x33, 0x35, 0x4b, 0x69, 0xf7, 0xc3,
0x20, 0x3f, 0x90, 0x43, 0xd5, 0xcf, 0x0e, 0x5b, 0xc8, 0x82, 0xff, 0x31, 0x22, 0x59, 0x44, 0xa6,
0x21, 0xd8, 0x07, 0xec, 0x16, 0xb2, 0x4f, 0xa2, 0xe1, 0x78, 0xe5, 0x93, 0x86, 0x60, 0xff, 0xb7,
0xd3, 0x23, 0xe4, 0x71, 0xcd, 0x93, 0xa2, 0x31, 0x78, 0x85, 0xbf, 0x10, 0xa2, 0x43, 0x4c, 0xae,
0xa5, 0x93, 0x7f, 0x48, 0xbf, 0xc2, 0x7b, 0xba, 0xbc, 0xfd, 0xe9, 0x36, 0x56, 0x9e, 0x7f, 0x03,
0x00, 0x00, 0xff, 0xff, 0xde, 0x72, 0x6b, 0x83, 0x8e, 0x01, 0x00, 0x00,
}


@ -1,26 +0,0 @@
syntax = "proto3";
package grpc.gateway.runtime;
option go_package = "internal";
import "google/protobuf/any.proto";
// Error is the generic error returned from unary RPCs.
message Error {
string error = 1;
// This is to make the error more compatible with users that expect errors to be Status objects:
// https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
// It should be the exact same message as the Error field.
int32 code = 2;
string message = 3;
repeated google.protobuf.Any details = 4;
}
// StreamError is a response type which is returned when a
// streaming RPC returns an error.
message StreamError {
int32 grpc_code = 1;
int32 http_code = 2;
string message = 3;
string http_status = 4;
repeated google.protobuf.Any details = 5;
}


@ -1,186 +0,0 @@
package runtime
import (
"context"
"io"
"net/http"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/internal"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
func HTTPStatusFromCode(code codes.Code) int {
switch code {
case codes.OK:
return http.StatusOK
case codes.Canceled:
return http.StatusRequestTimeout
case codes.Unknown:
return http.StatusInternalServerError
case codes.InvalidArgument:
return http.StatusBadRequest
case codes.DeadlineExceeded:
return http.StatusGatewayTimeout
case codes.NotFound:
return http.StatusNotFound
case codes.AlreadyExists:
return http.StatusConflict
case codes.PermissionDenied:
return http.StatusForbidden
case codes.Unauthenticated:
return http.StatusUnauthorized
case codes.ResourceExhausted:
return http.StatusTooManyRequests
case codes.FailedPrecondition:
// Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
return http.StatusBadRequest
case codes.Aborted:
return http.StatusConflict
case codes.OutOfRange:
return http.StatusBadRequest
case codes.Unimplemented:
return http.StatusNotImplemented
case codes.Internal:
return http.StatusInternalServerError
case codes.Unavailable:
return http.StatusServiceUnavailable
case codes.DataLoss:
return http.StatusInternalServerError
}
grpclog.Infof("Unknown gRPC error code: %v", code)
return http.StatusInternalServerError
}
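For context, a minimal sketch (not part of the vendored diff) of how this mapping is typically consumed, assuming the v1 import path github.com/grpc-ecosystem/grpc-gateway/runtime used by the file above; the error message is illustrative:

package main

import (
    "fmt"

    "github.com/grpc-ecosystem/grpc-gateway/runtime"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func main() {
    // Build a gRPC error, extract its status, and translate the code to HTTP.
    err := status.Error(codes.NotFound, "profile not found")
    s, _ := status.FromError(err)
    fmt.Println(runtime.HTTPStatusFromCode(s.Code())) // 404 (http.StatusNotFound)
}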
var (
// HTTPError replies to the request with an error.
//
// HTTPError is called:
// - From generated per-endpoint gateway handler code, when calling the backend results in an error.
// - From gateway runtime code, when forwarding the response message results in an error.
//
// The default value for HTTPError calls the custom error handler configured on the ServeMux via the
// WithProtoErrorHandler serve option if that option was used, calling GlobalHTTPErrorHandler otherwise.
//
// To customize the error handling of a particular ServeMux instance, use the WithProtoErrorHandler
// serve option.
//
// To customize the error format for all ServeMux instances not using the WithProtoErrorHandler serve
// option, set GlobalHTTPErrorHandler to a custom function.
//
// Setting this variable directly to customize error format is deprecated.
HTTPError = MuxOrGlobalHTTPError
// GlobalHTTPErrorHandler is the HTTPError handler for all ServeMux instances not using the
// WithProtoErrorHandler serve option.
//
// You can set a custom function to this variable to customize error format.
GlobalHTTPErrorHandler = DefaultHTTPError
// OtherErrorHandler handles gateway errors from parsing and routing client requests for all
// ServeMux instances not using the WithProtoErrorHandler serve option.
//
// It returns the following error codes: StatusMethodNotAllowed StatusNotFound StatusBadRequest
//
// To customize parsing and routing error handling of a particular ServeMux instance, use the
// WithProtoErrorHandler serve option.
//
// To customize parsing and routing error handling of all ServeMux instances not using the
// WithProtoErrorHandler serve option, set a custom function to this variable.
OtherErrorHandler = DefaultOtherErrorHandler
)
// MuxOrGlobalHTTPError uses the mux-configured error handler, falling back to GlobalHTTPErrorHandler.
func MuxOrGlobalHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
if mux.protoErrorHandler != nil {
mux.protoErrorHandler(ctx, mux, marshaler, w, r, err)
} else {
GlobalHTTPErrorHandler(ctx, mux, marshaler, w, r, err)
}
}
// DefaultHTTPError is the default implementation of HTTPError.
// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
// If otherwise, it replies with http.StatusInternalServerError.
//
// The response body returned by this function is a JSON object,
// which contains a member whose key is "error" and whose value is err.Error().
func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
const fallback = `{"error": "failed to marshal error message"}`
s, ok := status.FromError(err)
if !ok {
s = status.New(codes.Unknown, err.Error())
}
w.Header().Del("Trailer")
w.Header().Del("Transfer-Encoding")
contentType := marshaler.ContentType()
// Check marshaler on run time in order to keep backwards compatibility
// An interface param needs to be added to the ContentType() function on
// the Marshal interface to be able to remove this check
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
pb := s.Proto()
contentType = typeMarshaler.ContentTypeFromMessage(pb)
}
w.Header().Set("Content-Type", contentType)
body := &internal.Error{
Error: s.Message(),
Message: s.Message(),
Code: int32(s.Code()),
Details: s.Proto().GetDetails(),
}
buf, merr := marshaler.Marshal(body)
if merr != nil {
grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
w.WriteHeader(http.StatusInternalServerError)
if _, err := io.WriteString(w, fallback); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
return
}
md, ok := ServerMetadataFromContext(ctx)
if !ok {
grpclog.Infof("Failed to extract ServerMetadata from context")
}
handleForwardResponseServerMetadata(w, mux, md)
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
// Unless the request includes a TE header field indicating "trailers"
// is acceptable, as described in Section 4.3, a server SHOULD NOT
// generate trailer fields that it believes are necessary for the user
// agent to receive.
var wantsTrailers bool
if te := r.Header.Get("TE"); strings.Contains(strings.ToLower(te), "trailers") {
wantsTrailers = true
handleForwardResponseTrailerHeader(w, md)
w.Header().Set("Transfer-Encoding", "chunked")
}
st := HTTPStatusFromCode(s.Code())
w.WriteHeader(st)
if _, err := w.Write(buf); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
if wantsTrailers {
handleForwardResponseTrailer(w, md)
}
}
// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
// It simply writes a string representation of the given error into "w".
func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
http.Error(w, msg, code)
}
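A hedged sketch of the customization point described in the comments above: overriding GlobalHTTPErrorHandler for every ServeMux that does not use WithProtoErrorHandler. The flat JSON shape written here is invented for illustration.

package main

import (
    "context"
    "encoding/json"
    "net/http"

    "github.com/grpc-ecosystem/grpc-gateway/runtime"
    "google.golang.org/grpc/status"
)

func main() {
    // Emit a flat {"message": ...} body instead of the default error format.
    runtime.GlobalHTTPErrorHandler = func(ctx context.Context, mux *runtime.ServeMux,
        m runtime.Marshaler, w http.ResponseWriter, r *http.Request, err error) {
        s, _ := status.FromError(err)
        w.Header().Set("Content-Type", "application/json")
        w.WriteHeader(runtime.HTTPStatusFromCode(s.Code()))
        _ = json.NewEncoder(w).Encode(map[string]string{"message": s.Message()})
    }
    _ = http.ListenAndServe(":8081", runtime.NewServeMux())
}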


@ -1,89 +0,0 @@
package runtime
import (
"encoding/json"
"io"
"strings"
descriptor2 "github.com/golang/protobuf/descriptor"
"github.com/golang/protobuf/protoc-gen-go/descriptor"
"google.golang.org/genproto/protobuf/field_mask"
)
func translateName(name string, md *descriptor.DescriptorProto) (string, *descriptor.DescriptorProto) {
// TODO - should really gate this with a test that the marshaller has used json names
if md != nil {
for _, f := range md.Field {
if f.JsonName != nil && f.Name != nil && *f.JsonName == name {
var subType *descriptor.DescriptorProto
// If the field has a TypeName then we retrieve the nested type for translating the embedded message names.
if f.TypeName != nil {
typeSplit := strings.Split(*f.TypeName, ".")
typeName := typeSplit[len(typeSplit)-1]
for _, t := range md.NestedType {
if typeName == *t.Name {
subType = t
}
}
}
return *f.Name, subType
}
}
}
return name, nil
}
// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
func FieldMaskFromRequestBody(r io.Reader, md *descriptor.DescriptorProto) (*field_mask.FieldMask, error) {
fm := &field_mask.FieldMask{}
var root interface{}
if err := json.NewDecoder(r).Decode(&root); err != nil {
if err == io.EOF {
return fm, nil
}
return nil, err
}
queue := []fieldMaskPathItem{{node: root, md: md}}
for len(queue) > 0 {
// dequeue an item
item := queue[0]
queue = queue[1:]
if m, ok := item.node.(map[string]interface{}); ok {
// if the item is an object, then enqueue all of its children
for k, v := range m {
protoName, subMd := translateName(k, item.md)
if subMsg, ok := v.(descriptor2.Message); ok {
_, subMd = descriptor2.ForMessage(subMsg)
}
var path string
if item.path == "" {
path = protoName
} else {
path = item.path + "." + protoName
}
queue = append(queue, fieldMaskPathItem{path: path, node: v, md: subMd})
}
} else if len(item.path) > 0 {
// otherwise, it's a leaf node so print its path
fm.Paths = append(fm.Paths, item.path)
}
}
return fm, nil
}
// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
type fieldMaskPathItem struct {
// the list of prior fields leading up to node connected by dots
path string
// the generic decoded JSON object for the current item, inspected for further path extraction
node interface{}
// descriptor for parent message
md *descriptor.DescriptorProto
}
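A small sketch of feeding a JSON PATCH body to FieldMaskFromRequestBody; passing a nil descriptor skips the JSON-name translation above, and the body and field names are illustrative.

package main

import (
    "fmt"
    "strings"

    "github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
    // Every complete path present in the body becomes a FieldMask path.
    body := strings.NewReader(`{"name": "svc", "spec": {"replicas": 3}}`)
    fm, err := runtime.FieldMaskFromRequestBody(body, nil)
    if err != nil {
        panic(err)
    }
    fmt.Println(fm.Paths) // e.g. [name spec.replicas]; map iteration order may vary
}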


@ -1,300 +0,0 @@
package runtime
import (
"context"
"fmt"
"net/http"
"net/textproto"
"strings"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when
// a request is received with a URI path that does not match any registered
// service method.
//
// Since gRPC servers return an "Unimplemented" code for requests with an
// unrecognized URI path, this error also has a gRPC "Unimplemented" code.
var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
// ServeMux is a request multiplexer for grpc-gateway.
// It matches http requests to patterns and invokes the corresponding handler.
type ServeMux struct {
// handlers maps HTTP method to a list of handlers.
handlers map[string][]handler
forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
marshalers marshalerRegistry
incomingHeaderMatcher HeaderMatcherFunc
outgoingHeaderMatcher HeaderMatcherFunc
metadataAnnotators []func(context.Context, *http.Request) metadata.MD
streamErrorHandler StreamErrorHandlerFunc
protoErrorHandler ProtoErrorHandlerFunc
disablePathLengthFallback bool
lastMatchWins bool
}
// ServeMuxOption is an option that can be given to a ServeMux on construction.
type ServeMuxOption func(*ServeMux)
// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
//
// forwardResponseOption is an option that will be called on the relevant context.Context,
// http.ResponseWriter, and proto.Message before every forwarded response.
//
// The message may be nil in the case where just a header is being sent.
func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
}
}
// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
// Configuring this will mean the generated swagger output is no longer correct, and it should be
// done with careful consideration.
func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
return func(serveMux *ServeMux) {
currentQueryParser = queryParameterParser
}
}
// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
type HeaderMatcherFunc func(string) (string, bool)
// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
func DefaultHeaderMatcher(key string) (string, bool) {
key = textproto.CanonicalMIMEHeaderKey(key)
if isPermanentHTTPHeader(key) {
return MetadataPrefix + key, true
} else if strings.HasPrefix(key, MetadataHeaderPrefix) {
return key[len(MetadataHeaderPrefix):], true
}
return "", false
}
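A brief sketch of what DefaultHeaderMatcher does with a few representative headers; the header names are only examples.

package main

import (
    "fmt"

    "github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
    // Permanent HTTP headers are forwarded with the grpcgateway- prefix.
    fmt.Println(runtime.DefaultHeaderMatcher("Authorization")) // grpcgateway-Authorization true
    // Grpc-Metadata-* headers are forwarded with the prefix stripped.
    fmt.Println(runtime.DefaultHeaderMatcher("Grpc-Metadata-Foo")) // Foo true
    // Everything else is dropped.
    fmt.Println(runtime.DefaultHeaderMatcher("X-Request-Id")) // "" false
}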
// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
//
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
return func(mux *ServeMux) {
mux.incomingHeaderMatcher = fn
}
}
// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
//
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
// passed to http response returned from gateway. To transform the header before passing to response,
// matcher should return modified header.
func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
return func(mux *ServeMux) {
mux.outgoingHeaderMatcher = fn
}
}
// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
//
// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
// is reading token from cookie and adding it in gRPC context.
func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
}
}
// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler.
//
// This can be used to handle an error as general proto message defined by gRPC.
// When this option is used, the mux uses the configured error handler instead of HTTPError and
// OtherErrorHandler.
func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.protoErrorHandler = fn
}
}
// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
func WithDisablePathLengthFallback() ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.disablePathLengthFallback = true
}
}
// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
// error handler, which allows for customizing the error trailer for server-streaming
// calls.
//
// For stream errors that occur before any response has been written, the mux's
// ProtoErrorHandler will be invoked. However, once data has been written, the errors must
// be handled differently: they must be included in the response body. The response body's
// final message will include the error details returned by the stream error handler.
func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.streamErrorHandler = fn
}
}
// WithLastMatchWins returns a ServeMuxOption that will enable "last
// match wins" behavior, where if multiple path patterns match a
// request path, the last one defined in the .proto file will be used.
func WithLastMatchWins() ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.lastMatchWins = true
}
}
// NewServeMux returns a new ServeMux whose internal mapping is empty.
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
serveMux := &ServeMux{
handlers: make(map[string][]handler),
forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
marshalers: makeMarshalerMIMERegistry(),
streamErrorHandler: DefaultHTTPStreamErrorHandler,
}
for _, opt := range opts {
opt(serveMux)
}
if serveMux.incomingHeaderMatcher == nil {
serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
}
if serveMux.outgoingHeaderMatcher == nil {
serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
}
}
return serveMux
}
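A hedged sketch of composing the options above when constructing a v1 ServeMux; the tenant header, cookie, and metadata key names are invented for illustration.

package main

import (
    "context"
    "net/http"

    "github.com/grpc-ecosystem/grpc-gateway/runtime"
    "google.golang.org/grpc/metadata"
)

func main() {
    mux := runtime.NewServeMux(
        // Forward one extra header on top of the default matching rules.
        runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) {
            if key == "X-Tenant-Id" {
                return key, true
            }
            return runtime.DefaultHeaderMatcher(key)
        }),
        // Lift a session cookie into outgoing gRPC metadata.
        runtime.WithMetadata(func(ctx context.Context, r *http.Request) metadata.MD {
            if c, err := r.Cookie("session"); err == nil {
                return metadata.Pairs("session-token", c.Value)
            }
            return nil
        }),
    )
    _ = http.ListenAndServe(":8081", mux)
}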
// Handle associates "h" to the pair of HTTP method and path pattern.
func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
if s.lastMatchWins {
s.handlers[meth] = append([]handler{handler{pat: pat, h: h}}, s.handlers[meth]...)
} else {
s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
}
}
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
path := r.URL.Path
if !strings.HasPrefix(path, "/") {
if s.protoErrorHandler != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest))
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
} else {
OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
}
return
}
components := strings.Split(path[1:], "/")
l := len(components)
var verb string
if idx := strings.LastIndex(components[l-1], ":"); idx == 0 {
if s.protoErrorHandler != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
} else {
OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
}
return
} else if idx > 0 {
c := components[l-1]
components[l-1], verb = c[:idx], c[idx+1:]
}
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
r.Method = strings.ToUpper(override)
if err := r.ParseForm(); err != nil {
if s.protoErrorHandler != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
sterr := status.Error(codes.InvalidArgument, err.Error())
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
} else {
OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
}
return
}
}
for _, h := range s.handlers[r.Method] {
pathParams, err := h.pat.Match(components, verb)
if err != nil {
continue
}
h.h(w, r, pathParams)
return
}
// lookup other methods to handle fallback from GET to POST and
// to determine if it is MethodNotAllowed or NotFound.
for m, handlers := range s.handlers {
if m == r.Method {
continue
}
for _, h := range handlers {
pathParams, err := h.pat.Match(components, verb)
if err != nil {
continue
}
// X-HTTP-Method-Override is optional. Always allow fallback to POST.
if s.isPathLengthFallback(r) {
if err := r.ParseForm(); err != nil {
if s.protoErrorHandler != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
sterr := status.Error(codes.InvalidArgument, err.Error())
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
} else {
OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
}
return
}
h.h(w, r, pathParams)
return
}
if s.protoErrorHandler != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
} else {
OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
}
return
}
}
if s.protoErrorHandler != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
} else {
OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
}
}
// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
return s.forwardResponseOptions
}
func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
}
type handler struct {
pat Pattern
h HandlerFunc
}


@ -1,106 +0,0 @@
package runtime
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/ptypes/any"
"github.com/grpc-ecosystem/grpc-gateway/internal"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
// StreamErrorHandlerFunc accepts an error as a gRPC error generated via status package and translates it into
// a proto struct used to represent an error at the end of a stream.
type StreamErrorHandlerFunc func(context.Context, error) *StreamError
// StreamError is the payload for the final message in a server stream in the event that the server returns an
// error after a response message has already been sent.
type StreamError internal.StreamError
// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request.
type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
// DefaultHTTPProtoErrorHandler is an implementation of HTTPError.
// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
// If otherwise, it replies with http.StatusInternalServerError.
//
// The response body returned by this function is a Status message marshaled by a Marshaler.
//
// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead.
func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
// return Internal when Marshal failed
const fallback = `{"code": 13, "message": "failed to marshal error message"}`
s, ok := status.FromError(err)
if !ok {
s = status.New(codes.Unknown, err.Error())
}
w.Header().Del("Trailer")
contentType := marshaler.ContentType()
// Check marshaler on run time in order to keep backwards compatibility
// An interface param needs to be added to the ContentType() function on
// the Marshal interface to be able to remove this check
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
pb := s.Proto()
contentType = typeMarshaler.ContentTypeFromMessage(pb)
}
w.Header().Set("Content-Type", contentType)
buf, merr := marshaler.Marshal(s.Proto())
if merr != nil {
grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
w.WriteHeader(http.StatusInternalServerError)
if _, err := io.WriteString(w, fallback); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
return
}
md, ok := ServerMetadataFromContext(ctx)
if !ok {
grpclog.Infof("Failed to extract ServerMetadata from context")
}
handleForwardResponseServerMetadata(w, mux, md)
handleForwardResponseTrailerHeader(w, md)
st := HTTPStatusFromCode(s.Code())
w.WriteHeader(st)
if _, err := w.Write(buf); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
handleForwardResponseTrailer(w, md)
}
// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via
// default logic.
//
// It extracts the gRPC status from err if possible. The fields of the status are
// used to populate the returned StreamError, and the HTTP status code is derived
// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a
// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code.
func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError {
grpcCode := codes.Unknown
grpcMessage := err.Error()
var grpcDetails []*any.Any
if s, ok := status.FromError(err); ok {
grpcCode = s.Code()
grpcMessage = s.Message()
grpcDetails = s.Proto().GetDetails()
}
httpCode := HTTPStatusFromCode(grpcCode)
return &StreamError{
GrpcCode: int32(grpcCode),
HttpCode: int32(httpCode),
Message: grpcMessage,
HttpStatus: http.StatusText(httpCode),
Details: grpcDetails,
}
}
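A minimal sketch of the customization hook described above, wrapping the default stream error handler; the redaction rule is invented for illustration.

package main

import (
    "context"
    "net/http"

    "github.com/grpc-ecosystem/grpc-gateway/runtime"
    "google.golang.org/grpc/codes"
)

func main() {
    mux := runtime.NewServeMux(
        runtime.WithStreamErrorHandler(func(ctx context.Context, err error) *runtime.StreamError {
            // Start from the default translation, then hide internal details.
            serr := runtime.DefaultHTTPStreamErrorHandler(ctx, err)
            if serr.GrpcCode == int32(codes.Internal) {
                serr.Message = "internal error"
                serr.Details = nil
            }
            return serr
        }),
    )
    _ = http.ListenAndServe(":8081", mux)
}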


@ -1,406 +0,0 @@
package runtime
import (
"encoding/base64"
"fmt"
"net/url"
"reflect"
"regexp"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc/grpclog"
)
var valuesKeyRegexp = regexp.MustCompile("^(.*)\\[(.*)\\]$")
var currentQueryParser QueryParameterParser = &defaultQueryParser{}
// QueryParameterParser defines interface for all query parameter parsers
type QueryParameterParser interface {
Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
}
// PopulateQueryParameters parses query parameters
// into "msg" using current query parser
func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
return currentQueryParser.Parse(msg, values, filter)
}
type defaultQueryParser struct{}
// Parse populates "values" into "msg".
// A value is ignored if its key starts with one of the elements in "filter".
func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
for key, values := range values {
match := valuesKeyRegexp.FindStringSubmatch(key)
if len(match) == 3 {
key = match[1]
values = append([]string{match[2]}, values...)
}
fieldPath := strings.Split(key, ".")
if filter.HasCommonPrefix(fieldPath) {
continue
}
if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
return err
}
}
return nil
}
// PopulateFieldFromPath sets a value in a nested Protobuf structure.
// It instantiates missing protobuf fields as it goes.
func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
fieldPath := strings.Split(fieldPathString, ".")
return populateFieldValueFromPath(msg, fieldPath, []string{value})
}
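A sketch of populating a message from query parameters with the helpers above; EchoRequest is a hand-written stand-in for a protoc-generated type, kept minimal so the protobuf struct tags the populator relies on are visible.

package main

import (
    "fmt"
    "net/url"

    "github.com/golang/protobuf/proto"
    "github.com/grpc-ecosystem/grpc-gateway/runtime"
    "github.com/grpc-ecosystem/grpc-gateway/utilities"
)

// EchoRequest mimics the shape of a generated message; a real gateway uses
// the type emitted by protoc-gen-go for its .proto file.
type EchoRequest struct {
    Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
    Num  int32  `protobuf:"varint,2,opt,name=num,proto3" json:"num,omitempty"`
}

func (m *EchoRequest) Reset()         { *m = EchoRequest{} }
func (m *EchoRequest) String() string { return proto.CompactTextString(m) }
func (*EchoRequest) ProtoMessage()    {}

func main() {
    var msg EchoRequest
    values := url.Values{"name": {"hello"}, "num": {"42"}}
    // An empty filter means no field paths are excluded.
    if err := runtime.PopulateQueryParameters(&msg, values, utilities.NewDoubleArray(nil)); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", msg) // {Name:hello Num:42}
}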
func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
m := reflect.ValueOf(msg)
if m.Kind() != reflect.Ptr {
return fmt.Errorf("unexpected type %T: %v", msg, msg)
}
var props *proto.Properties
m = m.Elem()
for i, fieldName := range fieldPath {
isLast := i == len(fieldPath)-1
if !isLast && m.Kind() != reflect.Struct {
return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
}
var f reflect.Value
var err error
f, props, err = fieldByProtoName(m, fieldName)
if err != nil {
return err
} else if !f.IsValid() {
grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
return nil
}
switch f.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
if !isLast {
return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
}
m = f
case reflect.Slice:
if !isLast {
return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
}
// Handle []byte
if f.Type().Elem().Kind() == reflect.Uint8 {
m = f
break
}
return populateRepeatedField(f, values, props)
case reflect.Ptr:
if f.IsNil() {
m = reflect.New(f.Type().Elem())
f.Set(m.Convert(f.Type()))
}
m = f.Elem()
continue
case reflect.Struct:
m = f
continue
case reflect.Map:
if !isLast {
return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
}
return populateMapField(f, values, props)
default:
return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
}
}
switch len(values) {
case 0:
return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
case 1:
default:
grpclog.Infof("too many field values: %s", strings.Join(fieldPath, "."))
}
return populateField(m, values[0], props)
}
// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
// "m" must be a struct value. It returns zero reflect.Value if no such field found.
func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
props := proto.GetProperties(m.Type())
// look up field name in oneof map
for _, op := range props.OneofTypes {
if name == op.Prop.OrigName || name == op.Prop.JSONName {
v := reflect.New(op.Type.Elem())
field := m.Field(op.Field)
if !field.IsNil() {
return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
}
field.Set(v)
return v.Elem().Field(0), op.Prop, nil
}
}
for _, p := range props.Prop {
if p.OrigName == name {
return m.FieldByName(p.Name), p, nil
}
if p.JSONName == name {
return m.FieldByName(p.Name), p, nil
}
}
return reflect.Value{}, nil, nil
}
func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
if len(values) != 2 {
return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name)
}
key, value := values[0], values[1]
keyType := f.Type().Key()
valueType := f.Type().Elem()
if f.IsNil() {
f.Set(reflect.MakeMap(f.Type()))
}
keyConv, ok := convFromType[keyType.Kind()]
if !ok {
return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
}
valueConv, ok := convFromType[valueType.Kind()]
if !ok {
return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
}
keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
if err := keyV[1].Interface(); err != nil {
return err.(error)
}
valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
if err := valueV[1].Interface(); err != nil {
return err.(error)
}
f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))
return nil
}
func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
elemType := f.Type().Elem()
// is the destination field a slice of an enumeration type?
if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
return populateFieldEnumRepeated(f, values, enumValMap)
}
conv, ok := convFromType[elemType.Kind()]
if !ok {
return fmt.Errorf("unsupported field type %s", elemType)
}
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
for i, v := range values {
result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
if err := result[1].Interface(); err != nil {
return err.(error)
}
f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
}
return nil
}
func populateField(f reflect.Value, value string, props *proto.Properties) error {
i := f.Addr().Interface()
// Handle protobuf well known types
var name string
switch m := i.(type) {
case interface{ XXX_WellKnownType() string }:
name = m.XXX_WellKnownType()
case proto.Message:
const wktPrefix = "google.protobuf."
if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) {
name = fullName[len(wktPrefix):]
}
}
switch name {
case "Timestamp":
if value == "null" {
f.FieldByName("Seconds").SetInt(0)
f.FieldByName("Nanos").SetInt(0)
return nil
}
t, err := time.Parse(time.RFC3339Nano, value)
if err != nil {
return fmt.Errorf("bad Timestamp: %v", err)
}
f.FieldByName("Seconds").SetInt(int64(t.Unix()))
f.FieldByName("Nanos").SetInt(int64(t.Nanosecond()))
return nil
case "Duration":
if value == "null" {
f.FieldByName("Seconds").SetInt(0)
f.FieldByName("Nanos").SetInt(0)
return nil
}
d, err := time.ParseDuration(value)
if err != nil {
return fmt.Errorf("bad Duration: %v", err)
}
ns := d.Nanoseconds()
s := ns / 1e9
ns %= 1e9
f.FieldByName("Seconds").SetInt(s)
f.FieldByName("Nanos").SetInt(ns)
return nil
case "DoubleValue":
fallthrough
case "FloatValue":
float64Val, err := strconv.ParseFloat(value, 64)
if err != nil {
return fmt.Errorf("bad DoubleValue: %s", value)
}
f.FieldByName("Value").SetFloat(float64Val)
return nil
case "Int64Value":
fallthrough
case "Int32Value":
int64Val, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmt.Errorf("bad DoubleValue: %s", value)
}
f.FieldByName("Value").SetInt(int64Val)
return nil
case "UInt64Value":
fallthrough
case "UInt32Value":
uint64Val, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return fmt.Errorf("bad DoubleValue: %s", value)
}
f.FieldByName("Value").SetUint(uint64Val)
return nil
case "BoolValue":
if value == "true" {
f.FieldByName("Value").SetBool(true)
} else if value == "false" {
f.FieldByName("Value").SetBool(false)
} else {
return fmt.Errorf("bad BoolValue: %s", value)
}
return nil
case "StringValue":
f.FieldByName("Value").SetString(value)
return nil
case "BytesValue":
bytesVal, err := base64.StdEncoding.DecodeString(value)
if err != nil {
return fmt.Errorf("bad BytesValue: %s", value)
}
f.FieldByName("Value").SetBytes(bytesVal)
return nil
case "FieldMask":
p := f.FieldByName("Paths")
for _, v := range strings.Split(value, ",") {
if v != "" {
p.Set(reflect.Append(p, reflect.ValueOf(v)))
}
}
return nil
}
// Handle Time and Duration stdlib types
switch t := i.(type) {
case *time.Time:
pt, err := time.Parse(time.RFC3339Nano, value)
if err != nil {
return fmt.Errorf("bad Timestamp: %v", err)
}
*t = pt
return nil
case *time.Duration:
d, err := time.ParseDuration(value)
if err != nil {
return fmt.Errorf("bad Duration: %v", err)
}
*t = d
return nil
}
// is the destination field an enumeration type?
if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
return populateFieldEnum(f, value, enumValMap)
}
conv, ok := convFromType[f.Kind()]
if !ok {
return fmt.Errorf("field type %T is not supported in query parameters", i)
}
result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
if err := result[1].Interface(); err != nil {
return err.(error)
}
f.Set(result[0].Convert(f.Type()))
return nil
}
func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) {
// see if it's an enumeration string
if enumVal, ok := enumValMap[value]; ok {
return reflect.ValueOf(enumVal).Convert(t), nil
}
// check for an integer that matches an enumeration value
eVal, err := strconv.Atoi(value)
if err != nil {
return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
}
for _, v := range enumValMap {
if v == int32(eVal) {
return reflect.ValueOf(eVal).Convert(t), nil
}
}
return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
}
func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error {
cval, err := convertEnum(value, f.Type(), enumValMap)
if err != nil {
return err
}
f.Set(cval)
return nil
}
func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error {
elemType := f.Type().Elem()
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
for i, v := range values {
result, err := convertEnum(v, elemType, enumValMap)
if err != nil {
return err
}
f.Index(i).Set(result)
}
return nil
}
var (
convFromType = map[reflect.Kind]reflect.Value{
reflect.String: reflect.ValueOf(String),
reflect.Bool: reflect.ValueOf(Bool),
reflect.Float64: reflect.ValueOf(Float64),
reflect.Float32: reflect.ValueOf(Float32),
reflect.Int64: reflect.ValueOf(Int64),
reflect.Int32: reflect.ValueOf(Int32),
reflect.Uint64: reflect.ValueOf(Uint64),
reflect.Uint32: reflect.ValueOf(Uint32),
reflect.Slice: reflect.ValueOf(Bytes),
}
)


@ -0,0 +1,121 @@
package httprule
import (
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)
const (
opcodeVersion = 1
)
// Template is a compiled representation of path templates.
type Template struct {
// Version is the version number of the format.
Version int
// OpCodes is a sequence of operations.
OpCodes []int
// Pool is a constant pool
Pool []string
// Verb is a VERB part in the template.
Verb string
// Fields is a list of field paths bound in this template.
Fields []string
// Original template (example: /v1/a_bit_of_everything)
Template string
}
// Compiler compiles utilities representation of path templates into marshallable operations.
// They can be unmarshalled by runtime.NewPattern.
type Compiler interface {
Compile() Template
}
type op struct {
// code is the opcode of the operation
code utilities.OpCode
// str is a string operand of the code.
// num is ignored if str is not empty.
str string
// num is a numeric operand of the code.
num int
}
func (w wildcard) compile() []op {
return []op{
{code: utilities.OpPush},
}
}
func (w deepWildcard) compile() []op {
return []op{
{code: utilities.OpPushM},
}
}
func (l literal) compile() []op {
return []op{
{
code: utilities.OpLitPush,
str: string(l),
},
}
}
func (v variable) compile() []op {
var ops []op
for _, s := range v.segments {
ops = append(ops, s.compile()...)
}
ops = append(ops, op{
code: utilities.OpConcatN,
num: len(v.segments),
}, op{
code: utilities.OpCapture,
str: v.path,
})
return ops
}
func (t template) Compile() Template {
var rawOps []op
for _, s := range t.segments {
rawOps = append(rawOps, s.compile()...)
}
var (
ops []int
pool []string
fields []string
)
consts := make(map[string]int)
for _, op := range rawOps {
ops = append(ops, int(op.code))
if op.str == "" {
ops = append(ops, op.num)
} else {
// eof segment literal represents the "/" path pattern
if op.str == eof {
op.str = ""
}
if _, ok := consts[op.str]; !ok {
consts[op.str] = len(pool)
pool = append(pool, op.str)
}
ops = append(ops, consts[op.str])
}
if op.code == utilities.OpCapture {
fields = append(fields, op.str)
}
}
return Template{
Version: opcodeVersion,
OpCodes: ops,
Pool: pool,
Verb: t.verb,
Fields: fields,
Template: t.template,
}
}


@ -0,0 +1,11 @@
// +build gofuzz
package httprule
func Fuzz(data []byte) int {
_, err := Parse(string(data))
if err != nil {
return 0
}
return 0
}


@ -0,0 +1,369 @@
package httprule
import (
"fmt"
"strings"
)
// InvalidTemplateError indicates that the path template is not valid.
type InvalidTemplateError struct {
tmpl string
msg string
}
func (e InvalidTemplateError) Error() string {
return fmt.Sprintf("%s: %s", e.msg, e.tmpl)
}
// Parse parses the string representation of a path template
func Parse(tmpl string) (Compiler, error) {
if !strings.HasPrefix(tmpl, "/") {
return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"}
}
tokens, verb := tokenize(tmpl[1:])
p := parser{tokens: tokens}
segs, err := p.topLevelSegments()
if err != nil {
return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()}
}
return template{
segments: segs,
verb: verb,
template: tmpl,
}, nil
}
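An in-package sketch of what Parse and Compile produce for a simple template; httprule is internal to grpc-gateway v2, so a snippet like this would live next to the package rather than be imported by user code, and the template string is illustrative.

package httprule

import "fmt"

// parseSketch walks one template through Parse and Compile.
func parseSketch() {
    c, err := Parse("/v1/{name=messages/*}")
    if err != nil {
        panic(err)
    }
    tmpl := c.Compile()
    fmt.Println(tmpl.Fields)   // [name]
    fmt.Println(tmpl.Verb)     // "" (no ":verb" suffix in the template)
    fmt.Println(tmpl.Template) // /v1/{name=messages/*}
}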
func tokenize(path string) (tokens []string, verb string) {
if path == "" {
return []string{eof}, ""
}
const (
init = iota
field
nested
)
st := init
for path != "" {
var idx int
switch st {
case init:
idx = strings.IndexAny(path, "/{")
case field:
idx = strings.IndexAny(path, ".=}")
case nested:
idx = strings.IndexAny(path, "/}")
}
if idx < 0 {
tokens = append(tokens, path)
break
}
switch r := path[idx]; r {
case '/', '.':
case '{':
st = field
case '=':
st = nested
case '}':
st = init
}
if idx == 0 {
tokens = append(tokens, path[idx:idx+1])
} else {
tokens = append(tokens, path[:idx], path[idx:idx+1])
}
path = path[idx+1:]
}
l := len(tokens)
// See
// https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ;
// although normal and backwards-compat logic here is to use the last index
// of a colon, if the final segment is a variable followed by a colon, the
// part following the colon must be a verb. Hence if the previous token is
// an end var marker, we switch the index we're looking for to Index instead
// of LastIndex, so that we correctly grab the remaining part of the path as
// the verb.
var penultimateTokenIsEndVar bool
switch l {
case 0, 1:
// Not enough to be variable so skip this logic and don't result in an
// invalid index
default:
penultimateTokenIsEndVar = tokens[l-2] == "}"
}
t := tokens[l-1]
var idx int
if penultimateTokenIsEndVar {
idx = strings.Index(t, ":")
} else {
idx = strings.LastIndex(t, ":")
}
if idx == 0 {
tokens, verb = tokens[:l-1], t[1:]
} else if idx > 0 {
tokens[l-1], verb = t[:idx], t[idx+1:]
}
tokens = append(tokens, eof)
return tokens, verb
}
// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto.
type parser struct {
tokens []string
accepted []string
}
// topLevelSegments is the target of this parser.
func (p *parser) topLevelSegments() ([]segment, error) {
if _, err := p.accept(typeEOF); err == nil {
p.tokens = p.tokens[:0]
return []segment{literal(eof)}, nil
}
segs, err := p.segments()
if err != nil {
return nil, err
}
if _, err := p.accept(typeEOF); err != nil {
return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, ""))
}
return segs, nil
}
func (p *parser) segments() ([]segment, error) {
s, err := p.segment()
if err != nil {
return nil, err
}
segs := []segment{s}
for {
if _, err := p.accept("/"); err != nil {
return segs, nil
}
s, err := p.segment()
if err != nil {
return segs, err
}
segs = append(segs, s)
}
}
func (p *parser) segment() (segment, error) {
if _, err := p.accept("*"); err == nil {
return wildcard{}, nil
}
if _, err := p.accept("**"); err == nil {
return deepWildcard{}, nil
}
if l, err := p.literal(); err == nil {
return l, nil
}
v, err := p.variable()
if err != nil {
return nil, fmt.Errorf("segment neither wildcards, literal or variable: %v", err)
}
return v, err
}
func (p *parser) literal() (segment, error) {
lit, err := p.accept(typeLiteral)
if err != nil {
return nil, err
}
return literal(lit), nil
}
func (p *parser) variable() (segment, error) {
if _, err := p.accept("{"); err != nil {
return nil, err
}
path, err := p.fieldPath()
if err != nil {
return nil, err
}
var segs []segment
if _, err := p.accept("="); err == nil {
segs, err = p.segments()
if err != nil {
return nil, fmt.Errorf("invalid segment in variable %q: %v", path, err)
}
} else {
segs = []segment{wildcard{}}
}
if _, err := p.accept("}"); err != nil {
return nil, fmt.Errorf("unterminated variable segment: %s", path)
}
return variable{
path: path,
segments: segs,
}, nil
}
func (p *parser) fieldPath() (string, error) {
c, err := p.accept(typeIdent)
if err != nil {
return "", err
}
components := []string{c}
for {
if _, err = p.accept("."); err != nil {
return strings.Join(components, "."), nil
}
c, err := p.accept(typeIdent)
if err != nil {
return "", fmt.Errorf("invalid field path component: %v", err)
}
components = append(components, c)
}
}
// A termType is a type of terminal symbols.
type termType string
// These constants define some of the valid values of termType.
// They improve readability of parse functions.
//
// You can also use "/", "*", "**", "." or "=" as valid values.
const (
typeIdent = termType("ident")
typeLiteral = termType("literal")
typeEOF = termType("$")
)
const (
// eof is the terminal symbol which always appears at the end of token sequence.
eof = "\u0000"
)
// accept tries to accept a token in "p".
// This function consumes a token and returns it if it matches the specified "term".
// If it doesn't match, the function does not consume any tokens and returns an error.
func (p *parser) accept(term termType) (string, error) {
t := p.tokens[0]
switch term {
case "/", "*", "**", ".", "=", "{", "}":
if t != string(term) && t != "/" {
return "", fmt.Errorf("expected %q but got %q", term, t)
}
case typeEOF:
if t != eof {
return "", fmt.Errorf("expected EOF but got %q", t)
}
case typeIdent:
if err := expectIdent(t); err != nil {
return "", err
}
case typeLiteral:
if err := expectPChars(t); err != nil {
return "", err
}
default:
return "", fmt.Errorf("unknown termType %q", term)
}
p.tokens = p.tokens[1:]
p.accepted = append(p.accepted, t)
return t, nil
}
// expectPChars determines if "t" consists of only pchars defined in RFC3986.
//
// https://www.ietf.org/rfc/rfc3986.txt, P.49
//
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
// / "*" / "+" / "," / ";" / "="
// pct-encoded = "%" HEXDIG HEXDIG
func expectPChars(t string) error {
const (
init = iota
pct1
pct2
)
st := init
for _, r := range t {
if st != init {
if !isHexDigit(r) {
return fmt.Errorf("invalid hexdigit: %c(%U)", r, r)
}
switch st {
case pct1:
st = pct2
case pct2:
st = init
}
continue
}
// unreserved
switch {
case 'A' <= r && r <= 'Z':
continue
case 'a' <= r && r <= 'z':
continue
case '0' <= r && r <= '9':
continue
}
switch r {
case '-', '.', '_', '~':
// unreserved
case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=':
// sub-delims
case ':', '@':
// rest of pchar
case '%':
// pct-encoded
st = pct1
default:
return fmt.Errorf("invalid character in path segment: %q(%U)", r, r)
}
}
if st != init {
return fmt.Errorf("invalid percent-encoding in %q", t)
}
return nil
}
// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*).
func expectIdent(ident string) error {
if ident == "" {
return fmt.Errorf("empty identifier")
}
for pos, r := range ident {
switch {
case '0' <= r && r <= '9':
if pos == 0 {
return fmt.Errorf("identifier starting with digit: %s", ident)
}
continue
case 'A' <= r && r <= 'Z':
continue
case 'a' <= r && r <= 'z':
continue
case r == '_':
continue
default:
return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident)
}
}
return nil
}
func isHexDigit(r rune) bool {
switch {
case '0' <= r && r <= '9':
return true
case 'A' <= r && r <= 'F':
return true
case 'a' <= r && r <= 'f':
return true
}
return false
}


@ -0,0 +1,60 @@
package httprule
import (
"fmt"
"strings"
)
type template struct {
segments []segment
verb string
template string
}
type segment interface {
fmt.Stringer
compile() (ops []op)
}
type wildcard struct{}
type deepWildcard struct{}
type literal string
type variable struct {
path string
segments []segment
}
func (wildcard) String() string {
return "*"
}
func (deepWildcard) String() string {
return "**"
}
func (l literal) String() string {
return string(l)
}
func (v variable) String() string {
var segs []string
for _, s := range v.segments {
segs = append(segs, s.String())
}
return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/"))
}
func (t template) String() string {
var segs []string
for _, s := range t.segments {
segs = append(segs, s.String())
}
str := strings.Join(segs, "/")
if t.verb != "" {
str = fmt.Sprintf("%s:%s", str, t.verb)
}
return "/" + str
}


@ -41,6 +41,25 @@ var (
DefaultContextTimeout = 0 * time.Second
)
// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed.
// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context.
var malformedHTTPHeaders = map[string]struct{}{
"connection": {},
}
type (
rpcMethodKey struct{}
httpPathPatternKey struct{}
AnnotateContextOption func(ctx context.Context) context.Context
)
func WithHTTPPathPattern(pattern string) AnnotateContextOption {
return func(ctx context.Context) context.Context {
return withHTTPPathPattern(ctx, pattern)
}
}
func decodeBinHeader(v string) ([]byte, error) {
if len(v)%4 == 0 {
// Input was padded, or padding was not necessary.
@ -56,8 +75,8 @@ At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
except that the forwarded destination is not another HTTP service but rather
a gRPC service.
*/
func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
ctx, md, err := annotateContext(ctx, mux, req)
func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
if err != nil {
return nil, err
}
@ -70,8 +89,8 @@ func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (con
// AnnotateIncomingContext adds context information such as metadata from the request.
// Attach metadata as incoming context.
func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
ctx, md, err := annotateContext(ctx, mux, req)
func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
if err != nil {
return nil, err
}
@ -82,7 +101,11 @@ func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Reque
return metadata.NewIncomingContext(ctx, md), nil
}
func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, metadata.MD, error) {
func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) {
ctx = withRPCMethod(ctx, rpcMethodName)
for _, o := range options {
ctx = o(ctx)
}
var pairs []string
timeout := DefaultContextTimeout
if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
@ -132,6 +155,7 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (con
}
if timeout != 0 {
//nolint:govet // The context outlives this function
ctx, _ = context.WithTimeout(ctx, timeout)
}
if len(pairs) == 0 {
@ -154,11 +178,17 @@ type serverMetadataKey struct{}
// NewServerMetadataContext creates a new context with ServerMetadata
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
if ctx == nil {
ctx = context.Background()
}
return context.WithValue(ctx, serverMetadataKey{}, md)
}
// ServerMetadataFromContext returns the ServerMetadata in ctx
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
if ctx == nil {
return md, false
}
md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
return
}
@ -289,3 +319,46 @@ func isPermanentHTTPHeader(hdr string) bool {
}
return false
}
// isMalformedHTTPHeader checks whether header belongs to the list of
// "malformed headers" and would be rejected by the gRPC server.
func isMalformedHTTPHeader(header string) bool {
_, isMalformed := malformedHTTPHeaders[strings.ToLower(header)]
return isMalformed
}
// RPCMethod returns the method string for the server context. The returned
// string is in the format of "/package.service/method".
func RPCMethod(ctx context.Context) (string, bool) {
m := ctx.Value(rpcMethodKey{})
if m == nil {
return "", false
}
ms, ok := m.(string)
if !ok {
return "", false
}
return ms, true
}
func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context {
return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName)
}
// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists.
// The format of the returned string is defined by the google.api.http path template type.
func HTTPPathPattern(ctx context.Context) (string, bool) {
m := ctx.Value(httpPathPatternKey{})
if m == nil {
return "", false
}
ms, ok := m.(string)
if !ok {
return "", false
}
return ms, true
}
func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context {
return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern)
}
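The new RPCMethod and HTTPPathPattern accessors pair with the rpcMethodName argument and WithHTTPPathPattern option added to AnnotateContext above: generated handlers record both on the context, and user code can read them back, for example from a metadata annotator. A minimal sketch, assuming imports of context, net/http, google.golang.org/grpc/metadata and this runtime package; the metadata keys are illustrative:

mux := runtime.NewServeMux(
	runtime.WithMetadata(func(ctx context.Context, _ *http.Request) metadata.MD {
		md := metadata.MD{}
		if m, ok := runtime.RPCMethod(ctx); ok {
			md.Set("x-rpc-method", m) // e.g. "/pkg.Service/Method"
		}
		if p, ok := runtime.HTTPPathPattern(ctx); ok {
			md.Set("x-http-path-pattern", p) // present when the handler passed WithHTTPPathPattern
		}
		return md
	}),
)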


@ -6,10 +6,10 @@ import (
"strconv"
"strings"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/ptypes/duration"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/golang/protobuf/ptypes/wrappers"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/timestamppb"
"google.golang.org/protobuf/types/known/wrapperspb"
)
// String just returns the given string.
@ -205,9 +205,11 @@ func BytesSlice(val, sep string) ([][]byte, error) {
}
// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
func Timestamp(val string) (*timestamp.Timestamp, error) {
var r timestamp.Timestamp
err := jsonpb.UnmarshalString(val, &r)
func Timestamp(val string) (*timestamppb.Timestamp, error) {
var r timestamppb.Timestamp
val = strconv.Quote(strings.Trim(val, `"`))
unmarshaler := &protojson.UnmarshalOptions{}
err := unmarshaler.Unmarshal([]byte(val), &r)
if err != nil {
return nil, err
}
@ -215,9 +217,11 @@ func Timestamp(val string) (*timestamp.Timestamp, error) {
}
// Duration converts the given string into a timestamp.Duration.
func Duration(val string) (*duration.Duration, error) {
var r duration.Duration
err := jsonpb.UnmarshalString(val, &r)
func Duration(val string) (*durationpb.Duration, error) {
var r durationpb.Duration
val = strconv.Quote(strings.Trim(val, `"`))
unmarshaler := &protojson.UnmarshalOptions{}
err := unmarshaler.Unmarshal([]byte(val), &r)
if err != nil {
return nil, err
}
@ -261,58 +265,58 @@ func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
}
/*
Support fot google.protobuf.wrappers on top of primitive types
Support for google.protobuf.wrappers on top of primitive types
*/
// StringValue well-known type support as wrapper around string type
func StringValue(val string) (*wrappers.StringValue, error) {
return &wrappers.StringValue{Value: val}, nil
func StringValue(val string) (*wrapperspb.StringValue, error) {
return &wrapperspb.StringValue{Value: val}, nil
}
// FloatValue well-known type support as wrapper around float32 type
func FloatValue(val string) (*wrappers.FloatValue, error) {
func FloatValue(val string) (*wrapperspb.FloatValue, error) {
parsedVal, err := Float32(val)
return &wrappers.FloatValue{Value: parsedVal}, err
return &wrapperspb.FloatValue{Value: parsedVal}, err
}
// DoubleValue well-known type support as wrapper around float64 type
func DoubleValue(val string) (*wrappers.DoubleValue, error) {
func DoubleValue(val string) (*wrapperspb.DoubleValue, error) {
parsedVal, err := Float64(val)
return &wrappers.DoubleValue{Value: parsedVal}, err
return &wrapperspb.DoubleValue{Value: parsedVal}, err
}
// BoolValue well-known type support as wrapper around bool type
func BoolValue(val string) (*wrappers.BoolValue, error) {
func BoolValue(val string) (*wrapperspb.BoolValue, error) {
parsedVal, err := Bool(val)
return &wrappers.BoolValue{Value: parsedVal}, err
return &wrapperspb.BoolValue{Value: parsedVal}, err
}
// Int32Value well-known type support as wrapper around int32 type
func Int32Value(val string) (*wrappers.Int32Value, error) {
func Int32Value(val string) (*wrapperspb.Int32Value, error) {
parsedVal, err := Int32(val)
return &wrappers.Int32Value{Value: parsedVal}, err
return &wrapperspb.Int32Value{Value: parsedVal}, err
}
// UInt32Value well-known type support as wrapper around uint32 type
func UInt32Value(val string) (*wrappers.UInt32Value, error) {
func UInt32Value(val string) (*wrapperspb.UInt32Value, error) {
parsedVal, err := Uint32(val)
return &wrappers.UInt32Value{Value: parsedVal}, err
return &wrapperspb.UInt32Value{Value: parsedVal}, err
}
// Int64Value well-known type support as wrapper around int64 type
func Int64Value(val string) (*wrappers.Int64Value, error) {
func Int64Value(val string) (*wrapperspb.Int64Value, error) {
parsedVal, err := Int64(val)
return &wrappers.Int64Value{Value: parsedVal}, err
return &wrapperspb.Int64Value{Value: parsedVal}, err
}
// UInt64Value well-known type support as wrapper around uint64 type
func UInt64Value(val string) (*wrappers.UInt64Value, error) {
func UInt64Value(val string) (*wrapperspb.UInt64Value, error) {
parsedVal, err := Uint64(val)
return &wrappers.UInt64Value{Value: parsedVal}, err
return &wrapperspb.UInt64Value{Value: parsedVal}, err
}
// BytesValue well-known type support as wrapper around bytes[] type
func BytesValue(val string) (*wrappers.BytesValue, error) {
func BytesValue(val string) (*wrapperspb.BytesValue, error) {
parsedVal, err := Bytes(val)
return &wrappers.BytesValue{Value: parsedVal}, err
return &wrapperspb.BytesValue{Value: parsedVal}, err
}
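These converters back the gateway's handling of well-known types in path and query parameters, and they now go through protojson instead of the deprecated jsonpb. A small usage sketch (the input values are illustrative, and the log and fmt imports are assumed):

ts, err := runtime.Timestamp("2023-01-02T15:04:05Z") // *timestamppb.Timestamp
if err != nil {
	log.Fatal(err)
}
d, err := runtime.Duration("90s") // *durationpb.Duration
if err != nil {
	log.Fatal(err)
}
fmt.Println(ts.AsTime(), d.AsDuration())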


@ -0,0 +1,181 @@
package runtime
import (
"context"
"errors"
"io"
"net/http"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
// ErrorHandlerFunc is the signature used to configure error handling.
type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
// StreamErrorHandlerFunc is the signature used to configure stream error handling.
type StreamErrorHandlerFunc func(context.Context, error) *status.Status
// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors.
type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int)
// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error
// passed to the DefaultRoutingErrorHandler.
type HTTPStatusError struct {
HTTPStatus int
Err error
}
func (e *HTTPStatusError) Error() string {
return e.Err.Error()
}
// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
func HTTPStatusFromCode(code codes.Code) int {
switch code {
case codes.OK:
return http.StatusOK
case codes.Canceled:
return http.StatusRequestTimeout
case codes.Unknown:
return http.StatusInternalServerError
case codes.InvalidArgument:
return http.StatusBadRequest
case codes.DeadlineExceeded:
return http.StatusGatewayTimeout
case codes.NotFound:
return http.StatusNotFound
case codes.AlreadyExists:
return http.StatusConflict
case codes.PermissionDenied:
return http.StatusForbidden
case codes.Unauthenticated:
return http.StatusUnauthorized
case codes.ResourceExhausted:
return http.StatusTooManyRequests
case codes.FailedPrecondition:
// Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
return http.StatusBadRequest
case codes.Aborted:
return http.StatusConflict
case codes.OutOfRange:
return http.StatusBadRequest
case codes.Unimplemented:
return http.StatusNotImplemented
case codes.Internal:
return http.StatusInternalServerError
case codes.Unavailable:
return http.StatusServiceUnavailable
case codes.DataLoss:
return http.StatusInternalServerError
}
grpclog.Infof("Unknown gRPC error code: %v", code)
return http.StatusInternalServerError
}
// HTTPError uses the mux-configured error handler.
func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
mux.errorHandler(ctx, mux, marshaler, w, r, err)
}
// DefaultHTTPErrorHandler is the default error handler.
// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode.
// If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is
// intended to allow passing through of specific statuses via the function set via WithRoutingErrorHandler
// for the ServeMux constructor to handle edge cases which the standard mappings in HTTPStatusFromCode
// are insufficient for.
// If otherwise, it replies with http.StatusInternalServerError.
//
// The response body written by this function is a Status message marshaled by the Marshaler.
func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
// return Internal when Marshal failed
const fallback = `{"code": 13, "message": "failed to marshal error message"}`
var customStatus *HTTPStatusError
if errors.As(err, &customStatus) {
err = customStatus.Err
}
s := status.Convert(err)
pb := s.Proto()
w.Header().Del("Trailer")
w.Header().Del("Transfer-Encoding")
contentType := marshaler.ContentType(pb)
w.Header().Set("Content-Type", contentType)
if s.Code() == codes.Unauthenticated {
w.Header().Set("WWW-Authenticate", s.Message())
}
buf, merr := marshaler.Marshal(pb)
if merr != nil {
grpclog.Infof("Failed to marshal error message %q: %v", s, merr)
w.WriteHeader(http.StatusInternalServerError)
if _, err := io.WriteString(w, fallback); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
return
}
md, ok := ServerMetadataFromContext(ctx)
if !ok {
grpclog.Infof("Failed to extract ServerMetadata from context")
}
handleForwardResponseServerMetadata(w, mux, md)
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
// Unless the request includes a TE header field indicating "trailers"
// is acceptable, as described in Section 4.3, a server SHOULD NOT
// generate trailer fields that it believes are necessary for the user
// agent to receive.
doForwardTrailers := requestAcceptsTrailers(r)
if doForwardTrailers {
handleForwardResponseTrailerHeader(w, md)
w.Header().Set("Transfer-Encoding", "chunked")
}
st := HTTPStatusFromCode(s.Code())
if customStatus != nil {
st = customStatus.HTTPStatus
}
w.WriteHeader(st)
if _, err := w.Write(buf); err != nil {
grpclog.Infof("Failed to write response: %v", err)
}
if doForwardTrailers {
handleForwardResponseTrailer(w, md)
}
}
func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
return status.Convert(err)
}
// DefaultRoutingErrorHandler is our default handler for routing errors.
// By default, HTTP status codes are mapped to gRPC error codes as follows:
//
// NotFound -> grpc.NotFound
// StatusBadRequest -> grpc.InvalidArgument
// MethodNotAllowed -> grpc.Unimplemented
// Other -> grpc.Internal, as the method is not expected to be called for anything else
func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
sterr := status.Error(codes.Internal, "Unexpected routing error")
switch httpStatus {
case http.StatusBadRequest:
sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus))
case http.StatusMethodNotAllowed:
sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus))
case http.StatusNotFound:
sterr = status.Error(codes.NotFound, http.StatusText(httpStatus))
}
mux.errorHandler(ctx, mux, marshaler, w, r, sterr)
}
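With the old StreamError plumbing removed, error customization goes through these handler types on the ServeMux. A hedged sketch that wraps the defaults, adds an illustrative response header, and keeps 405 responses as 405 instead of the default 501 mapping:

mux := runtime.NewServeMux(
	runtime.WithErrorHandler(func(ctx context.Context, sm *runtime.ServeMux, m runtime.Marshaler, w http.ResponseWriter, r *http.Request, err error) {
		w.Header().Set("X-Error-Source", "grpc-gateway") // illustrative header
		runtime.DefaultHTTPErrorHandler(ctx, sm, m, w, r, err)
	}),
	runtime.WithRoutingErrorHandler(func(ctx context.Context, sm *runtime.ServeMux, m runtime.Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
		if httpStatus == http.StatusMethodNotAllowed {
			runtime.DefaultHTTPErrorHandler(ctx, sm, m, w, r, &runtime.HTTPStatusError{
				HTTPStatus: httpStatus, // preserve 405 via HTTPStatusError
				Err:        status.Error(codes.Unimplemented, http.StatusText(httpStatus)),
			})
			return
		}
		runtime.DefaultRoutingErrorHandler(ctx, sm, m, w, r, httpStatus)
	}),
)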


@ -0,0 +1,165 @@
package runtime
import (
"encoding/json"
"fmt"
"io"
"sort"
"google.golang.org/genproto/protobuf/field_mask"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
)
func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor {
fd := fields.ByName(protoreflect.Name(name))
if fd != nil {
return fd
}
return fields.ByJSONName(name)
}
// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) {
fm := &field_mask.FieldMask{}
var root interface{}
if err := json.NewDecoder(r).Decode(&root); err != nil {
if err == io.EOF {
return fm, nil
}
return nil, err
}
queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}}
for len(queue) > 0 {
// dequeue an item
item := queue[0]
queue = queue[1:]
m, ok := item.node.(map[string]interface{})
switch {
case ok:
// if the item is an object, then enqueue all of its children
for k, v := range m {
if item.msg == nil {
return nil, fmt.Errorf("JSON structure did not match request type")
}
fd := getFieldByName(item.msg.Descriptor().Fields(), k)
if fd == nil {
return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName())
}
if isDynamicProtoMessage(fd.Message()) {
for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) {
newPath := p
if item.path != "" {
newPath = item.path + "." + newPath
}
queue = append(queue, fieldMaskPathItem{path: newPath})
}
continue
}
if isProtobufAnyMessage(fd.Message()) {
_, hasTypeField := v.(map[string]interface{})["@type"]
if hasTypeField {
queue = append(queue, fieldMaskPathItem{path: k})
continue
} else {
return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName())
}
}
child := fieldMaskPathItem{
node: v,
}
if item.path == "" {
child.path = string(fd.FullName().Name())
} else {
child.path = item.path + "." + string(fd.FullName().Name())
}
switch {
case fd.IsList(), fd.IsMap():
// As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86
// Do not recurse into repeated fields. The repeated field goes on the end of the path and we stop.
fm.Paths = append(fm.Paths, child.path)
case fd.Message() != nil:
child.msg = item.msg.Get(fd).Message()
fallthrough
default:
queue = append(queue, child)
}
}
case len(item.path) > 0:
// otherwise, it's a leaf node so print its path
fm.Paths = append(fm.Paths, item.path)
}
}
// Sort for deterministic output in the presence
// of repeated fields.
sort.Strings(fm.Paths)
return fm, nil
}
func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool {
return md != nil && (md.FullName() == "google.protobuf.Any")
}
func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool {
return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value")
}
// buildPathsBlindly does not attempt to match proto field names to the
// json value keys. Instead it relies completely on the structure of
// the unmarshalled json contained within in.
// Returns a slice containing all subpaths with the root at the
// passed in name and json value.
func buildPathsBlindly(name string, in interface{}) []string {
m, ok := in.(map[string]interface{})
if !ok {
return []string{name}
}
var paths []string
queue := []fieldMaskPathItem{{path: name, node: m}}
for len(queue) > 0 {
cur := queue[0]
queue = queue[1:]
m, ok := cur.node.(map[string]interface{})
if !ok {
// This should never happen since we should always check that we only add
// nodes of type map[string]interface{} to the queue.
continue
}
for k, v := range m {
if mi, ok := v.(map[string]interface{}); ok {
queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi})
} else {
// This is not a struct, so there are no more levels to descend.
curPath := cur.path + "." + k
paths = append(paths, curPath)
}
}
}
return paths
}
// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
type fieldMaskPathItem struct {
// the list of prior fields leading up to node connected by dots
path string
// the generic decoded JSON object for the current item, to be inspected for further path extraction
node interface{}
// parent message
msg protoreflect.Message
}
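FieldMaskFromRequestBody is what generated PATCH handlers use to derive the update mask from the request body. A minimal sketch against a well-known wrapper message, with an illustrative body (assumes the strings, log, fmt and wrapperspb imports):

body := strings.NewReader(`{"value": "new-name"}`)
fm, err := runtime.FieldMaskFromRequestBody(body, &wrapperspb.StringValue{})
if err != nil {
	log.Fatal(err)
}
fmt.Println(fm.GetPaths()) // [value]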


@ -2,19 +2,19 @@ package runtime
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/textproto"
"strings"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/internal"
"google.golang.org/genproto/googleapis/api/httpbody"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
var errEmptyResponse = errors.New("empty response")
// ForwardResponseStream forwards the stream from gRPC server to REST client.
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
f, ok := w.(http.Flusher)
@ -33,7 +33,6 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
handleForwardResponseServerMetadata(w, mux, md)
w.Header().Set("Transfer-Encoding", "chunked")
w.Header().Set("Content-Type", marshaler.ContentType())
if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
HTTPError(ctx, mux, marshaler, w, req, err)
return
@ -53,18 +52,25 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
return
}
if err != nil {
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
return
}
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
return
}
if !wroteHeader {
w.Header().Set("Content-Type", marshaler.ContentType(resp))
}
var buf []byte
httpBody, isHTTPBody := resp.(*httpbody.HttpBody)
switch {
case resp == nil:
buf, err = marshaler.Marshal(errorChunk(streamError(ctx, mux.streamErrorHandler, errEmptyResponse)))
buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response")))
case isHTTPBody:
buf = httpBody.GetData()
default:
result := map[string]interface{}{"result": resp}
if rb, ok := resp.(responseBody); ok {
@ -76,7 +82,7 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
if err != nil {
grpclog.Infof("Failed to marshal response chunk: %v", err)
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
return
}
if _, err = w.Write(buf); err != nil {
@ -132,15 +138,22 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
}
handleForwardResponseServerMetadata(w, mux, md)
// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
// Unless the request includes a TE header field indicating "trailers"
// is acceptable, as described in Section 4.3, a server SHOULD NOT
// generate trailer fields that it believes are necessary for the user
// agent to receive.
doForwardTrailers := requestAcceptsTrailers(req)
if doForwardTrailers {
handleForwardResponseTrailerHeader(w, md)
w.Header().Set("Transfer-Encoding", "chunked")
}
handleForwardResponseTrailerHeader(w, md)
contentType := marshaler.ContentType()
// Check marshaler on run time in order to keep backwards compatibility
// An interface param needs to be added to the ContentType() function on
// the Marshal interface to be able to remove this check
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
contentType = typeMarshaler.ContentTypeFromMessage(resp)
}
contentType := marshaler.ContentType(resp)
w.Header().Set("Content-Type", contentType)
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
@ -164,7 +177,14 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
grpclog.Infof("Failed to write response: %v", err)
}
handleForwardResponseTrailer(w, md)
if doForwardTrailers {
handleForwardResponseTrailer(w, md)
}
}
func requestAcceptsTrailers(req *http.Request) bool {
te := req.Header.Get("TE")
return strings.Contains(strings.ToLower(te), "trailers")
}
func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
@ -180,12 +200,14 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re
return nil
}
func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
serr := streamError(ctx, mux.streamErrorHandler, err)
func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) {
st := mux.streamErrorHandler(ctx, err)
msg := errorChunk(st)
if !wroteHeader {
w.WriteHeader(int(serr.HttpCode))
w.Header().Set("Content-Type", marshaler.ContentType(msg))
w.WriteHeader(HTTPStatusFromCode(st.Code()))
}
buf, merr := marshaler.Marshal(errorChunk(serr))
buf, merr := marshaler.Marshal(msg)
if merr != nil {
grpclog.Infof("Failed to marshal an error: %v", merr)
return
@ -194,19 +216,12 @@ func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, mar
grpclog.Infof("Failed to notify error to client: %v", werr)
return
}
}
// streamError returns the payload for the final message in a response stream
// that represents the given err.
func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError {
serr := errHandler(ctx, err)
if serr != nil {
return serr
if _, derr := w.Write(delimiter); derr != nil {
grpclog.Infof("Failed to send delimiter chunk: %v", err)
return
}
// TODO: log about misbehaving stream error handler?
return DefaultHTTPStreamErrorHandler(ctx, err)
}
func errorChunk(err *StreamError) map[string]proto.Message {
return map[string]proto.Message{"error": (*internal.StreamError)(err)}
func errorChunk(st *status.Status) map[string]proto.Message {
return map[string]proto.Message{"error": st.Proto()}
}
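Stream errors are now plain *status.Status values produced by the mux's StreamErrorHandlerFunc and written as a final {"error": ...} chunk followed by the delimiter. A sketch of a custom handler that avoids leaking internal error messages (the redaction policy is illustrative):

mux := runtime.NewServeMux(
	runtime.WithStreamErrorHandler(func(ctx context.Context, err error) *status.Status {
		s := status.Convert(err)
		if s.Code() == codes.Internal {
			return status.New(codes.Internal, "internal error") // hide details from clients
		}
		return s
	}),
)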


@ -4,13 +4,6 @@ import (
"google.golang.org/genproto/googleapis/api/httpbody"
)
// SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler
func SetHTTPBodyMarshaler(serveMux *ServeMux) {
serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{
Marshaler: &JSONPb{OrigName: true},
}
}
// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
// google.api.HttpBody message as the full response body if it is
// the actual message used as the response. If not, then this will
@ -19,18 +12,14 @@ type HTTPBodyMarshaler struct {
Marshaler
}
// ContentType implementation to keep backwards compatibility with marshal interface
func (h *HTTPBodyMarshaler) ContentType() string {
return h.ContentTypeFromMessage(nil)
}
// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns
// its specified content type otherwise fall back to the default Marshaler.
func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string {
// ContentType returns its specified content type in case v is a
// google.api.HttpBody message, otherwise it will fall back to the default Marshaler's
// content type.
func (h *HTTPBodyMarshaler) ContentType(v interface{}) string {
if httpBody, ok := v.(*httpbody.HttpBody); ok {
return httpBody.GetContentType()
}
return h.Marshaler.ContentType()
return h.Marshaler.ContentType(v)
}
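Since ContentType now receives the response message (and HTTPBodyMarshaler wraps the default marshaler, per the registry change further down), returning a google.api.HttpBody lets a handler control both the raw body and its Content-Type. A small sketch of the marshaler behavior (the PNG bytes are illustrative):

m := &runtime.HTTPBodyMarshaler{Marshaler: &runtime.JSONPb{}}
png := &httpbody.HttpBody{ContentType: "image/png", Data: []byte{0x89, 0x50, 0x4e, 0x47}}
fmt.Println(m.ContentType(png))                       // image/png
fmt.Println(m.ContentType(&wrapperspb.StringValue{})) // application/json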
// Marshal marshals "v" by returning the body bytes if v is a


@ -15,7 +15,7 @@ import (
type JSONBuiltin struct{}
// ContentType always returns "application/json".
func (*JSONBuiltin) ContentType() string {
func (*JSONBuiltin) ContentType(_ interface{}) string {
return "application/json"
}


@ -6,21 +6,25 @@ import (
"fmt"
"io"
"reflect"
"strconv"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
)
// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
// with the "github.com/golang/protobuf/jsonpb".
// It supports fully functionality of protobuf unlike JSONBuiltin.
// with the "google.golang.org/protobuf/encoding/protojson" marshaler.
// It supports the full functionality of protobuf unlike JSONBuiltin.
//
// The NewDecoder method returns a DecoderWrapper, so the underlying
// *json.Decoder methods can be used.
type JSONPb jsonpb.Marshaler
type JSONPb struct {
protojson.MarshalOptions
protojson.UnmarshalOptions
}
// ContentType always returns "application/json".
func (*JSONPb) ContentType() string {
func (*JSONPb) ContentType(_ interface{}) string {
return "application/json"
}
@ -47,7 +51,13 @@ func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
_, err = w.Write(buf)
return err
}
return (*jsonpb.Marshaler)(j).Marshal(w, p)
b, err := j.MarshalOptions.Marshal(p)
if err != nil {
return err
}
_, err = w.Write(b)
return err
}
var (
@ -56,8 +66,8 @@ var (
)
// marshalNonProto marshals a non-message field of a protobuf message.
// This function does not correctly marshals arbitrary data structure into JSON,
// but it is only capable of marshaling non-message field values of protobuf,
// This function does not correctly marshal arbitrary data structures into JSON,
// it is only capable of marshaling non-message field values of protobuf,
// i.e. primitive types, enums; pointers to primitives or enums; maps from
// integer/string types to primitives/enums/pointers to messages.
func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
@ -74,7 +84,7 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
if rv.Kind() == reflect.Slice {
if rv.IsNil() {
if j.EmitDefaults {
if j.EmitUnpopulated {
return []byte("[]"), nil
}
return []byte("null"), nil
@ -93,7 +103,37 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
return nil, err
}
}
if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
if err = j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
return nil, err
}
}
err = buf.WriteByte(']')
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
if rv.Type().Elem().Implements(typeProtoEnum) {
var buf bytes.Buffer
err := buf.WriteByte('[')
if err != nil {
return nil, err
}
for i := 0; i < rv.Len(); i++ {
if i != 0 {
err = buf.WriteByte(',')
if err != nil {
return nil, err
}
}
if j.UseEnumNumbers {
_, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10))
} else {
_, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"")
}
if err != nil {
return nil, err
}
}
@ -120,7 +160,7 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
}
return json.Marshal(m)
}
if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers {
return json.Marshal(enum.String())
}
return json.Marshal(rv.Interface())
@ -128,25 +168,29 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
// Unmarshal unmarshals JSON "data" into "v"
func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
return unmarshalJSONPb(data, v)
return unmarshalJSONPb(data, j.UnmarshalOptions, v)
}
// NewDecoder returns a Decoder which reads JSON stream from "r".
func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
d := json.NewDecoder(r)
return DecoderWrapper{Decoder: d}
return DecoderWrapper{
Decoder: d,
UnmarshalOptions: j.UnmarshalOptions,
}
}
// DecoderWrapper is a wrapper around a *json.Decoder that adds
// support for protos to the Decode method.
type DecoderWrapper struct {
*json.Decoder
protojson.UnmarshalOptions
}
// Decode wraps the embedded decoder's Decode method to support
// protos using a jsonpb.Unmarshaler.
func (d DecoderWrapper) Decode(v interface{}) error {
return decodeJSONPb(d.Decoder, v)
return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v)
}
// NewEncoder returns an Encoder which writes JSON stream into "w".
@ -162,21 +206,28 @@ func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
})
}
func unmarshalJSONPb(data []byte, v interface{}) error {
func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
d := json.NewDecoder(bytes.NewReader(data))
return decodeJSONPb(d, v)
return decodeJSONPb(d, unmarshaler, v)
}
func decodeJSONPb(d *json.Decoder, v interface{}) error {
func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
p, ok := v.(proto.Message)
if !ok {
return decodeNonProtoField(d, v)
return decodeNonProtoField(d, unmarshaler, v)
}
unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
return unmarshaler.UnmarshalNext(d, p)
// Decode into bytes for marshalling
var b json.RawMessage
err := d.Decode(&b)
if err != nil {
return err
}
return unmarshaler.Unmarshal([]byte(b), p)
}
func decodeNonProtoField(d *json.Decoder, v interface{}) error {
func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return fmt.Errorf("%T is not a pointer", v)
@ -186,8 +237,14 @@ func decodeNonProtoField(d *json.Decoder, v interface{}) error {
rv.Set(reflect.New(rv.Type().Elem()))
}
if rv.Type().ConvertibleTo(typeProtoMessage) {
unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
// Decode into bytes for marshalling
var b json.RawMessage
err := d.Decode(&b)
if err != nil {
return err
}
return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message))
}
rv = rv.Elem()
}
@ -211,24 +268,56 @@ func decodeNonProtoField(d *json.Decoder, v interface{}) error {
}
bk := result[0]
bv := reflect.New(rv.Type().Elem())
if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
if v == nil {
null := json.RawMessage("null")
v = &null
}
if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil {
return err
}
rv.SetMapIndex(bk, bv.Elem())
}
return nil
}
if rv.Kind() == reflect.Slice {
if rv.Type().Elem().Kind() == reflect.Uint8 {
var sl []byte
if err := d.Decode(&sl); err != nil {
return err
}
if sl != nil {
rv.SetBytes(sl)
}
return nil
}
var sl []json.RawMessage
if err := d.Decode(&sl); err != nil {
return err
}
if sl != nil {
rv.Set(reflect.MakeSlice(rv.Type(), 0, 0))
}
for _, item := range sl {
bv := reflect.New(rv.Type().Elem())
if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil {
return err
}
rv.Set(reflect.Append(rv, bv.Elem()))
}
return nil
}
if _, ok := rv.Interface().(protoEnum); ok {
var repr interface{}
if err := d.Decode(&repr); err != nil {
return err
}
switch repr.(type) {
switch v := repr.(type) {
case string:
// TODO(yugui) Should use proto.StructProperties?
return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
case float64:
rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type()))
return nil
default:
return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
@ -242,6 +331,8 @@ type protoEnum interface {
EnumDescriptor() ([]byte, []int)
}
var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem()
var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
// Delimiter for newline encoded JSON streams.
@ -249,14 +340,16 @@ func (j *JSONPb) Delimiter() []byte {
return []byte("\n")
}
// allowUnknownFields helps not to return an error when the destination
// is a struct and the input contains object keys which do not match any
// non-ignored, exported fields in the destination.
var allowUnknownFields = true
// DisallowUnknownFields enables option in decoder (unmarshaller) to
// return an error when it finds an unknown field. This function must be
// called before using the JSON marshaller.
func DisallowUnknownFields() {
allowUnknownFields = false
}
var (
convFromType = map[reflect.Kind]reflect.Value{
reflect.String: reflect.ValueOf(String),
reflect.Bool: reflect.ValueOf(Bool),
reflect.Float64: reflect.ValueOf(Float64),
reflect.Float32: reflect.ValueOf(Float32),
reflect.Int64: reflect.ValueOf(Int64),
reflect.Int32: reflect.ValueOf(Int32),
reflect.Uint64: reflect.ValueOf(Uint64),
reflect.Uint32: reflect.ValueOf(Uint32),
reflect.Slice: reflect.ValueOf(Bytes),
}
)
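JSONPb is no longer an alias of jsonpb.Marshaler, so its knobs move to the embedded protojson options: OrigName becomes UseProtoNames, EmitDefaults becomes EmitUnpopulated, EnumsAsInts becomes UseEnumNumbers, and the package-level DisallowUnknownFields is replaced by UnmarshalOptions.DiscardUnknown. A sketch of an equivalent configuration under those assumptions:

m := &runtime.JSONPb{
	MarshalOptions: protojson.MarshalOptions{
		UseProtoNames:   true, // previously OrigName: true
		EmitUnpopulated: true, // previously EmitDefaults: true
	},
	UnmarshalOptions: protojson.UnmarshalOptions{
		DiscardUnknown: true, // previously the default; DisallowUnknownFields() is gone
	},
}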


@ -4,15 +4,16 @@ import (
"io"
"errors"
"github.com/golang/protobuf/proto"
"io/ioutil"
"google.golang.org/protobuf/proto"
)
// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes
type ProtoMarshaller struct{}
// ContentType always returns "application/octet-stream".
func (*ProtoMarshaller) ContentType() string {
func (*ProtoMarshaller) ContentType(_ interface{}) string {
return "application/octet-stream"
}


@ -16,14 +16,9 @@ type Marshaler interface {
// NewEncoder returns an Encoder which writes bytes sequence into "w".
NewEncoder(w io.Writer) Encoder
// ContentType returns the Content-Type which this marshaler is responsible for.
ContentType() string
}
// Marshalers that implement contentTypeMarshaler will have their ContentTypeFromMessage method called
// to set the Content-Type header on the response
type contentTypeMarshaler interface {
// ContentTypeFromMessage returns the Content-Type this marshaler produces from the provided message
ContentTypeFromMessage(v interface{}) string
// The parameter describes the type which is being marshalled, which can sometimes
// affect the content type returned.
ContentType(v interface{}) string
}
// Decoder decodes a byte sequence


@ -6,6 +6,7 @@ import (
"net/http"
"google.golang.org/grpc/grpclog"
"google.golang.org/protobuf/encoding/protojson"
)
// MIMEWildcard is the fallback MIME type used for requests which do not match
@ -16,7 +17,16 @@ var (
acceptHeader = http.CanonicalHeaderKey("Accept")
contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
defaultMarshaler = &JSONPb{OrigName: true}
defaultMarshaler = &HTTPBodyMarshaler{
Marshaler: &JSONPb{
MarshalOptions: protojson.MarshalOptions{
EmitUnpopulated: true,
},
UnmarshalOptions: protojson.UnmarshalOptions{
DiscardUnknown: true,
},
},
}
)
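Services that need different wildcard behavior can still override this default per MIME type; a short sketch using runtime.WithMarshalerOption (which lives in this package but is outside the shown diff), keeping the HTTPBodyMarshaler wrapper so HttpBody passthrough still works:

mux := runtime.NewServeMux(
	runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.HTTPBodyMarshaler{
		Marshaler: &runtime.JSONPb{
			MarshalOptions: protojson.MarshalOptions{UseProtoNames: true},
		},
	}),
)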
// MarshalerForRequest returns the inbound/outbound marshalers for this request.


@ -0,0 +1,442 @@
package runtime
import (
"context"
"errors"
"fmt"
"net/http"
"net/textproto"
"regexp"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// UnescapingMode defines the behavior of ServeMux when unescaping path parameters.
type UnescapingMode int
const (
// UnescapingModeLegacy is the default V2 behavior, which escapes the entire
// path string before doing any routing.
UnescapingModeLegacy UnescapingMode = iota
// UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
// reserved characters.
UnescapingModeAllExceptReserved
// UnescapingModeAllExceptSlash unescapes URL path parameters except path
// separators, which will be left as "%2F".
UnescapingModeAllExceptSlash
// UnescapingModeAllCharacters unescapes all URL path parameters.
UnescapingModeAllCharacters
// UnescapingModeDefault is the default escaping type.
// TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's
// reference implementation
UnescapingModeDefault = UnescapingModeLegacy
)
var (
encodedPathSplitter = regexp.MustCompile("(/|%2F)")
)
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
// ServeMux is a request multiplexer for grpc-gateway.
// It matches http requests to patterns and invokes the corresponding handler.
type ServeMux struct {
// handlers maps HTTP method to a list of handlers.
handlers map[string][]handler
forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
marshalers marshalerRegistry
incomingHeaderMatcher HeaderMatcherFunc
outgoingHeaderMatcher HeaderMatcherFunc
metadataAnnotators []func(context.Context, *http.Request) metadata.MD
errorHandler ErrorHandlerFunc
streamErrorHandler StreamErrorHandlerFunc
routingErrorHandler RoutingErrorHandlerFunc
disablePathLengthFallback bool
unescapingMode UnescapingMode
}
// ServeMuxOption is an option that can be given to a ServeMux on construction.
type ServeMuxOption func(*ServeMux)
// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
//
// forwardResponseOption is an option that will be called on the relevant context.Context,
// http.ResponseWriter, and proto.Message before every forwarded response.
//
// The message may be nil in the case where just a header is being sent.
func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
}
}
// WithUnescapingMode sets the unescaping mode. See the definitions of UnescapingMode
// for more information.
func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.unescapingMode = mode
}
}
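A short sketch of opting into the stricter unescaping behavior ahead of the planned v3 default change:

mux := runtime.NewServeMux(
	runtime.WithUnescapingMode(runtime.UnescapingModeAllExceptReserved),
)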
// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be
// done with careful consideration.
func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
return func(serveMux *ServeMux) {
currentQueryParser = queryParameterParser
}
}
// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
type HeaderMatcherFunc func(string) (string, bool)
// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
func DefaultHeaderMatcher(key string) (string, bool) {
key = textproto.CanonicalMIMEHeaderKey(key)
if isPermanentHTTPHeader(key) {
return MetadataPrefix + key, true
} else if strings.HasPrefix(key, MetadataHeaderPrefix) {
return key[len(MetadataHeaderPrefix):], true
}
return "", false
}
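A sketch of extending DefaultHeaderMatcher so one extra header reaches the gRPC backend; the header name is illustrative, and matchers that admit a known-malformed header such as "Connection" now trigger the warning emitted by WithIncomingHeaderMatcher below:

mux := runtime.NewServeMux(
	runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) {
		if strings.EqualFold(key, "X-Request-Id") {
			return key, true // forwarded to gRPC metadata as-is
		}
		return runtime.DefaultHeaderMatcher(key)
	}),
)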
// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
//
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
for _, header := range fn.matchedMalformedHeaders() {
grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header)
}
return func(mux *ServeMux) {
mux.incomingHeaderMatcher = fn
}
}
// matchedMalformedHeaders returns the malformed headers that would be forwarded to gRPC server.
func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
if fn == nil {
return nil
}
headers := make([]string, 0)
for header := range malformedHTTPHeaders {
out, accept := fn(header)
if accept && isMalformedHTTPHeader(out) {
headers = append(headers, out)
}
}
return headers
}
// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
//
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
// passed to http response returned from gateway. To transform the header before passing to response,
// matcher should return modified header.
func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
return func(mux *ServeMux) {
mux.outgoingHeaderMatcher = fn
}
}
// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
//
// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
// is reading token from cookie and adding it in gRPC context.
func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
}
}
// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler.
//
// This can be used to configure a custom error response.
func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.errorHandler = fn
}
}
// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
// error handler, which allows for customizing the error trailer for server-streaming
// calls.
//
// For stream errors that occur before any response has been written, the mux's
// ErrorHandler will be invoked. However, once data has been written, the errors must
// be handled differently: they must be included in the response body. The response body's
// final message will include the error details returned by the stream error handler.
func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.streamErrorHandler = fn
}
}
// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle http routing errors.
//
// The handler is called for errors that can occur before a gRPC route is selected or executed,
// i.e. for the following HTTP status codes: StatusMethodNotAllowed, StatusNotFound, StatusBadRequest
func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.routingErrorHandler = fn
}
}
// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
func WithDisablePathLengthFallback() ServeMuxOption {
return func(serveMux *ServeMux) {
serveMux.disablePathLengthFallback = true
}
}
// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath.
// When called the handler will forward the request to the upstream grpc service health check (defined in the
// gRPC Health Checking Protocol).
//
// See here https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how
// to set up the protocol in the gRPC server.
//
// If you define a service as query parameter, this will also be forwarded as service in the HealthCheckRequest.
func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption {
return func(s *ServeMux) {
// error can be ignored since pattern is definitely valid
_ = s.HandlePath(
http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string,
) {
_, outboundMarshaler := MarshalerForRequest(s, r)
resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{
Service: r.URL.Query().Get("service"),
})
if err != nil {
s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
return
}
w.Header().Set("Content-Type", "application/json")
if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING {
var err error
switch resp.GetStatus() {
case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN:
err = status.Error(codes.Unavailable, resp.String())
case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN:
err = status.Error(codes.NotFound, resp.String())
}
s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
return
}
_ = outboundMarshaler.NewEncoder(w).Encode(resp)
})
}
}
// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux.
//
// See WithHealthEndpointAt for the general implementation.
func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption {
return WithHealthEndpointAt(healthCheckClient, "/healthz")
}
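Wiring the new health endpoint options against a standard grpc_health_v1 client; a minimal sketch where the target address is illustrative (assumes the grpc, credentials/insecure and grpc_health_v1 imports):

conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
	log.Fatal(err)
}
mux := runtime.NewServeMux(
	runtime.WithHealthzEndpoint(grpc_health_v1.NewHealthClient(conn)), // serves GET /healthz
	// or: runtime.WithHealthEndpointAt(grpc_health_v1.NewHealthClient(conn), "/internal/healthz")
)
_ = mux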
// NewServeMux returns a new ServeMux whose internal mapping is empty.
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
serveMux := &ServeMux{
handlers: make(map[string][]handler),
forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
marshalers: makeMarshalerMIMERegistry(),
errorHandler: DefaultHTTPErrorHandler,
streamErrorHandler: DefaultStreamErrorHandler,
routingErrorHandler: DefaultRoutingErrorHandler,
unescapingMode: UnescapingModeDefault,
}
for _, opt := range opts {
opt(serveMux)
}
if serveMux.incomingHeaderMatcher == nil {
serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
}
if serveMux.outgoingHeaderMatcher == nil {
serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
}
}
return serveMux
}
// Handle associates "h" to the pair of HTTP method and path pattern.
func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...)
}
// HandlePath allows users to configure custom path handlers.
// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/
func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error {
compiler, err := httprule.Parse(pathPattern)
if err != nil {
return fmt.Errorf("parsing path pattern: %w", err)
}
tp := compiler.Compile()
pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb)
if err != nil {
return fmt.Errorf("creating new pattern: %w", err)
}
s.Handle(meth, pattern, h)
return nil
}
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path.
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
path := r.URL.Path
if !strings.HasPrefix(path, "/") {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest)
return
}
// TODO(v3): remove UnescapingModeLegacy
if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" {
path = r.URL.RawPath
}
var components []string
// In UnescapingModeLegacy the URL has already been fully unescaped, so also splitting on "%2F" would
// amount to double unescaping; in UnescapingModeAllCharacters we do still split on "%2F" because path is
// the RawPath (i.e. still escaped). That means the default behavior of this function will change when
// UnescapingModeDefault is changed from UnescapingModeLegacy to UnescapingModeAllExceptReserved.
if s.unescapingMode == UnescapingModeAllCharacters {
components = encodedPathSplitter.Split(path[1:], -1)
} else {
components = strings.Split(path[1:], "/")
}
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
r.Method = strings.ToUpper(override)
if err := r.ParseForm(); err != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
sterr := status.Error(codes.InvalidArgument, err.Error())
s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
return
}
}
// Verb out here is to memoize for the fallback case below
var verb string
for _, h := range s.handlers[r.Method] {
// If the pattern has a verb, explicitly look for a suffix in the last
// component that matches a colon plus the verb. This allows us to
// handle some cases that otherwise can't be correctly handled by the
// former LastIndex case, such as when the verb literal itself contains
// a colon. This should work for all cases that have run through the
// parser because we know what verb we're looking for, however, there
// are still some cases that the parser itself cannot disambiguate. See
// the comment there if interested.
patVerb := h.pat.Verb()
l := len(components)
lastComponent := components[l-1]
var idx int = -1
if patVerb != "" && strings.HasSuffix(lastComponent, ":"+patVerb) {
idx = len(lastComponent) - len(patVerb) - 1
}
if idx == 0 {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
return
}
if idx > 0 {
components[l-1], verb = lastComponent[:idx], lastComponent[idx+1:]
}
pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
if err != nil {
var mse MalformedSequenceError
if ok := errors.As(err, &mse); ok {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
HTTPStatus: http.StatusBadRequest,
Err: mse,
})
}
continue
}
h.h(w, r, pathParams)
return
}
// lookup other methods to handle fallback from GET to POST and
// to determine if it is NotImplemented or NotFound.
for m, handlers := range s.handlers {
if m == r.Method {
continue
}
for _, h := range handlers {
pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
if err != nil {
var mse MalformedSequenceError
if ok := errors.As(err, &mse); ok {
_, outboundMarshaler := MarshalerForRequest(s, r)
s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
HTTPStatus: http.StatusBadRequest,
Err: mse,
})
}
continue
}
// X-HTTP-Method-Override is optional. Always allow fallback to POST.
if s.isPathLengthFallback(r) {
if err := r.ParseForm(); err != nil {
_, outboundMarshaler := MarshalerForRequest(s, r)
sterr := status.Error(codes.InvalidArgument, err.Error())
s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
return
}
h.h(w, r, pathParams)
return
}
_, outboundMarshaler := MarshalerForRequest(s, r)
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed)
return
}
}
_, outboundMarshaler := MarshalerForRequest(s, r)
s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
}
// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
return s.forwardResponseOptions
}
func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
}
type handler struct {
pat Pattern
h HandlerFunc
}
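HandlePath remains the way to attach plain HTTP handlers alongside the generated routes; a sketch with an illustrative path and payload, assuming mux is the *ServeMux built above:

err := mux.HandlePath(http.MethodGet, "/version", func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
	w.Header().Set("Content-Type", "application/json")
	_, _ = w.Write([]byte(`{"version": "dev"}`))
})
if err != nil {
	log.Fatal(err)
}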


@ -3,9 +3,10 @@ package runtime
import (
"errors"
"fmt"
"strconv"
"strings"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc/grpclog"
)
@ -14,14 +15,23 @@ var (
ErrNotMatch = errors.New("not match to the path pattern")
// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
ErrInvalidPattern = errors.New("invalid pattern")
// ErrMalformedSequence indicates that an escape sequence was malformed.
ErrMalformedSequence = errors.New("malformed escape sequence")
)
type MalformedSequenceError string
func (e MalformedSequenceError) Error() string {
return "malformed path escape " + strconv.Quote(string(e))
}
type op struct {
code utilities.OpCode
operand int
}
// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto.
// Pattern is a template pattern of http request paths defined in
// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto
type Pattern struct {
// ops is a list of operations
ops []op
@ -35,31 +45,14 @@ type Pattern struct {
tailLen int
// verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
verb string
// assumeColonVerb indicates whether a path suffix after a final
// colon may only be interpreted as a verb.
assumeColonVerb bool
}
type patternOptions struct {
assumeColonVerb bool
}
// PatternOpt is an option for creating Patterns.
type PatternOpt func(*patternOptions)
// NewPattern returns a new Pattern from the given definition values.
// "ops" is a sequence of op codes. "pool" is a constant pool.
// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
// "version" must be 1 for now.
// It returns an error if the given definition is invalid.
func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) {
options := patternOptions{
assumeColonVerb: true,
}
for _, o := range opts {
o(&options)
}
func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
if version != 1 {
grpclog.Infof("unsupported version: %d", version)
return Pattern{}, ErrInvalidPattern
@ -111,7 +104,7 @@ func NewPattern(version int, ops []int, pool []string, verb string, opts ...Patt
}
stack -= op.operand
if stack < 0 {
grpclog.Print("stack underflow")
grpclog.Info("stack underflow")
return Pattern{}, ErrInvalidPattern
}
stack++
@ -139,13 +132,12 @@ func NewPattern(version int, ops []int, pool []string, verb string, opts ...Patt
typedOps = append(typedOps, op)
}
return Pattern{
ops: typedOps,
pool: pool,
vars: vars,
stacksize: maxstack,
tailLen: tailLen,
verb: verb,
assumeColonVerb: options.assumeColonVerb,
ops: typedOps,
pool: pool,
vars: vars,
stacksize: maxstack,
tailLen: tailLen,
verb: verb,
}, nil
}
@ -157,12 +149,13 @@ func MustPattern(p Pattern, err error) Pattern {
return p
}
// Match examines components if it matches to the Pattern.
// If it matches, the function returns a mapping from field paths to their captured values.
// If otherwise, the function returns an error.
func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
// MatchAndEscape examines components to determine if they match to a Pattern.
// MatchAndEscape will return an error if no Patterns matched or if a pattern
// matched but contained malformed escape sequences. If successful, the function
// returns a mapping from field paths to their captured values.
func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) {
if p.verb != verb {
if p.assumeColonVerb || p.verb != "" {
if p.verb != "" {
return nil, ErrNotMatch
}
if len(components) == 0 {
@ -171,7 +164,6 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err
components = append([]string{}, components...)
components[len(components)-1] += ":" + verb
}
verb = ""
}
var pos int
@ -179,6 +171,8 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err
captured := make([]string, len(p.vars))
l := len(components)
for _, op := range p.ops {
var err error
switch op.code {
case utilities.OpNop:
continue
@ -191,6 +185,10 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err
if lit := p.pool[op.operand]; c != lit {
return nil, ErrNotMatch
}
} else if op.code == utilities.OpPush {
if c, err = unescape(c, unescapingMode, false); err != nil {
return nil, err
}
}
stack = append(stack, c)
pos++
@ -200,7 +198,11 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err
return nil, ErrNotMatch
}
end -= p.tailLen
stack = append(stack, strings.Join(components[pos:end], "/"))
c := strings.Join(components[pos:end], "/")
if c, err = unescape(c, unescapingMode, true); err != nil {
return nil, err
}
stack = append(stack, c)
pos = end
case utilities.OpConcatN:
n := op.operand
@ -222,6 +224,16 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err
return bindings, nil
}
// Match examines components to determine if they match to a Pattern.
// It will never perform per-component unescaping (see: UnescapingModeLegacy).
// Match will return an error if no Patterns matched. If successful,
// the function returns a mapping from field paths to their captured values.
//
// Deprecated: Use MatchAndEscape.
func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
return p.MatchAndEscape(components, verb, UnescapingModeDefault)
}
// Verb returns the verb part of the Pattern.
func (p Pattern) Verb() string { return p.verb }
@ -253,10 +265,119 @@ func (p Pattern) String() string {
return "/" + segs
}
// AssumeColonVerbOpt indicates whether a path suffix after a final
// colon may only be interpreted as a verb.
func AssumeColonVerbOpt(val bool) PatternOpt {
return PatternOpt(func(o *patternOptions) {
o.assumeColonVerb = val
})
/*
* The following code is adopted and modified from Go's standard library
* and carries the attached license.
*
* Copyright 2009 The Go Authors. All rights reserved.
* Use of this source code is governed by a BSD-style
* license that can be found in the LICENSE file.
*/
// ishex returns whether or not the given byte is a valid hex character
func ishex(c byte) bool {
switch {
case '0' <= c && c <= '9':
return true
case 'a' <= c && c <= 'f':
return true
case 'A' <= c && c <= 'F':
return true
}
return false
}
func isRFC6570Reserved(c byte) bool {
switch c {
case '!', '#', '$', '&', '\'', '(', ')', '*',
'+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
return true
default:
return false
}
}
// unhex converts a hex point to the bit representation
func unhex(c byte) byte {
switch {
case '0' <= c && c <= '9':
return c - '0'
case 'a' <= c && c <= 'f':
return c - 'a' + 10
case 'A' <= c && c <= 'F':
return c - 'A' + 10
}
return 0
}
// shouldUnescapeWithMode returns true if the character is escapable with the
// given mode
func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool {
switch mode {
case UnescapingModeAllExceptReserved:
if isRFC6570Reserved(c) {
return false
}
case UnescapingModeAllExceptSlash:
if c == '/' {
return false
}
case UnescapingModeAllCharacters:
return true
}
return true
}
// unescape unescapes a path string using the provided mode
func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) {
// TODO(v3): remove UnescapingModeLegacy
if mode == UnescapingModeLegacy {
return s, nil
}
if !multisegment {
mode = UnescapingModeAllCharacters
}
// Count %, check that they're well-formed.
n := 0
for i := 0; i < len(s); {
if s[i] == '%' {
n++
if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
s = s[i:]
if len(s) > 3 {
s = s[:3]
}
return "", MalformedSequenceError(s)
}
i += 3
} else {
i++
}
}
if n == 0 {
return s, nil
}
var t strings.Builder
t.Grow(len(s))
for i := 0; i < len(s); i++ {
switch s[i] {
case '%':
c := unhex(s[i+1])<<4 | unhex(s[i+2])
if shouldUnescapeWithMode(c, mode) {
t.WriteByte(c)
i += 2
continue
}
fallthrough
default:
t.WriteByte(s[i])
}
}
return t.String(), nil
}
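For reference, the sketch below shows roughly how generated gateway stubs build a Pattern for a path like "/v1/{name}" and call the new MatchAndEscape entry point instead of the deprecated Match. It is an illustration only, not part of the vendored diff; the op encoding and the UnescapingModeAllCharacters constant come from the grpc-gateway v2 runtime and utilities packages, and the concrete path values are made up.

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)

func main() {
	// Pattern for "/v1/{name}": match the literal "v1", push the next path
	// component, concatenate it and capture it under the variable "name".
	pat := runtime.MustPattern(runtime.NewPattern(
		1, // version; anything else returns ErrInvalidPattern
		[]int{
			int(utilities.OpLitPush), 0,
			int(utilities.OpPush), 0,
			int(utilities.OpConcatN), 1,
			int(utilities.OpCapture), 1,
		},
		[]string{"v1", "name"}, // string pool referenced by the operands above
		"",                     // no verb suffix
	))

	// MatchAndEscape takes an explicit unescaping mode; single path
	// components are always fully percent-decoded.
	vars, err := pat.MatchAndEscape([]string{"v1", "note%2F1"}, "", runtime.UnescapingModeAllCharacters)
	fmt.Println(vars, err) // map[name:note/1] <nil>
}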


@ -1,7 +1,7 @@
package runtime
import (
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/proto"
)
// StringP returns a pointer to a string whose pointee is same as the given string value.


@ -0,0 +1,342 @@
package runtime
import (
"errors"
"fmt"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/genproto/protobuf/field_mask"
"google.golang.org/grpc/grpclog"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/structpb"
"google.golang.org/protobuf/types/known/timestamppb"
"google.golang.org/protobuf/types/known/wrapperspb"
)
var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
var currentQueryParser QueryParameterParser = &DefaultQueryParser{}
// QueryParameterParser defines interface for all query parameter parsers
type QueryParameterParser interface {
Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
}
// PopulateQueryParameters parses query parameters
// into "msg" using current query parser
func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
return currentQueryParser.Parse(msg, values, filter)
}
// DefaultQueryParser is a QueryParameterParser which implements the default
// query parameters parsing behavior.
//
// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context.
type DefaultQueryParser struct{}
// Parse populates "values" into "msg".
// A value is ignored if its key starts with one of the elements in "filter".
func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
for key, values := range values {
match := valuesKeyRegexp.FindStringSubmatch(key)
if len(match) == 3 {
key = match[1]
values = append([]string{match[2]}, values...)
}
fieldPath := strings.Split(key, ".")
if filter.HasCommonPrefix(fieldPath) {
continue
}
if err := populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, values); err != nil {
return err
}
}
return nil
}
// PopulateFieldFromPath sets a value in a nested Protobuf structure.
func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
fieldPath := strings.Split(fieldPathString, ".")
return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value})
}
func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error {
if len(fieldPath) < 1 {
return errors.New("no field path")
}
if len(values) < 1 {
return errors.New("no value provided")
}
var fieldDescriptor protoreflect.FieldDescriptor
for i, fieldName := range fieldPath {
fields := msgValue.Descriptor().Fields()
// Get field by name
fieldDescriptor = fields.ByName(protoreflect.Name(fieldName))
if fieldDescriptor == nil {
fieldDescriptor = fields.ByJSONName(fieldName)
if fieldDescriptor == nil {
// We're not returning an error here because this could just be
// an extra query parameter that isn't part of the request.
grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, "."))
return nil
}
}
// If this is the last element, we're done
if i == len(fieldPath)-1 {
break
}
// Only singular message fields are allowed
if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated {
return fmt.Errorf("invalid path: %q is not a message", fieldName)
}
// Get the nested message
msgValue = msgValue.Mutable(fieldDescriptor).Message()
}
// Check if oneof already set
if of := fieldDescriptor.ContainingOneof(); of != nil {
if f := msgValue.WhichOneof(of); f != nil {
return fmt.Errorf("field already set for oneof %q", of.FullName().Name())
}
}
switch {
case fieldDescriptor.IsList():
return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values)
case fieldDescriptor.IsMap():
return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values)
}
if len(values) > 1 {
return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", "))
}
return populateField(fieldDescriptor, msgValue, values[0])
}
func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error {
v, err := parseField(fieldDescriptor, value)
if err != nil {
return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err)
}
msgValue.Set(fieldDescriptor, v)
return nil
}
func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error {
for _, value := range values {
v, err := parseField(fieldDescriptor, value)
if err != nil {
return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err)
}
list.Append(v)
}
return nil
}
func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error {
if len(values) != 2 {
return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fieldDescriptor.FullName())
}
key, err := parseField(fieldDescriptor.MapKey(), values[0])
if err != nil {
return fmt.Errorf("parsing map key %q: %w", fieldDescriptor.FullName().Name(), err)
}
value, err := parseField(fieldDescriptor.MapValue(), values[1])
if err != nil {
return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err)
}
mp.Set(key.MapKey(), value)
return nil
}
func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) {
switch fieldDescriptor.Kind() {
case protoreflect.BoolKind:
v, err := strconv.ParseBool(value)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfBool(v), nil
case protoreflect.EnumKind:
enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName())
switch {
case errors.Is(err, protoregistry.NotFound):
return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
case err != nil:
return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err)
}
// Look for enum by name
v := enum.Descriptor().Values().ByName(protoreflect.Name(value))
if v == nil {
i, err := strconv.Atoi(value)
if err != nil {
return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
}
// Look for enum by number
v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i))
if v == nil {
return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
}
}
return protoreflect.ValueOfEnum(v.Number()), nil
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
v, err := strconv.ParseInt(value, 10, 32)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfInt32(int32(v)), nil
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
v, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfInt64(v), nil
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
v, err := strconv.ParseUint(value, 10, 32)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfUint32(uint32(v)), nil
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
v, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfUint64(v), nil
case protoreflect.FloatKind:
v, err := strconv.ParseFloat(value, 32)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfFloat32(float32(v)), nil
case protoreflect.DoubleKind:
v, err := strconv.ParseFloat(value, 64)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfFloat64(v), nil
case protoreflect.StringKind:
return protoreflect.ValueOfString(value), nil
case protoreflect.BytesKind:
v, err := Bytes(value)
if err != nil {
return protoreflect.Value{}, err
}
return protoreflect.ValueOfBytes(v), nil
case protoreflect.MessageKind, protoreflect.GroupKind:
return parseMessage(fieldDescriptor.Message(), value)
default:
panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind()))
}
}
func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) {
var msg proto.Message
switch msgDescriptor.FullName() {
case "google.protobuf.Timestamp":
t, err := time.Parse(time.RFC3339Nano, value)
if err != nil {
return protoreflect.Value{}, err
}
msg = timestamppb.New(t)
case "google.protobuf.Duration":
d, err := time.ParseDuration(value)
if err != nil {
return protoreflect.Value{}, err
}
msg = durationpb.New(d)
case "google.protobuf.DoubleValue":
v, err := strconv.ParseFloat(value, 64)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.DoubleValue{Value: v}
case "google.protobuf.FloatValue":
v, err := strconv.ParseFloat(value, 32)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.FloatValue{Value: float32(v)}
case "google.protobuf.Int64Value":
v, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.Int64Value{Value: v}
case "google.protobuf.Int32Value":
v, err := strconv.ParseInt(value, 10, 32)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.Int32Value{Value: int32(v)}
case "google.protobuf.UInt64Value":
v, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.UInt64Value{Value: v}
case "google.protobuf.UInt32Value":
v, err := strconv.ParseUint(value, 10, 32)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.UInt32Value{Value: uint32(v)}
case "google.protobuf.BoolValue":
v, err := strconv.ParseBool(value)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.BoolValue{Value: v}
case "google.protobuf.StringValue":
msg = &wrapperspb.StringValue{Value: value}
case "google.protobuf.BytesValue":
v, err := Bytes(value)
if err != nil {
return protoreflect.Value{}, err
}
msg = &wrapperspb.BytesValue{Value: v}
case "google.protobuf.FieldMask":
fm := &field_mask.FieldMask{}
fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
msg = fm
case "google.protobuf.Value":
var v structpb.Value
err := protojson.Unmarshal([]byte(value), &v)
if err != nil {
return protoreflect.Value{}, err
}
msg = &v
case "google.protobuf.Struct":
var v structpb.Struct
err := protojson.Unmarshal([]byte(value), &v)
if err != nil {
return protoreflect.Value{}, err
}
msg = &v
default:
return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
}
return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil
}
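As a rough usage sketch of the new query-parameter population path (again an illustration, not part of the diff): the well-known Timestamp type stands in for a generated request message, and the field names and values are illustrative.

package main

import (
	"fmt"
	"net/url"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	msg := &timestamppb.Timestamp{}
	values := url.Values{"seconds": {"1700000000"}, "nanos": {"42"}}

	// An empty DoubleArray filters nothing, so both parameters are applied.
	if err := runtime.PopulateQueryParameters(msg, values, utilities.NewDoubleArray(nil)); err != nil {
		panic(err)
	}
	fmt.Println(msg.GetSeconds(), msg.GetNanos()) // 1700000000 42
}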


@ -0,0 +1,33 @@
package utilities
import (
"flag"
"strings"
)
// flagInterface is an cut down interface to `flag`
type flagInterface interface {
Var(value flag.Value, name string, usage string)
}
// StringArrayFlag defines a flag with the specified name and usage string.
// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag.
func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags {
value := &StringArrayFlags{}
f.Var(value, name, usage)
return value
}
// StringArrayFlags is a wrapper of `[]string` to provider an interface for `flag.Var`
type StringArrayFlags []string
// String returns a string representation of `StringArrayFlags`
func (i *StringArrayFlags) String() string {
return strings.Join(*i, ",")
}
// Set appends a value to `StringArrayFlags`
func (i *StringArrayFlags) Set(value string) error {
*i = append(*i, value)
return nil
}
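A short sketch of using the new repeatable-flag helper above (flag name and values are illustrative):

package main

import (
	"flag"
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ExitOnError)

	// Each occurrence of --header appends another value to the slice.
	headers := utilities.StringArrayFlag(fs, "header", "header to forward (repeatable)")
	_ = fs.Parse([]string{"--header", "x-a=1", "--header", "x-b=2"})

	fmt.Println(headers.String()) // x-a=1,x-b=2
}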


@ -145,10 +145,7 @@ func (l byLex) Less(i, j int) bool {
return false
}
}
if k < len(sj) {
return true
}
return false
return k < len(sj)
}
// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.

vendor/go.opencensus.io/Makefile generated vendored

@ -91,7 +91,7 @@ embedmd:
.PHONY: install-tools
install-tools:
go get -u golang.org/x/lint/golint
go get -u golang.org/x/tools/cmd/cover
go get -u golang.org/x/tools/cmd/goimports
go get -u github.com/rakyll/embedmd
go install golang.org/x/lint/golint@latest
go install golang.org/x/tools/cmd/cover@latest
go install golang.org/x/tools/cmd/goimports@latest
go install github.com/rakyll/embedmd@latest


@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io"
// Version is the current release version of OpenCensus in use.
func Version() string {
return "0.23.0"
return "0.24.0"
}


@ -28,6 +28,7 @@ var (
ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless)
ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes)
ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds)
ClientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "Number of started client RPCs.", stats.UnitDimensionless)
ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds)
)
@ -70,6 +71,14 @@ var (
Aggregation: view.Count(),
}
ClientStartedRPCsView = &view.View{
Measure: ClientStartedRPCs,
Name: "grpc.io/client/started_rpcs",
Description: "Number of started client RPCs.",
TagKeys: []tag.Key{KeyClientMethod},
Aggregation: view.Count(),
}
ClientSentMessagesPerRPCView = &view.View{
Measure: ClientSentMessagesPerRPC,
Name: "grpc.io/client/sent_messages_per_rpc",


@ -27,6 +27,7 @@ var (
ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes)
ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless)
ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes)
ServerStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "Number of started server RPCs.", stats.UnitDimensionless)
ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds)
)
@ -73,6 +74,14 @@ var (
Aggregation: view.Count(),
}
ServerStartedRPCsView = &view.View{
Measure: ServerStartedRPCs,
Name: "grpc.io/server/started_rpcs",
Description: "Number of started server RPCs.",
TagKeys: []tag.Key{KeyServerMethod},
Aggregation: view.Count(),
}
ServerReceivedMessagesPerRPCView = &view.View{
Name: "grpc.io/server/received_messages_per_rpc",
Description: "Distribution of messages received count per RPC, by method.",


@ -82,8 +82,10 @@ func methodName(fullname string) string {
// statsHandleRPC processes the RPC events.
func statsHandleRPC(ctx context.Context, s stats.RPCStats) {
switch st := s.(type) {
case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer:
case *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer:
// do nothing for client
case *stats.Begin:
handleRPCBegin(ctx, st)
case *stats.OutPayload:
handleRPCOutPayload(ctx, st)
case *stats.InPayload:
@ -95,6 +97,25 @@ func statsHandleRPC(ctx context.Context, s stats.RPCStats) {
}
}
func handleRPCBegin(ctx context.Context, s *stats.Begin) {
d, ok := ctx.Value(rpcDataKey).(*rpcData)
if !ok {
if grpclog.V(2) {
grpclog.Infoln("Failed to retrieve *rpcData from context.")
}
}
if s.IsClient() {
ocstats.RecordWithOptions(ctx,
ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))),
ocstats.WithMeasurements(ClientStartedRPCs.M(1)))
} else {
ocstats.RecordWithOptions(ctx,
ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))),
ocstats.WithMeasurements(ServerStartedRPCs.M(1)))
}
}
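The new started-RPC counters recorded above are plain opt-in views; wiring them into an application presumably looks like the sketch below (the view names come from this diff, everything else is illustrative).

package main

import (
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
)

func main() {
	// Register the new counters alongside any other ocgrpc views; they are
	// tagged by method and aggregated with a simple count.
	if err := view.Register(ocgrpc.ClientStartedRPCsView, ocgrpc.ServerStartedRPCsView); err != nil {
		log.Fatal(err)
	}
}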
func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) {
d, ok := ctx.Value(rpcDataKey).(*rpcData)
if !ok {


@ -31,14 +31,14 @@ import (
// Handler is an http.Handler wrapper to instrument your HTTP server with
// OpenCensus. It supports both stats and tracing.
//
// Tracing
// # Tracing
//
// This handler is aware of the incoming request's span, reading it from request
// headers as configured using the Propagation field.
// The extracted span can be accessed from the incoming request's
// context.
//
// span := trace.FromContext(r.Context())
// span := trace.FromContext(r.Context())
//
// The server span will be automatically ended at the end of ServeHTTP.
type Handler struct {
@ -224,7 +224,9 @@ func (t *trackingResponseWriter) WriteHeader(statusCode int) {
}
// wrappedResponseWriter returns a wrapped version of the original
// ResponseWriter and only implements the same combination of additional
//
// ResponseWriter and only implements the same combination of additional
//
// interfaces as the original.
// This implementation is based on https://github.com/felixge/httpsnoop.
func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter {


@ -19,7 +19,7 @@ Package stats contains support for OpenCensus stats recording.
OpenCensus allows users to create typed measures, record measurements,
aggregate the collected data, and export the aggregated data.
Measures
# Measures
A measure represents a type of data point to be tracked and recorded.
For example, latency, request Mb/s, and response Mb/s are measures
@ -33,7 +33,7 @@ Libraries can define and export measures. Application authors can then
create views and collect and break down measures by the tags they are
interested in.
Recording measurements
# Recording measurements
Measurement is a data point to be collected for a measure. For example,
for a latency (ms) measure, 100 is a measurement that represents a 100ms
@ -49,7 +49,7 @@ Libraries can always record measurements, and applications can later decide
on which measurements they want to collect by registering views. This allows
libraries to turn on the instrumentation by default.
Exemplars
# Exemplars
For a given recorded measurement, the associated exemplar is a diagnostic map
that gives more information about the measurement.
@ -64,6 +64,5 @@ then the trace span will be added to the exemplar associated with the measuremen
When exported to a supporting back end, you should be able to easily navigate
to example traces that fell into each bucket in the Distribution.
*/
package stats // import "go.opencensus.io/stats"


@ -21,5 +21,11 @@ import (
// DefaultRecorder will be called for each Record call.
var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{})
// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but
// avoids interface{} conversion.
// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type,
// but is interface{} here to avoid import loops
var MeasurementRecorder interface{}
// SubscriptionReporter reports when a view subscribed with a measure.
var SubscriptionReporter func(measure string)


@ -86,10 +86,29 @@ func createRecordOption(ros ...Options) *recordOptions {
return o
}
type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{})
// Record records one or multiple measurements with the same context at once.
// If there are any tags in the context, measurements will be tagged with them.
func Record(ctx context.Context, ms ...Measurement) {
RecordWithOptions(ctx, WithMeasurements(ms...))
// Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality
// (RecordOptions) we can reduce some allocations to speed up this hot path
if len(ms) == 0 {
return
}
recorder := internal.MeasurementRecorder.(measurementRecorder)
record := false
for _, m := range ms {
if m.desc.subscribed() {
record = true
break
}
}
if !record {
return
}
recorder(tag.FromContext(ctx), ms, nil)
return
}
// RecordWithTags records one or multiple measurements at once.


@ -90,9 +90,9 @@ func Sum() *Aggregation {
//
// If len(bounds) >= 2 then the boundaries for bucket index i are:
//
// [-infinity, bounds[i]) for i = 0
// [bounds[i-1], bounds[i]) for 0 < i < length
// [bounds[i-1], +infinity) for i = length
// [-infinity, bounds[i]) for i = 0
// [bounds[i-1], bounds[i]) for 0 < i < length
// [bounds[i-1], +infinity) for i = length
//
// If len(bounds) is 0 then there is no histogram associated with the
// distribution. There will be a single bucket with boundaries


@ -59,8 +59,15 @@ func (c *collector) clearRows() {
// encodeWithKeys encodes the map by using values
// only associated with the keys provided.
func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte {
// Compute the buffer length we will need ahead of time to avoid resizing later
reqLen := 0
for _, k := range keys {
s, _ := m.Value(k)
// We will store each key + its length
reqLen += len(s) + 1
}
vb := &tagencoding.Values{
Buffer: make([]byte, len(keys)),
Buffer: make([]byte, reqLen),
}
for _, k := range keys {
v, _ := m.Value(k)


@ -34,7 +34,7 @@
// Libraries can define views but it is recommended that in most cases registering
// views be left up to applications.
//
// Exporting
// # Exporting
//
// Collected and aggregated data can be exported to a metric collection
// backend by registering its exporter.


@ -33,6 +33,7 @@ func init() {
defaultWorker = NewMeter().(*worker)
go defaultWorker.start()
internal.DefaultRecorder = record
internal.MeasurementRecorder = recordMeasurement
}
type measureRef struct {
@ -199,11 +200,21 @@ func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
defaultWorker.Record(tags, ms, attachments)
}
func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) {
defaultWorker.recordMeasurement(tags, ms, attachments)
}
// Record records a set of measurements ms associated with the given tags and attachments.
func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
w.recordMeasurement(tags, ms.([]stats.Measurement), attachments)
}
// recordMeasurement records a set of measurements ms associated with the given tags and attachments.
// This is the same as Record but without an interface{} type to avoid allocations
func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) {
req := &recordReq{
tm: tags,
ms: ms.([]stats.Measurement),
ms: ms,
attachments: attachments,
t: time.Now(),
}
@ -221,6 +232,11 @@ func SetReportingPeriod(d time.Duration) {
defaultWorker.SetReportingPeriod(d)
}
// Stop stops the default worker.
func Stop() {
defaultWorker.Stop()
}
// SetReportingPeriod sets the interval between reporting aggregated views in
// the program. If duration is less than or equal to zero, it enables the
// default behavior.
@ -281,7 +297,7 @@ func (w *worker) start() {
case <-w.quit:
w.timer.Stop()
close(w.c)
w.done <- true
close(w.done)
return
}
}
@ -290,8 +306,11 @@ func (w *worker) start() {
func (w *worker) Stop() {
prodMgr := metricproducer.GlobalManager()
prodMgr.DeleteProducer(w)
w.quit <- true
select {
case <-w.quit:
default:
close(w.quit)
}
<-w.done
}
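Callers are unaffected by the Record fast path and the now-closeable default worker; a minimal sketch (measure and view names are illustrative, not from the diff):

package main

import (
	"context"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

var latencyMs = stats.Float64("example.org/measures/latency", "request latency", stats.UnitMilliseconds)

func main() {
	// Record is a no-op until a view subscribes to the measure.
	stats.Record(context.Background(), latencyMs.M(3.1))

	v := &view.View{
		Name:        "example.org/views/latency",
		Measure:     latencyMs,
		Aggregation: view.Distribution(0, 10, 100),
	}
	if err := view.Register(v); err != nil {
		panic(err)
	}
	stats.Record(context.Background(), latencyMs.M(12.5))

	// Stop is newly exported in this bump and shuts down the default worker.
	view.Stop()
}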


@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build go1.9
// +build go1.9
package tag


@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !go1.9
// +build !go1.9
package tag

vendor/go.opencensus.io/trace/doc.go generated vendored

@ -18,24 +18,23 @@ Package trace contains support for OpenCensus distributed tracing.
The following assumes a basic familiarity with OpenCensus concepts.
See http://opencensus.io
Exporting Traces
# Exporting Traces
To export collected tracing data, register at least one exporter. You can use
one of the provided exporters or write your own.
trace.RegisterExporter(exporter)
trace.RegisterExporter(exporter)
By default, traces will be sampled relatively rarely. To change the sampling
frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler
to sample a subset of traces, or use AlwaysSample to collect a trace on every run:
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
Be careful about using trace.AlwaysSample in a production application with
significant traffic: a new trace will be started and exported for every request.
Adding Spans to a Trace
# Adding Spans to a Trace
A trace consists of a tree of spans. In Go, the current span is carried in a
context.Context.
@ -44,8 +43,8 @@ It is common to want to capture all the activity of a function call in a span. F
this to work, the function must take a context.Context as a parameter. Add these two
lines to the top of the function:
ctx, span := trace.StartSpan(ctx, "example.com/Run")
defer span.End()
ctx, span := trace.StartSpan(ctx, "example.com/Run")
defer span.End()
StartSpan will create a new top-level span if the context
doesn't contain another span, otherwise it will create a child span.


@ -44,7 +44,7 @@ func (lm lruMap) len() int {
}
func (lm lruMap) keys() []interface{} {
keys := make([]interface{}, len(lm.cacheKeys))
keys := make([]interface{}, 0, len(lm.cacheKeys))
for k := range lm.cacheKeys {
keys = append(keys, k)
}


@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build go1.11
// +build go1.11
package trace


@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !go1.11
// +build !go1.11
package trace


@ -1,71 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
package ctxhttp // import "golang.org/x/net/context/ctxhttp"
import (
"context"
"io"
"net/http"
"net/url"
"strings"
)
// Do sends an HTTP request with the provided http.Client and returns
// an HTTP response.
//
// If the client is nil, http.DefaultClient is used.
//
// The provided ctx must be non-nil. If it is canceled or times out,
// ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
if client == nil {
client = http.DefaultClient
}
resp, err := client.Do(req.WithContext(ctx))
// If we got an error, and the context has been canceled,
// the context's error is probably more useful.
if err != nil {
select {
case <-ctx.Done():
err = ctx.Err()
default:
}
}
return resp, err
}
// Get issues a GET request via the Do function.
func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
return Do(ctx, client, req)
}
// Head issues a HEAD request via the Do function.
func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return nil, err
}
return Do(ctx, client, req)
}
// Post issues a POST request via the Do function.
func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
req, err := http.NewRequest("POST", url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", bodyType)
return Do(ctx, client, req)
}
// PostForm issues a POST request via the Do function.
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}


@ -44,7 +44,7 @@ func init() {
// HTTP/1, but unlikely to occur in practice and (2) Upgrading from HTTP/1 to
// h2c - this works by using the HTTP/1 Upgrade header to request an upgrade to
// h2c. When either of those situations occur we hijack the HTTP/1 connection,
// convert it to a HTTP/2 connection and pass the net.Conn to http2.ServeConn.
// convert it to an HTTP/2 connection and pass the net.Conn to http2.ServeConn.
type h2cHandler struct {
Handler http.Handler
s *http2.Server


@ -88,13 +88,9 @@ func (p *pipe) Write(d []byte) (n int, err error) {
p.c.L = &p.mu
}
defer p.c.Signal()
if p.err != nil {
if p.err != nil || p.breakErr != nil {
return 0, errClosedPipeWrite
}
if p.breakErr != nil {
p.unread += len(d)
return len(d), nil // discard when there is no reader
}
return p.b.Write(d)
}


@ -441,7 +441,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
if s.NewWriteScheduler != nil {
sc.writeSched = s.NewWriteScheduler()
} else {
sc.writeSched = NewPriorityWriteScheduler(nil)
sc.writeSched = newRoundRobinWriteScheduler()
}
// These start at the RFC-specified defaults. If there is a higher
@ -1822,15 +1822,18 @@ func (sc *serverConn) processData(f *DataFrame) error {
}
if len(data) > 0 {
st.bodyBytes += int64(len(data))
wrote, err := st.body.Write(data)
if err != nil {
// The handler has closed the request body.
// Return the connection-level flow control for the discarded data,
// but not the stream-level flow control.
sc.sendWindowUpdate(nil, int(f.Length)-wrote)
return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
return nil
}
if wrote != len(data) {
panic("internal error: bad Writer")
}
st.bodyBytes += int64(len(data))
}
// Return any padded flow control now, since we won't
@ -2426,7 +2429,7 @@ type requestBody struct {
conn *serverConn
closeOnce sync.Once // for use by Close only
sawEOF bool // for use by Read only
pipe *pipe // non-nil if we have a HTTP entity message body
pipe *pipe // non-nil if we have an HTTP entity message body
needsContinue bool // need to send a 100-continue
}
@ -2566,7 +2569,8 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
clen = ""
}
}
if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
_, hasContentLength := rws.snapHeader["Content-Length"]
if !hasContentLength && clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
clen = strconv.Itoa(len(p))
}
_, hasContentType := rws.snapHeader["Content-Type"]
@ -2771,7 +2775,7 @@ func (w *responseWriter) FlushError() error {
err = rws.bw.Flush()
} else {
// The bufio.Writer won't call chunkWriter.Write
// (writeChunk with zero bytes, so we have to do it
// (writeChunk with zero bytes), so we have to do it
// ourselves to force the HTTP response header and/or
// final DATA frame (with END_STREAM) to be sent.
_, err = chunkWriter{rws}.Write(nil)


@ -560,10 +560,11 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
traceGotConn(req, cc, reused)
res, err := cc.RoundTrip(req)
if err != nil && retry <= 6 {
roundTripErr := err
if req, err = shouldRetryRequest(req, err); err == nil {
// After the first retry, do exponential backoff with 10% jitter.
if retry == 0 {
t.vlogf("RoundTrip retrying after failure: %v", err)
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
}
backoff := float64(uint(1) << (uint(retry) - 1))
@ -572,7 +573,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
timer := backoffNewTimer(d)
select {
case <-timer.C:
t.vlogf("RoundTrip retrying after failure: %v", err)
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
case <-req.Context().Done():
timer.Stop()
@ -1265,6 +1266,29 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
return res, nil
}
cancelRequest := func(cs *clientStream, err error) error {
cs.cc.mu.Lock()
bodyClosed := cs.reqBodyClosed
cs.cc.mu.Unlock()
// Wait for the request body to be closed.
//
// If nothing closed the body before now, abortStreamLocked
// will have started a goroutine to close it.
//
// Closing the body before returning avoids a race condition
// with net/http checking its readTrackingBody to see if the
// body was read from or closed. See golang/go#60041.
//
// The body is closed in a separate goroutine without the
// connection mutex held, but dropping the mutex before waiting
// will keep us from holding it indefinitely if the body
// close is slow for some reason.
if bodyClosed != nil {
<-bodyClosed
}
return err
}
for {
select {
case <-cs.respHeaderRecv:
@ -1284,10 +1308,10 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
case <-ctx.Done():
err := ctx.Err()
cs.abortStream(err)
return nil, err
return nil, cancelRequest(cs, err)
case <-cs.reqCancel:
cs.abortStream(errRequestCanceled)
return nil, errRequestCanceled
return nil, cancelRequest(cs, errRequestCanceled)
}
}
}
@ -1844,6 +1868,9 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
if err != nil {
return nil, err
}
if !httpguts.ValidHostHeader(host) {
return nil, errors.New("http2: invalid Host header")
}
var path string
if req.Method != "CONNECT" {
@ -1880,7 +1907,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
// 8.1.2.3 Request Pseudo-Header Fields
// The :path pseudo-header field includes the path and query parts of the
// target URI (the path-absolute production and optionally a '?' character
// followed by the query production (see Sections 3.3 and 3.4 of
// followed by the query production, see Sections 3.3 and 3.4 of
// [RFC3986]).
f(":authority", host)
m := req.Method
@ -2555,6 +2582,9 @@ func (b transportResponseBody) Close() error {
cs := b.cs
cc := cs.cc
cs.bufPipe.BreakWithError(errClosedResponseBody)
cs.abortStream(errClosedResponseBody)
unread := cs.bufPipe.Len()
if unread > 0 {
cc.mu.Lock()
@ -2573,9 +2603,6 @@ func (b transportResponseBody) Close() error {
cc.wmu.Unlock()
}
cs.bufPipe.BreakWithError(errClosedResponseBody)
cs.abortStream(errClosedResponseBody)
select {
case <-cs.donec:
case <-cs.ctx.Done():


@ -184,7 +184,8 @@ func (wr *FrameWriteRequest) replyToWriter(err error) {
// writeQueue is used by implementations of WriteScheduler.
type writeQueue struct {
s []FrameWriteRequest
s []FrameWriteRequest
prev, next *writeQueue
}
func (q *writeQueue) empty() bool { return len(q.s) == 0 }

vendor/golang.org/x/net/http2/writesched_roundrobin.go generated vendored Normal file

@ -0,0 +1,119 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"fmt"
"math"
)
type roundRobinWriteScheduler struct {
// control contains control frames (SETTINGS, PING, etc.).
control writeQueue
// streams maps stream ID to a queue.
streams map[uint32]*writeQueue
// stream queues are stored in a circular linked list.
// head is the next stream to write, or nil if there are no streams open.
head *writeQueue
// pool of empty queues for reuse.
queuePool writeQueuePool
}
// newRoundRobinWriteScheduler constructs a new write scheduler.
// The round robin scheduler priorizes control frames
// like SETTINGS and PING over DATA frames.
// When there are no control frames to send, it performs a round-robin
// selection from the ready streams.
func newRoundRobinWriteScheduler() WriteScheduler {
ws := &roundRobinWriteScheduler{
streams: make(map[uint32]*writeQueue),
}
return ws
}
func (ws *roundRobinWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
if ws.streams[streamID] != nil {
panic(fmt.Errorf("stream %d already opened", streamID))
}
q := ws.queuePool.get()
ws.streams[streamID] = q
if ws.head == nil {
ws.head = q
q.next = q
q.prev = q
} else {
// Queues are stored in a ring.
// Insert the new stream before ws.head, putting it at the end of the list.
q.prev = ws.head.prev
q.next = ws.head
q.prev.next = q
q.next.prev = q
}
}
func (ws *roundRobinWriteScheduler) CloseStream(streamID uint32) {
q := ws.streams[streamID]
if q == nil {
return
}
if q.next == q {
// This was the only open stream.
ws.head = nil
} else {
q.prev.next = q.next
q.next.prev = q.prev
if ws.head == q {
ws.head = q.next
}
}
delete(ws.streams, streamID)
ws.queuePool.put(q)
}
func (ws *roundRobinWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {}
func (ws *roundRobinWriteScheduler) Push(wr FrameWriteRequest) {
if wr.isControl() {
ws.control.push(wr)
return
}
q := ws.streams[wr.StreamID()]
if q == nil {
// This is a closed stream.
// wr should not be a HEADERS or DATA frame.
// We push the request onto the control queue.
if wr.DataSize() > 0 {
panic("add DATA on non-open stream")
}
ws.control.push(wr)
return
}
q.push(wr)
}
func (ws *roundRobinWriteScheduler) Pop() (FrameWriteRequest, bool) {
// Control and RST_STREAM frames first.
if !ws.control.empty() {
return ws.control.shift(), true
}
if ws.head == nil {
return FrameWriteRequest{}, false
}
q := ws.head
for {
if wr, ok := q.consume(math.MaxInt32); ok {
ws.head = q.next
return wr, true
}
q = q.next
if q == ws.head {
break
}
}
return FrameWriteRequest{}, false
}
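With this bump the round-robin scheduler becomes the server default; a server that wants the previous RFC 7540 priority behavior can presumably still opt back in through NewWriteScheduler, roughly as sketched below (address and certificate paths are placeholders, not from the diff).

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}

	h2 := &http2.Server{
		// The default is now round-robin; supply the priority scheduler
		// explicitly to keep the old behavior.
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(nil)
		},
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}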


@ -121,7 +121,7 @@ func CheckJoiners(enable bool) Option {
}
}
// StrictDomainName limits the set of permissable ASCII characters to those
// StrictDomainName limits the set of permissible ASCII characters to those
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
// hyphen). This is set by default for MapForLookup and ValidateForRegistration,
// but is only useful if ValidateLabels is set.

File diff suppressed because it is too large.

vendor/golang.org/x/net/idna/tables15.0.0.go generated vendored Normal file

File diff suppressed because it is too large.

vendor/golang.org/x/net/idna/trie.go generated vendored

@ -6,27 +6,6 @@
package idna
// appendMapping appends the mapping for the respective rune. isMapped must be
// true. A mapping is a categorization of a rune as defined in UTS #46.
func (c info) appendMapping(b []byte, s string) []byte {
index := int(c >> indexShift)
if c&xorBit == 0 {
s := mappings[index:]
return append(b, s[1:s[0]+1]...)
}
b = append(b, s...)
if c&inlineXOR == inlineXOR {
// TODO: support and handle two-byte inline masks
b[len(b)-1] ^= byte(index)
} else {
for p := len(b) - int(xorData[index]); p < len(b); p++ {
index++
b[p] ^= xorData[index]
}
}
return b
}
// Sparse block handling code.
type valueRange struct {

vendor/golang.org/x/net/idna/trie12.0.0.go generated vendored Normal file

@ -0,0 +1,31 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.16
// +build !go1.16
package idna
// appendMapping appends the mapping for the respective rune. isMapped must be
// true. A mapping is a categorization of a rune as defined in UTS #46.
func (c info) appendMapping(b []byte, s string) []byte {
index := int(c >> indexShift)
if c&xorBit == 0 {
s := mappings[index:]
return append(b, s[1:s[0]+1]...)
}
b = append(b, s...)
if c&inlineXOR == inlineXOR {
// TODO: support and handle two-byte inline masks
b[len(b)-1] ^= byte(index)
} else {
for p := len(b) - int(xorData[index]); p < len(b); p++ {
index++
b[p] ^= xorData[index]
}
}
return b
}

vendor/golang.org/x/net/idna/trie13.0.0.go generated vendored Normal file

@ -0,0 +1,31 @@
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.16
// +build go1.16
package idna
// appendMapping appends the mapping for the respective rune. isMapped must be
// true. A mapping is a categorization of a rune as defined in UTS #46.
func (c info) appendMapping(b []byte, s string) []byte {
index := int(c >> indexShift)
if c&xorBit == 0 {
p := index
return append(b, mappings[mappingIndex[p]:mappingIndex[p+1]]...)
}
b = append(b, s...)
if c&inlineXOR == inlineXOR {
// TODO: support and handle two-byte inline masks
b[len(b)-1] ^= byte(index)
} else {
for p := len(b) - int(xorData[index]); p < len(b); p++ {
index++
b[p] ^= xorData[index]
}
}
return b
}

vendor/golang.org/x/oauth2/README.md generated vendored

@ -19,7 +19,7 @@ See pkg.go.dev for further documentation and examples.
* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2)
* [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google)
## Policy for new packages
## Policy for new endpoints
We no longer accept new provider-specific packages in this repo if all
they do is add a single endpoint variable. If you just want to add a
@ -29,8 +29,12 @@ package.
## Report Issues / Send Patches
This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html.
The main issue tracker for the oauth2 repository is located at
https://github.com/golang/oauth2/issues.
This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html. In particular:
* Excluding trivial changes, all contributions should be connected to an existing issue.
* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted.
* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2).


@ -14,7 +14,7 @@ import (
// ParseKey converts the binary contents of a private key file
// to an *rsa.PrivateKey. It detects whether the private key is in a
// PEM container or not. If so, it extracts the the private key
// PEM container or not. If so, it extracts the private key
// from PEM container before conversion. It only supports PEM
// containers with no passphrase.
func ParseKey(key []byte) (*rsa.PrivateKey, error) {


@ -19,8 +19,6 @@ import (
"strings"
"sync"
"time"
"golang.org/x/net/context/ctxhttp"
)
// Token represents the credentials used to authorize
@ -57,12 +55,18 @@ type Token struct {
}
// tokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
// providers returning a token or error in JSON form.
// https://datatracker.ietf.org/doc/html/rfc6749#section-5.1
type tokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
// error fields
// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2
ErrorCode string `json:"error"`
ErrorDescription string `json:"error_description"`
ErrorURI string `json:"error_uri"`
}
func (e *tokenJSON) expiry() (t time.Time) {
@ -229,7 +233,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
}
func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
r, err := ctxhttp.Do(ctx, ContextClient(ctx), req)
r, err := ContextClient(ctx).Do(req.WithContext(ctx))
if err != nil {
return nil, err
}
@ -238,21 +242,29 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
if err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
if code := r.StatusCode; code < 200 || code > 299 {
return nil, &RetrieveError{
Response: r,
Body: body,
}
failureStatus := r.StatusCode < 200 || r.StatusCode > 299
retrieveError := &RetrieveError{
Response: r,
Body: body,
// attempt to populate error detail below
}
var token *Token
content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
switch content {
case "application/x-www-form-urlencoded", "text/plain":
// some endpoints return a query string
vals, err := url.ParseQuery(string(body))
if err != nil {
return nil, err
if failureStatus {
return nil, retrieveError
}
return nil, fmt.Errorf("oauth2: cannot parse response: %v", err)
}
retrieveError.ErrorCode = vals.Get("error")
retrieveError.ErrorDescription = vals.Get("error_description")
retrieveError.ErrorURI = vals.Get("error_uri")
token = &Token{
AccessToken: vals.Get("access_token"),
TokenType: vals.Get("token_type"),
@ -267,8 +279,14 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
default:
var tj tokenJSON
if err = json.Unmarshal(body, &tj); err != nil {
return nil, err
if failureStatus {
return nil, retrieveError
}
return nil, fmt.Errorf("oauth2: cannot parse json: %v", err)
}
retrieveError.ErrorCode = tj.ErrorCode
retrieveError.ErrorDescription = tj.ErrorDescription
retrieveError.ErrorURI = tj.ErrorURI
token = &Token{
AccessToken: tj.AccessToken,
TokenType: tj.TokenType,
@ -278,17 +296,37 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
}
json.Unmarshal(body, &token.Raw) // no error checks for optional fields
}
// according to spec, servers should respond status 400 in error case
// https://www.rfc-editor.org/rfc/rfc6749#section-5.2
// but some unorthodox servers respond 200 in error case
if failureStatus || retrieveError.ErrorCode != "" {
return nil, retrieveError
}
if token.AccessToken == "" {
return nil, errors.New("oauth2: server response missing access_token")
}
return token, nil
}
// mirrors oauth2.RetrieveError
type RetrieveError struct {
Response *http.Response
Body []byte
Response *http.Response
Body []byte
ErrorCode string
ErrorDescription string
ErrorURI string
}
func (r *RetrieveError) Error() string {
if r.ErrorCode != "" {
s := fmt.Sprintf("oauth2: %q", r.ErrorCode)
if r.ErrorDescription != "" {
s += fmt.Sprintf(" %q", r.ErrorDescription)
}
if r.ErrorURI != "" {
s += fmt.Sprintf(" %q", r.ErrorURI)
}
return s
}
return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
}

vendor/golang.org/x/oauth2/oauth2.go generated vendored

@ -16,6 +16,7 @@ import (
"net/url"
"strings"
"sync"
"time"
"golang.org/x/oauth2/internal"
)
@ -140,7 +141,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
//
// State is a token to protect the user from CSRF attacks. You must
// always provide a non-empty string and validate that it matches the
// the state query parameter on your redirect callback.
// state query parameter on your redirect callback.
// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
//
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
@ -290,6 +291,8 @@ type reuseTokenSource struct {
mu sync.Mutex // guards t
t *Token
expiryDelta time.Duration
}
// Token returns the current token if it's still valid, else will
@ -305,6 +308,7 @@ func (s *reuseTokenSource) Token() (*Token, error) {
if err != nil {
return nil, err
}
t.expiryDelta = s.expiryDelta
s.t = t
return t, nil
}
@ -379,3 +383,30 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
new: src,
}
}
// ReuseTokenSource returns a TokenSource that acts in the same manner as the
// TokenSource returned by ReuseTokenSource, except the expiry buffer is
// configurable. The expiration time of a token is calculated as
// t.Expiry.Add(-earlyExpiry).
func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource {
// Don't wrap a reuseTokenSource in itself. That would work,
// but cause an unnecessary number of mutex operations.
// Just build the equivalent one.
if rt, ok := src.(*reuseTokenSource); ok {
if t == nil {
// Just use it directly, but set the expiryDelta to earlyExpiry,
// so the behavior matches what the user expects.
rt.expiryDelta = earlyExpiry
return rt
}
src = rt.new
}
if t != nil {
t.expiryDelta = earlyExpiry
}
return &reuseTokenSource{
t: t,
new: src,
expiryDelta: earlyExpiry,
}
}
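A minimal sketch of the new constructor, wrapping any existing TokenSource (a static token is used here purely for illustration):

package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	base := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "abc"})

	// Treat tokens as expired one minute before their reported expiry,
	// instead of the default 10-second buffer.
	ts := oauth2.ReuseTokenSourceWithExpiry(nil, base, time.Minute)

	tok, err := ts.Token()
	fmt.Println(tok.AccessToken, err) // abc <nil>
}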

vendor/golang.org/x/oauth2/token.go generated vendored

@ -16,10 +16,10 @@ import (
"golang.org/x/oauth2/internal"
)
// expiryDelta determines how earlier a token should be considered
// defaultExpiryDelta determines how earlier a token should be considered
// expired than its actual expiration time. It is used to avoid late
// expirations due to client-server time mismatches.
const expiryDelta = 10 * time.Second
const defaultExpiryDelta = 10 * time.Second
// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
@ -52,6 +52,11 @@ type Token struct {
// raw optionally contains extra metadata from the server
// when updating a token.
raw interface{}
// expiryDelta is used to calculate when a token is considered
// expired, by subtracting from Expiry. If zero, defaultExpiryDelta
// is used.
expiryDelta time.Duration
}
// Type returns t.TokenType if non-empty, else "Bearer".
@ -127,6 +132,11 @@ func (t *Token) expired() bool {
if t.Expiry.IsZero() {
return false
}
expiryDelta := defaultExpiryDelta
if t.expiryDelta != 0 {
expiryDelta = t.expiryDelta
}
return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
}
@ -165,14 +175,31 @@ func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error)
}
// RetrieveError is the error returned when the token endpoint returns a
// non-2XX HTTP status code.
// non-2XX HTTP status code or populates RFC 6749's 'error' parameter.
// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2
type RetrieveError struct {
Response *http.Response
// Body is the body that was consumed by reading Response.Body.
// It may be truncated.
Body []byte
// ErrorCode is RFC 6749's 'error' parameter.
ErrorCode string
// ErrorDescription is RFC 6749's 'error_description' parameter.
ErrorDescription string
// ErrorURI is RFC 6749's 'error_uri' parameter.
ErrorURI string
}
func (r *RetrieveError) Error() string {
if r.ErrorCode != "" {
s := fmt.Sprintf("oauth2: %q", r.ErrorCode)
if r.ErrorDescription != "" {
s += fmt.Sprintf(" %q", r.ErrorDescription)
}
if r.ErrorURI != "" {
s += fmt.Sprintf(" %q", r.ErrorURI)
}
return s
}
return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
}
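Callers can now inspect the RFC 6749 fields when a token exchange fails; a hedged sketch (the endpoint and authorization code are placeholders, so the call below is expected to fail):

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "id",
		ClientSecret: "secret",
		Endpoint:     oauth2.Endpoint{TokenURL: "https://auth.invalid/token"},
	}

	_, err := conf.Exchange(context.Background(), "bogus-code")

	var rErr *oauth2.RetrieveError
	if errors.As(err, &rErr) {
		// Populated from the server's JSON or form-encoded error response.
		fmt.Println(rErr.ErrorCode, rErr.ErrorDescription, rErr.ErrorURI)
	} else if err != nil {
		fmt.Println("transport or parse error:", err)
	}
}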

Some files were not shown because too many files have changed in this diff.