vendor c/common@852ca05a1f
Also force an update of c/image to prevent a downgrade.

Signed-off-by: Valentin Rothberg <vrothberg@redhat.com>
parent bac20d1917 · commit adacd3b127
go.mod (19 changed lines)
@@ -12,9 +12,9 @@ require (
 	github.com/containernetworking/cni v1.1.2
 	github.com/containernetworking/plugins v1.2.0
 	github.com/containers/buildah v1.29.1-0.20230201192322-e56eb25575c7
-	github.com/containers/common v0.51.0
+	github.com/containers/common v0.51.1-0.20230221111605-852ca05a1fbb
 	github.com/containers/conmon v2.0.20+incompatible
-	github.com/containers/image/v5 v5.24.1-0.20230214095352-ae0edb7a4261
+	github.com/containers/image/v5 v5.24.2-0.20230221092641-10858b2058d8
 	github.com/containers/ocicrypt v1.1.7
 	github.com/containers/psgo v1.8.0
 	github.com/containers/storage v1.45.3
@@ -124,7 +124,7 @@ require (
 	github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/leodido/go-urn v1.2.1 // indirect
-	github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf // indirect
+	github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/manifoldco/promptui v0.9.0 // indirect
 	github.com/mattn/go-runewidth v0.0.14 // indirect
@@ -147,12 +147,12 @@ require (
 	github.com/rivo/uniseg v0.4.3 // indirect
 	github.com/seccomp/libseccomp-golang v0.10.0 // indirect
 	github.com/segmentio/ksuid v1.0.4 // indirect
-	github.com/sigstore/fulcio v1.0.0 // indirect
+	github.com/sigstore/fulcio v1.1.0 // indirect
 	github.com/sigstore/rekor v1.0.1 // indirect
 	github.com/sigstore/sigstore v1.5.1 // indirect
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-	github.com/sylabs/sif/v2 v2.9.1 // indirect
+	github.com/sylabs/sif/v2 v2.9.2 // indirect
 	github.com/tchap/go-patricia v2.3.0+incompatible // indirect
 	github.com/theupdateframework/go-tuf v0.5.2 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
@@ -163,12 +163,13 @@ require (
 	go.opencensus.io v0.24.0 // indirect
 	golang.org/x/crypto v0.6.0 // indirect
 	golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
-	golang.org/x/mod v0.7.0 // indirect
+	golang.org/x/mod v0.8.0 // indirect
 	golang.org/x/oauth2 v0.5.0 // indirect
-	golang.org/x/tools v0.4.0 // indirect
+	golang.org/x/tools v0.6.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
-	google.golang.org/grpc v1.51.0 // indirect
+	google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc // indirect
+	google.golang.org/grpc v1.53.0 // indirect
+	gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
 	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
go.sum (50 changed lines)
@@ -135,7 +135,7 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/checkpoint-restore/checkpointctl v0.0.0-20220321135231-33f4a66335f0 h1:txB5jvhzUCSiiQmqmMWpo5CEB7Gj/Hq5Xqi7eaPl8ko=
 github.com/checkpoint-restore/checkpointctl v0.0.0-20220321135231-33f4a66335f0/go.mod h1:67kWC1PXQLR3lM/mmNnu3Kzn7K4TSWZAGUuQP1JSngk=
 github.com/checkpoint-restore/go-criu/v5 v5.2.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
@@ -261,12 +261,12 @@ github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP
 github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
 github.com/containers/buildah v1.29.1-0.20230201192322-e56eb25575c7 h1:GmQhTfsGuYgGfuYWEF4Ed+rEvlSWRmxisLBL2J8rCb4=
 github.com/containers/buildah v1.29.1-0.20230201192322-e56eb25575c7/go.mod h1:sFvOi+WMtMtrkxx1Dn8EhF5/ddXNyC1f5LAj4ZGzjAs=
-github.com/containers/common v0.51.0 h1:Ax4YHNTG8cEPHZJcMYRoP7sfBgOISceeyOvmZzmSucg=
-github.com/containers/common v0.51.0/go.mod h1:3W2WIdalgQfrsX/T5tjX+6CxgT3ThJVN2G9sNuFjuCM=
+github.com/containers/common v0.51.1-0.20230221111605-852ca05a1fbb h1:F4gLGDX/R8sheL+KVD4XFkJ24QqJ9y+paKtHvtGz5HE=
+github.com/containers/common v0.51.1-0.20230221111605-852ca05a1fbb/go.mod h1:nnffag2+LETTBL5tYfq1TSd1j45bnsqG+Yeu9Ib07sc=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.24.1-0.20230214095352-ae0edb7a4261 h1:/MH0DESjLYCdhoW7BHsXkhNL8wzqG2tEhsZcPbrqN7c=
-github.com/containers/image/v5 v5.24.1-0.20230214095352-ae0edb7a4261/go.mod h1:Ct2hpzGz3SQ5VOlDJ1uFp1N4ux798yMW/hNvDL361DU=
+github.com/containers/image/v5 v5.24.2-0.20230221092641-10858b2058d8 h1:v+nxA4BL/y0OUZRUgxPZB7Ac5rK2vdgZ3/0ZZN//zfY=
+github.com/containers/image/v5 v5.24.2-0.20230221092641-10858b2058d8/go.mod h1:8Mrhb3iwzDVdzKRHTPiCb8ZVxurvRyPY6k0hQEVSAOI=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -465,6 +465,7 @@ github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4
 github.com/go-rod/rod v0.112.3 h1:xbSaA9trZ8v/+eJRGOM6exK1RCsLPwwnzA78vpES0gk=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
 github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
@@ -584,6 +585,7 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
@@ -719,8 +721,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
 github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
-github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf h1:ndns1qx/5dL43g16EQkPV/i8+b3l5bYQwLeoSBe7tS8=
-github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf/go.mod h1:aGkAgvWY/IUcVFfuly53REpfv5edu25oij+qHRFaraA=
+github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k85TKy+mvKzjW5R9rIC+Lv4KGbE7uNu0I=
+github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0=
 github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
@@ -827,7 +829,7 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
 github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.8.1 h1:xFTEVwOFa1D/Ty24Ws1npBWkDYEV9BqZrsDxVrVkrrU=
+github.com/onsi/ginkgo/v2 v2.8.3 h1:RpbK1G8nWPNaCVFBWsOGnEQQGgASi6b8fxcWBvDYjxQ=
 github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -917,7 +919,7 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
+github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -929,7 +931,7 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
+github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
@@ -958,8 +960,8 @@ github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c
 github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
 github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sigstore/fulcio v1.0.0 h1:hBZW6qg9GXTtCX8jOg1hmyjYLrmsEKZGeMwAbW3XNEg=
-github.com/sigstore/fulcio v1.0.0/go.mod h1:j4MzLxX/Be0rHYh3JF2dgMorkWGzEMHBqIHwFU8I/Rw=
+github.com/sigstore/fulcio v1.1.0 h1:mzzJ05Ccu8Y2inyioklNvc8MpzlGHxu8YqNeTm0dHfU=
+github.com/sigstore/fulcio v1.1.0/go.mod h1:zv1ZQTXZbUwQdRwajlQksc34pRas+2aZYpIZoQBNev8=
 github.com/sigstore/rekor v1.0.1 h1:rcESXSNkAPRWFYZel9rarspdvneET60F2ngNkadi89c=
 github.com/sigstore/rekor v1.0.1/go.mod h1:ecTKdZWGWqE1pl3U1m1JebQJLU/hSjD9vYHOmHQ7w4g=
 github.com/sigstore/sigstore v1.5.1 h1:iUou0QJW8eQKMUkTXbFyof9ZOblDtfaW2Sn2+QI8Tcs=
@@ -1025,8 +1027,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/sylabs/sif/v2 v2.9.1 h1:LxF9EcH4hmwSqDBdRv9Tt57YVkvV9rDu66AA/nmns2Y=
-github.com/sylabs/sif/v2 v2.9.1/go.mod h1:10lbqUw/uptKH4Z6dRDZl+9Iz7jMiFMDE99eHRJDwOs=
+github.com/sylabs/sif/v2 v2.9.2 h1:i8YxBON4FOdqiIBX/bbY4IiHZTVJLlyA6yx9TJyQRyo=
+github.com/sylabs/sif/v2 v2.9.2/go.mod h1:YSXiKUZTG7pcFpAMwQxdrVV4tVRuv1MBVBX3br1PkTg=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@@ -1197,8 +1199,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
-golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1487,8 +1489,8 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
 golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4=
-golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
+golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1570,8 +1572,8 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
 google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY=
-google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc h1:ijGwO+0vL2hJt5gaygqP2j6PfflOBrRot0IczKbmtio=
+google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
 google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1597,8 +1599,8 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
 google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
 google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
 google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
-google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
+google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
+google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1629,6 +1631,8 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/go-jose/go-jose.v2 v2.6.1 h1:qEzJlIDmG9q5VO0M/o8tGS65QMHMS1w01TQJB1VPJ4U=
+gopkg.in/go-jose/go-jose.v2 v2.6.1/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
 gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
@@ -1698,7 +1702,7 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
 k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
+k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M=
 k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
 k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
 k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
@@ -71,8 +71,8 @@ load helpers
     run_podman --events-backend=file events --stream=false --filter type=image --since $t0
     is "$output" ".*image push $imageID dir:$pushedDir
 .*image save $imageID $tarball
-.*image loadfromarchive *$tarball
-.*image pull *docker-archive:$tarball
+.*image loadfromarchive $imageID $tarball
+.*image pull $imageID docker-archive:$tarball
 .*image tag $imageID $tag
 .*image untag $imageID $tag:latest
 .*image tag $imageID $tag
@@ -24,10 +24,6 @@ type LoadOptions struct {
 func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ([]string, error) {
 	logrus.Debugf("Loading image from %q", path)
 
-	if r.eventChannel != nil {
-		defer r.writeEvent(&Event{ID: "", Name: path, Time: time.Now(), Type: EventTypeImageLoad})
-	}
-
 	if options == nil {
 		options = &LoadOptions{}
 	}
@@ -81,7 +77,10 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) (
 	} {
 		loadedImages, transportName, err := f()
 		if err == nil {
-			return loadedImages, nil
+			if r.eventChannel != nil {
+				err = r.writeLoadEvents(path, loadedImages)
+			}
+			return loadedImages, err
 		}
 		logrus.Debugf("Error loading %s (%s): %v", path, transportName, err)
 		loadErrors = append(loadErrors, fmt.Errorf("%s: %v", transportName, err))
@@ -98,6 +97,18 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) (
 	return nil, loadError
 }
 
+// writeLoadEvents writes the events of the loaded image.
+func (r *Runtime) writeLoadEvents(path string, loadedImages []string) error {
+	for _, name := range loadedImages {
+		image, _, err := r.LookupImage(name, nil)
+		if err != nil {
+			return fmt.Errorf("locating pulled image %q name in containers storage: %w", name, err)
+		}
+		r.writeEvent(&Event{ID: image.ID(), Name: path, Time: time.Now(), Type: EventTypeImageLoad})
+	}
+	return nil
+}
+
 // loadMultiImageDockerArchive loads the docker archive specified by ref. In
 // case the path@reference notation was used, only the specified image will be
 // loaded. Otherwise, all images will be loaded.
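Note on the hunk above: instead of one deferred EventTypeImageLoad event carrying an empty ID, writeLoadEvents now emits one event per loaded image, each with the resolved image ID, and only after the load succeeded. A minimal standalone sketch of the consumer-visible difference, where the Event type is a simplified stand-in for libimage's and all values are hypothetical:

package main

import (
	"fmt"
	"time"
)

// Event is a simplified stand-in for libimage's Event type; only the
// fields used by the diff above are reproduced.
type Event struct {
	ID   string
	Name string
	Time time.Time
	Type string
}

func main() {
	events := make(chan Event, 2)

	// After the change, a successful load writes one event per loaded
	// image with its resolved ID; before, a single deferred event with
	// an empty ID fired even when the load failed.
	loadedImages := map[string]string{ // name -> resolved ID (hypothetical)
		"quay.io/example/img:latest": "sha256:0a1b2c3d",
	}
	path := "/tmp/archive.tar" // hypothetical archive path
	for _, id := range loadedImages {
		events <- Event{ID: id, Name: path, Time: time.Now(), Type: "loadfromarchive"}
	}
	close(events)

	for e := range events {
		fmt.Printf("%s %s id=%s name=%s\n", e.Time.Format(time.RFC3339), e.Type, e.ID, e.Name)
	}
}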
@@ -116,10 +116,6 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
 		return nil, fmt.Errorf("pulling all tags is not supported for %s transport", ref.Transport().Name())
 	}
 
-	if r.eventChannel != nil {
-		defer r.writeEvent(&Event{ID: "", Name: name, Time: time.Now(), Type: EventTypeImagePull})
-	}
-
 	// Some callers may set the platform via the system context at creation
 	// time of the runtime. We need this information to decide whether we
 	// need to enforce pulling from a registry (see
@@ -160,10 +156,10 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
 	}
 
 	localImages := []*Image{}
-	for _, name := range pulledImages {
-		image, _, err := r.LookupImage(name, nil)
+	for _, iName := range pulledImages {
+		image, _, err := r.LookupImage(iName, nil)
 		if err != nil {
-			return nil, fmt.Errorf("locating pulled image %q name in containers storage: %w", name, err)
+			return nil, fmt.Errorf("locating pulled image %q name in containers storage: %w", iName, err)
 		}
 
 		// Note that we can ignore the 2nd return value here. Some
@@ -184,6 +180,11 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
 			}
 		}
 
+		if r.eventChannel != nil {
+			// Note that we use the input name here to preserve the transport data.
+			r.writeEvent(&Event{ID: image.ID(), Name: name, Time: time.Now(), Type: EventTypeImagePull})
+		}
+
 		localImages = append(localImages, image)
 	}
 
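The rename from name to iName in the two hunks above is what makes the new event possible: the loop variable previously shadowed the function's name parameter, so the loop body could no longer see the original, possibly transport-qualified input. A toy illustration of that shadowing fix, with hypothetical values:

package main

import "fmt"

func main() {
	name := "docker://quay.io/example/img:latest" // original input, transport prefix included
	pulledImages := []string{"quay.io/example/img:latest"}

	// With `for _, name := range pulledImages` the parameter would be
	// shadowed; using iName keeps both values reachable in the loop body.
	for _, iName := range pulledImages {
		fmt.Println("lookup in storage:", iName)
		fmt.Println("event name:       ", name) // transport data preserved
	}
}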
@@ -2,6 +2,7 @@ package libimage
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"strconv"
 	"strings"
@@ -54,6 +55,19 @@ type SearchOptions struct {
 	NoTrunc bool
 	// Authfile is the path to the authentication file.
 	Authfile string
+	// Path to the certificates directory.
+	CertDirPath string
+	// Username to use when authenticating at a container registry.
+	Username string
+	// Password to use when authenticating at a container registry.
+	Password string
+	// Credentials is an alternative way to specify credentials in format
+	// "username[:password]". Cannot be used in combination with
+	// Username/Password.
+	Credentials string
+	// IdentityToken is used to authenticate the user and get
+	// an access token for the registry.
+	IdentityToken string `json:"identitytoken,omitempty"`
 	// InsecureSkipTLSVerify allows to skip TLS verification.
 	InsecureSkipTLSVerify types.OptionalBool
 	// ListTags returns the search result with available tags
@@ -201,6 +215,35 @@ func (r *Runtime) searchImageInRegistry(ctx context.Context, term, registry stri
 		sys.AuthFilePath = options.Authfile
 	}
 
+	if options.CertDirPath != "" {
+		sys.DockerCertPath = options.CertDirPath
+	}
+
+	authConf := &types.DockerAuthConfig{IdentityToken: options.IdentityToken}
+	if options.Username != "" {
+		if options.Credentials != "" {
+			return nil, errors.New("username/password cannot be used with credentials")
+		}
+		authConf.Username = options.Username
+		authConf.Password = options.Password
+	}
+
+	if options.Credentials != "" {
+		split := strings.SplitN(options.Credentials, ":", 2)
+		switch len(split) {
+		case 1:
+			authConf.Username = split[0]
+		default:
+			authConf.Username = split[0]
+			authConf.Password = split[1]
+		}
+	}
+	// We should set the authConf unless a token was set. That's especially
+	// useful for Podman's remote API.
+	if options.IdentityToken != "" {
+		sys.DockerAuthConfig = authConf
+	}
+
 	if options.ListTags {
 		results, err := searchRepositoryTags(ctx, sys, registry, term, options)
 		if err != nil {
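The Credentials field added above accepts "username[:password]"; strings.SplitN with a limit of 2 keeps any further colons inside the password. A self-contained sketch of that parsing rule (the helper name is hypothetical):

package main

import (
	"fmt"
	"strings"
)

// parseCredentials mirrors the SplitN logic above: everything before the
// first colon is the username, the remainder (if any) is the password.
func parseCredentials(creds string) (username, password string) {
	split := strings.SplitN(creds, ":", 2)
	if len(split) == 2 {
		return split[0], split[1]
	}
	return split[0], ""
}

func main() {
	u, p := parseCredentials("alice:s3cr:et") // password may itself contain colons
	fmt.Println(u, p)                         // alice s3cr:et
	u, p = parseCredentials("bob")
	fmt.Println(u, p) // bob (empty password)
}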
@@ -86,6 +86,7 @@ func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) (
 
 	switch newNetwork.Driver {
 	case types.BridgeNetworkDriver:
+		internalutil.MapDockerBridgeDriverOptions(newNetwork)
 		err = internalutil.CreateBridge(n, newNetwork, usedNetworks, n.defaultsubnetPools)
 		if err != nil {
 			return nil, err
@@ -44,7 +44,7 @@ type cniNetwork struct {
 	isMachine bool
 
 	// lock is a internal lock for critical operations
-	lock lockfile.Locker
+	lock *lockfile.LockFile
 
 	// modTime is the timestamp when the config dir was modified
 	modTime time.Time
@@ -129,3 +129,19 @@ func GetFreeIPv6NetworkSubnet(usedNetworks []*net.IPNet) (*types.Subnet, error)
 	}
 	return nil, errors.New("failed to get random ipv6 subnet")
 }
+
+// Map docker driver network options to podman network options
+func MapDockerBridgeDriverOptions(n *types.Network) {
+	// validate the given options
+	for key, value := range n.Options {
+		switch key {
+		case "com.docker.network.driver.mtu":
+			n.Options[types.MTUOption] = value
+			delete(n.Options, "com.docker.network.driver.mtu")
+
+		case "com.docker.network.bridge.name":
+			n.NetworkInterface = value
+			delete(n.Options, "com.docker.network.bridge.name")
+		}
+	}
+}
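MapDockerBridgeDriverOptions rewrites Docker-compatible bridge option keys to their Podman equivalents in place. The sketch below replays the same rewrite on a simplified Network type; types.MTUOption is assumed to be the string "mtu" here, and the struct is illustrative rather than c/common's real one:

package main

import "fmt"

const mtuOption = "mtu" // stands in for types.MTUOption

type network struct {
	NetworkInterface string
	Options          map[string]string
}

// mapDockerBridgeDriverOptions mirrors the new helper: docker-style keys
// are rewritten to podman-style keys and the originals removed.
func mapDockerBridgeDriverOptions(n *network) {
	for key, value := range n.Options {
		switch key {
		case "com.docker.network.driver.mtu":
			n.Options[mtuOption] = value
			delete(n.Options, "com.docker.network.driver.mtu")
		case "com.docker.network.bridge.name":
			n.NetworkInterface = value
			delete(n.Options, "com.docker.network.bridge.name")
		}
	}
}

func main() {
	n := &network{Options: map[string]string{
		"com.docker.network.driver.mtu":  "9000",
		"com.docker.network.bridge.name": "br0",
	}}
	mapDockerBridgeDriverOptions(n)
	fmt.Printf("%+v\n", n) // &{NetworkInterface:br0 Options:map[mtu:9000]}
}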
@@ -155,6 +155,7 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
 
 	switch newNetwork.Driver {
 	case types.BridgeNetworkDriver:
+		internalutil.MapDockerBridgeDriverOptions(newNetwork)
 		err = internalutil.CreateBridge(n, newNetwork, usedNetworks, n.defaultsubnetPools)
 		if err != nil {
 			return nil, err
@@ -186,6 +187,7 @@ func (n *netavarkNetwork) networkCreate(newNetwork *types.Network, defaultNet bo
 			if err != nil {
 				return nil, err
 			}
+
 		default:
 			return nil, fmt.Errorf("unsupported bridge network option %s", key)
 		}
@@ -251,9 +253,6 @@ func createMacvlan(network *types.Network) error {
 	// we already validated the drivers before so we just have to set the default here
 	switch network.IPAMOptions[types.Driver] {
 	case "":
-		if len(network.Subnets) == 0 {
-			return fmt.Errorf("macvlan driver needs at least one subnet specified, DHCP is not yet supported with netavark")
-		}
 		network.IPAMOptions[types.Driver] = types.HostLocalIPAMDriver
 	case types.HostLocalIPAMDriver:
 		if len(network.Subnets) == 0 {
@@ -353,13 +352,11 @@ func (n *netavarkNetwork) NetworkInspect(nameOrID string) (types.Network, error)
 func validateIPAMDriver(n *types.Network) error {
 	ipamDriver := n.IPAMOptions[types.Driver]
 	switch ipamDriver {
-	case "", types.HostLocalIPAMDriver:
+	case "", types.HostLocalIPAMDriver, types.DHCPIPAMDriver:
 	case types.NoneIPAMDriver:
 		if len(n.Subnets) > 0 {
 			return errors.New("none ipam driver is set but subnets are given")
 		}
-	case types.DHCPIPAMDriver:
-		return errors.New("dhcp ipam driver is not yet supported with netavark")
 	default:
 		return fmt.Errorf("unsupported ipam driver %q", ipamDriver)
 	}
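After the two netavark hunks above, the dhcp IPAM driver passes validation just like host-local, and macvlan networks no longer require a subnet up front. A condensed sketch of the accepted values; the literal driver names ("host-local", "dhcp", "none") are assumed to match the types constants used in the diff:

package main

import (
	"errors"
	"fmt"
)

func validateIPAMDriver(driver string, subnets int) error {
	switch driver {
	case "", "host-local", "dhcp": // dhcp is newly accepted
		return nil
	case "none":
		if subnets > 0 {
			return errors.New("none ipam driver is set but subnets are given")
		}
		return nil
	default:
		return fmt.Errorf("unsupported ipam driver %q", driver)
	}
}

func main() {
	fmt.Println(validateIPAMDriver("dhcp", 0))   // <nil>
	fmt.Println(validateIPAMDriver("none", 2))   // error: subnets given
	fmt.Println(validateIPAMDriver("static", 0)) // error: unsupported
}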
@@ -54,7 +54,7 @@ type netavarkNetwork struct {
 	syslog bool
 
 	// lock is a internal lock for critical operations
-	lock lockfile.Locker
+	lock *lockfile.LockFile
 
 	// modTime is the timestamp when the config dir was modified
 	modTime time.Time
@@ -143,16 +143,18 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
 	}
 
 	if err = docker.CheckAuth(ctx, systemContext, username, password, registry); err == nil {
-		// Write the new credentials to the authfile
-		desc, err := config.SetCredentials(systemContext, key, username, password)
-		if err != nil {
-			return err
+		if !opts.NoWriteBack {
+			// Write the new credentials to the authfile
+			desc, err := config.SetCredentials(systemContext, key, username, password)
+			if err != nil {
+				return err
+			}
+			if opts.Verbose {
+				fmt.Fprintln(opts.Stdout, "Used: ", desc)
+			}
 		}
-		if opts.Verbose {
-			fmt.Fprintln(opts.Stdout, "Used: ", desc)
-		}
 	}
 	if err == nil {
 		fmt.Fprintln(opts.Stdout, "Login Succeeded!")
 		return nil
 	}
@@ -26,6 +26,7 @@ type LoginOptions struct {
 	Stdin                     io.Reader // set to os.Stdin
 	Stdout                    io.Writer // set to os.Stdout
 	AcceptUnspecifiedRegistry bool      // set to true if allows login with unspecified registry
+	NoWriteBack               bool      // set to true to not write the credentials to the authfile/cred helpers
 }
 
 // LogoutOptions represents the results for flags in logout
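With NoWriteBack set, Login still validates the credentials against the registry via docker.CheckAuth but skips config.SetCredentials, so nothing is persisted to the authfile or credential helpers. A hypothetical caller sketch; the Login signature is assumed to match c/common's pkg/auth as of this change, and the option values are illustrative:

package main

import (
	"context"
	"os"

	"github.com/containers/common/pkg/auth"
	"github.com/containers/image/v5/types"
)

func main() {
	opts := &auth.LoginOptions{
		Username:    "alice",  // illustrative
		Password:    "s3cret", // illustrative
		Stdin:       os.Stdin,
		Stdout:      os.Stdout,
		NoWriteBack: true, // validate against the registry, but persist nothing
	}
	if err := auth.Login(context.Background(), &types.SystemContext{}, opts, []string{"registry.example.com"}); err != nil {
		os.Exit(1)
	}
}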
@@ -251,6 +251,9 @@ type EngineConfig struct {
 	// in containers-registries.conf(5).
 	CompatAPIEnforceDockerHub bool `toml:"compat_api_enforce_docker_hub,omitempty"`
 
+	// DBBackend is the database backend to be used by Podman.
+	DBBackend string `toml:"database_backend,omitempty"`
+
 	// DetachKeys is the sequence of keys used to detach a container.
 	DetachKeys string `toml:"detach_keys,omitempty"`
 
@@ -609,7 +612,7 @@ type MachineConfig struct {
 	CPUs uint64 `toml:"cpus,omitempty,omitzero"`
 	// DiskSize is the size of the disk in GB created when init-ing a podman-machine VM
 	DiskSize uint64 `toml:"disk_size,omitempty,omitzero"`
-	// MachineImage is the image used when init-ing a podman-machine VM
+	// Image is the image used when init-ing a podman-machine VM
 	Image string `toml:"image,omitempty"`
 	// Memory in MB a machine is created with.
 	Memory uint64 `toml:"memory,omitempty,omitzero"`
@@ -617,6 +620,8 @@ type MachineConfig struct {
 	User string `toml:"user,omitempty"`
 	// Volumes are host directories mounted into the VM by default.
 	Volumes []string `toml:"volumes"`
+	// Provider is the virtualization provider used to run podman-machine VM
+	Provider string `toml:"provider,omitempty"`
 }
 
 // Destination represents destination for remote service
@@ -896,6 +901,11 @@ func (c *EngineConfig) Validate() error {
 	if _, err := ValidatePullPolicy(pullPolicy); err != nil {
 		return fmt.Errorf("invalid pull type from containers.conf %q: %w", c.PullPolicy, err)
 	}
+
+	if _, err := ParseDBBackend(c.DBBackend); err != nil {
+		return err
+	}
+
 	return nil
 }
 
@@ -1330,9 +1340,13 @@ func (c *Config) FindHelperBinary(name string, searchPATH bool) (string, error)
 				path = filepath.Join(bindirPath, strings.TrimPrefix(path, bindirPrefix+string(filepath.Separator)))
 			}
 		}
-		fullpath := filepath.Join(path, name)
-		if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() {
-			return fullpath, nil
+		// Absolute path will force exec.LookPath to check for binary existence instead of lookup everywhere in PATH
+		if abspath, err := filepath.Abs(filepath.Join(path, name)); err == nil {
+			// exec.LookPath from absolute path on Unix is equal to os.Stat + IsNotDir + check for executable bits in FileMode
+			// exec.LookPath from absolute path on Windows is equal to os.Stat + IsNotDir for `file.ext` or loops through extensions from PATHEXT for `file`
+			if lp, err := exec.LookPath(abspath); err == nil {
+				return lp, nil
+			}
 		}
 	}
 	if searchPATH {
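The comments in the hunk above lean on a documented property of exec.LookPath: when the argument contains a path separator, it skips the $PATH search entirely and only checks that the file exists and is executable (on Windows it additionally resolves PATHEXT extensions). A minimal demonstration:

package main

import (
	"fmt"
	"os/exec"
	"path/filepath"
)

func main() {
	// An absolute path makes exec.LookPath act as an existence +
	// executability check instead of a $PATH lookup.
	abspath, err := filepath.Abs("/usr/bin/env")
	if err != nil {
		panic(err)
	}
	if lp, err := exec.LookPath(abspath); err == nil {
		fmt.Println("found helper at:", lp)
	} else {
		fmt.Println("not usable as a helper:", err)
	}
}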
@@ -693,7 +693,7 @@ default_sysctls = [
 # "https://example.com/linux/amd64/foobar.ami" on a Linux AMD machine.
 # The default value is `testing`.
 #
-# image = "testing"
+#image = "testing"
 
 # Memory in MB a machine is created with.
 #
@@ -709,10 +709,15 @@ default_sysctls = [
 # the source and destination. An optional third field `:ro` can be used to
 # tell the container engines to mount the volume readonly.
 #
-# volumes = [
+#volumes = [
 # "$HOME:$HOME",
 #]
 
+# Virtualization provider used to run Podman machine.
+# If it is empty or commented out, the default provider will be used.
+#
+#provider = ""
+
 # The [machine] table MUST be the last entry in this file.
 # (Unless another table is added)
 # TOML does not provide a way to end a table other than a further table being
@@ -626,10 +626,15 @@ default_sysctls = [
 # the source and destination. An optional third field `:ro` can be used to
 # tell the container engines to mount the volume readonly.
 #
-# volumes = [
+#volumes = [
 # "$HOME:$HOME",
 #]
 
+# Virtualization provider used to run Podman machine.
+# If it is empty or commented out, the default provider will be used.
+#
+#provider = ""
+
 # The [machine] table MUST be the last entry in this file.
 # (Unless another table is added)
 # TOML does not provide a way to end a table other than a further table being
@@ -0,0 +1,60 @@
+package config
+
+import "fmt"
+
+// DBBackend determines which supported database backend Podman should use.
+type DBBackend int
+
+const (
+	// Unsupported database backend. Used as a sane base value for the type.
+	DBBackendUnsupported DBBackend = iota
+	// BoltDB backend.
+	DBBackendBoltDB
+	// SQLite backend.
+	DBBackendSQLite
+
+	stringBoltDB = "boltdb"
+	stringSQLite = "sqlite"
+)
+
+// String returns the DBBackend's string representation.
+func (d DBBackend) String() string {
+	switch d {
+	case DBBackendBoltDB:
+		return stringBoltDB
+	case DBBackendSQLite:
+		return stringSQLite
+	default:
+		return fmt.Sprintf("unsupported database backend: %d", d)
+	}
+}
+
+// Validate returns whether the DBBackend is supported.
+func (d DBBackend) Validate() error {
+	switch d {
+	case DBBackendBoltDB, DBBackendSQLite:
+		return nil
+	default:
+		return fmt.Errorf("unsupported database backend: %d", d)
+	}
+}
+
+// ParseDBBackend parses the specified string into a DBBackend.
+// An error is return for unsupported backends.
+func ParseDBBackend(raw string) (DBBackend, error) {
+	// NOTE: this function should be used for parsing the user-specified
+	// values on Podman's CLI.
+	switch raw {
+	case stringBoltDB:
+		return DBBackendBoltDB, nil
+	case stringSQLite:
+		return DBBackendSQLite, nil
+	default:
+		return DBBackendUnsupported, fmt.Errorf("unsupported database backend: %q", raw)
+	}
+}
+
+// DBBackend returns the configured database backend.
+func (c *Config) DBBackend() (DBBackend, error) {
+	return ParseDBBackend(c.Engine.DBBackend)
+}
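The new DBBackend type gives Podman a small validated enum for its database backend. A short usage sketch of the exported surface, assuming the file lands in c/common's pkg/config package as the surrounding hunks suggest:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	backend, err := config.ParseDBBackend("sqlite")
	if err != nil {
		panic(err)
	}
	fmt.Println(backend.String())   // sqlite
	fmt.Println(backend.Validate()) // <nil>

	// Unknown names are rejected with DBBackendUnsupported.
	if _, err := config.ParseDBBackend("dynamodb"); err != nil {
		fmt.Println(err) // unsupported database backend: "dynamodb"
	}
}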
@@ -104,6 +104,8 @@ const (
 	CgroupfsCgroupsManager = "cgroupfs"
 	// DefaultApparmorProfile specifies the default apparmor profile for the container.
 	DefaultApparmorProfile = apparmor.Profile
+	// DefaultDBBackend specifies the default database backend to be used by Podman.
+	DefaultDBBackend = DBBackendBoltDB
 	// DefaultHostsFile is the default path to the hosts file.
 	DefaultHostsFile = "/etc/hosts"
 	// SystemdCgroupsManager represents systemd native cgroup manager.
@@ -387,6 +389,7 @@ func defaultConfigFromMemory() (*EngineConfig, error) {
 		"/run/current-system/sw/bin/conmonrs",
 	}
 	c.PullPolicy = DefaultPullPolicy
+	c.DBBackend = stringBoltDB
 	c.RuntimeSupportsJSON = []string{
 		"crun",
 		"runc",
@@ -26,7 +26,7 @@ type Driver struct {
 	// secretsDataFilePath is the path to the secretsfile
 	secretsDataFilePath string
 	// lockfile is the filedriver lockfile
-	lockfile lockfile.Locker
+	lockfile *lockfile.LockFile
 }
 
 // NewDriver creates a new file driver.
@@ -62,7 +62,7 @@ type SecretsManager struct {
 	// secretsPath is the path to the db file where secrets are stored
 	secretsDBPath string
 	// lockfile is the locker for the secrets file
-	lockfile lockfile.Locker
+	lockfile *lockfile.LockFile
 	// db is an in-memory cache of the database of secrets
 	db *db
 }
@@ -1,4 +1,4 @@
 package version
 
 // Version is the version of the build.
-const Version = "0.51.0"
+const Version = "0.51.1-dev"
@@ -1,8 +1,6 @@
 package archive
 
 import (
-	"context"
-
 	"github.com/containers/image/v5/docker/internal/tarfile"
 	"github.com/containers/image/v5/internal/private"
 	"github.com/containers/image/v5/types"
@@ -15,7 +13,7 @@ type archiveImageSource struct {
 
 // newImageSource returns a types.ImageSource for the specified image reference.
 // The caller must call .Close() on the returned ImageSource.
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) {
+func newImageSource(sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) {
 	var archive *tarfile.Reader
 	var closeArchive bool
 	if ref.archiveReader != nil {
@@ -190,7 +190,7 @@ func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemConte
 // NewImageSource returns a types.ImageSource for this reference.
 // The caller must call .Close() on the returned ImageSource.
 func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
-	return newImageSource(ctx, sys, ref)
+	return newImageSource(sys, ref)
 }
 
 // NewImageDestination returns a types.ImageDestination for this reference.
@@ -229,12 +229,12 @@ func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL stri
 		logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %d bytes…", redactedURL, originalErr, progress)
 		return nil
 	}
-	if br.lastRetryTime == (time.Time{}) || msSinceLastRetry >= bodyReaderMSSinceLastRetry {
-		if br.lastRetryTime == (time.Time{}) {
-			logrus.Infof("Reading blob body from %s failed (%v), reconnecting (first reconnection)…", redactedURL, originalErr)
-		} else {
-			logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %.3f ms…", redactedURL, originalErr, msSinceLastRetry)
-		}
+	if br.lastRetryTime == (time.Time{}) {
+		logrus.Infof("Reading blob body from %s failed (%v), reconnecting (first reconnection)…", redactedURL, originalErr)
 		return nil
 	}
+	if msSinceLastRetry >= bodyReaderMSSinceLastRetry {
+		logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %.3f ms…", redactedURL, originalErr, msSinceLastRetry)
+		return nil
+	}
 	logrus.Debugf("Not reconnecting to %s: insufficient progress %d / time since last retry %.3f ms", redactedURL, progress, msSinceLastRetry)
@@ -994,7 +994,7 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR
 		return nil, err
 	}
 	defer reader.Close()
-	payload, err := iolimits.ReadAtMost(reader, iolimits.MaxSignatureBodySize)
+	payload, err := iolimits.ReadAtMost(reader, maxSize)
 	if err != nil {
 		return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
 	}
@@ -24,7 +24,7 @@ type Signature interface {
 	blobChunk() ([]byte, error)
 }
 
-// BlobChunk returns a representation of sig as a []byte, suitable for long-term storage.
+// Blob returns a representation of sig as a []byte, suitable for long-term storage.
 func Blob(sig Signature) ([]byte, error) {
 	chunk, err := sig.blobChunk()
 	if err != nil {
@@ -79,7 +79,7 @@ func FromBlob(blob []byte) (Signature, error) {
 	case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)):
 		return SimpleSigningFromBlob(blobChunk), nil
 	case bytes.Equal(formatBytes, []byte(SigstoreFormat)):
-		return SigstoreFromBlobChunk(blobChunk)
+		return sigstoreFromBlobChunk(blobChunk)
 	default:
 		return nil, fmt.Errorf("unrecognized signature format %q", string(formatBytes))
 	}
@@ -50,8 +50,8 @@ func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, u
 	}
 }
 
-// SigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
-func SigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
+// sigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
+func sigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
 	var v sigstoreJSONRepresentation
 	if err := json.Unmarshal(blobChunk, &v); err != nil {
 		return Sigstore{}, err
@@ -6,7 +6,6 @@ import (
 	"strings"
 
 	"github.com/containers/image/v5/internal/manifest"
-	internalManifest "github.com/containers/image/v5/internal/manifest"
 	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
 	ociencspec "github.com/containers/ocicrypt/spec"
@@ -197,7 +196,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
 		// Most software calling this without human intervention is going to expect the values to be realistic and relevant,
 		// and is probably better served by failing; we can always re-visit that later if we fail now, but
 		// if we started returning some data for OCI artifacts now, we couldn’t start failing in this function later.
-		return nil, internalManifest.NewNonImageArtifactError(m.Config.MediaType)
+		return nil, manifest.NewNonImageArtifactError(m.Config.MediaType)
 	}
 
 	config, err := configGetter(m.ConfigInfo())
@@ -248,7 +247,7 @@ func (m *OCI1) ImageID([]digest.Digest) (string, error) {
 	// (The only known caller of ImageID is storage/storageImageDestination.computeID,
 	// which can’t work with non-image artifacts.)
 	if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
-		return "", internalManifest.NewNonImageArtifactError(m.Config.MediaType)
+		return "", manifest.NewNonImageArtifactError(m.Config.MediaType)
 	}
 
 	if err := m.Config.Digest.Validate(); err != nil {
@@ -211,7 +211,7 @@ func (config *directClientConfig) ClientConfig() (*restConfig, error) {
 		return nil, err
 	}
 
-	serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
+	serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configClusterInfo)
 	if err != nil {
 		return nil, err
 	}
@@ -230,7 +230,7 @@ func (config *directClientConfig) ClientConfig() (*restConfig, error) {
 // 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
 // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
 // 3. load the ~/.kubernetes_auth file as a default
-func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) {
+func getServerIdentificationPartialConfig(configClusterInfo clientcmdCluster) (*restConfig, error) {
 	mergedConfig := &restConfig{}
 
 	// configClusterInfo holds the information identify the server provided by .kubeconfig
@@ -69,7 +69,7 @@ type manifestSchema struct {
 }
 
 type ostreeImageDestination struct {
-	compat impl.Compat
+	impl.Compat
 	impl.PropertyMethodsInitialize
 	stubs.NoPutBlobPartialInitialize
 	stubs.AlwaysSupportsSignatures
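Changing the named field compat impl.Compat to an embedded impl.Compat promotes the Compat methods onto ostreeImageDestination itself, letting the type satisfy interfaces those methods implement. A generic toy of field-vs-embedding (not the real c/image types):

package main

import "fmt"

type Compat struct{}

func (Compat) PutBlob() string { return "compat PutBlob" }

// With a named field, dest.compat.PutBlob() would be required and the
// method would not be part of dest's method set. With embedding, PutBlob
// is promoted onto dest.
type dest struct {
	Compat // embedded
}

type blobPutter interface{ PutBlob() string }

func main() {
	var d dest
	fmt.Println(d.PutBlob()) // promoted method

	var p blobPutter = d // dest satisfies the interface via promotion
	fmt.Println(p.PutBlob())
}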
@@ -91,7 +91,6 @@ func NewTransport() *http.Transport {
 	direct := &net.Dialer{
 		Timeout:   30 * time.Second,
 		KeepAlive: 30 * time.Second,
-		DualStack: true,
 	}
 	tr := &http.Transport{
 		Proxy: http.ProxyFromEnvironment,
@@ -21,14 +21,14 @@ const (
 
 // UntrustedSigstorePayload is a parsed content of a sigstore signature payload (not the full signature)
 type UntrustedSigstorePayload struct {
-	UntrustedDockerManifestDigest digest.Digest
-	UntrustedDockerReference      string // FIXME: more precise type?
-	UntrustedCreatorID            *string
+	untrustedDockerManifestDigest digest.Digest
+	untrustedDockerReference      string // FIXME: more precise type?
+	untrustedCreatorID            *string
 	// This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
 	// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
 	// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
 	// we would add another field, UntrustedTimestampNS int64.
-	UntrustedTimestamp *int64
+	untrustedTimestamp *int64
 }
 
 // NewUntrustedSigstorePayload returns an UntrustedSigstorePayload object with
@@ -39,10 +39,10 @@ func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerRefer
 	creatorID := "containers/image " + version.Version
 	timestamp := time.Now().Unix()
 	return UntrustedSigstorePayload{
-		UntrustedDockerManifestDigest: dockerManifestDigest,
-		UntrustedDockerReference:      dockerReference,
-		UntrustedCreatorID:            &creatorID,
-		UntrustedTimestamp:            &timestamp,
+		untrustedDockerManifestDigest: dockerManifestDigest,
+		untrustedDockerReference:      dockerReference,
+		untrustedCreatorID:            &creatorID,
+		untrustedTimestamp:            &timestamp,
 	}
 }
 
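Lower-casing these fields is the standard Go encapsulation move: code outside the package can no longer construct or mutate the payload field-by-field and must go through NewUntrustedSigstorePayload, while MarshalJSON keeps package-internal access. A generic toy of the same pattern (names shortened; not the real c/image API):

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// payload keeps its fields unexported; other packages cannot construct
// it field-by-field and skip invariants.
type payload struct {
	digest    string
	reference string
}

// NewPayload is the only external construction path, mirroring
// NewUntrustedSigstorePayload.
func NewPayload(digest, reference string) payload {
	return payload{digest: digest, reference: reference}
}

// MarshalJSON can still reach the unexported fields from inside the package.
func (p payload) MarshalJSON() ([]byte, error) {
	if p.digest == "" || p.reference == "" {
		return nil, errors.New("unexpected empty signature content")
	}
	return json.Marshal(map[string]string{
		"docker-manifest-digest": p.digest,
		"docker-reference":       p.reference,
	})
}

func main() {
	b, err := json.Marshal(NewPayload("sha256:abcd", "example.com/img:latest"))
	fmt.Println(string(b), err)
}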
@@ -52,20 +52,20 @@ var _ json.Marshaler = (*UntrustedSigstorePayload)(nil)
 
 // MarshalJSON implements the json.Marshaler interface.
 func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) {
-	if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
+	if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
 		return nil, errors.New("Unexpected empty signature content")
 	}
 	critical := map[string]any{
 		"type":     sigstoreSignatureType,
-		"image":    map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
-		"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
+		"image":    map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
+		"identity": map[string]string{"docker-reference": s.untrustedDockerReference},
 	}
 	optional := map[string]any{}
-	if s.UntrustedCreatorID != nil {
-		optional["creator"] = *s.UntrustedCreatorID
+	if s.untrustedCreatorID != nil {
+		optional["creator"] = *s.untrustedCreatorID
 	}
-	if s.UntrustedTimestamp != nil {
-		optional["timestamp"] = *s.UntrustedTimestamp
+	if s.untrustedTimestamp != nil {
+		optional["timestamp"] = *s.untrustedTimestamp
 	}
 	signature := map[string]any{
 		"critical": critical,
@@ -121,14 +121,14 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
 		}
 	}
 	if gotCreatorID {
-		s.UntrustedCreatorID = &creatorID
+		s.untrustedCreatorID = &creatorID
 	}
 	if gotTimestamp {
 		intTimestamp := int64(timestamp)
 		if float64(intTimestamp) != timestamp {
 			return NewInvalidSignatureError("Field optional.timestamp is not is not an integer")
 		}
-		s.UntrustedTimestamp = &intTimestamp
+		s.untrustedTimestamp = &intTimestamp
 	}
 
 	var t string
@@ -150,10 +150,10 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
 	}); err != nil {
 		return err
 	}
-	s.UntrustedDockerManifestDigest = digest.Digest(digestString)
+	s.untrustedDockerManifestDigest = digest.Digest(digestString)
 
 	return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
-		"docker-reference": &s.UntrustedDockerReference,
+		"docker-reference": &s.untrustedDockerReference,
 	})
 }
 
@@ -191,10 +191,10 @@ func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte,
 	if err := json.Unmarshal(unverifiedPayload, &unmatchedPayload); err != nil {
 		return nil, NewInvalidSignatureError(err.Error())
 	}
-	if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.UntrustedDockerManifestDigest); err != nil {
+	if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.untrustedDockerManifestDigest); err != nil {
 		return nil, err
 	}
-	if err := rules.ValidateSignedDockerReference(unmatchedPayload.UntrustedDockerReference); err != nil {
+	if err := rules.ValidateSignedDockerReference(unmatchedPayload.untrustedDockerReference); err != nil {
 		return nil, err
 	}
 	// SigstorePayloadAcceptanceRules have accepted this value.
@@ -31,14 +31,14 @@ type Signature struct {
 
 // untrustedSignature is a parsed content of a signature.
 type untrustedSignature struct {
-	UntrustedDockerManifestDigest digest.Digest
-	UntrustedDockerReference      string // FIXME: more precise type?
-	UntrustedCreatorID            *string
+	untrustedDockerManifestDigest digest.Digest
+	untrustedDockerReference      string // FIXME: more precise type?
+	untrustedCreatorID            *string
 	// This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
 	// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
 	// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
 	// we would add another field, UntrustedTimestampNS int64.
-	UntrustedTimestamp *int64
+	untrustedTimestamp *int64
 }
 
 // UntrustedSignatureInformation is information available in an untrusted signature.
@@ -65,10 +65,10 @@ func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference s
 	creatorID := "atomic " + version.Version
 	timestamp := time.Now().Unix()
 	return untrustedSignature{
-		UntrustedDockerManifestDigest: dockerManifestDigest,
-		UntrustedDockerReference:      dockerReference,
-		UntrustedCreatorID:            &creatorID,
-		UntrustedTimestamp:            &timestamp,
+		untrustedDockerManifestDigest: dockerManifestDigest,
+		untrustedDockerReference:      dockerReference,
+		untrustedCreatorID:            &creatorID,
+		untrustedTimestamp:            &timestamp,
 	}
 }
 
@@ -78,20 +78,20 @@ var _ json.Marshaler = (*untrustedSignature)(nil)
 
 // MarshalJSON implements the json.Marshaler interface.
 func (s untrustedSignature) MarshalJSON() ([]byte, error) {
-	if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
+	if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
 		return nil, errors.New("Unexpected empty signature content")
 	}
 	critical := map[string]any{
 		"type":     signatureType,
-		"image":    map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
-		"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
+		"image":    map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
+		"identity": map[string]string{"docker-reference": s.untrustedDockerReference},
 	}
 	optional := map[string]any{}
-	if s.UntrustedCreatorID != nil {
-		optional["creator"] = *s.UntrustedCreatorID
+	if s.untrustedCreatorID != nil {
+		optional["creator"] = *s.untrustedCreatorID
 	}
-	if s.UntrustedTimestamp != nil {
-		optional["timestamp"] = *s.UntrustedTimestamp
+	if s.untrustedTimestamp != nil {
+		optional["timestamp"] = *s.untrustedTimestamp
 	}
 	signature := map[string]any{
 		"critical": critical,
@@ -144,14 +144,14 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
 		return err
 	}
 	if gotCreatorID {
-		s.UntrustedCreatorID = &creatorID
+		s.untrustedCreatorID = &creatorID
 	}
 	if gotTimestamp {
 		intTimestamp := int64(timestamp)
 		if float64(intTimestamp) != timestamp {
 			return internal.NewInvalidSignatureError("Field optional.timestamp is not is not an integer")
 		}
-		s.UntrustedTimestamp = &intTimestamp
+		s.untrustedTimestamp = &intTimestamp
 	}
 
 	var t string
@@ -173,10 +173,10 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
 	}); err != nil {
 		return err
 	}
-	s.UntrustedDockerManifestDigest = digest.Digest(digestString)
+	s.untrustedDockerManifestDigest = digest.Digest(digestString)
 
 	return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
-		"docker-reference": &s.UntrustedDockerReference,
+		"docker-reference": &s.untrustedDockerReference,
 	})
 }
 
@ -229,16 +229,16 @@ func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte
|
|||
if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
|
||||
return nil, internal.NewInvalidSignatureError(err.Error())
|
||||
}
|
||||
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
|
||||
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.untrustedDockerManifestDigest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
|
||||
if err := rules.validateSignedDockerReference(unmatchedSignature.untrustedDockerReference); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// signatureAcceptanceRules have accepted this value.
|
||||
return &Signature{
|
||||
DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
|
||||
DockerReference: unmatchedSignature.UntrustedDockerReference,
|
||||
DockerManifestDigest: unmatchedSignature.untrustedDockerManifestDigest,
|
||||
DockerReference: unmatchedSignature.untrustedDockerReference,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -269,14 +269,14 @@ func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []
|
|||
}
|
||||
|
||||
var timestamp *time.Time // = nil
|
||||
if untrustedDecodedContents.UntrustedTimestamp != nil {
|
||||
ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
|
||||
if untrustedDecodedContents.untrustedTimestamp != nil {
|
||||
ts := time.Unix(*untrustedDecodedContents.untrustedTimestamp, 0)
|
||||
timestamp = &ts
|
||||
}
|
||||
return &UntrustedSignatureInformation{
|
||||
UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
|
||||
UntrustedDockerReference: untrustedDecodedContents.UntrustedDockerReference,
|
||||
UntrustedCreatorID: untrustedDecodedContents.UntrustedCreatorID,
|
||||
UntrustedDockerManifestDigest: untrustedDecodedContents.untrustedDockerManifestDigest,
|
||||
UntrustedDockerReference: untrustedDecodedContents.untrustedDockerReference,
|
||||
UntrustedCreatorID: untrustedDecodedContents.untrustedCreatorID,
|
||||
UntrustedTimestamp: timestamp,
|
||||
UntrustedShortKeyIdentifier: shortKeyIdentifier,
|
||||
}, nil
|
||||
|
|
|
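The integer-only timestamp rule in the hunks above is easy to see in isolation: encoding/json hands every JSON number to Go as a float64, so a fractional value is caught by round-tripping through int64 and comparing. A minimal self-contained sketch of that check follows; the helper name is illustrative, not part of c/image.

package main

import (
	"errors"
	"fmt"
)

// timestampFromJSONNumber rejects any JSON number that is not an exact
// integer, mirroring the strictUnmarshalJSON check in the diff above.
func timestampFromJSONNumber(v float64) (int64, error) {
	i := int64(v)
	if float64(i) != v {
		return 0, errors.New("optional.timestamp is not an integer")
	}
	return i, nil
}

func main() {
	fmt.Println(timestampFromJSONNumber(1677000000))   // 1677000000 <nil>
	fmt.Println(timestampFromJSONNumber(1677000000.5)) // 0 and an error
}
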
@@ -159,7 +159,7 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
info, err := s.putBlobToPendingFile(ctx, stream, blobinfo, &options)
info, err := s.putBlobToPendingFile(stream, blobinfo, &options)
if err != nil {
return info, err
}

@@ -173,7 +173,7 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream

// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
// Stores a layer or data blob in our temporary directory, checking that any information
// in the blobinfo matches the incoming data.
errorBlobInfo := types.BlobInfo{

@@ -203,7 +203,7 @@ func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stre

diffID := digest.Canonical.Digester()
// Copy the data to the file.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
// TODO: This can take quite some time, and should ideally be cancellable using context.Context.
_, err = io.Copy(diffID.Hash(), decompressed)
decompressed.Close()
if err != nil {

@@ -302,7 +302,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
reused, info, err := s.tryReusingBlobAsPending(ctx, blobinfo, &options)
reused, info, err := s.tryReusingBlobAsPending(blobinfo, &options)
if err != nil || !reused || options.LayerIndex == nil {
return reused, info, err
}

@@ -312,7 +312,7 @@ func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context,

// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
func (s *storageImageDestination) tryReusingBlobAsPending(blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
// lock the entire method as it executes fairly quickly
s.lock.Lock()
defer s.lock.Unlock()

@@ -43,7 +43,7 @@ func (s *storageImageCloser) Size() (int64, error) {

// newImage creates an image that also knows its size
func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) {
src, err := newImageSource(ctx, sys, s)
src, err := newImageSource(sys, s)
if err != nil {
return nil, err
}

@@ -277,7 +277,7 @@ func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemCont
}

func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
return newImageSource(ctx, sys, s)
return newImageSource(sys, s)
}

func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {

@@ -44,7 +44,7 @@ type storageImageSource struct {
}

// newImageSource sets up an image for reading.
func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
// First, locate the image.
img, err := imageRef.resolveImage(sys)
if err != nil {

@@ -8,7 +8,7 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 24
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 1
VersionPatch = 2

// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "-dev"

@@ -1,5 +1,7 @@
package core

import "fmt"

func newChallenge(challengeType AcmeChallenge, token string) Challenge {
return Challenge{
Type: challengeType,
@@ -25,3 +27,19 @@ func DNSChallenge01(token string) Challenge {
func TLSALPNChallenge01(token string) Challenge {
return newChallenge(ChallengeTypeTLSALPN01, token)
}

// NewChallenge constructs a random challenge of the given kind. It returns an
// error if the challenge type is unrecognized. If token is empty a random token
// will be generated, otherwise the provided token is used.
func NewChallenge(kind AcmeChallenge, token string) (Challenge, error) {
switch kind {
case ChallengeTypeHTTP01:
return HTTPChallenge01(token), nil
case ChallengeTypeDNS01:
return DNSChallenge01(token), nil
case ChallengeTypeTLSALPN01:
return TLSALPNChallenge01(token), nil
default:
return Challenge{}, fmt.Errorf("unrecognized challenge type %q", kind)
}
}

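A short usage sketch for the new constructor, using only the API added above (import path as vendored here; error handling kept minimal):

package main

import (
	"fmt"
	"log"

	"github.com/letsencrypt/boulder/core"
)

func main() {
	// Unrecognized kinds now surface as an error instead of a zero-value Challenge.
	chal, err := core.NewChallenge(core.ChallengeTypeDNS01, "some-token")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(chal.Type) // dns-01
}
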
@@ -7,7 +7,8 @@ import (
// PolicyAuthority defines the public interface for the Boulder PA
// TODO(#5891): Move this interface to a more appropriate location.
type PolicyAuthority interface {
WillingToIssueWildcards(identifiers []identifier.ACMEIdentifier) error
ChallengesFor(domain identifier.ACMEIdentifier) ([]Challenge, error)
ChallengeTypeEnabled(t AcmeChallenge) bool
WillingToIssueWildcards([]identifier.ACMEIdentifier) error
ChallengesFor(identifier.ACMEIdentifier) ([]Challenge, error)
ChallengeTypeEnabled(AcmeChallenge) bool
CheckAuthz(*Authorization) error
}

@@ -2,7 +2,6 @@ package core

import (
"crypto"
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
@@ -12,7 +11,7 @@ import (
"time"

"golang.org/x/crypto/ocsp"
"gopkg.in/square/go-jose.v2"
"gopkg.in/go-jose/go-jose.v2"

"github.com/letsencrypt/boulder/identifier"
"github.com/letsencrypt/boulder/probs"
@@ -53,7 +52,6 @@ const (
type AcmeChallenge string

// These types are the available challenges
// TODO(#5009): Make this a custom type as well.
const (
ChallengeTypeHTTP01 = AcmeChallenge("http-01")
ChallengeTypeDNS01 = AcmeChallenge("dns-01")
@@ -87,44 +85,10 @@ var OCSPStatusToInt = map[OCSPStatus]int{
// DNSPrefix is attached to DNS names in DNS challenges
const DNSPrefix = "_acme-challenge"

// CertificateRequest is just a CSR
//
// This data is unmarshalled from JSON by way of RawCertificateRequest, which
// represents the actual structure received from the client.
type CertificateRequest struct {
CSR *x509.CertificateRequest // The CSR
Bytes []byte // The original bytes of the CSR, for logging.
}

type RawCertificateRequest struct {
CSR JSONBuffer `json:"csr"` // The encoded CSR
}

// UnmarshalJSON provides an implementation for decoding CertificateRequest objects.
func (cr *CertificateRequest) UnmarshalJSON(data []byte) error {
var raw RawCertificateRequest
err := json.Unmarshal(data, &raw)
if err != nil {
return err
}

csr, err := x509.ParseCertificateRequest(raw.CSR)
if err != nil {
return err
}

cr.CSR = csr
cr.Bytes = raw.CSR
return nil
}

// MarshalJSON provides an implementation for encoding CertificateRequest objects.
func (cr CertificateRequest) MarshalJSON() ([]byte, error) {
return json.Marshal(RawCertificateRequest{
CSR: cr.CSR.Raw,
})
}

// Registration objects represent non-public metadata attached
// to account keys.
type Registration struct {
@@ -373,9 +337,6 @@ type Authorization struct {
// slice and the order of these challenges may not be predictable.
Challenges []Challenge `json:"challenges,omitempty" db:"-"`

// This field is deprecated. It's filled in by WFE for the ACMEv1 API.
Combinations [][]int `json:"combinations,omitempty" db:"combinations"`

// Wildcard is a Boulder-specific Authorization field that indicates the
// authorization was created as a result of an order containing a name with
// a `*.` wildcard prefix. This will help convey to users that an
@@ -399,38 +360,25 @@ func (authz *Authorization) FindChallengeByStringID(id string) int {
// SolvedBy will look through the Authorizations challenges, returning the type
// of the *first* challenge it finds with Status: valid, or an error if no
// challenge is valid.
func (authz *Authorization) SolvedBy() (*AcmeChallenge, error) {
func (authz *Authorization) SolvedBy() (AcmeChallenge, error) {
if len(authz.Challenges) == 0 {
return nil, fmt.Errorf("Authorization has no challenges")
return "", fmt.Errorf("Authorization has no challenges")
}
for _, chal := range authz.Challenges {
if chal.Status == StatusValid {
return &chal.Type, nil
return chal.Type, nil
}
}
return nil, fmt.Errorf("Authorization not solved by any challenge")
return "", fmt.Errorf("Authorization not solved by any challenge")
}

// JSONBuffer fields get encoded and decoded JOSE-style, in base64url encoding
// with stripped padding.
type JSONBuffer []byte

// URL-safe base64 encode that strips padding
func base64URLEncode(data []byte) string {
var result = base64.URLEncoding.EncodeToString(data)
return strings.TrimRight(result, "=")
}

// URL-safe base64 decoder that adds padding
func base64URLDecode(data string) ([]byte, error) {
var missing = (4 - len(data)%4) % 4
data += strings.Repeat("=", missing)
return base64.URLEncoding.DecodeString(data)
}

// MarshalJSON encodes a JSONBuffer for transmission.
func (jb JSONBuffer) MarshalJSON() (result []byte, err error) {
return json.Marshal(base64URLEncode(jb))
return json.Marshal(base64.RawURLEncoding.EncodeToString(jb))
}

// UnmarshalJSON decodes a JSONBuffer to an object.
@@ -440,7 +388,7 @@ func (jb *JSONBuffer) UnmarshalJSON(data []byte) (err error) {
if err != nil {
return err
}
*jb, err = base64URLDecode(str)
*jb, err = base64.RawURLEncoding.DecodeString(strings.TrimRight(str, "="))
return
}

File diff suppressed because it is too large.

@@ -1,101 +0,0 @@
syntax = "proto3";

package core;
option go_package = "github.com/letsencrypt/boulder/core/proto";

message Challenge {
int64 id = 1;
string type = 2;
string status = 6;
string uri = 9;
string token = 3;
string keyAuthorization = 5;
repeated ValidationRecord validationrecords = 10;
ProblemDetails error = 7;
int64 validated = 11;
}

message ValidationRecord {
string hostname = 1;
string port = 2;
repeated bytes addressesResolved = 3; // net.IP.MarshalText()
bytes addressUsed = 4; // net.IP.MarshalText()

repeated string authorities = 5;
string url = 6;
// A list of addresses tried before the address used (see
// core/objects.go and the comment on the ValidationRecord structure
// definition for more information).
repeated bytes addressesTried = 7; // net.IP.MarshalText()
}

message ProblemDetails {
string problemType = 1;
string detail = 2;
int32 httpStatus = 3;
}

message Certificate {
int64 registrationID = 1;
string serial = 2;
string digest = 3;
bytes der = 4;
int64 issued = 5; // Unix timestamp (nanoseconds)
int64 expires = 6; // Unix timestamp (nanoseconds)
}

message CertificateStatus {
string serial = 1;
reserved 2; // previously subscriberApproved
string status = 3;
int64 ocspLastUpdated = 4;
int64 revokedDate = 5;
int64 revokedReason = 6;
int64 lastExpirationNagSent = 7;
bytes ocspResponse = 8;
int64 notAfter = 9;
bool isExpired = 10;
int64 issuerID = 11;
}

message Registration {
int64 id = 1;
bytes key = 2;
repeated string contact = 3;
bool contactsPresent = 4;
string agreement = 5;
bytes initialIP = 6;
int64 createdAt = 7; // Unix timestamp (nanoseconds)
string status = 8;
}

message Authorization {
string id = 1;
string identifier = 2;
int64 registrationID = 3;
string status = 4;
int64 expires = 5; // Unix timestamp (nanoseconds)
repeated core.Challenge challenges = 6;
reserved 7; // previously combinations
reserved 8; // previously v2
}

message Order {
int64 id = 1;
int64 registrationID = 2;
int64 expires = 3;
ProblemDetails error = 4;
string certificateSerial = 5;
reserved 6; // previously authorizations, deprecated in favor of v2Authorizations
string status = 7;
repeated string names = 8;
bool beganProcessing = 9;
int64 created = 10;
repeated int64 v2Authorizations = 11;
}

message CRLEntry {
string serial = 1;
int32 reason = 2;
int64 revokedAt = 3; // Unix timestamp (nanoseconds)
}

@@ -23,7 +23,7 @@ import (
"time"
"unicode"

jose "gopkg.in/square/go-jose.v2"
jose "gopkg.in/go-jose/go-jose.v2"
)

const Unspecified = "Unspecified"

@@ -1,56 +0,0 @@
// Code generated by "stringer -type=FeatureFlag"; DO NOT EDIT.

package features

import "strconv"

func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[unused-0]
_ = x[PrecertificateRevocation-1]
_ = x[StripDefaultSchemePort-2]
_ = x[NonCFSSLSigner-3]
_ = x[StoreIssuerInfo-4]
_ = x[StreamlineOrderAndAuthzs-5]
_ = x[V1DisableNewValidations-6]
_ = x[ExpirationMailerDontLookTwice-7]
_ = x[OldTLSInbound-8]
_ = x[OldTLSOutbound-9]
_ = x[ROCSPStage1-10]
_ = x[ROCSPStage2-11]
_ = x[ROCSPStage3-12]
_ = x[CAAValidationMethods-13]
_ = x[CAAAccountURI-14]
_ = x[EnforceMultiVA-15]
_ = x[MultiVAFullResults-16]
_ = x[MandatoryPOSTAsGET-17]
_ = x[AllowV1Registration-18]
_ = x[StoreRevokerInfo-19]
_ = x[RestrictRSAKeySizes-20]
_ = x[FasterNewOrdersRateLimit-21]
_ = x[ECDSAForAll-22]
_ = x[ServeRenewalInfo-23]
_ = x[GetAuthzReadOnly-24]
_ = x[GetAuthzUseIndex-25]
_ = x[CheckFailedAuthorizationsFirst-26]
_ = x[AllowReRevocation-27]
_ = x[MozRevocationReasons-28]
_ = x[SHA1CSRs-29]
_ = x[AllowUnrecognizedFeatures-30]
_ = x[RejectDuplicateCSRExtensions-31]
_ = x[ROCSPStage6-32]
_ = x[ROCSPStage7-33]
}

const _FeatureFlag_name = "unusedPrecertificateRevocationStripDefaultSchemePortNonCFSSLSignerStoreIssuerInfoStreamlineOrderAndAuthzsV1DisableNewValidationsExpirationMailerDontLookTwiceOldTLSInboundOldTLSOutboundROCSPStage1ROCSPStage2ROCSPStage3CAAValidationMethodsCAAAccountURIEnforceMultiVAMultiVAFullResultsMandatoryPOSTAsGETAllowV1RegistrationStoreRevokerInfoRestrictRSAKeySizesFasterNewOrdersRateLimitECDSAForAllServeRenewalInfoGetAuthzReadOnlyGetAuthzUseIndexCheckFailedAuthorizationsFirstAllowReRevocationMozRevocationReasonsSHA1CSRsAllowUnrecognizedFeaturesRejectDuplicateCSRExtensionsROCSPStage6ROCSPStage7"

var _FeatureFlag_index = [...]uint16{0, 6, 30, 52, 66, 81, 105, 128, 157, 170, 184, 195, 206, 217, 237, 250, 264, 282, 300, 319, 335, 354, 378, 389, 405, 421, 437, 467, 484, 504, 512, 537, 565, 576, 587}

func (i FeatureFlag) String() string {
if i < 0 || i >= FeatureFlag(len(_FeatureFlag_index)-1) {
return "FeatureFlag(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _FeatureFlag_name[_FeatureFlag_index[i]:_FeatureFlag_index[i+1]]
}

@@ -1,203 +0,0 @@
//go:generate stringer -type=FeatureFlag

package features

import (
"fmt"
"strings"
"sync"
)

type FeatureFlag int

const (
unused FeatureFlag = iota // unused is used for testing
// Deprecated features, these can be removed once stripped from production configs
PrecertificateRevocation
StripDefaultSchemePort
NonCFSSLSigner
StoreIssuerInfo
StreamlineOrderAndAuthzs
V1DisableNewValidations
ExpirationMailerDontLookTwice
OldTLSInbound
OldTLSOutbound
ROCSPStage1
ROCSPStage2
ROCSPStage3

// Currently in-use features
// Check CAA and respect validationmethods parameter.
CAAValidationMethods
// Check CAA and respect accounturi parameter.
CAAAccountURI
// EnforceMultiVA causes the VA to block on remote VA PerformValidation
// requests in order to make a valid/invalid decision with the results.
EnforceMultiVA
// MultiVAFullResults will cause the main VA to wait for all of the remote VA
// results, not just the threshold required to make a decision.
MultiVAFullResults
// MandatoryPOSTAsGET forbids legacy unauthenticated GET requests for ACME
// resources.
MandatoryPOSTAsGET
// Allow creation of new registrations in ACMEv1.
AllowV1Registration
// StoreRevokerInfo enables storage of the revoker and a bool indicating if the row
// was checked for extant unrevoked certificates in the blockedKeys table.
StoreRevokerInfo
// RestrictRSAKeySizes enables restriction of acceptable RSA public key moduli to
// the common sizes (2048, 3072, and 4096 bits).
RestrictRSAKeySizes
// FasterNewOrdersRateLimit enables use of a separate table for counting the
// new orders rate limit.
FasterNewOrdersRateLimit
// ECDSAForAll enables all accounts, regardless of their presence in the CA's
// ecdsaAllowedAccounts config value, to get issuance from ECDSA issuers.
ECDSAForAll
// ServeRenewalInfo exposes the renewalInfo endpoint in the directory and for
// GET requests. WARNING: This feature is a draft and highly unstable.
ServeRenewalInfo
// GetAuthzReadOnly causes the SA to use its read-only database connection
// (which is generally pointed at a replica rather than the primary db) when
// querying the authz2 table.
GetAuthzReadOnly
// GetAuthzUseIndex causes the SA to use to add a USE INDEX hint when it
// queries the authz2 table.
GetAuthzUseIndex
// Check the failed authorization limit before doing authz reuse.
CheckFailedAuthorizationsFirst
// AllowReRevocation causes the RA to allow the revocation reason of an
// already-revoked certificate to be updated to `keyCompromise` from any
// other reason if that compromise is demonstrated by making the second
// revocation request signed by the certificate keypair.
AllowReRevocation
// MozRevocationReasons causes the RA to enforce the following upcoming
// Mozilla policies regarding revocation:
// - A subscriber can request that their certificate be revoked with reason
// keyCompromise, even without demonstrating that compromise at the time.
// However, the cert's pubkey will not be added to the blocked keys list.
// - When an applicant other than the original subscriber requests that a
// certificate be revoked (by demonstrating control over all names in it),
// the cert will be revoked with reason cessationOfOperation, regardless of
// what revocation reason they request.
// - When anyone requests that a certificate be revoked by signing the request
// with the certificate's keypair, the cert will be revoked with reason
// keyCompromise, regardless of what revocation reason they request.
MozRevocationReasons
// SHA1CSRs controls whether the /acme/finalize endpoint rejects CSRs that
// are self-signed using SHA1.
SHA1CSRs
// AllowUnrecognizedFeatures is internal to the features package: if true,
// skip error when unrecognized feature flag names are passed.
AllowUnrecognizedFeatures
// RejectDuplicateCSRExtensions enables verification that submitted CSRs do
// not contain duplicate extensions. This behavior will be on by default in
// go1.19.
RejectDuplicateCSRExtensions

// ROCSPStage6 disables writing full OCSP Responses to MariaDB during
// (pre)certificate issuance and during revocation. Because Stage 4 involved
// disabling ocsp-updater, this means that no ocsp response bytes will be
// written to the database anymore.
ROCSPStage6
// ROCSPStage7 disables generating OCSP responses during issuance and
// revocation. This affects codepaths in both the RA (revocation) and the CA
// (precert "birth certificates").
ROCSPStage7
)

// List of features and their default value, protected by fMu
var features = map[FeatureFlag]bool{
unused: false,
CAAValidationMethods: false,
CAAAccountURI: false,
EnforceMultiVA: false,
MultiVAFullResults: false,
MandatoryPOSTAsGET: false,
AllowV1Registration: true,
V1DisableNewValidations: false,
PrecertificateRevocation: false,
StripDefaultSchemePort: false,
StoreIssuerInfo: false,
StoreRevokerInfo: false,
RestrictRSAKeySizes: false,
FasterNewOrdersRateLimit: false,
NonCFSSLSigner: false,
ECDSAForAll: false,
StreamlineOrderAndAuthzs: false,
ServeRenewalInfo: false,
GetAuthzReadOnly: false,
GetAuthzUseIndex: false,
CheckFailedAuthorizationsFirst: false,
AllowReRevocation: false,
MozRevocationReasons: false,
OldTLSOutbound: true,
OldTLSInbound: true,
SHA1CSRs: true,
AllowUnrecognizedFeatures: false,
ExpirationMailerDontLookTwice: false,
RejectDuplicateCSRExtensions: false,
ROCSPStage1: false,
ROCSPStage2: false,
ROCSPStage3: false,
ROCSPStage6: false,
ROCSPStage7: false,
}

var fMu = new(sync.RWMutex)

var initial = map[FeatureFlag]bool{}

var nameToFeature = make(map[string]FeatureFlag, len(features))

func init() {
for f, v := range features {
nameToFeature[f.String()] = f
initial[f] = v
}
}

// Set accepts a list of features and whether they should
// be enabled or disabled. In the presence of unrecognized
// flags, it will return an error or not depending on the
// value of AllowUnrecognizedFeatures.
func Set(featureSet map[string]bool) error {
fMu.Lock()
defer fMu.Unlock()
var unknown []string
for n, v := range featureSet {
f, present := nameToFeature[n]
if present {
features[f] = v
} else {
unknown = append(unknown, n)
}
}
if len(unknown) > 0 && !features[AllowUnrecognizedFeatures] {
return fmt.Errorf("unrecognized feature flag names: %s",
strings.Join(unknown, ", "))
}
return nil
}

// Enabled returns true if the feature is enabled or false
// if it isn't, it will panic if passed a feature that it
// doesn't know.
func Enabled(n FeatureFlag) bool {
fMu.RLock()
defer fMu.RUnlock()
v, present := features[n]
if !present {
panic(fmt.Sprintf("feature '%s' doesn't exist", n.String()))
}
return v
}

// Reset resets the features to their initial state
func Reset() {
fMu.Lock()
defer fMu.Unlock()
for k, v := range initial {
features[k] = v
}
}

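Although this vendored copy of the features package is being dropped, its API is exercised in a few lines; a usage sketch built only from the names defined above:

package main

import (
	"fmt"

	"github.com/letsencrypt/boulder/features"
)

func main() {
	// Flag names are matched against their FeatureFlag.String() values.
	if err := features.Set(map[string]bool{"ECDSAForAll": true}); err != nil {
		panic(err)
	}
	fmt.Println(features.Enabled(features.ECDSAForAll)) // true
	features.Reset() // restore compiled-in defaults
}
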
@@ -13,9 +13,6 @@ import (

"github.com/letsencrypt/boulder/core"
berrors "github.com/letsencrypt/boulder/errors"
"github.com/letsencrypt/boulder/features"
sapb "github.com/letsencrypt/boulder/sa/proto"
"google.golang.org/grpc"

"github.com/titanous/rocacheck"
)
@@ -68,10 +65,12 @@ func badKey(msg string, args ...interface{}) error {
return fmt.Errorf("%w%s", ErrBadKey, fmt.Errorf(msg, args...))
}

// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey method to KeyPolicy,
// rather than storing a full sa.SQLStorageAuthority. This makes testing
// BlockedKeyCheckFunc is used to pass in the sa.BlockedKey functionality to KeyPolicy,
// rather than storing a full sa.SQLStorageAuthority. This allows external
// users who don’t want to import all of boulder/sa, and makes testing
// significantly simpler.
type BlockedKeyCheckFunc func(context.Context, *sapb.KeyBlockedRequest, ...grpc.CallOption) (*sapb.Exists, error)
// On success, the function returns a boolean which is true if the key is blocked.
type BlockedKeyCheckFunc func(ctx context.Context, keyHash []byte) (bool, error)

// KeyPolicy determines which types of key may be used with various boulder
// operations.
@@ -82,7 +81,7 @@ type KeyPolicy struct {
weakRSAList *WeakRSAKeys
blockedList *blockedKeys
fermatRounds int
dbCheck BlockedKeyCheckFunc
blockedCheck BlockedKeyCheckFunc
}

// NewKeyPolicy returns a KeyPolicy that allows RSA, ECDSA256 and ECDSA384.
@@ -97,7 +96,7 @@ func NewKeyPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) {
AllowRSA: true,
AllowECDSANISTP256: true,
AllowECDSANISTP384: true,
dbCheck: bkc,
blockedCheck: bkc,
}
if config.WeakKeyFile != "" {
keyList, err := LoadWeakRSASuffixes(config.WeakKeyFile)
@@ -142,15 +141,15 @@ func (policy *KeyPolicy) GoodKey(ctx context.Context, key crypto.PublicKey) erro
return badKey("public key is forbidden")
}
}
if policy.dbCheck != nil {
if policy.blockedCheck != nil {
digest, err := core.KeyDigest(key)
if err != nil {
return badKey("%w", err)
}
exists, err := policy.dbCheck(ctx, &sapb.KeyBlockedRequest{KeyHash: digest[:]})
exists, err := policy.blockedCheck(ctx, digest[:])
if err != nil {
return err
} else if exists.Exists {
} else if exists {
return badKey("public key is forbidden")
}
}
@@ -275,6 +274,12 @@ func (policy *KeyPolicy) goodCurve(c elliptic.Curve) (err error) {
}
}

// Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple
// of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes
// Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which
// have a known method to easily compute their private key, such as Debian Weak
// Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at
// common key sizes, so we restrict all issuance to those common key sizes.
var acceptableRSAKeySizes = map[int]bool{
2048: true,
3072: true,
@@ -290,27 +295,12 @@ func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) {
return badKey("key is on a known weak RSA key list")
}

// Baseline Requirements Appendix A
// Modulus must be >= 2048 bits and <= 4096 bits
modulus := key.N

// See comment on acceptableRSAKeySizes above.
modulusBitLen := modulus.BitLen()
if features.Enabled(features.RestrictRSAKeySizes) {
if !acceptableRSAKeySizes[modulusBitLen] {
return badKey("key size not supported: %d", modulusBitLen)
}
} else {
const maxKeySize = 4096
if modulusBitLen < 2048 {
return badKey("key too small: %d", modulusBitLen)
}
if modulusBitLen > maxKeySize {
return badKey("key too large: %d > %d", modulusBitLen, maxKeySize)
}
// Bit lengths that are not a multiple of 8 may cause problems on some
// client implementations.
if modulusBitLen%8 != 0 {
return badKey("key length wasn't a multiple of 8: %d", modulusBitLen)
}
if !acceptableRSAKeySizes[modulusBitLen] {
return badKey("key size not supported: %d", modulusBitLen)
}

// Rather than support arbitrary exponents, which significantly increases

File diff suppressed because it is too large.

@@ -1,353 +0,0 @@
syntax = "proto3";

package sa;
option go_package = "github.com/letsencrypt/boulder/sa/proto";

import "core/proto/core.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";

// StorageAuthorityReadOnly exposes only those SA methods which are read-only.
service StorageAuthorityReadOnly {
rpc CountCertificatesByNames(CountCertificatesByNamesRequest) returns (CountByNames) {}
rpc CountFQDNSets(CountFQDNSetsRequest) returns (Count) {}
rpc CountInvalidAuthorizations2(CountInvalidAuthorizationsRequest) returns (Count) {}
rpc CountOrders(CountOrdersRequest) returns (Count) {}
rpc CountPendingAuthorizations2(RegistrationID) returns (Count) {}
rpc CountRegistrationsByIP(CountRegistrationsByIPRequest) returns (Count) {}
rpc CountRegistrationsByIPRange(CountRegistrationsByIPRequest) returns (Count) {}
rpc FQDNSetExists(FQDNSetExistsRequest) returns (Exists) {}
rpc FQDNSetTimestampsForWindow(CountFQDNSetsRequest) returns (Timestamps) {}
rpc GetAuthorization2(AuthorizationID2) returns (core.Authorization) {}
rpc GetAuthorizations2(GetAuthorizationsRequest) returns (Authorizations) {}
rpc GetCertificate(Serial) returns (core.Certificate) {}
rpc GetCertificateStatus(Serial) returns (core.CertificateStatus) {}
rpc GetMaxExpiration(google.protobuf.Empty) returns (google.protobuf.Timestamp) {}
rpc GetOrder(OrderRequest) returns (core.Order) {}
rpc GetOrderForNames(GetOrderForNamesRequest) returns (core.Order) {}
rpc GetPendingAuthorization2(GetPendingAuthorizationRequest) returns (core.Authorization) {}
rpc GetPrecertificate(Serial) returns (core.Certificate) {}
rpc GetRegistration(RegistrationID) returns (core.Registration) {}
rpc GetRegistrationByKey(JSONWebKey) returns (core.Registration) {}
rpc GetRevocationStatus(Serial) returns (RevocationStatus) {}
rpc GetRevokedCerts(GetRevokedCertsRequest) returns (stream core.CRLEntry) {}
rpc GetSerialMetadata(Serial) returns (SerialMetadata) {}
rpc GetValidAuthorizations2(GetValidAuthorizationsRequest) returns (Authorizations) {}
rpc GetValidOrderAuthorizations2(GetValidOrderAuthorizationsRequest) returns (Authorizations) {}
rpc IncidentsForSerial(Serial) returns (Incidents) {}
rpc KeyBlocked(KeyBlockedRequest) returns (Exists) {}
rpc PreviousCertificateExists(PreviousCertificateExistsRequest) returns (Exists) {}
rpc SerialsForIncident (SerialsForIncidentRequest) returns (stream IncidentSerial) {}
}

// StorageAuthority provides full read/write access to the database.
service StorageAuthority {
// Getters: this list must be identical to the StorageAuthorityReadOnly rpcs.
rpc CountCertificatesByNames(CountCertificatesByNamesRequest) returns (CountByNames) {}
rpc CountFQDNSets(CountFQDNSetsRequest) returns (Count) {}
rpc CountInvalidAuthorizations2(CountInvalidAuthorizationsRequest) returns (Count) {}
rpc CountOrders(CountOrdersRequest) returns (Count) {}
rpc CountPendingAuthorizations2(RegistrationID) returns (Count) {}
rpc CountRegistrationsByIP(CountRegistrationsByIPRequest) returns (Count) {}
rpc CountRegistrationsByIPRange(CountRegistrationsByIPRequest) returns (Count) {}
rpc FQDNSetExists(FQDNSetExistsRequest) returns (Exists) {}
rpc FQDNSetTimestampsForWindow(CountFQDNSetsRequest) returns (Timestamps) {}
rpc GetAuthorization2(AuthorizationID2) returns (core.Authorization) {}
rpc GetAuthorizations2(GetAuthorizationsRequest) returns (Authorizations) {}
rpc GetCertificate(Serial) returns (core.Certificate) {}
rpc GetCertificateStatus(Serial) returns (core.CertificateStatus) {}
rpc GetMaxExpiration(google.protobuf.Empty) returns (google.protobuf.Timestamp) {}
rpc GetOrder(OrderRequest) returns (core.Order) {}
rpc GetOrderForNames(GetOrderForNamesRequest) returns (core.Order) {}
rpc GetPendingAuthorization2(GetPendingAuthorizationRequest) returns (core.Authorization) {}
rpc GetPrecertificate(Serial) returns (core.Certificate) {}
rpc GetRegistration(RegistrationID) returns (core.Registration) {}
rpc GetRegistrationByKey(JSONWebKey) returns (core.Registration) {}
rpc GetRevocationStatus(Serial) returns (RevocationStatus) {}
rpc GetRevokedCerts(GetRevokedCertsRequest) returns (stream core.CRLEntry) {}
rpc GetSerialMetadata(Serial) returns (SerialMetadata) {}
rpc GetValidAuthorizations2(GetValidAuthorizationsRequest) returns (Authorizations) {}
rpc GetValidOrderAuthorizations2(GetValidOrderAuthorizationsRequest) returns (Authorizations) {}
rpc IncidentsForSerial(Serial) returns (Incidents) {}
rpc KeyBlocked(KeyBlockedRequest) returns (Exists) {}
rpc PreviousCertificateExists(PreviousCertificateExistsRequest) returns (Exists) {}
rpc SerialsForIncident (SerialsForIncidentRequest) returns (stream IncidentSerial) {}
// Adders
rpc AddBlockedKey(AddBlockedKeyRequest) returns (google.protobuf.Empty) {}
rpc AddCertificate(AddCertificateRequest) returns (AddCertificateResponse) {}
rpc AddPrecertificate(AddCertificateRequest) returns (google.protobuf.Empty) {}
rpc AddSerial(AddSerialRequest) returns (google.protobuf.Empty) {}
rpc DeactivateAuthorization2(AuthorizationID2) returns (google.protobuf.Empty) {}
rpc DeactivateRegistration(RegistrationID) returns (google.protobuf.Empty) {}
rpc FinalizeAuthorization2(FinalizeAuthorizationRequest) returns (google.protobuf.Empty) {}
rpc FinalizeOrder(FinalizeOrderRequest) returns (google.protobuf.Empty) {}
rpc NewAuthorizations2(AddPendingAuthorizationsRequest) returns (Authorization2IDs) {}
rpc NewOrder(NewOrderRequest) returns (core.Order) {}
rpc NewOrderAndAuthzs(NewOrderAndAuthzsRequest) returns (core.Order) {}
rpc NewRegistration(core.Registration) returns (core.Registration) {}
rpc RevokeCertificate(RevokeCertificateRequest) returns (google.protobuf.Empty) {}
rpc SetOrderError(SetOrderErrorRequest) returns (google.protobuf.Empty) {}
rpc SetOrderProcessing(OrderRequest) returns (google.protobuf.Empty) {}
rpc UpdateRegistration(core.Registration) returns (google.protobuf.Empty) {}
rpc UpdateRevokedCertificate(RevokeCertificateRequest) returns (google.protobuf.Empty) {}
}

message RegistrationID {
int64 id = 1;
}

message JSONWebKey {
bytes jwk = 1;
}

message AuthorizationID {
string id = 1;
}

message GetPendingAuthorizationRequest {
int64 registrationID = 1;
string identifierType = 2;
string identifierValue = 3;
// Result must be valid until at least this Unix timestamp (nanos)
int64 validUntil = 4;
}

message GetValidAuthorizationsRequest {
int64 registrationID = 1;
repeated string domains = 2;
int64 now = 3; // Unix timestamp (nanoseconds)
}

message ValidAuthorizations {
message MapElement {
string domain = 1;
core.Authorization authz = 2;
}
repeated MapElement valid = 1;
}

message Serial {
string serial = 1;
}

message SerialMetadata {
string serial = 1;
int64 registrationID = 2;
int64 created = 3; // Unix timestamp (nanoseconds)
int64 expires = 4; // Unix timestamp (nanoseconds)
}

message Range {
int64 earliest = 1; // Unix timestamp (nanoseconds)
int64 latest = 2; // Unix timestamp (nanoseconds)
}

message Count {
int64 count = 1;
}

message Timestamps {
repeated int64 timestamps = 1; // Unix timestamp (nanoseconds)
}

message CountCertificatesByNamesRequest {
Range range = 1;
repeated string names = 2;
}

message CountByNames {
map<string, int64> counts = 1;
google.protobuf.Timestamp earliest = 2; // Unix timestamp (nanoseconds)
}

message CountRegistrationsByIPRequest {
bytes ip = 1;
Range range = 2;
}

message CountInvalidAuthorizationsRequest {
int64 registrationID = 1;
string hostname = 2;
// Count authorizations that expire in this range.
Range range = 3;
}

message CountOrdersRequest {
int64 accountID = 1;
Range range = 2;
}

message CountFQDNSetsRequest {
int64 window = 1;
repeated string domains = 2;
}

message FQDNSetExistsRequest {
repeated string domains = 1;
}

message PreviousCertificateExistsRequest {
string domain = 1;
int64 regID = 2;
}

message Exists {
bool exists = 1;
}

message AddSerialRequest {
int64 regID = 1;
string serial = 2;
int64 created = 3; // Unix timestamp (nanoseconds)
int64 expires = 4; // Unix timestamp (nanoseconds)
}

message AddCertificateRequest {
bytes der = 1;
int64 regID = 2;
// A signed OCSP response for the certificate contained in "der".
// Note: The certificate status in the OCSP response is assumed to be 0 (good).
bytes ocsp = 3;
// An issued time. When not present the SA defaults to using
// the current time. The orphan-finder uses this parameter to add
// certificates with the correct historic issued date
int64 issued = 4;
int64 issuerID = 5;
}

message AddCertificateResponse {
string digest = 1;
}

message OrderRequest {
int64 id = 1;
}

message NewOrderRequest {
int64 registrationID = 1;
int64 expires = 2;
repeated string names = 3;
repeated int64 v2Authorizations = 4;
}

message NewOrderAndAuthzsRequest {
NewOrderRequest newOrder = 1;
repeated core.Authorization newAuthzs = 2;
}

message SetOrderErrorRequest {
int64 id = 1;
core.ProblemDetails error = 2;
}

message GetValidOrderAuthorizationsRequest {
int64 id = 1;
int64 acctID = 2;
}

message GetOrderForNamesRequest {
int64 acctID = 1;
repeated string names = 2;
}

message FinalizeOrderRequest {
int64 id = 1;
string certificateSerial = 2;
}

message GetAuthorizationsRequest {
int64 registrationID = 1;
repeated string domains = 2;
int64 now = 3; // Unix timestamp (nanoseconds)
}

message Authorizations {
message MapElement {
string domain = 1;
core.Authorization authz = 2;
}
repeated MapElement authz = 1;
}

message AddPendingAuthorizationsRequest {
repeated core.Authorization authz = 1;
}

message AuthorizationIDs {
repeated string ids = 1;
}

message AuthorizationID2 {
int64 id = 1;
}

message Authorization2IDs {
repeated int64 ids = 1;
}

message RevokeCertificateRequest {
string serial = 1;
int64 reason = 2;
int64 date = 3; // Unix timestamp (nanoseconds)
int64 backdate = 5; // Unix timestamp (nanoseconds)
bytes response = 4;
int64 issuerID = 6;
}

message FinalizeAuthorizationRequest {
int64 id = 1;
string status = 2;
int64 expires = 3; // Unix timestamp (nanoseconds)
string attempted = 4;
repeated core.ValidationRecord validationRecords = 5;
core.ProblemDetails validationError = 6;
int64 attemptedAt = 7; // Unix timestamp (nanoseconds)
}

message AddBlockedKeyRequest {
bytes keyHash = 1;
int64 added = 2; // Unix timestamp (nanoseconds)
string source = 3;
string comment = 4;
int64 revokedBy = 5;
}

message KeyBlockedRequest {
bytes keyHash = 1;
}

message Incident {
int64 id = 1;
string serialTable = 2;
string url = 3;
int64 renewBy = 4; // Unix timestamp (nanoseconds)
bool enabled = 5;
}

message Incidents {
repeated Incident incidents = 1;
}

message SerialsForIncidentRequest {
string incidentTable = 1;
}

message IncidentSerial {
string serial = 1;
int64 registrationID = 2;
int64 orderID = 3;
int64 lastNoticeSent = 4; // Unix timestamp (nanoseconds)
}

message GetRevokedCertsRequest {
int64 issuerNameID = 1;
int64 expiresAfter = 2; // Unix timestamp (nanoseconds), inclusive
int64 expiresBefore = 3; // Unix timestamp (nanoseconds), exclusive
int64 revokedBefore = 4; // Unix timestamp (nanoseconds)
}

message RevocationStatus {
int64 status = 1;
int64 revokedReason = 2;
google.protobuf.Timestamp revokedDate = 3; // Unix timestamp (nanoseconds)
}

File diff suppressed because it is too large.

@@ -1,47 +0,0 @@
// Copied from the auto-generated sa_grpc.pb.go

package proto

import (
context "context"

proto "github.com/letsencrypt/boulder/core/proto"
grpc "google.golang.org/grpc"
emptypb "google.golang.org/protobuf/types/known/emptypb"
)

// StorageAuthorityGetterClient is a read-only subset of the sapb.StorageAuthorityClient interface
type StorageAuthorityGetterClient interface {
GetRegistration(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*proto.Registration, error)
GetRegistrationByKey(ctx context.Context, in *JSONWebKey, opts ...grpc.CallOption) (*proto.Registration, error)
GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
GetCertificateStatus(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.CertificateStatus, error)
CountCertificatesByNames(ctx context.Context, in *CountCertificatesByNamesRequest, opts ...grpc.CallOption) (*CountByNames, error)
CountRegistrationsByIP(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
CountRegistrationsByIPRange(ctx context.Context, in *CountRegistrationsByIPRequest, opts ...grpc.CallOption) (*Count, error)
CountOrders(ctx context.Context, in *CountOrdersRequest, opts ...grpc.CallOption) (*Count, error)
CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error)
FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error)
PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error)
GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error)
GetAuthorizations2(ctx context.Context, in *GetAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
GetPendingAuthorization2(ctx context.Context, in *GetPendingAuthorizationRequest, opts ...grpc.CallOption) (*proto.Authorization, error)
CountPendingAuthorizations2(ctx context.Context, in *RegistrationID, opts ...grpc.CallOption) (*Count, error)
GetValidOrderAuthorizations2(ctx context.Context, in *GetValidOrderAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error)
GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error)
KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error)
GetOrder(ctx context.Context, in *OrderRequest, opts ...grpc.CallOption) (*proto.Order, error)
GetOrderForNames(ctx context.Context, in *GetOrderForNamesRequest, opts ...grpc.CallOption) (*proto.Order, error)
IncidentsForSerial(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*Incidents, error)
}

// StorageAuthorityCertificateClient is a subset of the sapb.StorageAuthorityClient interface that only reads and writes certificates
type StorageAuthorityCertificateClient interface {
AddSerial(ctx context.Context, in *AddSerialRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
AddPrecertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
GetPrecertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
AddCertificate(ctx context.Context, in *AddCertificateRequest, opts ...grpc.CallOption) (*AddCertificateResponse, error)
GetCertificate(ctx context.Context, in *Serial, opts ...grpc.CallOption) (*proto.Certificate, error)
}

@@ -27,7 +27,6 @@ import (
"go/token"
"go/types"
"io"
"io/ioutil"
"os/exec"

"golang.org/x/tools/internal/gcimporter"
@@ -85,6 +84,19 @@ func NewReader(r io.Reader) (io.Reader, error) {
}
}

// readAll works the same way as io.ReadAll, but avoids allocations and copies
// by preallocating a byte slice of the necessary size if the size is known up
// front. This is always possible when the input is an archive. In that case,
// NewReader will return the known size using an io.LimitedReader.
func readAll(r io.Reader) ([]byte, error) {
if lr, ok := r.(*io.LimitedReader); ok {
data := make([]byte, lr.N)
_, err := io.ReadFull(lr, data)
return data, err
}
return io.ReadAll(r)
}

// Read reads export data from in, decodes it, and returns type
// information for the package.
//
@@ -102,7 +114,7 @@ func NewReader(r io.Reader) (io.Reader, error) {
//
// On return, the state of the reader is undefined.
func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
data, err := ioutil.ReadAll(in)
data, err := readAll(in)
if err != nil {
return nil, fmt.Errorf("reading export data for %q: %v", path, err)
}
@@ -111,12 +123,6 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
}

// The App Engine Go runtime v1.6 uses the old export data format.
// TODO(adonovan): delete once v1.7 has been around for a while.
if bytes.HasPrefix(data, []byte("package ")) {
return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
}

// The indexed export format starts with an 'i'; the older
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
@@ -165,7 +171,7 @@ func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
//
// Experimental: This API is experimental and may change in the future.
func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) {
data, err := ioutil.ReadAll(in)
data, err := readAll(in)
if err != nil {
return nil, fmt.Errorf("reading export bundle: %v", err)
}

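The readAll helper added above generalizes to any reader whose remaining size is known up front: detect *io.LimitedReader, allocate exactly N bytes, and fill them with io.ReadFull. A self-contained sketch of the same pattern:

package main

import (
	"fmt"
	"io"
	"strings"
)

// readAll mirrors the helper above: for an *io.LimitedReader the N field
// gives the exact remaining size, so a single allocation suffices.
func readAll(r io.Reader) ([]byte, error) {
	if lr, ok := r.(*io.LimitedReader); ok {
		data := make([]byte, lr.N)
		_, err := io.ReadFull(lr, data)
		return data, err
	}
	return io.ReadAll(r)
}

func main() {
	src := strings.NewReader("export data payload")
	data, err := readAll(&io.LimitedReader{R: src, N: 11})
	fmt.Printf("%q %v\n", data, err) // "export data" <nil>
}
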
@ -15,6 +15,7 @@ import (
|
|||
"go/scanner"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
|
@ -877,12 +878,19 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
|||
// never has to create a types.Package for an indirect dependency,
|
||||
// which would then require that such created packages be explicitly
|
||||
// inserted back into the Import graph as a final step after export data loading.
|
||||
// (Hence this return is after the Types assignment.)
|
||||
// The Diamond test exercises this case.
|
||||
if !lpkg.needtypes && !lpkg.needsrc {
|
||||
return
|
||||
}
|
||||
if !lpkg.needsrc {
|
||||
ld.loadFromExportData(lpkg)
|
||||
if err := ld.loadFromExportData(lpkg); err != nil {
|
||||
lpkg.Errors = append(lpkg.Errors, Error{
|
||||
Pos: "-",
|
||||
Msg: err.Error(),
|
||||
Kind: UnknownError, // e.g. can't find/open/parse export data
|
||||
})
|
||||
}
|
||||
return // not a source package, don't get syntax trees
|
||||
}
|
||||
|
||||
|
@ -950,6 +958,8 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
|||
// - golang.org/issue/52078 (flag to set release tags)
|
||||
// - golang.org/issue/50825 (gopls legacy version support)
|
||||
// - golang.org/issue/55883 (go/packages confusing error)
|
||||
//
|
||||
// Should we assert a hard minimum of (currently) go1.16 here?
|
||||
var runtimeVersion int
|
||||
if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion {
|
||||
defer func() {
|
||||
|
@@ -967,7 +977,8 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
// The config requested loading sources and types, but sources are missing.
// Add an error to the package and fall back to loading from export data.
appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError})
ld.loadFromExportData(lpkg)
_ = ld.loadFromExportData(lpkg) // ignore any secondary errors

return // can't get syntax trees for this package
}

@@ -1191,9 +1202,10 @@ func sameFile(x, y string) bool {
return false
}

// loadFromExportData returns type information for the specified
// loadFromExportData ensures that type information is present for the specified
// package, loading it from an export data file on the first request.
func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) {
// On success it sets lpkg.Types to a new Package.
func (ld *loader) loadFromExportData(lpkg *loaderPackage) error {
if lpkg.PkgPath == "" {
log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
}

@@ -1204,8 +1216,8 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
// must be sequential. (Finer-grained locking would require
// changes to the gcexportdata API.)
//
// The exportMu lock guards the Package.Pkg field and the
// types.Package it points to, for each Package in the graph.
// The exportMu lock guards the lpkg.Types field and the
// types.Package it points to, for each loaderPackage in the graph.
//
// Not all accesses to Package.Pkg need to be protected by exportMu:
// graph ordering ensures that direct dependencies of source

@@ -1214,18 +1226,18 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
defer ld.exportMu.Unlock()

if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() {
return tpkg, nil // cache hit
return nil // cache hit
}

lpkg.IllTyped = true // fail safe

if lpkg.ExportFile == "" {
// Errors while building export data will have been printed to stderr.
return nil, fmt.Errorf("no export data file")
return fmt.Errorf("no export data file")
}
f, err := os.Open(lpkg.ExportFile)
if err != nil {
return nil, err
return err
}
defer f.Close()

@@ -1237,7 +1249,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
// queries.)
r, err := gcexportdata.NewReader(f)
if err != nil {
return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
}

// Build the view.

@@ -1281,7 +1293,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
// (May modify incomplete packages in view but not create new ones.)
tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
if err != nil {
return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
}
if _, ok := view["go.shape"]; ok {
// Account for the pseudopackage "go.shape" that gets

@@ -1294,8 +1306,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
lpkg.Types = tpkg
lpkg.IllTyped = false

return tpkg, nil
return nil
}

// impliedLoadMode returns loadMode with its dependencies.

@@ -1311,3 +1322,5 @@ func impliedLoadMode(loadMode LoadMode) LoadMode {
func usesExportData(cfg *Config) bool {
return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
}

var _ interface{} = io.Discard // assert build toolchain is go1.16 or later
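The loader above drives golang.org/x/tools/go/gcexportdata: NewReader positions the stream at the export data and Read decodes it into a *types.Package. A condensed, self-contained usage sketch outside the loader (the "fmt.a" path is hypothetical):

package main

import (
    "fmt"
    "go/token"
    "go/types"
    "log"
    "os"

    "golang.org/x/tools/go/gcexportdata"
)

func main() {
    f, err := os.Open("fmt.a") // an export data file
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    r, err := gcexportdata.NewReader(f) // skips any archive/object header
    if err != nil {
        log.Fatal(err)
    }

    fset := token.NewFileSet()
    imports := make(map[string]*types.Package)
    pkg, err := gcexportdata.Read(r, fset, imports, "fmt")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(pkg.Path(), pkg.Complete())
}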
@@ -2,9 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go,
// but it also contains the original source-based importer code for Go1.6.
// Once we stop supporting 1.6, we can remove that code.
// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go.

// Package gcimporter provides various functions for reading
// gc-generated object files that can be used to implement the

@@ -14,10 +12,8 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter"
import (
"bufio"
"bytes"
"errors"
"fmt"
"go/build"
"go/constant"
"go/token"
"go/types"
"io"

@@ -25,11 +21,8 @@ import (
"os"
"os/exec"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"text/scanner"
)

const (

@@ -154,37 +147,6 @@ func FindPkg(path, srcDir string) (filename, id string) {
return
}

// ImportData imports a package by reading the gc-generated export data,
// adds the corresponding package object to the packages map indexed by id,
// and returns the object.
//
// The packages map must contain all packages already imported. The data
// reader position must be the beginning of the export data section. The
// filename is only used in error messages.
//
// If packages[id] contains the completely imported package, that package
// can be used directly, and there is no need to call this function (but
// there is also no harm but for extra time used).
func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) {
// support for parser error handling
defer func() {
switch r := recover().(type) {
case nil:
// nothing to do
case importError:
err = r
default:
panic(r) // internal error
}
}()

var p parser
p.init(filename, id, data, packages)
pkg = p.parseExport()

return
}
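The removed ImportData converts parser panics into ordinary errors with defer/recover: expected failures are raised as a dedicated panic type deep inside the parser and intercepted at the API boundary, while anything else re-panics. The pattern in isolation, as a runnable sketch (names hypothetical):

package main

import "fmt"

type parseError struct{ msg string }

func (e parseError) Error() string { return e.msg }

func parse(ok bool) (err error) {
    defer func() {
        switch r := recover().(type) {
        case nil: // no panic: keep err as-is
        case parseError:
            err = r // expected failure raised deep in the parser
        default:
            panic(r) // internal error: re-panic
        }
    }()
    if !ok {
        panic(parseError{"unexpected token"})
    }
    return nil
}

func main() { fmt.Println(parse(false)) } // unexpected token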
// Import imports a gc-generated package given its import path and srcDir, adds
// the corresponding package object to the packages map, and returns the object.
// The packages map must contain all packages already imported.

@@ -245,15 +207,6 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
}

switch hdr {
case "$$\n":
// Work-around if we don't have a filename; happens only if lookup != nil.
// Either way, the filename is only needed for importer error messages, so
// this is fine.
if filename == "" {
filename = path
}
return ImportData(packages, filename, id, buf)

case "$$B\n":
var data []byte
data, err = ioutil.ReadAll(buf)

@@ -298,319 +251,6 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
return
}

// ----------------------------------------------------------------------------
// Parser

// TODO(gri) Imported objects don't have position information.
// Ideally use the debug table line info; alternatively
// create some fake position (or the position of the
// import). That way error messages referring to imported
// objects can print meaningful information.

// parser parses the exports inside a gc compiler-produced
// object/archive file and populates its scope with the results.
type parser struct {
scanner scanner.Scanner
tok rune // current token
lit string // literal string; only valid for Ident, Int, String tokens
id string // package id of imported package
sharedPkgs map[string]*types.Package // package id -> package object (across importer)
localPkgs map[string]*types.Package // package id -> package object (just this package)
}

func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) {
p.scanner.Init(src)
p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
p.scanner.Whitespace = 1<<'\t' | 1<<' '
p.scanner.Filename = filename // for good error messages
p.next()
p.id = id
p.sharedPkgs = packages
if debug {
// check consistency of packages map
for _, pkg := range packages {
if pkg.Name() == "" {
fmt.Printf("no package name for %s\n", pkg.Path())
}
}
}
}

func (p *parser) next() {
p.tok = p.scanner.Scan()
switch p.tok {
case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·':
p.lit = p.scanner.TokenText()
default:
p.lit = ""
}
if debug {
fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit)
}
}

func declTypeName(pkg *types.Package, name string) *types.TypeName {
scope := pkg.Scope()
if obj := scope.Lookup(name); obj != nil {
return obj.(*types.TypeName)
}
obj := types.NewTypeName(token.NoPos, pkg, name, nil)
// a named type may be referred to before the underlying type
// is known - set it up
types.NewNamed(obj, nil, nil)
scope.Insert(obj)
return obj
}

// ----------------------------------------------------------------------------
// Error handling

// Internal errors are boxed as importErrors.
type importError struct {
pos scanner.Position
err error
}

func (e importError) Error() string {
return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err)
}

func (p *parser) error(err interface{}) {
if s, ok := err.(string); ok {
err = errors.New(s)
}
// panic with a runtime.Error if err is not an error
panic(importError{p.scanner.Pos(), err.(error)})
}

func (p *parser) errorf(format string, args ...interface{}) {
p.error(fmt.Sprintf(format, args...))
}

func (p *parser) expect(tok rune) string {
lit := p.lit
if p.tok != tok {
p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit)
}
p.next()
return lit
}

func (p *parser) expectSpecial(tok string) {
sep := 'x' // not white space
i := 0
for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' {
sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
p.next()
i++
}
if i < len(tok) {
p.errorf("expected %q, got %q", tok, tok[0:i])
}
}

func (p *parser) expectKeyword(keyword string) {
lit := p.expect(scanner.Ident)
if lit != keyword {
p.errorf("expected keyword %s, got %q", keyword, lit)
}
}

// ----------------------------------------------------------------------------
// Qualified and unqualified names

// parsePackageID parses a PackageId:
//
// PackageId = string_lit .
func (p *parser) parsePackageID() string {
id, err := strconv.Unquote(p.expect(scanner.String))
if err != nil {
p.error(err)
}
// id == "" stands for the imported package id
// (only known at time of package installation)
if id == "" {
id = p.id
}
return id
}
// parsePackageName parses a PackageName:
//
// PackageName = ident .
func (p *parser) parsePackageName() string {
return p.expect(scanner.Ident)
}

// parseDotIdent parses a dotIdentifier:
//
// dotIdentifier = ( ident | '·' ) { ident | int | '·' } .
func (p *parser) parseDotIdent() string {
ident := ""
if p.tok != scanner.Int {
sep := 'x' // not white space
for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' {
ident += p.lit
sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token
p.next()
}
}
if ident == "" {
p.expect(scanner.Ident) // use expect() for error handling
}
return ident
}

// parseQualifiedName parses a QualifiedName:
//
// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) .
func (p *parser) parseQualifiedName() (id, name string) {
p.expect('@')
id = p.parsePackageID()
p.expect('.')
// Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields.
if p.tok == '?' {
p.next()
} else {
name = p.parseDotIdent()
}
return
}

// getPkg returns the package for a given id. If the package is
// not found, create the package and add it to the p.localPkgs
// and p.sharedPkgs maps. name is the (expected) name of the
// package. If name == "", the package name is expected to be
// set later via an import clause in the export data.
//
// id identifies a package, usually by a canonical package path like
// "encoding/json" but possibly by a non-canonical import path like
// "./json".
func (p *parser) getPkg(id, name string) *types.Package {
// package unsafe is not in the packages maps - handle explicitly
if id == "unsafe" {
return types.Unsafe
}

pkg := p.localPkgs[id]
if pkg == nil {
// first import of id from this package
pkg = p.sharedPkgs[id]
if pkg == nil {
// first import of id by this importer;
// add (possibly unnamed) pkg to shared packages
pkg = types.NewPackage(id, name)
p.sharedPkgs[id] = pkg
}
// add (possibly unnamed) pkg to local packages
if p.localPkgs == nil {
p.localPkgs = make(map[string]*types.Package)
}
p.localPkgs[id] = pkg
} else if name != "" {
// package exists already and we have an expected package name;
// make sure names match or set package name if necessary
if pname := pkg.Name(); pname == "" {
pkg.SetName(name)
} else if pname != name {
p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name)
}
}
return pkg
}

// parseExportedName is like parseQualifiedName, but
// the package id is resolved to an imported *types.Package.
func (p *parser) parseExportedName() (pkg *types.Package, name string) {
id, name := p.parseQualifiedName()
pkg = p.getPkg(id, "")
return
}

// ----------------------------------------------------------------------------
// Types

// parseBasicType parses a BasicType:
//
// BasicType = identifier .
func (p *parser) parseBasicType() types.Type {
id := p.expect(scanner.Ident)
obj := types.Universe.Lookup(id)
if obj, ok := obj.(*types.TypeName); ok {
return obj.Type()
}
p.errorf("not a basic type: %s", id)
return nil
}

// parseArrayType parses an ArrayType:
//
// ArrayType = "[" int_lit "]" Type .
func (p *parser) parseArrayType(parent *types.Package) types.Type {
// "[" already consumed and lookahead known not to be "]"
lit := p.expect(scanner.Int)
p.expect(']')
elem := p.parseType(parent)
n, err := strconv.ParseInt(lit, 10, 64)
if err != nil {
p.error(err)
}
return types.NewArray(elem, n)
}

// parseMapType parses a MapType:
//
// MapType = "map" "[" Type "]" Type .
func (p *parser) parseMapType(parent *types.Package) types.Type {
p.expectKeyword("map")
p.expect('[')
key := p.parseType(parent)
p.expect(']')
elem := p.parseType(parent)
return types.NewMap(key, elem)
}

// parseName parses a Name:
//
// Name = identifier | "?" | QualifiedName .
//
// For unqualified and anonymous names, the returned package is the parent
// package unless parent == nil, in which case the returned package is the
// package being imported. (The parent package is not nil if the name
// is an unqualified struct field or interface method name belonging to a
// type declared in another package.)
//
// For qualified names, the returned package is nil (and not created if
// it doesn't exist yet) unless materializePkg is set (which creates an
// unnamed package with valid package path). In the latter case, a
// subsequent import clause is expected to provide a name for the package.
func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) {
pkg = parent
if pkg == nil {
pkg = p.sharedPkgs[p.id]
}
switch p.tok {
case scanner.Ident:
name = p.lit
p.next()
case '?':
// anonymous
p.next()
case '@':
// exported name prefixed with package path
pkg = nil
var id string
id, name = p.parseQualifiedName()
if materializePkg {
pkg = p.getPkg(id, "")
}
default:
p.error("name expected")
}
return
}

func deref(typ types.Type) types.Type {
if p, _ := typ.(*types.Pointer); p != nil {
return p.Elem()

@@ -618,563 +258,6 @@ func deref(typ types.Type) types.Type {
return typ
}

// parseField parses a Field:
//
// Field = Name Type [ string_lit ] .
func (p *parser) parseField(parent *types.Package) (*types.Var, string) {
pkg, name := p.parseName(parent, true)

if name == "_" {
// Blank fields should be package-qualified because they
// are unexported identifiers, but gc does not qualify them.
// Assuming that the ident belongs to the current package
// causes types to change during re-exporting, leading
// to spurious "can't assign A to B" errors from go/types.
// As a workaround, pretend all blank fields belong
// to the same unique dummy package.
const blankpkg = "<_>"
pkg = p.getPkg(blankpkg, blankpkg)
}

typ := p.parseType(parent)
anonymous := false
if name == "" {
// anonymous field - typ must be T or *T and T must be a type name
switch typ := deref(typ).(type) {
case *types.Basic: // basic types are named types
pkg = nil // objects defined in Universe scope have no package
name = typ.Name()
case *types.Named:
name = typ.Obj().Name()
default:
p.errorf("anonymous field expected")
}
anonymous = true
}
tag := ""
if p.tok == scanner.String {
s := p.expect(scanner.String)
var err error
tag, err = strconv.Unquote(s)
if err != nil {
p.errorf("invalid struct tag %s: %s", s, err)
}
}
return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag
}

// parseStructType parses a StructType:
//
// StructType = "struct" "{" [ FieldList ] "}" .
// FieldList = Field { ";" Field } .
func (p *parser) parseStructType(parent *types.Package) types.Type {
var fields []*types.Var
var tags []string

p.expectKeyword("struct")
p.expect('{')
for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
if i > 0 {
p.expect(';')
}
fld, tag := p.parseField(parent)
if tag != "" && tags == nil {
tags = make([]string, i)
}
if tags != nil {
tags = append(tags, tag)
}
fields = append(fields, fld)
}
p.expect('}')

return types.NewStruct(fields, tags)
}

// parseParameter parses a Parameter:
//
// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] .
func (p *parser) parseParameter() (par *types.Var, isVariadic bool) {
_, name := p.parseName(nil, false)
// remove gc-specific parameter numbering
if i := strings.Index(name, "·"); i >= 0 {
name = name[:i]
}
if p.tok == '.' {
p.expectSpecial("...")
isVariadic = true
}
typ := p.parseType(nil)
if isVariadic {
typ = types.NewSlice(typ)
}
// ignore argument tag (e.g. "noescape")
if p.tok == scanner.String {
p.next()
}
// TODO(gri) should we provide a package?
par = types.NewVar(token.NoPos, nil, name, typ)
return
}

// parseParameters parses a Parameters:
//
// Parameters = "(" [ ParameterList ] ")" .
// ParameterList = { Parameter "," } Parameter .
func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) {
p.expect('(')
for p.tok != ')' && p.tok != scanner.EOF {
if len(list) > 0 {
p.expect(',')
}
par, variadic := p.parseParameter()
list = append(list, par)
if variadic {
if isVariadic {
p.error("... not on final argument")
}
isVariadic = true
}
}
p.expect(')')

return
}

// parseSignature parses a Signature:
//
// Signature = Parameters [ Result ] .
// Result = Type | Parameters .
func (p *parser) parseSignature(recv *types.Var) *types.Signature {
params, isVariadic := p.parseParameters()

// optional result type
var results []*types.Var
if p.tok == '(' {
var variadic bool
results, variadic = p.parseParameters()
if variadic {
p.error("... not permitted on result type")
}
}

return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic)
}

// parseInterfaceType parses an InterfaceType:
//
// InterfaceType = "interface" "{" [ MethodList ] "}" .
// MethodList = Method { ";" Method } .
// Method = Name Signature .
//
// The methods of embedded interfaces are always "inlined"
// by the compiler and thus embedded interfaces are never
// visible in the export data.
func (p *parser) parseInterfaceType(parent *types.Package) types.Type {
var methods []*types.Func

p.expectKeyword("interface")
p.expect('{')
for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ {
if i > 0 {
p.expect(';')
}
pkg, name := p.parseName(parent, true)
sig := p.parseSignature(nil)
methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig))
}
p.expect('}')

// Complete requires the type's embedded interfaces to be fully defined,
// but we do not define any
return newInterface(methods, nil).Complete()
}

// parseChanType parses a ChanType:
//
// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
func (p *parser) parseChanType(parent *types.Package) types.Type {
dir := types.SendRecv
if p.tok == scanner.Ident {
p.expectKeyword("chan")
if p.tok == '<' {
p.expectSpecial("<-")
dir = types.SendOnly
}
} else {
p.expectSpecial("<-")
p.expectKeyword("chan")
dir = types.RecvOnly
}
elem := p.parseType(parent)
return types.NewChan(dir, elem)
}

// parseType parses a Type:
//
// Type =
// BasicType | TypeName | ArrayType | SliceType | StructType |
// PointerType | FuncType | InterfaceType | MapType | ChanType |
// "(" Type ")" .
//
// BasicType = ident .
// TypeName = ExportedName .
// SliceType = "[" "]" Type .
// PointerType = "*" Type .
// FuncType = "func" Signature .
func (p *parser) parseType(parent *types.Package) types.Type {
switch p.tok {
case scanner.Ident:
switch p.lit {
default:
return p.parseBasicType()
case "struct":
return p.parseStructType(parent)
case "func":
// FuncType
p.next()
return p.parseSignature(nil)
case "interface":
return p.parseInterfaceType(parent)
case "map":
return p.parseMapType(parent)
case "chan":
return p.parseChanType(parent)
}
case '@':
// TypeName
pkg, name := p.parseExportedName()
return declTypeName(pkg, name).Type()
case '[':
p.next() // look ahead
if p.tok == ']' {
// SliceType
p.next()
return types.NewSlice(p.parseType(parent))
}
return p.parseArrayType(parent)
case '*':
// PointerType
p.next()
return types.NewPointer(p.parseType(parent))
case '<':
return p.parseChanType(parent)
case '(':
// "(" Type ")"
p.next()
typ := p.parseType(parent)
p.expect(')')
return typ
}
p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit)
return nil
}

// ----------------------------------------------------------------------------
// Declarations

// parseImportDecl parses an ImportDecl:
//
// ImportDecl = "import" PackageName PackageId .
func (p *parser) parseImportDecl() {
p.expectKeyword("import")
name := p.parsePackageName()
p.getPkg(p.parsePackageID(), name)
}

// parseInt parses an int_lit:
//
// int_lit = [ "+" | "-" ] { "0" ... "9" } .
func (p *parser) parseInt() string {
s := ""
switch p.tok {
case '-':
s = "-"
p.next()
case '+':
p.next()
}
return s + p.expect(scanner.Int)
}

// parseNumber parses a number:
//
// number = int_lit [ "p" int_lit ] .
func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) {
// mantissa
mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0)
if mant == nil {
panic("invalid mantissa")
}

if p.lit == "p" {
// exponent (base 2)
p.next()
exp, err := strconv.ParseInt(p.parseInt(), 10, 0)
if err != nil {
p.error(err)
}
if exp < 0 {
denom := constant.MakeInt64(1)
denom = constant.Shift(denom, token.SHL, uint(-exp))
typ = types.Typ[types.UntypedFloat]
val = constant.BinaryOp(mant, token.QUO, denom)
return
}
if exp > 0 {
mant = constant.Shift(mant, token.SHL, uint(exp))
}
typ = types.Typ[types.UntypedFloat]
val = mant
return
}

typ = types.Typ[types.UntypedInt]
val = mant
return
}

// parseConstDecl parses a ConstDecl:
//
// ConstDecl = "const" ExportedName [ Type ] "=" Literal .
// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit .
// bool_lit = "true" | "false" .
// complex_lit = "(" float_lit "+" float_lit "i" ")" .
// rune_lit = "(" int_lit "+" int_lit ")" .
// string_lit = `"` { unicode_char } `"` .
func (p *parser) parseConstDecl() {
p.expectKeyword("const")
pkg, name := p.parseExportedName()

var typ0 types.Type
if p.tok != '=' {
// constant types are never structured - no need for parent type
typ0 = p.parseType(nil)
}

p.expect('=')
var typ types.Type
var val constant.Value
switch p.tok {
case scanner.Ident:
// bool_lit
if p.lit != "true" && p.lit != "false" {
p.error("expected true or false")
}
typ = types.Typ[types.UntypedBool]
val = constant.MakeBool(p.lit == "true")
p.next()

case '-', scanner.Int:
// int_lit
typ, val = p.parseNumber()

case '(':
// complex_lit or rune_lit
p.next()
if p.tok == scanner.Char {
p.next()
p.expect('+')
typ = types.Typ[types.UntypedRune]
_, val = p.parseNumber()
p.expect(')')
break
}
_, re := p.parseNumber()
p.expect('+')
_, im := p.parseNumber()
p.expectKeyword("i")
p.expect(')')
typ = types.Typ[types.UntypedComplex]
val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))

case scanner.Char:
// rune_lit
typ = types.Typ[types.UntypedRune]
val = constant.MakeFromLiteral(p.lit, token.CHAR, 0)
p.next()

case scanner.String:
// string_lit
typ = types.Typ[types.UntypedString]
val = constant.MakeFromLiteral(p.lit, token.STRING, 0)
p.next()

default:
p.errorf("expected literal got %s", scanner.TokenString(p.tok))
}

if typ0 == nil {
typ0 = typ
}

pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val))
}

// parseTypeDecl parses a TypeDecl:
//
// TypeDecl = "type" ExportedName Type .
func (p *parser) parseTypeDecl() {
p.expectKeyword("type")
pkg, name := p.parseExportedName()
obj := declTypeName(pkg, name)

// The type object may have been imported before and thus already
// have a type associated with it. We still need to parse the type
// structure, but throw it away if the object already has a type.
// This ensures that all imports refer to the same type object for
// a given type declaration.
typ := p.parseType(pkg)

if name := obj.Type().(*types.Named); name.Underlying() == nil {
name.SetUnderlying(typ)
}
}

// parseVarDecl parses a VarDecl:
//
// VarDecl = "var" ExportedName Type .
func (p *parser) parseVarDecl() {
p.expectKeyword("var")
pkg, name := p.parseExportedName()
typ := p.parseType(pkg)
pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ))
}

// parseFunc parses a Func:
//
// Func = Signature [ Body ] .
// Body = "{" ... "}" .
func (p *parser) parseFunc(recv *types.Var) *types.Signature {
sig := p.parseSignature(recv)
if p.tok == '{' {
p.next()
for i := 1; i > 0; p.next() {
switch p.tok {
case '{':
i++
case '}':
i--
}
}
}
return sig
}

// parseMethodDecl parses a MethodDecl:
//
// MethodDecl = "func" Receiver Name Func .
// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" .
func (p *parser) parseMethodDecl() {
// "func" already consumed
p.expect('(')
recv, _ := p.parseParameter() // receiver
p.expect(')')

// determine receiver base type object
base := deref(recv.Type()).(*types.Named)

// parse method name, signature, and possibly inlined body
_, name := p.parseName(nil, false)
sig := p.parseFunc(recv)

// methods always belong to the same package as the base type object
pkg := base.Obj().Pkg()

// add method to type unless type was imported before
// and method exists already
// TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small.
base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig))
}

// parseFuncDecl parses a FuncDecl:
//
// FuncDecl = "func" ExportedName Func .
func (p *parser) parseFuncDecl() {
// "func" already consumed
pkg, name := p.parseExportedName()
typ := p.parseFunc(nil)
pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ))
}

// parseDecl parses a Decl:
//
// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
func (p *parser) parseDecl() {
if p.tok == scanner.Ident {
switch p.lit {
case "import":
p.parseImportDecl()
case "const":
p.parseConstDecl()
case "type":
p.parseTypeDecl()
case "var":
p.parseVarDecl()
case "func":
p.next() // look ahead
if p.tok == '(' {
p.parseMethodDecl()
} else {
p.parseFuncDecl()
}
}
}
p.expect('\n')
}

// ----------------------------------------------------------------------------
// Export

// parseExport parses an Export:
//
// Export = "PackageClause { Decl } "$$" .
// PackageClause = "package" PackageName [ "safe" ] "\n" .
func (p *parser) parseExport() *types.Package {
p.expectKeyword("package")
name := p.parsePackageName()
if p.tok == scanner.Ident && p.lit == "safe" {
// package was compiled with -u option - ignore
p.next()
}
p.expect('\n')

pkg := p.getPkg(p.id, name)

for p.tok != '$' && p.tok != scanner.EOF {
p.parseDecl()
}

if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' {
// don't call next()/expect() since reading past the
// export data may cause scanner errors (e.g. NUL chars)
p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch)
}

if n := p.scanner.ErrorCount; n != 0 {
p.errorf("expected no scanner errors, got %d", n)
}

// Record all locally referenced packages as imports.
var imports []*types.Package
for id, pkg2 := range p.localPkgs {
if pkg2.Name() == "" {
p.errorf("%s package has no name", id)
}
if id == p.id {
continue // avoid self-edge
}
imports = append(imports, pkg2)
}
sort.Sort(byPath(imports))
pkg.SetImports(imports)

// package was imported completely and without errors
pkg.MarkComplete()

return pkg
}

type byPath []*types.Package

func (a byPath) Len() int { return len(a) }
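Much of the deleted parser leans on go/constant for untyped literal arithmetic. A runnable sketch of the complex-literal construction the removed parseConstDecl performed:

package main

import (
    "fmt"
    "go/constant"
    "go/token"
)

func main() {
    re := constant.MakeFromLiteral("3", token.INT, 0)
    im := constant.MakeFromLiteral("4", token.INT, 0)
    // Combine real and imaginary parts exactly, without float rounding.
    c := constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
    fmt.Println(c) // (3 + 4i)
}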
@@ -22,6 +22,7 @@ import (
"strconv"
"strings"

"golang.org/x/tools/internal/tokeninternal"
"golang.org/x/tools/internal/typeparams"
)

@@ -138,6 +139,17 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, ver
p.doDecl(p.declTodo.popHead())
}

// Produce index of offset of each file record in files.
var files intWriter
var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i
if p.shallow {
fileOffset = make([]uint64, len(p.fileInfos))
for i, info := range p.fileInfos {
fileOffset[i] = uint64(files.Len())
p.encodeFile(&files, info.file, info.needed)
}
}

// Append indices to data0 section.
dataLen := uint64(p.data0.Len())
w := p.newWriter()

@@ -163,16 +175,75 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, ver
}
hdr.uint64(uint64(p.version))
hdr.uint64(uint64(p.strings.Len()))
if p.shallow {
hdr.uint64(uint64(files.Len()))
hdr.uint64(uint64(len(fileOffset)))
for _, offset := range fileOffset {
hdr.uint64(offset)
}
}
hdr.uint64(dataLen)

// Flush output.
io.Copy(out, &hdr)
io.Copy(out, &p.strings)
if p.shallow {
io.Copy(out, &files)
}
io.Copy(out, &p.data0)

return nil
}

// encodeFile writes to w a representation of the file sufficient to
// faithfully restore position information about all needed offsets.
// Mutates the needed array.
func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) {
_ = needed[0] // precondition: needed is non-empty

w.uint64(p.stringOff(file.Name()))

size := uint64(file.Size())
w.uint64(size)

// Sort the set of needed offsets. Duplicates are harmless.
sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })

lines := tokeninternal.GetLines(file) // byte offset of each line start
w.uint64(uint64(len(lines)))

// Rather than record the entire array of line start offsets,
// we save only a sparse list of (index, offset) pairs for
// the start of each line that contains a needed position.
var sparse [][2]int // (index, offset) pairs
outer:
for i, lineStart := range lines {
lineEnd := size
if i < len(lines)-1 {
lineEnd = uint64(lines[i+1])
}
// Does this line contain a needed offset?
if needed[0] < lineEnd {
sparse = append(sparse, [2]int{i, lineStart})
for needed[0] < lineEnd {
needed = needed[1:]
if len(needed) == 0 {
break outer
}
}
}
}

// Delta-encode the columns.
w.uint64(uint64(len(sparse)))
var prev [2]int
for _, pair := range sparse {
w.uint64(uint64(pair[0] - prev[0]))
w.uint64(uint64(pair[1] - prev[1]))
prev = pair
}
}
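The sparse table above is written as deltas so the uvarints stay small. A self-contained sketch of the round trip (encode and decode), independent of intWriter:

package main

import "fmt"

func main() {
    pairs := [][2]int{{3, 40}, {7, 200}, {20, 1000}} // sorted (index, offset)

    // Encode: store only differences from the previous pair.
    var deltas []uint64
    var prev [2]int
    for _, pair := range pairs {
        deltas = append(deltas, uint64(pair[0]-prev[0]), uint64(pair[1]-prev[1]))
        prev = pair
    }

    // Decode: running sums restore the original pairs.
    var index, offset int
    for i := 0; i < len(deltas); i += 2 {
        index += int(deltas[i])
        offset += int(deltas[i+1])
        fmt.Println(index, offset) // 3 40; 7 200; 20 1000
    }
}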
// writeIndex writes out an object index. mainIndex indicates whether
// we're writing out the main index, which is also read by
// non-compiler tools and includes a complete package description

@@ -255,6 +326,12 @@ type iexporter struct {
strings intWriter
stringIndex map[string]uint64

// In shallow mode, object positions are encoded as (file, offset).
// Each file is recorded as a line-number table.
// Only the lines of needed positions are saved faithfully.
fileInfo map[*token.File]uint64 // value is index in fileInfos
fileInfos []*filePositions

data0 intWriter
declIndex map[types.Object]uint64
tparamNames map[types.Object]string // typeparam->exported name

@@ -263,6 +340,11 @@ type iexporter struct {
indent int // for tracing support
}

type filePositions struct {
file *token.File
needed []uint64 // unordered list of needed file offsets
}

func (p *iexporter) trace(format string, args ...interface{}) {
if !trace {
// Call sites should also be guarded, but having this check here allows

@@ -286,6 +368,25 @@ func (p *iexporter) stringOff(s string) uint64 {
return off
}

// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it.
func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) {
index, ok := p.fileInfo[file]
if !ok {
index = uint64(len(p.fileInfo))
p.fileInfos = append(p.fileInfos, &filePositions{file: file})
if p.fileInfo == nil {
p.fileInfo = make(map[*token.File]uint64)
}
p.fileInfo[file] = index
}
// Record each needed offset.
info := p.fileInfos[index]
offset := uint64(file.Offset(pos))
info.needed = append(info.needed, offset)

return index, offset
}

// pushDecl adds n to the declaration work queue, if not already present.
func (p *iexporter) pushDecl(obj types.Object) {
// Package unsafe is known to the compiler and predeclared.

@@ -346,7 +447,13 @@ func (p *iexporter) doDecl(obj types.Object) {
case *types.Func:
sig, _ := obj.Type().(*types.Signature)
if sig.Recv() != nil {
panic(internalErrorf("unexpected method: %v", sig))
// We shouldn't see methods in the package scope,
// but the type checker may repair "func () F() {}"
// to "func (Invalid) F()" and then treat it like "func F()",
// so allow that. See golang/go#57729.
if sig.Recv().Type() != types.Typ[types.Invalid] {
panic(internalErrorf("unexpected method: %v", sig))
}
}

// Function.

@@ -458,13 +565,30 @@ func (w *exportWriter) tag(tag byte) {
}

func (w *exportWriter) pos(pos token.Pos) {
if w.p.version >= iexportVersionPosCol {
if w.p.shallow {
w.posV2(pos)
} else if w.p.version >= iexportVersionPosCol {
w.posV1(pos)
} else {
w.posV0(pos)
}
}

// posV2 encoding (used only in shallow mode) records positions as
// (file, offset), where file is the index in the token.File table
// (which records the file name and newline offsets) and offset is a
// byte offset. It effectively ignores //line directives.
func (w *exportWriter) posV2(pos token.Pos) {
if pos == token.NoPos {
w.uint64(0)
return
}
file := w.p.fset.File(pos) // fset must be non-nil
index, offset := w.p.fileIndexAndOffset(file, pos)
w.uint64(1 + index)
w.uint64(offset)
}

func (w *exportWriter) posV1(pos token.Pos) {
if w.p.fset == nil {
w.int64(0)
@@ -137,12 +137,23 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
}

sLen := int64(r.uint64())
var fLen int64
var fileOffset []uint64
if insert != nil {
// Shallow mode uses a different position encoding.
fLen = int64(r.uint64())
fileOffset = make([]uint64, r.uint64())
for i := range fileOffset {
fileOffset[i] = r.uint64()
}
}
dLen := int64(r.uint64())

whence, _ := r.Seek(0, io.SeekCurrent)
stringData := data[whence : whence+sLen]
declData := data[whence+sLen : whence+sLen+dLen]
r.Seek(sLen+dLen, io.SeekCurrent)
fileData := data[whence+sLen : whence+sLen+fLen]
declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen]
r.Seek(sLen+fLen+dLen, io.SeekCurrent)

p := iimporter{
version: int(version),

@@ -151,6 +162,9 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data

stringData: stringData,
stringCache: make(map[uint64]string),
fileOffset: fileOffset,
fileData: fileData,
fileCache: make([]*token.File, len(fileOffset)),
pkgCache: make(map[uint64]*types.Package),

declData: declData,

@@ -280,6 +294,9 @@ type iimporter struct {

stringData []byte
stringCache map[uint64]string
fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i
fileData []byte
fileCache []*token.File // memoized decoding of file encoded as i
pkgCache map[uint64]*types.Package

declData []byte

@@ -352,6 +369,55 @@ func (p *iimporter) stringAt(off uint64) string {
return s
}

func (p *iimporter) fileAt(index uint64) *token.File {
file := p.fileCache[index]
if file == nil {
off := p.fileOffset[index]
file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath})
p.fileCache[index] = file
}
return file
}

func (p *iimporter) decodeFile(rd intReader) *token.File {
filename := p.stringAt(rd.uint64())
size := int(rd.uint64())
file := p.fake.fset.AddFile(filename, -1, size)

// SetLines requires a nondecreasing sequence.
// Because it is common for clients to derive the interval
// [start, start+len(name)] from a start position, and we
// want to ensure that the end offset is on the same line,
// we fill in the gaps of the sparse encoding with values
// that strictly increase by the largest possible amount.
// This allows us to avoid having to record the actual end
// offset of each needed line.

lines := make([]int, int(rd.uint64()))
var index, offset int
for i, n := 0, int(rd.uint64()); i < n; i++ {
index += int(rd.uint64())
offset += int(rd.uint64())
lines[index] = offset

// Ensure monotonicity between points.
for j := index - 1; j > 0 && lines[j] == 0; j-- {
lines[j] = lines[j+1] - 1
}
}

// Ensure monotonicity after last point.
for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- {
size--
lines[j] = size
}

if !file.SetLines(lines) {
errorf("SetLines failed: %d", lines) // can't happen
}
return file
}
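The gap filling in decodeFile is the subtle part: unset entries before a known line start are filled backwards with strictly decreasing values, and trailing entries count down from the file size, so SetLines always sees a nondecreasing table. A sketch with concrete numbers:

package main

import "fmt"

func main() {
    size := 400
    lines := []int{0, 0, 0, 120, 0, 0, 0} // only line 3's start was needed
    index := 3

    // Backward fill before the known point (as in the decode loop).
    for j := index - 1; j > 0 && lines[j] == 0; j-- {
        lines[j] = lines[j+1] - 1
    }
    // Fill after the last point, counting down from the file size.
    for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- {
        size--
        lines[j] = size
    }
    fmt.Println(lines) // [0 118 119 120 397 398 399]
}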
func (p *iimporter) pkgAt(off uint64) *types.Package {
if pkg, ok := p.pkgCache[off]; ok {
return pkg

@@ -645,6 +711,9 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) {
}

func (r *importReader) pos() token.Pos {
if r.p.insert != nil { // shallow mode
return r.posv2()
}
if r.p.version >= iexportVersionPosCol {
r.posv1()
} else {

@@ -681,6 +750,15 @@ func (r *importReader) posv1() {
}
}

func (r *importReader) posv2() token.Pos {
file := r.uint64()
if file == 0 {
return token.NoPos
}
tf := r.p.fileAt(file - 1)
return tf.Pos(int(r.uint64()))
}

func (r *importReader) typ() types.Type {
return r.p.typAt(r.uint64(), nil)
}
@@ -559,18 +559,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {

named.SetTypeParams(r.typeParamNames())

rhs := r.typ()
pk := r.p
pk.laterFor(named, func() {
// First be sure that the rhs is initialized, if it needs to be initialized.
delete(pk.laterFors, named) // prevent cycles
if i, ok := pk.laterFors[rhs]; ok {
f := pk.laterFns[i]
pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
f() // initialize RHS
}
underlying := rhs.Underlying()

setUnderlying := func(underlying types.Type) {
// If the underlying type is an interface, we need to
// duplicate its methods so we can replace the receiver
// parameter's type (#49906).

@@ -595,7 +584,31 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
}

named.SetUnderlying(underlying)
})
}

// Since go.dev/cl/455279, we can assume rhs.Underlying() will
// always be non-nil. However, to temporarily support users of
// older snapshot releases, we continue to fallback to the old
// behavior for now.
//
// TODO(mdempsky): Remove fallback code and simplify after
// allowing time for snapshot users to upgrade.
rhs := r.typ()
if underlying := rhs.Underlying(); underlying != nil {
setUnderlying(underlying)
} else {
pk := r.p
pk.laterFor(named, func() {
// First be sure that the rhs is initialized, if it needs to be initialized.
delete(pk.laterFors, named) // prevent cycles
if i, ok := pk.laterFors[rhs]; ok {
f := pk.laterFns[i]
pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
f() // initialize RHS
}
setUnderlying(rhs.Underlying())
})
}

for i, n := 0, r.Len(); i < n; i++ {
named.AddMethod(r.method())
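The two paths above differ only in when named.SetUnderlying runs; the go/types API deliberately allows constructing a named type first and completing it later, which is what the deferred path exploits. A runnable sketch of that API contract:

package main

import (
    "fmt"
    "go/token"
    "go/types"
)

func main() {
    pkg := types.NewPackage("example.com/p", "p")
    obj := types.NewTypeName(token.NoPos, pkg, "T", nil)
    named := types.NewNamed(obj, nil, nil) // underlying unknown for now

    // ... later, once the RHS has been fully decoded:
    named.SetUnderlying(types.Typ[types.Int])
    fmt.Println(named, named.Underlying()) // example.com/p.T int
}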
@@ -58,22 +58,24 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags)
}

// GoVersionString reports the go version string as shown in `go version` command output.
// When `go version` outputs in non-standard form, this returns an empty string.
func GoVersionString(ctx context.Context, inv Invocation, r *Runner) (string, error) {
// GoVersionOutput returns the complete output of the go version command.
func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) {
inv.Verb = "version"
goVersion, err := r.Run(ctx, inv)
if err != nil {
return "", err
}
return parseGoVersionOutput(goVersion.Bytes()), nil
return goVersion.String(), nil
}

func parseGoVersionOutput(data []byte) string {
// ParseGoVersionOutput extracts the Go version string
// from the output of the "go version" command.
// Given an unrecognized form, it returns an empty string.
func ParseGoVersionOutput(data string) string {
re := regexp.MustCompile(`^go version (go\S+|devel \S+)`)
m := re.FindSubmatch(data)
m := re.FindStringSubmatch(data)
if len(m) != 2 {
return "" // unrecognized version
}
return string(m[1])
return m[1]
}
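ParseGoVersionOutput in action, as a runnable sketch with a typical `go version` line:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    out := "go version go1.20.1 linux/amd64"
    re := regexp.MustCompile(`^go version (go\S+|devel \S+)`)
    if m := re.FindStringSubmatch(out); len(m) == 2 {
        fmt.Println(m[1]) // go1.20.1
    }
}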
@@ -373,7 +373,7 @@ func (r *Decoder) Int64() int64 {
return r.rawVarint()
}

// Int64 decodes and returns a uint64 value from the element bitstream.
// Uint64 decodes and returns a uint64 value from the element bitstream.
func (r *Decoder) Uint64() uint64 {
r.Sync(SyncUint64)
return r.rawUvarint()

@@ -293,7 +293,7 @@ func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }
// Int encodes and writes an int value into the element bitstream.
func (w *Encoder) Int(x int) { w.Int64(int64(x)) }

// Len encodes and writes a uint value into the element bitstream.
// Uint encodes and writes a uint value into the element bitstream.
func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }

// Reloc encodes and writes a relocation for the given (section,
@@ -0,0 +1,59 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// package tokeninternal provides access to some internal features of the token
// package.
package tokeninternal

import (
"go/token"
"sync"
"unsafe"
)

// GetLines returns the table of line-start offsets from a token.File.
func GetLines(file *token.File) []int {
// token.File has a Lines method on Go 1.21 and later.
if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
return file.Lines()
}

// This declaration must match that of token.File.
// This creates a risk of dependency skew.
// For now we check that the size of the two
// declarations is the same, on the (fragile) assumption
// that future changes would add fields.
type tokenFile119 struct {
_ string
_ int
_ int
mu sync.Mutex // we're not complete monsters
lines []int
_ []struct{}
}
type tokenFile118 struct {
_ *token.FileSet // deleted in go1.19
tokenFile119
}

type uP = unsafe.Pointer
switch unsafe.Sizeof(*file) {
case unsafe.Sizeof(tokenFile118{}):
var ptr *tokenFile118
*(*uP)(uP(&ptr)) = uP(file)
ptr.mu.Lock()
defer ptr.mu.Unlock()
return ptr.lines

case unsafe.Sizeof(tokenFile119{}):
var ptr *tokenFile119
*(*uP)(uP(&ptr)) = uP(file)
ptr.mu.Lock()
defer ptr.mu.Unlock()
return ptr.lines

default:
panic("unexpected token.File size")
}
}
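The unsafe conversion above works by declaring a struct with the same layout as token.File and rewriting the pointer's type; the size switch is the only guard against layout skew. The core trick in a self-contained form (toy types, same mechanics):

package main

import (
    "fmt"
    "unsafe"
)

type hidden struct{ a, b int } // stands in for token.File
type mirror struct{ x, y int } // must match hidden's layout exactly

func main() {
    h := &hidden{1, 2}
    if unsafe.Sizeof(hidden{}) != unsafe.Sizeof(mirror{}) {
        panic("layout skew") // same guard as the size switch above
    }
    var m *mirror
    *(*unsafe.Pointer)(unsafe.Pointer(&m)) = unsafe.Pointer(h)
    fmt.Println(m.x, m.y) // 1 2
}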
@@ -279,6 +279,14 @@ type PickResult struct {
// type, Done may not be called. May be nil if the balancer does not wish
// to be notified when the RPC completes.
Done func(DoneInfo)

// Metadata provides a way for LB policies to inject arbitrary per-call
// metadata. Any metadata returned here will be merged with existing
// metadata added by the client application.
//
// LB policies with child policies are responsible for propagating metadata
// injected by their children to the ClientConn, as part of Pick().
Metatada metadata.MD
}

// TransientFailureError returns e. It exists for backward compatibility and
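A picker returning the new field might look like the sketch below; the picker type and its sc field are hypothetical, and note that this release spells the field Metatada, so callers must match that spelling:

func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
    return balancer.PickResult{
        SubConn:  p.sc, // an already-established SubConn
        Metatada: metadata.Pairs("x-route", "fallback"),
    }, nil
}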
@@ -18,14 +18,13 @@

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.25.0
// protoc-gen-go v1.28.1
// protoc v3.14.0
// source: grpc/binlog/v1/binarylog.proto

package grpc_binarylog_v1

import (
proto "github.com/golang/protobuf/proto"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"

@@ -41,10 +40,6 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// This is a compile-time assertion that a sufficiently up-to-date version
// of the legacy proto package is being used.
const _ = proto.ProtoPackageIsVersion4

// Enumerates the type of event
// Note the terminology is different from the RPC semantics
// definition, but the same meaning is expressed here.
@@ -256,7 +256,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
	if err != nil {
		return nil, err
	}
	cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts)
	cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts)
	if err != nil {
		return nil, err
	}
@@ -788,10 +788,16 @@ func (cc *ClientConn) incrCallsFailed() {
func (ac *addrConn) connect() error {
	ac.mu.Lock()
	if ac.state == connectivity.Shutdown {
		if logger.V(2) {
			logger.Infof("connect called on shutdown addrConn; ignoring.")
		}
		ac.mu.Unlock()
		return errConnClosing
	}
	if ac.state != connectivity.Idle {
		if logger.V(2) {
			logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state)
		}
		ac.mu.Unlock()
		return nil
	}
@@ -928,7 +934,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
	return cc.sc.healthCheckConfig
}

func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) {
	return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
		Ctx: ctx,
		FullMethodName: method,
@@ -1231,9 +1237,11 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
	addr.ServerName = ac.cc.getServerName(addr)
	hctx, hcancel := context.WithCancel(ac.ctx)

	onClose := grpcsync.OnceFunc(func() {
	onClose := func(r transport.GoAwayReason) {
		ac.mu.Lock()
		defer ac.mu.Unlock()
		// adjust params based on GoAwayReason
		ac.adjustParams(r)
		if ac.state == connectivity.Shutdown {
			// Already shut down.  tearDown() already cleared the transport and
			// canceled hctx via ac.ctx, and we expected this connection to be
@@ -1254,20 +1262,17 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
		// Always go idle and wait for the LB policy to initiate a new
		// connection attempt.
		ac.updateConnectivityState(connectivity.Idle, nil)
	})
	onGoAway := func(r transport.GoAwayReason) {
		ac.mu.Lock()
		ac.adjustParams(r)
		ac.mu.Unlock()
		onClose()
	}

	connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
	defer cancel()
	copts.ChannelzParentID = ac.channelzID

	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose)
	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose)
	if err != nil {
		if logger.V(2) {
			logger.Infof("Creating new client transport to %q: %v", addr, err)
		}
		// newTr is either nil, or closed.
		hcancel()
		channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
@@ -1371,7 +1376,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
			if status.Code(err) == codes.Unimplemented {
				channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
			} else {
				channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
				channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err)
			}
		}
	}()
@@ -1582,30 +1587,17 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
}

// parseTarget uses RFC 3986 semantics to parse the given target into a
// resolver.Target struct containing scheme, authority and endpoint. Query
// resolver.Target struct containing scheme, authority and url. Query
// params are stripped from the endpoint.
func parseTarget(target string) (resolver.Target, error) {
	u, err := url.Parse(target)
	if err != nil {
		return resolver.Target{}, err
	}
	// For targets of the form "[scheme]://[authority]/endpoint, the endpoint
	// value returned from url.Parse() contains a leading "/". Although this is
	// in accordance with RFC 3986, we do not want to break existing resolver
	// implementations which expect the endpoint without the leading "/". So, we
	// end up stripping the leading "/" here. But this will result in an
	// incorrect parsing for something like "unix:///path/to/socket". Since we
	// own the "unix" resolver, we can workaround in the unix resolver by using
	// the `URL` field instead of the `Endpoint` field.
	endpoint := u.Path
	if endpoint == "" {
		endpoint = u.Opaque
	}
	endpoint = strings.TrimPrefix(endpoint, "/")

	return resolver.Target{
		Scheme: u.Scheme,
		Authority: u.Host,
		Endpoint: endpoint,
		URL: *u,
	}, nil
}

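An editorial aside, not part of the vendored diff: the hunks above track grpc-go's move from a precomputed `resolver.Target.Endpoint` field to an `Endpoint()` method derived from the parsed `url.URL`. A minimal, self-contained sketch of that derivation (the helper name `endpointFromURL` is illustrative, not a gRPC API):

	package main

	import (
		"fmt"
		"net/url"
		"strings"
	)

	// endpointFromURL mimics the endpoint extraction described in the removed
	// comment block: prefer the URL path, fall back to the opaque part, and
	// strip a single leading "/".
	func endpointFromURL(u *url.URL) string {
		endpoint := u.Path
		if endpoint == "" {
			endpoint = u.Opaque
		}
		return strings.TrimPrefix(endpoint, "/")
	}

	func main() {
		u, _ := url.Parse("dns://8.8.8.8/grpc.example.com:443")
		fmt.Println(endpointFromURL(u)) // grpc.example.com:443
	}
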
@@ -23,9 +23,9 @@ import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net"
	"net/url"
	"os"

	credinternal "google.golang.org/grpc/internal/credentials"
)
@@ -166,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor
// it will override the virtual host name of authority (e.g. :authority header
// field) in requests.
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
	b, err := ioutil.ReadFile(certFile)
	b, err := os.ReadFile(certFile)
	if err != nil {
		return nil, err
	}

@@ -44,6 +44,7 @@ func init() {
		extraDialOptions = nil
	}
	internal.WithBinaryLogger = withBinaryLogger
	internal.JoinDialOptions = newJoinDialOption
}

// dialOptions configure a Dial call. dialOptions are set by the DialOption
@@ -111,13 +112,28 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
	}
}

type joinDialOption struct {
	opts []DialOption
}

func (jdo *joinDialOption) apply(do *dialOptions) {
	for _, opt := range jdo.opts {
		opt.apply(do)
	}
}

func newJoinDialOption(opts ...DialOption) DialOption {
	return &joinDialOption{opts: opts}
}

// WithWriteBufferSize determines how much data can be batched before doing a
// write on the wire. The corresponding memory allocation for this buffer will
// be twice the size to keep syscalls low. The default value for this buffer is
// 32KB.
//
// Zero will disable the write buffer such that each write will be on underlying
// connection. Note: A Send call may not directly translate to a write.
// Zero or negative values will disable the write buffer such that each write
// will be on underlying connection. Note: A Send call may not directly
// translate to a write.
func WithWriteBufferSize(s int) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.copts.WriteBufferSize = s
@@ -127,8 +143,9 @@ func WithWriteBufferSize(s int) DialOption {
// WithReadBufferSize lets you set the size of read buffer, this determines how
// much data can be read at most for each read syscall.
//
// The default value for this buffer is 32KB. Zero will disable read buffer for
// a connection so data framer can access the underlying conn directly.
// The default value for this buffer is 32KB. Zero or negative values will
// disable read buffer for a connection so data framer can access the
// underlying conn directly.
func WithReadBufferSize(s int) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.copts.ReadBufferSize = s

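An editorial aside, not part of the vendored diff: the `joinDialOption` added above is a single option value that applies a whole slice of options in order. A standalone sketch of the same pattern, assuming simplified stand-in types (`Option`, `config` are illustrative, not the gRPC API):

	package main

	import "fmt"

	type config struct{ readBuf, writeBuf int }

	type Option interface{ apply(*config) }

	type funcOption func(*config)

	func (f funcOption) apply(c *config) { f(c) }

	// joinOption applies each contained option in order, so a bundle of
	// options can be passed around as one value.
	type joinOption []Option

	func (j joinOption) apply(c *config) {
		for _, o := range j {
			o.apply(c)
		}
	}

	func main() {
		opts := joinOption{
			funcOption(func(c *config) { c.readBuf = 32 * 1024 }),
			funcOption(func(c *config) { c.writeBuf = 32 * 1024 }),
		}
		var c config
		opts.apply(&c)
		fmt.Printf("%+v\n", c) // {readBuf:32768 writeBuf:32768}
	}
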
@@ -75,7 +75,9 @@ var registeredCompressor = make(map[string]Compressor)
// registered with the same name, the one registered last will take effect.
func RegisterCompressor(c Compressor) {
	registeredCompressor[c.Name()] = c
	grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
	if !grpcutil.IsCompressorNameRegistered(c.Name()) {
		grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
	}
}

// GetCompressor returns Compressor for the given compressor name.

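An editorial aside, not part of the vendored diff: the guard added above makes re-registering a compressor idempotent for the advertised-names list (the map overwrite already was). A minimal sketch of that pattern, with illustrative names:

	package main

	import "fmt"

	var names []string

	func contains(s []string, v string) bool {
		for _, x := range s {
			if x == v {
				return true
			}
		}
		return false
	}

	// register appends a name only once, mirroring the dedup check in the hunk.
	func register(name string) {
		if !contains(names, name) {
			names = append(names, name)
		}
	}

	func main() {
		register("gzip")
		register("gzip") // re-registration no longer duplicates the advertised name
		fmt.Println(names) // [gzip]
	}
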
@@ -22,7 +22,6 @@ import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"strconv"
@@ -140,9 +139,9 @@ func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config)
// newLoggerV2 creates a loggerV2 to be used as default logger.
// All logs are written to stderr.
func newLoggerV2() LoggerV2 {
	errorW := ioutil.Discard
	warningW := ioutil.Discard
	infoW := ioutil.Discard
	errorW := io.Discard
	warningW := io.Discard
	infoW := io.Discard

	logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
	switch logLevel {

@@ -26,7 +26,7 @@ import (

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)
@@ -79,7 +79,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
// Build is an internal only method for building the proto message out of the
// input event. It's made public to enable other library to reuse as much logic
// in TruncatingMethodLogger as possible.
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
	m := c.toProto()
	timestamp, _ := ptypes.TimestampProto(time.Now())
	m.Timestamp = timestamp
@@ -87,11 +87,11 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry {
	m.SequenceIdWithinCall = ml.idWithinCallGen.next()

	switch pay := m.Payload.(type) {
	case *pb.GrpcLogEntry_ClientHeader:
	case *binlogpb.GrpcLogEntry_ClientHeader:
		m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
	case *pb.GrpcLogEntry_ServerHeader:
	case *binlogpb.GrpcLogEntry_ServerHeader:
		m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
	case *pb.GrpcLogEntry_Message:
	case *binlogpb.GrpcLogEntry_Message:
		m.PayloadTruncated = ml.truncateMessage(pay.Message)
	}
	return m
@@ -102,7 +102,7 @@ func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) {
	ml.sink.Write(ml.Build(c))
}

func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
	if ml.headerMaxLen == maxUInt {
		return false
	}
@@ -121,7 +121,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated
			// but not counted towards the size limit.
			continue
		}
		currentEntryLen := uint64(len(entry.Value))
		currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue()))
		if currentEntryLen > bytesLimit {
			break
		}
@@ -132,7 +132,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated
	return truncated
}

func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
	if ml.messageMaxLen == maxUInt {
		return false
	}
@@ -145,7 +145,7 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated

// LogEntryConfig represents the configuration for binary log entry.
type LogEntryConfig interface {
	toProto() *pb.GrpcLogEntry
	toProto() *binlogpb.GrpcLogEntry
}

// ClientHeader configs the binary log entry to be a ClientHeader entry.
@@ -159,10 +159,10 @@ type ClientHeader struct {
	PeerAddr net.Addr
}

func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
	// This function doesn't need to set all the fields (e.g. seq ID). The Log
	// function will set the fields when necessary.
	clientHeader := &pb.ClientHeader{
	clientHeader := &binlogpb.ClientHeader{
		Metadata: mdToMetadataProto(c.Header),
		MethodName: c.MethodName,
		Authority: c.Authority,
@@ -170,16 +170,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
	if c.Timeout > 0 {
		clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
	}
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Payload: &pb.GrpcLogEntry_ClientHeader{
	ret := &binlogpb.GrpcLogEntry{
		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Payload: &binlogpb.GrpcLogEntry_ClientHeader{
			ClientHeader: clientHeader,
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	if c.PeerAddr != nil {
		ret.Peer = addrToProto(c.PeerAddr)
@@ -195,19 +195,19 @@ type ServerHeader struct {
	PeerAddr net.Addr
}

func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
		Payload: &pb.GrpcLogEntry_ServerHeader{
			ServerHeader: &pb.ServerHeader{
func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
	ret := &binlogpb.GrpcLogEntry{
		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
		Payload: &binlogpb.GrpcLogEntry_ServerHeader{
			ServerHeader: &binlogpb.ServerHeader{
				Metadata: mdToMetadataProto(c.Header),
			},
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	if c.PeerAddr != nil {
		ret.Peer = addrToProto(c.PeerAddr)
@@ -223,7 +223,7 @@ type ClientMessage struct {
	Message interface{}
}

func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
	var (
		data []byte
		err error
@@ -238,19 +238,19 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
	} else {
		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
	}
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
		Payload: &pb.GrpcLogEntry_Message{
			Message: &pb.Message{
	ret := &binlogpb.GrpcLogEntry{
		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
		Payload: &binlogpb.GrpcLogEntry_Message{
			Message: &binlogpb.Message{
				Length: uint32(len(data)),
				Data: data,
			},
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	return ret
}
@@ -263,7 +263,7 @@ type ServerMessage struct {
	Message interface{}
}

func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
	var (
		data []byte
		err error
@@ -278,19 +278,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
	} else {
		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
	}
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
		Payload: &pb.GrpcLogEntry_Message{
			Message: &pb.Message{
	ret := &binlogpb.GrpcLogEntry{
		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
		Payload: &binlogpb.GrpcLogEntry_Message{
			Message: &binlogpb.Message{
				Length: uint32(len(data)),
				Data: data,
			},
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	return ret
}
@@ -300,15 +300,15 @@ type ClientHalfClose struct {
	OnClientSide bool
}

func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry {
	ret := &binlogpb.GrpcLogEntry{
		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
		Payload: nil, // No payload here.
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	return ret
}
@@ -324,7 +324,7 @@ type ServerTrailer struct {
	PeerAddr net.Addr
}

func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry {
	st, ok := status.FromError(c.Err)
	if !ok {
		grpclogLogger.Info("binarylogging: error in trailer is not a status error")
@@ -340,10 +340,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
			grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
		}
	}
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
		Payload: &pb.GrpcLogEntry_Trailer{
			Trailer: &pb.Trailer{
	ret := &binlogpb.GrpcLogEntry{
		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
		Payload: &binlogpb.GrpcLogEntry_Trailer{
			Trailer: &binlogpb.Trailer{
				Metadata: mdToMetadataProto(c.Trailer),
				StatusCode: uint32(st.Code()),
				StatusMessage: st.Message(),
@@ -352,9 +352,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
		},
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	if c.PeerAddr != nil {
		ret.Peer = addrToProto(c.PeerAddr)
@@ -367,15 +367,15 @@ type Cancel struct {
	OnClientSide bool
}

func (c *Cancel) toProto() *pb.GrpcLogEntry {
	ret := &pb.GrpcLogEntry{
		Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
func (c *Cancel) toProto() *binlogpb.GrpcLogEntry {
	ret := &binlogpb.GrpcLogEntry{
		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
		Payload: nil,
	}
	if c.OnClientSide {
		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
	} else {
		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
	}
	return ret
}
@@ -392,15 +392,15 @@ func metadataKeyOmit(key string) bool {
	return strings.HasPrefix(key, "grpc-")
}

func mdToMetadataProto(md metadata.MD) *pb.Metadata {
	ret := &pb.Metadata{}
func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata {
	ret := &binlogpb.Metadata{}
	for k, vv := range md {
		if metadataKeyOmit(k) {
			continue
		}
		for _, v := range vv {
			ret.Entry = append(ret.Entry,
				&pb.MetadataEntry{
				&binlogpb.MetadataEntry{
					Key: k,
					Value: []byte(v),
				},
@@ -410,26 +410,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata {
	return ret
}

func addrToProto(addr net.Addr) *pb.Address {
	ret := &pb.Address{}
func addrToProto(addr net.Addr) *binlogpb.Address {
	ret := &binlogpb.Address{}
	switch a := addr.(type) {
	case *net.TCPAddr:
		if a.IP.To4() != nil {
			ret.Type = pb.Address_TYPE_IPV4
			ret.Type = binlogpb.Address_TYPE_IPV4
		} else if a.IP.To16() != nil {
			ret.Type = pb.Address_TYPE_IPV6
			ret.Type = binlogpb.Address_TYPE_IPV6
		} else {
			ret.Type = pb.Address_TYPE_UNKNOWN
			ret.Type = binlogpb.Address_TYPE_UNKNOWN
			// Do not set address and port fields.
			break
		}
		ret.Address = a.IP.String()
		ret.IpPort = uint32(a.Port)
	case *net.UnixAddr:
		ret.Type = pb.Address_TYPE_UNIX
		ret.Type = binlogpb.Address_TYPE_UNIX
		ret.Address = a.String()
	default:
		ret.Type = pb.Address_TYPE_UNKNOWN
		ret.Type = binlogpb.Address_TYPE_UNKNOWN
	}
	return ret
}

@@ -26,7 +26,7 @@ import (
	"time"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

var (
@@ -42,15 +42,15 @@ type Sink interface {
	// Write will be called to write the log entry into the sink.
	//
	// It should be thread-safe so it can be called in parallel.
	Write(*pb.GrpcLogEntry) error
	Write(*binlogpb.GrpcLogEntry) error
	// Close will be called when the Sink is replaced by a new Sink.
	Close() error
}

type noopSink struct{}

func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
func (ns *noopSink) Close() error { return nil }
func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
func (ns *noopSink) Close() error { return nil }

// newWriterSink creates a binary log sink with the given writer.
//
@@ -66,7 +66,7 @@ type writerSink struct {
	out io.Writer
}

func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
	b, err := proto.Marshal(e)
	if err != nil {
		grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
@@ -96,7 +96,7 @@ type bufferedSink struct {
	done chan struct{}
}

func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
	fs.mu.Lock()
	defer fs.mu.Unlock()
	if !fs.flusherStarted {

@@ -21,19 +21,42 @@ package envconfig

import (
	"os"
	"strconv"
	"strings"
)

const (
	prefix                  = "GRPC_GO_"
	txtErrIgnoreStr         = prefix + "IGNORE_TXT_ERRORS"
	advertiseCompressorsStr = prefix + "ADVERTISE_COMPRESSORS"
)

var (
	// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
	TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
	TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
	// AdvertiseCompressors is set if registered compressor should be advertised
	// ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
	AdvertiseCompressors = !strings.EqualFold(os.Getenv(advertiseCompressorsStr), "false")
	AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
	// RingHashCap indicates the maximum ring size which defaults to 4096
	// entries but may be overridden by setting the environment variable
	// "GRPC_RING_HASH_CAP". This does not override the default bounds
	// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
	RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
)

func boolFromEnv(envVar string, def bool) bool {
	if def {
		// The default is true; return true unless the variable is "false".
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	// The default is false; return false unless the variable is "true".
	return strings.EqualFold(os.Getenv(envVar), "true")
}

func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
	v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
	if err != nil {
		return def
	}
	if v < min {
		return min
	}
	if v > max {
		return max
	}
	return v
}

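An editorial aside, not part of the vendored diff: the helpers above centralize the env-var conventions. With def=true a variable must be exactly "false" (case-insensitively) to turn a flag off; with def=false it must be "true" to turn one on; uint64FromEnv clamps parsed values to [min, max]. A self-contained demonstration, restating the code from the hunk:

	package main

	import (
		"fmt"
		"os"
		"strconv"
		"strings"
	)

	func boolFromEnv(envVar string, def bool) bool {
		if def {
			return !strings.EqualFold(os.Getenv(envVar), "false")
		}
		return strings.EqualFold(os.Getenv(envVar), "true")
	}

	func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
		v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
		if err != nil {
			return def
		}
		if v < min {
			return min
		}
		if v > max {
			return max
		}
		return v
	}

	func main() {
		os.Setenv("GRPC_RING_HASH_CAP", "99999999") // above the 8M cap
		fmt.Println(uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)) // 8388608
		os.Setenv("GRPC_GO_IGNORE_TXT_ERRORS", "FALSE") // case-insensitive match
		fmt.Println(boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)) // false
	}
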
@@ -20,7 +20,6 @@ package envconfig

import (
	"os"
	"strings"
)

const (
@@ -36,16 +35,6 @@ const (
	//
	// When both bootstrap FileName and FileContent are set, FileName is used.
	XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"

	ringHashSupportEnv           = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH"
	clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
	aggregateAndDNSSupportEnv    = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
	rbacSupportEnv               = "GRPC_XDS_EXPERIMENTAL_RBAC"
	outlierDetectionSupportEnv   = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION"
	federationEnv                = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
	rlsInXDSEnv                  = "GRPC_EXPERIMENTAL_XDS_RLS_LB"

	c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
)

var (
@@ -64,38 +53,40 @@ var (
	// XDSRingHash indicates whether ring hash support is enabled, which can be
	// disabled by setting the environment variable
	// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
	XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false")
	XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
	// XDSClientSideSecurity is used to control processing of security
	// configuration on the client-side.
	//
	// Note that there is no env var protection for the server-side because we
	// have a brand new API on the server-side and users explicitly need to use
	// the new API to get security integration on the server.
	XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false")
	XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
	// XDSAggregateAndDNS indicates whether processing of aggregated cluster
	// and DNS cluster is enabled, which can be enabled by setting the
	// environment variable
	// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
	// "true".
	XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false")
	XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)

	// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
	// which can be disabled by setting the environment variable
	// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
	XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
	XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
	// XDSOutlierDetection indicates whether outlier detection support is
	// enabled, which can be disabled by setting the environment variable
	// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
	XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false")
	// XDSFederation indicates whether federation support is enabled.
	XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
	XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
	// XDSFederation indicates whether federation support is enabled, which can
	// be enabled by setting the environment variable
	// "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
	XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false)

	// XDSRLS indicates whether processing of Cluster Specifier plugins and
	// support for the RLS CLuster Specifier is enabled, which can be enabled by
	// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
	// "true".
	XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true")
	XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false)

	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
)

@@ -77,6 +77,9 @@ var (
	// ClearGlobalDialOptions clears the array of extra DialOption. This
	// method is useful in testing and benchmarking.
	ClearGlobalDialOptions func()
	// JoinDialOptions combines the dial options passed as arguments into a
	// single dial option.
	JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption
	// JoinServerOptions combines the server options passed as arguments into a
	// single server option.
	JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption

@@ -116,7 +116,7 @@ type dnsBuilder struct{}

// Build creates and starts a DNS resolver that watches the name resolution of the target.
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	host, port, err := parseTarget(target.Endpoint, defaultPort)
	host, port, err := parseTarget(target.Endpoint(), defaultPort)
	if err != nil {
		return nil, err
	}
@@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
		disableServiceConfig: opts.DisableServiceConfig,
	}

	if target.Authority == "" {
	if target.URL.Host == "" {
		d.resolver = defaultResolver
	} else {
		d.resolver, err = customAuthorityResolver(target.Authority)
		d.resolver, err = customAuthorityResolver(target.URL.Host)
		if err != nil {
			return nil, err
		}

@@ -20,13 +20,20 @@
// name without scheme back to gRPC as resolved address.
package passthrough

import "google.golang.org/grpc/resolver"
import (
	"errors"

	"google.golang.org/grpc/resolver"
)

const scheme = "passthrough"

type passthroughBuilder struct{}

func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	if target.Endpoint() == "" && opts.Dialer == nil {
		return nil, errors.New("passthrough: received empty target in Build()")
	}
	r := &passthroughResolver{
		target: target,
		cc: cc,
@@ -45,7 +52,7 @@ type passthroughResolver struct {
}

func (r *passthroughResolver) start() {
	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
}

func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}

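An editorial aside, not part of the vendored diff: the new guard above rejects an empty passthrough target unless a custom dialer is configured. A standalone sketch of the same validation, with illustrative stand-in types:

	package main

	import (
		"errors"
		"fmt"
	)

	type buildOpts struct {
		dialerSet bool // stands in for resolver.BuildOptions.Dialer != nil
	}

	// validatePassthroughTarget mirrors the empty-target check in the hunk.
	func validatePassthroughTarget(endpoint string, opts buildOpts) error {
		if endpoint == "" && !opts.dialerSet {
			return errors.New("passthrough: received empty target in Build()")
		}
		return nil
	}

	func main() {
		fmt.Println(validatePassthroughTarget("", buildOpts{}))                 // error
		fmt.Println(validatePassthroughTarget("localhost:50051", buildOpts{})) // <nil>
	}
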
@@ -34,8 +34,8 @@ type builder struct {
}

func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	if target.Authority != "" {
		return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority)
	if target.URL.Host != "" {
		return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host)
	}

	// gRPC was parsing the dial target manually before PR #4817, and we

@@ -191,7 +191,7 @@ type goAway struct {
	code http2.ErrCode
	debugData []byte
	headsUp bool
	closeConn bool
	closeConn error // if set, loopyWriter will exit, resulting in conn closure
}

func (*goAway) isTransportResponseFrame() bool { return false }
@@ -209,6 +209,14 @@ type outFlowControlSizeRequest struct {

func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }

// closeConnection is an instruction to tell the loopy writer to flush the
// framer and exit, which will cause the transport's connection to be closed
// (by the client or server). The transport itself will close after the reader
// encounters the EOF caused by the connection closure.
type closeConnection struct{}

func (closeConnection) isTransportResponseFrame() bool { return false }

type outStreamState int

const (
@@ -408,7 +416,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) {
		select {
		case <-c.ch:
		case <-c.done:
			return nil, ErrConnClosing
			return nil, errors.New("transport closed by client")
		}
	}
}
@@ -519,18 +527,9 @@ const minBatchSize = 1000
// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
// if the batch size is too low to give stream goroutines a chance to fill it up.
func (l *loopyWriter) run() (err error) {
	defer func() {
		if err == ErrConnClosing {
			// Don't log ErrConnClosing as error since it happens
			// 1. When the connection is closed by some other known issue.
			// 2. User closed the connection.
			// 3. A graceful close of connection.
			if logger.V(logLevel) {
				logger.Infof("transport: loopyWriter.run returning. %v", err)
			}
			err = nil
		}
	}()
	// Always flush the writer before exiting in case there are pending frames
	// to be sent.
	defer l.framer.writer.Flush()
	for {
		it, err := l.cbuf.get(true)
		if err != nil {
@@ -574,7 +573,6 @@ func (l *loopyWriter) run() (err error) {
			}
			l.framer.writer.Flush()
			break hasdata

		}
	}
}
@@ -655,19 +653,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
		itl: &itemList{},
		wq: h.wq,
	}
	str.itl.enqueue(h)
	return l.originateStream(str)
	return l.originateStream(str, h)
}

func (l *loopyWriter) originateStream(str *outStream) error {
	hdr := str.itl.dequeue().(*headerFrame)
	if err := hdr.initStream(str.id); err != nil {
		if err == ErrConnClosing {
			return err
		}
		// Other errors(errStreamDrain) need not close transport.
func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
	// l.draining is set when handling GoAway. In which case, we want to avoid
	// creating new streams.
	if l.draining {
		// TODO: provide a better error with the reason we are in draining.
		hdr.onOrphaned(errStreamDrain)
		return nil
	}
	if err := hdr.initStream(str.id); err != nil {
		return err
	}
	if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
		return err
	}
@@ -763,8 +762,8 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
			return err
		}
	}
	if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
		return ErrConnClosing
	if l.draining && len(l.estdStreams) == 0 {
		return errors.New("finished processing active streams while in draining mode")
	}
	return nil
}
@@ -799,7 +798,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
	if l.side == clientSide {
		l.draining = true
		if len(l.estdStreams) == 0 {
			return ErrConnClosing
			return errors.New("received GOAWAY with no active streams")
		}
	}
	return nil
@@ -817,6 +816,13 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error {
	return nil
}

func (l *loopyWriter) closeConnectionHandler() error {
	// Exit loopyWriter entirely by returning an error here. This will lead to
	// the transport closing the connection, and, ultimately, transport
	// closure.
	return ErrConnClosing
}

func (l *loopyWriter) handle(i interface{}) error {
	switch i := i.(type) {
	case *incomingWindowUpdate:
@@ -845,6 +851,8 @@ func (l *loopyWriter) handle(i interface{}) error {
		return l.goAwayHandler(i)
	case *outFlowControlSizeRequest:
		return l.outFlowControlSizeRequestHandler(i)
	case closeConnection:
		return l.closeConnectionHandler()
	default:
		return fmt.Errorf("transport: unknown control message type %T", i)
	}

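An editorial aside, not part of the vendored diff: `closeConnection` above is a sentinel control message; handling it returns an error from the writer loop, which the caller treats as "flush and close the connection". A minimal sketch of the pattern with a hypothetical control queue:

	package main

	import (
		"errors"
		"fmt"
	)

	var errConnClosing = errors.New("transport: connection is closing")

	type closeConnection struct{}

	// handle dispatches one control message; a closeConnection makes the
	// writer loop exit by returning an error.
	func handle(msg interface{}) error {
		switch msg.(type) {
		case closeConnection:
			return errConnClosing
		default:
			return nil
		}
	}

	func run(queue []interface{}) error {
		for _, m := range queue {
			if err := handle(m); err != nil {
				return err // the caller then closes the underlying conn
			}
		}
		return nil
	}

	func main() {
		fmt.Println(run([]interface{}{"data frame", closeConnection{}}))
	}
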
@@ -47,3 +47,9 @@ const (
	defaultClientMaxHeaderListSize = uint32(16 << 20)
	defaultServerMaxHeaderListSize = uint32(16 << 20)
)

// MaxStreamID is the upper bound for the stream ID before the current
// transport gracefully closes and new transport is created for subsequent RPCs.
// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit
// integer. It's exported so that tests can override it.
var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)

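An editorial aside, not part of the vendored diff: MaxStreamID works out to 1,610,612,735. Go evaluates `math.MaxInt32 * 3 / 4` with untyped, arbitrary-precision constants, so the multiply cannot overflow before the division:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		// 2147483647 * 3 / 4 = 1610612735 (exact constant arithmetic,
		// then converted to uint32).
		fmt.Println(uint32(math.MaxInt32 * 3 / 4)) // 1610612735
		// Client-initiated stream IDs are odd and advance by 2, so a
		// connection can carry roughly 805 million RPCs before draining.
		fmt.Println(uint32(math.MaxInt32*3/4)/2 + 1) // 805306368
	}
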
@@ -46,24 +46,32 @@ import (
	"google.golang.org/grpc/status"
)

// NewServerHandlerTransport returns a ServerTransport handling gRPC
// from inside an http.Handler. It requires that the http Server
// supports HTTP/2.
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
// inside an http.Handler, or writes an HTTP error to w and returns an error.
// It requires that the http Server supports HTTP/2.
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
	if r.ProtoMajor != 2 {
		return nil, errors.New("gRPC requires HTTP/2")
		msg := "gRPC requires HTTP/2"
		http.Error(w, msg, http.StatusBadRequest)
		return nil, errors.New(msg)
	}
	if r.Method != "POST" {
		return nil, errors.New("invalid gRPC request method")
		msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
		http.Error(w, msg, http.StatusBadRequest)
		return nil, errors.New(msg)
	}
	contentType := r.Header.Get("Content-Type")
	// TODO: do we assume contentType is lowercase? we did before
	contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
	if !validContentType {
		return nil, errors.New("invalid gRPC request content-type")
		msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType)
		http.Error(w, msg, http.StatusUnsupportedMediaType)
		return nil, errors.New(msg)
	}
	if _, ok := w.(http.Flusher); !ok {
		return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
		msg := "gRPC requires a ResponseWriter supporting http.Flusher"
		http.Error(w, msg, http.StatusInternalServerError)
		return nil, errors.New(msg)
	}

	st := &serverHandlerTransport{
@@ -79,7 +87,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
	if v := r.Header.Get("grpc-timeout"); v != "" {
		to, err := decodeTimeout(v)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
			msg := fmt.Sprintf("malformed grpc-timeout: %v", err)
			http.Error(w, msg, http.StatusBadRequest)
			return nil, status.Error(codes.Internal, msg)
		}
		st.timeoutSet = true
		st.timeout = to
@@ -97,7 +107,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
		for _, v := range vv {
			v, err := decodeMetadataHeader(k, v)
			if err != nil {
				return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
				msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err)
				http.Error(w, msg, http.StatusBadRequest)
				return nil, status.Error(codes.Internal, msg)
			}
			metakv = append(metakv, k, v)
		}
@@ -141,12 +153,15 @@ type serverHandlerTransport struct {
	stats []stats.Handler
}

func (ht *serverHandlerTransport) Close() {
	ht.closeOnce.Do(ht.closeCloseChanOnce)
func (ht *serverHandlerTransport) Close(err error) {
	ht.closeOnce.Do(func() {
		if logger.V(logLevel) {
			logger.Infof("Closing serverHandlerTransport: %v", err)
		}
		close(ht.closedCh)
	})
}

func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }

func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }

// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
@@ -236,7 +251,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
		})
	}
	ht.Close()
	ht.Close(errors.New("finished writing status"))
	return err
}

@@ -346,7 +361,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
		case <-ht.req.Context().Done():
		}
		cancel()
		ht.Close()
		ht.Close(errors.New("request is done processing"))
	}()

	req := ht.req

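An editorial aside, not part of the vendored diff: the hunks above change NewServerHandlerTransport to report problems both to the HTTP client (via http.Error) and to the caller. A minimal sketch of that dual-reporting shape, using only standard-library calls:

	package main

	import (
		"errors"
		"fmt"
		"net/http"
		"net/http/httptest"
	)

	// validate writes an HTTP error response and also returns the error,
	// mirroring the pattern in the hunk.
	func validate(w http.ResponseWriter, r *http.Request) error {
		if r.ProtoMajor != 2 {
			msg := "gRPC requires HTTP/2"
			http.Error(w, msg, http.StatusBadRequest)
			return errors.New(msg)
		}
		if r.Method != http.MethodPost {
			msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
			http.Error(w, msg, http.StatusBadRequest)
			return errors.New(msg)
		}
		return nil
	}

	func main() {
		rec := httptest.NewRecorder()
		req := httptest.NewRequest(http.MethodGet, "/svc/Method", nil)
		req.ProtoMajor = 2
		fmt.Println(validate(rec, req))      // invalid gRPC request method "GET"
		fmt.Println(rec.Code)                // 400
	}
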
@@ -59,11 +59,15 @@ var clientConnectionCounter uint64

// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
	lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
	ctx context.Context
	cancel context.CancelFunc
	ctxDone <-chan struct{} // Cache the ctx.Done() chan.
	userAgent string
	lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
	ctx context.Context
	cancel context.CancelFunc
	ctxDone <-chan struct{} // Cache the ctx.Done() chan.
	userAgent string
	// address contains the resolver returned address for this transport.
	// If the `ServerName` field is set, it takes precedence over `CallHdr.Host`
	// passed to `NewStream`, when determining the :authority header.
	address resolver.Address
	md metadata.MD
	conn net.Conn // underlying communication channel
	loopy *loopyWriter
@@ -136,8 +140,7 @@ type http2Client struct {
	channelzID *channelz.Identifier
	czData *channelzData

	onGoAway func(GoAwayReason)
	onClose func()
	onClose func(GoAwayReason)

	bufferPool *bufferPool

@@ -193,7 +196,7 @@ func isTemporary(err error) bool {
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
	scheme := "http"
	ctx, cancel := context.WithCancel(ctx)
	defer func() {
@@ -213,7 +216,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
		if opts.FailOnNonTempDialError {
			return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
		}
		return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
		return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err)
	}

	// Any further errors will close the underlying connection
@@ -238,8 +241,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
	go func(conn net.Conn) {
		defer ctxMonitorDone.Fire() // Signal this goroutine has exited.
		<-newClientCtx.Done() // Block until connectCtx expires or the defer above executes.
		if connectCtx.Err() != nil {
		if err := connectCtx.Err(); err != nil {
			// connectCtx expired before exiting the function. Hard close the connection.
			if logger.V(logLevel) {
				logger.Infof("newClientTransport: aborting due to connectCtx: %v", err)
			}
			conn.Close()
		}
	}(conn)
@@ -314,6 +320,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
		cancel: cancel,
		userAgent: opts.UserAgent,
		registeredCompressors: grpcutil.RegisteredCompressors(),
		address: addr,
		conn: conn,
		remoteAddr: conn.RemoteAddr(),
		localAddr: conn.LocalAddr(),
@@ -335,7 +342,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
		streamQuota: defaultMaxStreamsClient,
		streamsQuotaAvailable: make(chan struct{}, 1),
		czData: new(channelzData),
		onGoAway: onGoAway,
		keepaliveEnabled: keepaliveEnabled,
		bufferPool: newBufferPool(),
		onClose: onClose,
@@ -440,10 +446,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
	go func() {
		t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
		err := t.loopy.run()
		if err != nil {
			if logger.V(logLevel) {
				logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
			}
		if logger.V(logLevel) {
			logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err)
		}
		// Do not close the transport. Let reader goroutine handle it since
		// there might be data in the buffers.
@@ -702,6 +706,18 @@ func (e NewStreamError) Error() string {
// streams. All non-nil errors returned will be *NewStreamError.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
	ctx = peer.NewContext(ctx, t.getPeer())

	// ServerName field of the resolver returned address takes precedence over
	// Host field of CallHdr to determine the :authority header. This is because,
	// the ServerName field takes precedence for server authentication during
	// TLS handshake, and the :authority header should match the value used
	// for server authentication.
	if t.address.ServerName != "" {
		newCallHdr := *callHdr
		newCallHdr.Host = t.address.ServerName
		callHdr = &newCallHdr
	}

	headerFields, err := t.createHeaderFields(ctx, callHdr)
	if err != nil {
		return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
@@ -726,15 +742,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
		endStream: false,
		initStream: func(id uint32) error {
			t.mu.Lock()
			if state := t.state; state != reachable {
			// TODO: handle transport closure in loopy instead and remove this
			// initStream is never called when transport is draining.
			if t.state == closing {
				t.mu.Unlock()
				// Do a quick cleanup.
				err := error(errStreamDrain)
				if state == closing {
					err = ErrConnClosing
				}
				cleanup(err)
				return err
				cleanup(ErrConnClosing)
				return ErrConnClosing
			}
			if channelz.IsOn() {
				atomic.AddInt64(&t.czData.streamsStarted, 1)
@@ -752,6 +765,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
	}
	firstTry := true
	var ch chan struct{}
	transportDrainRequired := false
	checkForStreamQuota := func(it interface{}) bool {
		if t.streamQuota <= 0 { // Can go negative if server decreases it.
			if firstTry {
@@ -767,6 +781,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
		h := it.(*headerFrame)
		h.streamID = t.nextID
		t.nextID += 2

		// Drain client transport if nextID > MaxStreamID which signals gRPC that
		// the connection is closed and a new one must be created for subsequent RPCs.
		transportDrainRequired = t.nextID > MaxStreamID

		s.id = h.streamID
		s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
		t.mu.Lock()
@@ -846,6 +865,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
			sh.HandleRPC(s.ctx, outHeader)
		}
	}
	if transportDrainRequired {
		if logger.V(logLevel) {
			logger.Infof("transport: t.nextID > MaxStreamID. Draining")
		}
		t.GracefulClose()
	}
	return s, nil
}

@@ -934,9 +959,14 @@ func (t *http2Client) Close(err error) {
		t.mu.Unlock()
		return
	}
	if logger.V(logLevel) {
		logger.Infof("transport: closing: %v", err)
	}
	// Call t.onClose ASAP to prevent the client from attempting to create new
	// streams.
	t.onClose()
	if t.state != draining {
		t.onClose(GoAwayInvalid)
	}
	t.state = closing
	streams := t.activeStreams
	t.activeStreams = nil
@@ -986,11 +1016,15 @@ func (t *http2Client) GracefulClose() {
		t.mu.Unlock()
		return
	}
	if logger.V(logLevel) {
		logger.Infof("transport: GracefulClose called")
	}
	t.onClose(GoAwayInvalid)
	t.state = draining
	active := len(t.activeStreams)
	t.mu.Unlock()
	if active == 0 {
		t.Close(ErrConnClosing)
		t.Close(connectionErrorf(true, nil, "no active streams left to process while draining"))
		return
	}
	t.controlBuf.put(&incomingGoAway{})
@@ -1148,7 +1182,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
	statusCode, ok := http2ErrConvTab[f.ErrCode]
	if !ok {
		if logger.V(logLevel) {
			logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
			logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error: %v", f.ErrCode)
		}
		statusCode = codes.Unknown
	}
@@ -1266,8 +1300,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
		// Notify the clientconn about the GOAWAY before we set the state to
		// draining, to allow the client to stop attempting to create streams
		// before disallowing new streams on this connection.
		t.onGoAway(t.goAwayReason)
		t.state = draining
		if t.state != draining {
			t.onClose(t.goAwayReason)
			t.state = draining
		}
	}
	// All streams with IDs greater than the GoAwayId
	// and smaller than the previous GoAway ID should be killed.
@@ -1756,3 +1792,9 @@ func (t *http2Client) getOutFlowWindow() int64 {
		return -2
	}
}

func (t *http2Client) stateForTesting() transportState {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.state
}

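An editorial aside, not part of the vendored diff: the client transport now takes a single onClose callback that receives a GoAwayReason, replacing the separate onGoAway and onClose hooks. A sketch of that consolidation with simplified stand-in types:

	package main

	import "fmt"

	type GoAwayReason int

	const (
		GoAwayInvalid GoAwayReason = iota
		GoAwayNoReason
		GoAwayTooManyPings
	)

	type transport struct {
		onClose func(GoAwayReason)
	}

	func (t *transport) handleGoAway(r GoAwayReason) {
		// One callback covers both paths: a real GOAWAY carries its reason...
		t.onClose(r)
	}

	func (t *transport) close() {
		// ...and a plain close reports GoAwayInvalid.
		t.onClose(GoAwayInvalid)
	}

	func main() {
		t := &transport{onClose: func(r GoAwayReason) { fmt.Println("closed, reason:", r) }}
		t.handleGoAway(GoAwayTooManyPings)
		t.close()
	}
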
@@ -21,6 +21,7 @@ package transport
import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"math"
@@ -41,6 +42,7 @@ import (
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/internal/channelz"
	"google.golang.org/grpc/internal/grpcrand"
	"google.golang.org/grpc/internal/grpcsync"
	"google.golang.org/grpc/keepalive"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
@@ -101,13 +103,13 @@ type http2Server struct {

	mu sync.Mutex // guard the following

	// drainChan is initialized when Drain() is called the first time.
	// After which the server writes out the first GoAway(with ID 2^31-1) frame.
	// Then an independent goroutine will be launched to later send the second GoAway.
	// During this time we don't want to write another first GoAway(with ID 2^31 -1) frame.
	// Thus call to Drain() will be a no-op if drainChan is already initialized since draining is
	// already underway.
	drainChan chan struct{}
	// drainEvent is initialized when Drain() is called the first time. After
	// which the server writes out the first GoAway(with ID 2^31-1) frame. Then
	// an independent goroutine will be launched to later send the second
	// GoAway. During this time we don't want to write another first GoAway(with
	// ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is
	// already initialized since draining is already underway.
	drainEvent *grpcsync.Event
	state transportState
	activeStreams map[uint32]*Stream
	// idle is the time instant when the connection went idle.
@@ -293,7 +295,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,

	defer func() {
		if err != nil {
			t.Close()
			t.Close(err)
		}
	}()

@@ -331,10 +333,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
	go func() {
		t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
		t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
		if err := t.loopy.run(); err != nil {
			if logger.V(logLevel) {
				logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
			}
		err := t.loopy.run()
		if logger.V(logLevel) {
			logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err)
		}
		t.conn.Close()
		t.controlBuf.finish()
@@ -344,8 +345,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
	return t, nil
}

// operateHeader takes action on the decoded headers.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
// operateHeaders takes action on the decoded headers. Returns an error if fatal
// error encountered and transport needs to close, otherwise returns nil.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error {
	// Acquire max stream ID lock for entire duration
	t.maxStreamMu.Lock()
	defer t.maxStreamMu.Unlock()
@@ -361,15 +363,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
			rstCode: http2.ErrCodeFrameSize,
			onWrite: func() {},
		})
		return false
		return nil
	}

	if streamID%2 != 1 || streamID <= t.maxStreamID {
		// illegal gRPC stream id.
		if logger.V(logLevel) {
			logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
		}
		return true
		return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame)
	}
	t.maxStreamID = streamID

@@ -381,13 +380,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
		fc: &inFlow{limit: uint32(t.initialWindowSize)},
	}
	var (
		// If a gRPC Response-Headers has already been received, then it means
		// that the peer is speaking gRPC and we are in gRPC mode.
		isGRPC = false
		mdata = make(map[string][]string)
		httpMethod string
		// headerError is set if an error is encountered while parsing the headers
		headerError bool
		// if false, content-type was missing or invalid
		isGRPC = false
		contentType = ""
		mdata = make(map[string][]string)
		httpMethod string
		// these are set if an error is encountered while parsing the headers
		protocolError bool
		headerError *status.Status

		timeoutSet bool
		timeout time.Duration
@@ -398,6 +398,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
		case "content-type":
			contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
			if !validContentType {
				contentType = hf.Value
				break
			}
			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
@@ -413,7 +414,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
			timeoutSet = true
			var err error
			if timeout, err = decodeTimeout(hf.Value); err != nil {
				headerError = true
				headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err)
			}
		// "Transports must consider requests containing the Connection header
		// as malformed." - A41
@@ -421,14 +422,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
			if logger.V(logLevel) {
				logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec")
			}
			headerError = true
			protocolError = true
		default:
			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
				break
			}
			v, err := decodeMetadataHeader(hf.Name, hf.Value)
			if err != nil {
				headerError = true
				headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err)
				logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
				break
			}
@@ -447,23 +448,43 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
			logger.Errorf("transport: %v", errMsg)
		}
		t.controlBuf.put(&earlyAbortStream{
			httpStatus: 400,
			httpStatus: http.StatusBadRequest,
			streamID: streamID,
			contentSubtype: s.contentSubtype,
			status: status.New(codes.Internal, errMsg),
			rst: !frame.StreamEnded(),
		})
		return false
		return nil
	}

	if !isGRPC || headerError {
	if protocolError {
		t.controlBuf.put(&cleanupStream{
			streamID: streamID,
			rst: true,
			rstCode: http2.ErrCodeProtocol,
			onWrite: func() {},
		})
		return false
		return nil
	}
	if !isGRPC {
		t.controlBuf.put(&earlyAbortStream{
			httpStatus: http.StatusUnsupportedMediaType,
			streamID: streamID,
			contentSubtype: s.contentSubtype,
			status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType),
			rst: !frame.StreamEnded(),
		})
		return nil
	}
	if headerError != nil {
		t.controlBuf.put(&earlyAbortStream{
			httpStatus: http.StatusBadRequest,
			streamID: streamID,
			contentSubtype: s.contentSubtype,
			status: headerError,
			rst: !frame.StreamEnded(),
		})
		return nil
	}

	// "If :authority is missing, Host must be renamed to :authority." - A41
@@ -503,7 +524,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
	if t.state != reachable {
		t.mu.Unlock()
		s.cancel()
		return false
		return nil
	}
	if uint32(len(t.activeStreams)) >= t.maxStreams {
		t.mu.Unlock()
@@ -514,7 +535,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
			onWrite: func() {},
		})
		s.cancel()
		return false
		return nil
	}
	if httpMethod != http.MethodPost {
		t.mu.Unlock()
@@ -530,7 +551,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
			rst: !frame.StreamEnded(),
		})
		s.cancel()
		return false
		return nil
	}
	if t.inTapHandle != nil {
		var err error
@@ -550,7 +571,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
			status: stat,
			rst: !frame.StreamEnded(),
		})
		return false
		return nil
	}
	}
	t.activeStreams[streamID] = s
@@ -597,7 +618,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
		wq: s.wq,
	})
	handle(s)
	return false
	return nil
}

// HandleStreams receives incoming streams using the given handler. This is
@@ -630,19 +651,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
				continue
			}
			if err == io.EOF || err == io.ErrUnexpectedEOF {
				t.Close()
				t.Close(err)
				return
			}
			if logger.V(logLevel) {
				logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
			}
			t.Close()
			t.Close(err)
			return
		}
		switch frame := frame.(type) {
		case *http2.MetaHeadersFrame:
			if t.operateHeaders(frame, handle, traceCtx) {
				t.Close()
			if err := t.operateHeaders(frame, handle, traceCtx); err != nil {
				t.Close(err)
				break
			}
		case *http2.DataFrame:
@@ -843,8 +861,8 @@ const (

func (t *http2Server) handlePing(f *http2.PingFrame) {
	if f.IsAck() {
		if f.Data == goAwayPing.data && t.drainChan != nil {
			close(t.drainChan)
		if f.Data == goAwayPing.data && t.drainEvent != nil {
			t.drainEvent.Fire()
			return
		}
		// Maybe it's a BDP ping.
@@ -886,10 +904,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {

	if t.pingStrikes > maxPingStrikes {
		// Send goaway and close the connection.
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: Got too many pings from the client, closing the connection.")
|
||||
}
|
||||
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
|
||||
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1153,7 +1168,7 @@ func (t *http2Server) keepalive() {
|
|||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: closing server transport due to maximum connection age.")
|
||||
}
|
||||
t.Close()
|
||||
t.controlBuf.put(closeConnection{})
|
||||
case <-t.done:
|
||||
}
|
||||
return
|
||||
|
@ -1169,10 +1184,7 @@ func (t *http2Server) keepalive() {
|
|||
continue
|
||||
}
|
||||
if outstandingPing && kpTimeoutLeft <= 0 {
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: closing server transport due to idleness.")
|
||||
}
|
||||
t.Close()
|
||||
t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time))
|
||||
return
|
||||
}
|
||||
if !outstandingPing {
|
||||
|
@ -1199,12 +1211,15 @@ func (t *http2Server) keepalive() {
|
|||
// Close starts shutting down the http2Server transport.
|
||||
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
|
||||
// could cause some resource issue. Revisit this later.
|
||||
func (t *http2Server) Close() {
|
||||
func (t *http2Server) Close(err error) {
|
||||
t.mu.Lock()
|
||||
if t.state == closing {
|
||||
t.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: closing: %v", err)
|
||||
}
|
||||
t.state = closing
|
||||
streams := t.activeStreams
|
||||
t.activeStreams = nil
|
||||
|
@ -1295,10 +1310,10 @@ func (t *http2Server) RemoteAddr() net.Addr {
|
|||
func (t *http2Server) Drain() {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
if t.drainChan != nil {
|
||||
if t.drainEvent != nil {
|
||||
return
|
||||
}
|
||||
t.drainChan = make(chan struct{})
|
||||
t.drainEvent = grpcsync.NewEvent()
|
||||
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
|
||||
}
|
||||
|
||||
|
@ -1319,19 +1334,20 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
|||
// Stop accepting more streams now.
|
||||
t.state = draining
|
||||
sid := t.maxStreamID
|
||||
retErr := g.closeConn
|
||||
if len(t.activeStreams) == 0 {
|
||||
g.closeConn = true
|
||||
retErr = errors.New("second GOAWAY written and no active streams left to process")
|
||||
}
|
||||
t.mu.Unlock()
|
||||
t.maxStreamMu.Unlock()
|
||||
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if g.closeConn {
|
||||
if retErr != nil {
|
||||
// Abruptly close the connection following the GoAway (via
|
||||
// loopywriter). But flush out what's inside the buffer first.
|
||||
t.framer.writer.Flush()
|
||||
return false, fmt.Errorf("transport: Connection closing")
|
||||
return false, retErr
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
@ -1353,7 +1369,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
|
|||
timer := time.NewTimer(time.Minute)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-t.drainChan:
|
||||
case <-t.drainEvent.Done():
|
||||
case <-timer.C:
|
||||
case <-t.done:
|
||||
return
|
||||
|
|
|
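Note: the hunks above (from grpc-go's server-side HTTP/2 transport) consistently replace boolean results and bare Close() calls with returned errors, so the shutdown reason travels with the connection. A minimal, hypothetical sketch of that pattern, with invented names purely for illustration:

    package main

    import (
        "errors"
        "fmt"
    )

    // Before: the handler returned a bool and the caller could only log.
    // After: the handler returns the error itself; the caller hands it to
    // Close so the transport records why it shut down.
    func operate(ok bool) error {
        if !ok {
            return errors.New("received an illegal stream id")
        }
        return nil
    }

    func main() {
        if err := operate(false); err != nil {
            fmt.Println("closing transport:", err) // stands in for t.Close(err)
        }
    }
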
@@ -583,8 +583,8 @@ type ConnectOptions struct {

// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
return newHTTP2Client(connectCtx, ctx, addr, opts, onGoAway, onClose)
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) {
return newHTTP2Client(connectCtx, ctx, addr, opts, onClose)
}

// Options provides additional hints and information for message

@@ -701,7 +701,7 @@ type ServerTransport interface {
// Close tears down the transport. Once it is called, the transport
// should not be accessed any more. All the pending streams and their
// handlers will be terminated asynchronously.
Close()
Close(err error)

// RemoteAddr returns the remote network address.
RemoteAddr() net.Addr

@@ -58,12 +58,18 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
pw.mu.Unlock()
}

func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
// doneChannelzWrapper performs the following:
// - increments the calls started channelz counter
// - wraps the done function in the passed in result to increment the calls
// failed or calls succeeded channelz counter before invoking the actual
// done function.
func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) {
acw.mu.Lock()
ac := acw.ac
acw.mu.Unlock()
ac.incrCallsStarted()
return func(b balancer.DoneInfo) {
done := result.Done
result.Done = func(b balancer.DoneInfo) {
if b.Err != nil && b.Err != io.EOF {
ac.incrCallsFailed()
} else {

@@ -82,7 +88,7 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f
// - the current picker returns other errors and failfast is false.
// - the subConn returned by the current picker is not READY
// When one of these situations happens, pick blocks until the picker gets updated.
func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) {
func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) {
var ch chan struct{}

var lastPickErr error

@@ -90,7 +96,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
pw.mu.Lock()
if pw.done {
pw.mu.Unlock()
return nil, nil, ErrClientConnClosing
return nil, balancer.PickResult{}, ErrClientConnClosing
}

if pw.picker == nil {

@@ -111,9 +117,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
}
switch ctx.Err() {
case context.DeadlineExceeded:
return nil, nil, status.Error(codes.DeadlineExceeded, errStr)
return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr)
case context.Canceled:
return nil, nil, status.Error(codes.Canceled, errStr)
return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr)
}
case <-ch:
}

@@ -125,7 +131,6 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
pw.mu.Unlock()

pickResult, err := p.Pick(info)

if err != nil {
if err == balancer.ErrNoSubConnAvailable {
continue

@@ -136,7 +141,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
if istatus.IsRestrictedControlPlaneCode(st) {
err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
}
return nil, nil, dropError{error: err}
return nil, balancer.PickResult{}, dropError{error: err}
}
// For all other errors, wait for ready RPCs should block and other
// RPCs should fail with unavailable.

@@ -144,7 +149,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
lastPickErr = err
continue
}
return nil, nil, status.Error(codes.Unavailable, err.Error())
return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
}

acw, ok := pickResult.SubConn.(*acBalancerWrapper)

@@ -154,9 +159,10 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
}
if t := acw.getAddrConn().getReadyTransport(); t != nil {
if channelz.IsOn() {
return t, doneChannelzWrapper(acw, pickResult.Done), nil
doneChannelzWrapper(acw, &pickResult)
return t, pickResult, nil
}
return t, pickResult.Done, nil
return t, pickResult, nil
}
if pickResult.Done != nil {
// Calling done with nil error, no bytes sent and no bytes received.

|
|||
|
||||
func (b *pickfirstBalancer) ResolverError(err error) {
|
||||
if logger.V(2) {
|
||||
logger.Infof("pickfirstBalancer: ResolverError called with error %v", err)
|
||||
logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err)
|
||||
}
|
||||
if b.subConn == nil {
|
||||
b.state = connectivity.TransientFailure
|
||||
|
@ -102,8 +102,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
|
|||
b.subConn = subConn
|
||||
b.state = connectivity.Idle
|
||||
b.cc.UpdateState(balancer.State{
|
||||
ConnectivityState: connectivity.Idle,
|
||||
Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}},
|
||||
ConnectivityState: connectivity.Connecting,
|
||||
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
|
||||
})
|
||||
b.subConn.Connect()
|
||||
return nil
|
||||
|
|
|
@@ -57,7 +57,8 @@ LEGACY_SOURCES=(
${WORKDIR}/grpc-proto/grpc/health/v1/health.proto
${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto
profiling/proto/service.proto
reflection/grpc_reflection_v1alpha/reflection.proto
${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto
${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto
)

# Generates only the new gRPC Service symbols

@@ -119,8 +120,4 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/
# see grpc_testing_not_regenerate/README.md for details.
rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go

# grpc/testing does not have a go_package option.
mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/
mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/

cp -R ${WORKDIR}/out/google.golang.org/grpc/* .

@@ -24,6 +24,7 @@ import (
"context"
"net"
"net/url"
"strings"

"google.golang.org/grpc/attributes"
"google.golang.org/grpc/credentials"

@@ -247,9 +248,6 @@ type Target struct {
Scheme string
// Deprecated: use URL.Host instead.
Authority string
// Deprecated: use URL.Path or URL.Opaque instead. The latter is set when
// the former is empty.
Endpoint string
// URL contains the parsed dial target with an optional default scheme added
// to it if the original dial target contained no scheme or contained an
// unregistered scheme. Any query params specified in the original dial

@@ -257,6 +255,24 @@ type Target struct {
URL url.URL
}

// Endpoint retrieves endpoint without leading "/" from either `URL.Path`
// or `URL.Opaque`. The latter is used when the former is empty.
func (t Target) Endpoint() string {
endpoint := t.URL.Path
if endpoint == "" {
endpoint = t.URL.Opaque
}
// For targets of the form "[scheme]://[authority]/endpoint, the endpoint
// value returned from url.Parse() contains a leading "/". Although this is
// in accordance with RFC 3986, we do not want to break existing resolver
// implementations which expect the endpoint without the leading "/". So, we
// end up stripping the leading "/" here. But this will result in an
// incorrect parsing for something like "unix:///path/to/socket". Since we
// own the "unix" resolver, we can workaround in the unix resolver by using
// the `URL` field.
return strings.TrimPrefix(endpoint, "/")
}

// Builder creates a resolver that will be used to watch name resolution updates.
type Builder interface {
// Build creates a new resolver for the given target.

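Note: the resolver.Target hunks above deprecate the Authority and Endpoint fields in favor of URL and add an Endpoint() method that strips the leading "/". A small sketch of reading the endpoint from a parsed dial target (assumes grpc-go v1.53's public resolver package; the target string is a placeholder):

    package main

    import (
        "fmt"
        "net/url"

        "google.golang.org/grpc/resolver"
    )

    func main() {
        u, err := url.Parse("dns:///example.com:443")
        if err != nil {
            panic(err)
        }
        target := resolver.Target{URL: *u}
        // URL.Path is "/example.com:443"; Endpoint() trims the leading "/"
        // as the comment above describes, so this prints "example.com:443".
        fmt.Println(target.Endpoint())
    }
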
@@ -25,7 +25,6 @@ import (
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"math"
"strings"
"sync"

@@ -77,7 +76,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
return &gzipCompressor{
pool: sync.Pool{
New: func() interface{} {
w, err := gzip.NewWriterLevel(ioutil.Discard, level)
w, err := gzip.NewWriterLevel(io.Discard, level)
if err != nil {
panic(err)
}

@@ -143,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
z.Close()
d.pool.Put(z)
}()
return ioutil.ReadAll(z)
return io.ReadAll(z)
}

func (d *gzipDecompressor) Type() string {

@@ -297,7 +296,8 @@ func (o FailFastCallOption) before(c *callInfo) error {
func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}

// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can receive.
// in bytes the client can receive. If this is not set, gRPC uses the default
// 4MB.
func MaxCallRecvMsgSize(bytes int) CallOption {
return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes}
}

@@ -320,7 +320,8 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}

// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can send.
// in bytes the client can send. If this is not set, gRPC uses the default
// `math.MaxInt32`.
func MaxCallSendMsgSize(bytes int) CallOption {
return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes}
}

@@ -711,7 +712,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
d, size, err = decompress(compressor, d, maxReceiveMessageSize)
}
if err != nil {
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
}
if size > maxReceiveMessageSize {
// TODO: Revisit the error code. Currently keep it consistent with java

@@ -746,7 +747,7 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize
}
// Read from LimitReader with limit max+1. So if the underlying
// reader is over limit, the result will be bigger than max.
d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
return d, len(d), err
}

@@ -759,7 +760,7 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf
return err
}
if err := c.Unmarshal(d, m); err != nil {
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
}
if payInfo != nil {
payInfo.uncompressedBytes = d

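Note: besides the io/ioutil cleanup, the hunks above now document the default message-size limits (4MB receive, math.MaxInt32 send). A hedged sketch of overriding them per call with the public CallOptions; the address and the health-check service are placeholders:

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        healthpb "google.golang.org/grpc/health/grpc_health_v1"
    )

    func main() {
        conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        client := healthpb.NewHealthClient(conn)
        // Raise the 4MB receive default and cap sends for this call only.
        _, err = client.Check(context.Background(), &healthpb.HealthCheckRequest{},
            grpc.MaxCallRecvMsgSize(16*1024*1024),
            grpc.MaxCallSendMsgSize(1*1024*1024),
        )
        if err != nil {
            log.Println(err)
        }
    }
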
@@ -233,10 +233,11 @@ func newJoinServerOption(opts ...ServerOption) ServerOption {
return &joinServerOption{opts: opts}
}

// WriteBufferSize determines how much data can be batched before doing a write on the wire.
// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
// The default value for this buffer is 32KB.
// Zero will disable the write buffer such that each write will be on underlying connection.
// WriteBufferSize determines how much data can be batched before doing a write
// on the wire. The corresponding memory allocation for this buffer will be
// twice the size to keep syscalls low. The default value for this buffer is
// 32KB. Zero or negative values will disable the write buffer such that each
// write will be on underlying connection.
// Note: A Send call may not directly translate to a write.
func WriteBufferSize(s int) ServerOption {
return newFuncServerOption(func(o *serverOptions) {

@@ -244,11 +245,10 @@ func WriteBufferSize(s int) ServerOption {
})
}

// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
// for one read syscall.
// The default value for this buffer is 32KB.
// Zero will disable read buffer for a connection so data framer can access the underlying
// conn directly.
// ReadBufferSize lets you set the size of read buffer, this determines how much
// data can be read at most for one read syscall. The default value for this
// buffer is 32KB. Zero or negative values will disable read buffer for a
// connection so data framer can access the underlying conn directly.
func ReadBufferSize(s int) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.readBufferSize = s

@@ -942,7 +942,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
}

func (s *Server) serveStreams(st transport.ServerTransport) {
defer st.Close()
defer st.Close(errors.New("finished serving streams for the server transport"))
var wg sync.WaitGroup

var roundRobinCounter uint32

@@ -1008,7 +1008,8 @@ var _ http.Handler = (*Server)(nil)
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
// Errors returned from transport.NewServerHandlerTransport have
// already been written to w.
return
}
if !s.addConn(listenerAddressForServeHTTP, st) {

@@ -1046,7 +1047,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
s.mu.Lock()
defer s.mu.Unlock()
if s.conns == nil {
st.Close()
st.Close(errors.New("Server.addConn called when server has already been stopped"))
return false
}
if s.drain {

@@ -1150,21 +1151,16 @@ func chainUnaryServerInterceptors(s *Server) {

func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) {
// the struct ensures the variables are allocated together, rather than separately, since we
// know they should be garbage collected together. This saves 1 allocation and decreases
// time/call by about 10% on the microbenchmark.
var state struct {
i int
next UnaryHandler
}
state.next = func(ctx context.Context, req interface{}) (interface{}, error) {
if state.i == len(interceptors)-1 {
return interceptors[state.i](ctx, req, info, handler)
}
state.i++
return interceptors[state.i-1](ctx, req, info, state.next)
}
return state.next(ctx, req)
return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
}
}

func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
if curr == len(interceptors)-1 {
return finalHandler
}
return func(ctx context.Context, req interface{}) (interface{}, error) {
return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
}
}

@@ -1303,7 +1299,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
if err != nil {
if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e)
channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
return err
}

@@ -1470,21 +1466,16 @@ func chainStreamServerInterceptors(s *Server) {

func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
// the struct ensures the variables are allocated together, rather than separately, since we
// know they should be garbage collected together. This saves 1 allocation and decreases
// time/call by about 10% on the microbenchmark.
var state struct {
i int
next StreamHandler
}
state.next = func(srv interface{}, ss ServerStream) error {
if state.i == len(interceptors)-1 {
return interceptors[state.i](srv, ss, info, handler)
}
state.i++
return interceptors[state.i-1](srv, ss, info, state.next)
}
return state.next(srv, ss)
return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
}
}

func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
if curr == len(interceptors)-1 {
return finalHandler
}
return func(srv interface{}, stream ServerStream) error {
return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
}
}

@@ -1819,7 +1810,7 @@ func (s *Server) Stop() {
}
for _, cs := range conns {
for st := range cs {
st.Close()
st.Close(errors.New("Server.Stop called"))
}
}
if s.opts.numServerWorkers > 0 {

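Note: the server.go hunks above clarify that zero or negative buffer sizes disable buffering, and rewrite interceptor chaining recursively (getChainUnaryHandler/getChainStreamHandler) without changing the registration-order semantics. A minimal sketch of a server exercising both; the values and interceptors are illustrative only:

    package main

    import (
        "context"
        "log"
        "net"

        "google.golang.org/grpc"
    )

    func tag(prefix string) grpc.UnaryServerInterceptor {
        return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
            log.Println(prefix, info.FullMethod)
            return handler(ctx, req) // interceptors still run in the order given
        }
    }

    func main() {
        lis, err := net.Listen("tcp", "localhost:0")
        if err != nil {
            log.Fatal(err)
        }
        s := grpc.NewServer(
            grpc.WriteBufferSize(0), // zero (or negative) disables the write buffer
            grpc.ReadBufferSize(0),  // likewise for the read buffer
            grpc.ChainUnaryInterceptor(tag("first"), tag("second")),
        )
        // Register services here before serving.
        log.Fatal(s.Serve(lis))
    }
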
@@ -226,7 +226,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
var rsc jsonSC
err := json.Unmarshal([]byte(js), &rsc)
if err != nil {
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
sc := ServiceConfig{

@@ -254,7 +254,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
}
d, err := parseDuration(m.Timeout)
if err != nil {
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}

@@ -263,7 +263,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
Timeout: d,
}
if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
if m.MaxRequestMessageBytes != nil {

@@ -283,13 +283,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
for i, n := range *m.Name {
path, err := n.generatePath()
if err != nil {
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
return &serviceconfig.ParseResult{Err: err}
}

if _, ok := paths[path]; ok {
err = errDuplicatedName
logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err)
logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
return &serviceconfig.ParseResult{Err: err}
}
paths[path] = struct{}{}

@@ -416,7 +416,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
ctx = trace.NewContext(ctx, trInfo.tr)
}

if cs.cc.parsedTarget.Scheme == "xds" {
if cs.cc.parsedTarget.URL.Scheme == "xds" {
// Add extra metadata (metadata that will be added by transport) to context
// so the balancer can see them.
ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(

@@ -438,7 +438,7 @@ func (a *csAttempt) getTransport() error {
cs := a.cs

var err error
a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
if err != nil {
if de, ok := err.(dropError); ok {
err = de.error

@@ -455,6 +455,25 @@ func (a *csAttempt) getTransport() error {
func (a *csAttempt) newStream() error {
cs := a.cs
cs.callHdr.PreviousAttempts = cs.numRetries

// Merge metadata stored in PickResult, if any, with existing call metadata.
// It is safe to overwrite the csAttempt's context here, since all state
// maintained in it are local to the attempt. When the attempt has to be
// retried, a new instance of csAttempt will be created.
if a.pickResult.Metatada != nil {
// We currently do not have a function it the metadata package which
// merges given metadata with existing metadata in a context. Existing
// function `AppendToOutgoingContext()` takes a variadic argument of key
// value pairs.
//
// TODO: Make it possible to retrieve key value pairs from metadata.MD
// in a form passable to AppendToOutgoingContext(), or create a version
// of AppendToOutgoingContext() that accepts a metadata.MD.
md, _ := metadata.FromOutgoingContext(a.ctx)
md = metadata.Join(md, a.pickResult.Metatada)
a.ctx = metadata.NewOutgoingContext(a.ctx, md)
}

s, err := a.t.NewStream(a.ctx, cs.callHdr)
if err != nil {
nse, ok := err.(*transport.NewStreamError)

@@ -529,12 +548,12 @@ type clientStream struct {
// csAttempt implements a single transport stream attempt within a
// clientStream.
type csAttempt struct {
ctx context.Context
cs *clientStream
t transport.ClientTransport
s *transport.Stream
p *parser
done func(balancer.DoneInfo)
ctx context.Context
cs *clientStream
t transport.ClientTransport
s *transport.Stream
p *parser
pickResult balancer.PickResult

finished bool
dc Decompressor

@@ -1103,12 +1122,12 @@ func (a *csAttempt) finish(err error) {
tr = a.s.Trailer()
}

if a.done != nil {
if a.pickResult.Done != nil {
br := false
if a.s != nil {
br = a.s.BytesReceived()
}
a.done(balancer.DoneInfo{
a.pickResult.Done(balancer.DoneInfo{
Err: err,
Trailer: tr,
BytesSent: a.s != nil,

@@ -1464,6 +1483,9 @@ type ServerStream interface {
// It is safe to have a goroutine calling SendMsg and another goroutine
// calling RecvMsg on the same stream at the same time, but it is not safe
// to call SendMsg on the same stream in different goroutines.
//
// It is not safe to modify the message after calling SendMsg. Tracing
// libraries and stats handlers may use the message lazily.
SendMsg(m interface{}) error
// RecvMsg blocks until it receives a message into m or the stream is
// done. It returns io.EOF when the client has performed a CloseSend. On

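Note: csAttempt now carries the whole balancer.PickResult, and newStream() merges any picker-supplied metadata into the attempt's outgoing context. A hypothetical picker using that hook; per the hunks above, the PickResult field really is spelled Metatada in grpc-go v1.53:

    package main

    import (
        "google.golang.org/grpc/balancer"
        "google.golang.org/grpc/metadata"
    )

    // staticPicker always picks the same ready SubConn and asks the client to
    // attach extra metadata to each attempt, which the newStream() hunk above
    // merges into the outgoing context.
    type staticPicker struct {
        sc balancer.SubConn
    }

    func (p *staticPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
        return balancer.PickResult{
            SubConn:  p.sc,
            Metatada: metadata.Pairs("x-route-hint", "primary"), // v1.53 spelling
        }, nil
    }

    func main() {} // compile-only sketch; a real picker is produced by a balancer
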
@@ -19,4 +19,4 @@
package grpc

// Version is the current grpc version.
const Version = "1.51.0"
const Version = "1.53.0"

@@ -66,6 +66,17 @@ elif [[ "$#" -ne 0 ]]; then
die "Unknown argument(s): $*"
fi

# - Check that generated proto files are up to date.
if [[ -z "${VET_SKIP_PROTO}" ]]; then
PATH="/home/travis/bin:${PATH}" make proto && \
git status --porcelain 2>&1 | fail_on_output || \
(git status; git --no-pager diff; exit 1)
fi

if [[ -n "${VET_ONLY_PROTO}" ]]; then
exit 0
fi

# - Ensure all source files contain a copyright message.
# (Done in two parts because Darwin "git grep" has broken support for compound
# exclusion matches.)

@@ -93,13 +104,6 @@ git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.

misspell -error .

# - Check that generated proto files are up to date.
if [[ -z "${VET_SKIP_PROTO}" ]]; then
PATH="/home/travis/bin:${PATH}" make proto && \
git status --porcelain 2>&1 | fail_on_output || \
(git status; git --no-pager diff; exit 1)
fi

# - gofmt, goimports, golint (with exceptions for generated code), go vet,
# go mod tidy.
# Perform these checks on each module inside gRPC.

@@ -111,7 +115,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do
goimports -l . 2>&1 | not grep -vE "\.pb\.go"
golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"

go mod tidy
go mod tidy -compat=1.17
git status --porcelain 2>&1 | fail_on_output || \
(git status; git --no-pager diff; exit 1)
popd

@@ -121,8 +125,9 @@ done
#
# TODO(dfawley): don't use deprecated functions in examples or first-party
# plugins.
# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs.
SC_OUT="$(mktemp)"
staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true
staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true
# Error if anything other than deprecation warnings are printed.
not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
# Only ignore the following deprecated types/fields/functions.

@@ -1,168 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/empty.proto

package emptypb

import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)

// A generic empty message that you can re-use to avoid defining duplicated
// empty messages in your APIs. A typical example is to use it as the request
// or the response type of an API method. For instance:
//
// service Foo {
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
// }
//
// The JSON representation for `Empty` is empty JSON object `{}`.
type Empty struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}

func (x *Empty) Reset() {
*x = Empty{}
if protoimpl.UnsafeEnabled {
mi := &file_google_protobuf_empty_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *Empty) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*Empty) ProtoMessage() {}

func (x *Empty) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_empty_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
func (*Empty) Descriptor() ([]byte, []int) {
return file_google_protobuf_empty_proto_rawDescGZIP(), []int{0}
}

var File_google_protobuf_empty_proto protoreflect.FileDescriptor

var file_google_protobuf_empty_proto_rawDesc = []byte{
0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07,
0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a,
0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b,
0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2,
0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77,
0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
file_google_protobuf_empty_proto_rawDescOnce sync.Once
file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc
)

func file_google_protobuf_empty_proto_rawDescGZIP() []byte {
file_google_protobuf_empty_proto_rawDescOnce.Do(func() {
file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData)
})
return file_google_protobuf_empty_proto_rawDescData
}

var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_google_protobuf_empty_proto_goTypes = []interface{}{
(*Empty)(nil), // 0: google.protobuf.Empty
}
var file_google_protobuf_empty_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}

func init() { file_google_protobuf_empty_proto_init() }
func file_google_protobuf_empty_proto_init() {
if File_google_protobuf_empty_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Empty); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_protobuf_empty_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_google_protobuf_empty_proto_goTypes,
DependencyIndexes: file_google_protobuf_empty_proto_depIdxs,
MessageInfos: file_google_protobuf_empty_proto_msgTypes,
}.Build()
File_google_protobuf_empty_proto = out.File
file_google_protobuf_empty_proto_rawDesc = nil
file_google_protobuf_empty_proto_goTypes = nil
file_google_protobuf_empty_proto_depIdxs = nil
}

@@ -0,0 +1 @@
[binary content omitted — the added one-line file is not valid UTF-8 text]

@@ -0,0 +1,8 @@
*~
.*.swp
*.out
*.test
*.pem
*.cov
jose-util/jose-util
jose-util.t.err

@@ -0,0 +1,45 @@
language: go

sudo: false

matrix:
fast_finish: true
allow_failures:
- go: tip

go:
- '1.14.x'
- '1.15.x'
- tip

go_import_path: gopkg.in/square/go-jose.v2

before_script:
- export PATH=$HOME/.local/bin:$PATH

before_install:
# Install encrypted gitcookies to get around bandwidth-limits
# that is causing Travis-CI builds to fail. For more info, see
# https://github.com/golang/go/issues/12933
- openssl aes-256-cbc -K $encrypted_1528c3c2cafd_key -iv $encrypted_1528c3c2cafd_iv -in .gitcookies.sh.enc -out .gitcookies.sh -d || true
- bash .gitcookies.sh || true
- go get github.com/wadey/gocovmerge
- go get github.com/mattn/goveralls
- go get github.com/stretchr/testify/assert
- go get github.com/stretchr/testify/require
- go get github.com/google/go-cmp/cmp
- go get golang.org/x/tools/cmd/cover || true
- go get code.google.com/p/go.tools/cmd/cover || true
- pip install cram --user

script:
- go test . -v -covermode=count -coverprofile=profile.cov
- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov
- go test ./jwt -v -covermode=count -coverprofile=jwt/profile.cov
- go test ./json -v # no coverage for forked encoding/json package
- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util
- cd ..

after_success:
- gocovmerge *.cov */*.cov > merged.coverprofile
- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci

@@ -0,0 +1,14 @@
# Contributing

If you would like to contribute code to go-jose you can do so through GitHub by
forking the repository and sending a pull request.

When submitting code, please make every effort to follow existing conventions
and style in order to keep the code as readable as possible. Please also make
sure all tests pass by running `go test`, and format your code with `go fmt`.
We also recommend using `golint` and `errcheck`.

Before your code can be accepted into the project you must also sign the
[Individual Contributor License Agreement][1].

[1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1

@@ -0,0 +1,202 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -0,0 +1,118 @@
|
|||
# Go JOSE

[godoc (v1)](https://godoc.org/gopkg.in/go-jose/go-jose.v1)
[godoc (v2)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
[license](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
[build](https://travis-ci.org/go-jose/go-jose)
[coverage](https://coveralls.io/r/go-jose/go-jose)

Package jose aims to provide an implementation of the JavaScript Object Signing
and Encryption set of standards. This includes support for the JSON Web
Encryption, JSON Web Signature, and JSON Web Token standards.

**Disclaimer**: This library contains encryption software that is subject to
the U.S. Export Administration Regulations. You may not export, re-export,
transfer or download this code or any part of it in violation of any United
States law, directive or regulation. In particular this software may not be
exported or re-exported in any form or on any media to Iran, North Sudan,
Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
US maintained blocked list.

## Overview

The implementation follows the
[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516),
[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
Tables of supported algorithms are shown below. The library supports both
the compact and full serialization formats, and has optional support for
multiple recipients. It also comes with a small command-line utility
([`jose-util`](https://github.com/go-jose/go-jose/tree/v2/jose-util))
for dealing with JOSE messages in a shell.

**Note**: We use a forked version of the `encoding/json` package from the Go
standard library which uses case-sensitive matching for member names (instead
of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
This is to avoid differences in interpretation of messages between go-jose and
libraries in other languages.

### Versions

We use [gopkg.in](https://gopkg.in) for versioning.

[Version 2](https://gopkg.in/go-jose/go-jose.v2)
([branch](https://github.com/go-jose/go-jose/tree/v2),
[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current version:

    import "gopkg.in/go-jose/go-jose.v2"

The old `v1` branch ([go-jose.v1](https://gopkg.in/go-jose/go-jose.v1)) will
still receive backported bug fixes and security fixes, but otherwise
development is frozen. All new feature development takes place on the `v2`
branch. Version 2 also contains additional sub-packages, such as the
[jwt](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) implementation
contributed by [@shaxbee](https://github.com/shaxbee); a short usage sketch
follows below.
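As an illustration of that `jwt` sub-package, here is a minimal sketch of
issuing and then parsing a signed token. The HMAC key and the claim values
are invented for the example and are not taken from the project's own
documentation:

    package main

    import (
        "fmt"

        jose "gopkg.in/go-jose/go-jose.v2"
        "gopkg.in/go-jose/go-jose.v2/jwt"
    )

    func main() {
        // Hypothetical 256-bit HMAC key, hard-coded only for the example;
        // use a randomly generated secret in real code.
        key := []byte("0123456789abcdef0123456789abcdef")

        signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: key}, nil)
        if err != nil {
            panic(err)
        }

        // Build a token carrying a couple of registered claims and serialize it.
        raw, err := jwt.Signed(signer).Claims(jwt.Claims{Issuer: "issuer", Subject: "subject"}).CompactSerialize()
        if err != nil {
            panic(err)
        }

        // Parse the token back; Claims verifies the signature with the key
        // while deserializing the claims.
        token, err := jwt.ParseSigned(raw)
        if err != nil {
            panic(err)
        }
        var claims jwt.Claims
        if err := token.Claims(key, &claims); err != nil {
            panic(err)
        }
        fmt.Println(claims.Issuer, claims.Subject)
    }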
### Supported algorithms

See below for a table of supported algorithms. Algorithm identifiers match
the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
standard where possible. The Godoc reference has a list of constants.

Key encryption             | Algorithm identifier(s)
:------------------------- | :------------------------------
RSA-PKCS#1v1.5             | RSA1_5
RSA-OAEP                   | RSA-OAEP, RSA-OAEP-256
AES key wrap               | A128KW, A192KW, A256KW
AES-GCM key wrap           | A128GCMKW, A192GCMKW, A256GCMKW
ECDH-ES + AES key wrap     | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
ECDH-ES (direct)           | ECDH-ES<sup>1</sup>
Direct encryption          | dir<sup>1</sup>

<sup>1. Not supported in multi-recipient mode</sup>

Signing / MAC              | Algorithm identifier(s)
:------------------------- | :------------------------------
RSASSA-PKCS#1v1.5          | RS256, RS384, RS512
RSASSA-PSS                 | PS256, PS384, PS512
HMAC                       | HS256, HS384, HS512
ECDSA                      | ES256, ES384, ES512
Ed25519                    | EdDSA<sup>2</sup>

<sup>2. Only available in version 2 of the package</sup>

Content encryption         | Algorithm identifier(s)
:------------------------- | :------------------------------
AES-CBC+HMAC               | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
AES-GCM                    | A128GCM, A192GCM, A256GCM

Compression                | Algorithm identifier(s)
:------------------------- | :------------------------------
DEFLATE (RFC 1951)         | DEF

### Supported key types

See below for a table of supported key types. These are understood by the
library, and can be passed to corresponding functions such as `NewEncrypter` or
`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
allows attaching a key id; a usage sketch follows the table below.

Algorithm(s)               | Corresponding types
:------------------------- | :------------------------------
RSA                        | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
ECDH, ECDSA                | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
EdDSA<sup>1</sup>          | [ed25519.PublicKey](https://godoc.org/golang.org/x/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/golang.org/x/crypto/ed25519#PrivateKey)
AES, HMAC                  | []byte

<sup>1. Only available in version 2 of the package</sup>
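To make the tables above concrete, here is a minimal encryption round-trip
sketch, assuming the v2 import path; the RSA-OAEP/A128GCM combination, the key
size, and the payload are arbitrary choices for illustration:

    package main

    import (
        "crypto/rand"
        "crypto/rsa"
        "fmt"

        jose "gopkg.in/go-jose/go-jose.v2"
    )

    func main() {
        // Generate a throwaway RSA key pair for the example.
        privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            panic(err)
        }

        // RSA-OAEP wraps the content encryption key; A128GCM encrypts the payload.
        encrypter, err := jose.NewEncrypter(
            jose.A128GCM,
            jose.Recipient{Algorithm: jose.RSA_OAEP, Key: &privateKey.PublicKey},
            nil,
        )
        if err != nil {
            panic(err)
        }

        object, err := encrypter.Encrypt([]byte("Lorem ipsum dolor sit amet"))
        if err != nil {
            panic(err)
        }

        // Compact serialization round-trip, then decrypt with the private key.
        serialized, err := object.CompactSerialize()
        if err != nil {
            panic(err)
        }
        object, err = jose.ParseEncrypted(serialized)
        if err != nil {
            panic(err)
        }
        plaintext, err := object.Decrypt(privateKey)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(plaintext))
    }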
## Examples

[godoc (v1)](https://godoc.org/gopkg.in/go-jose/go-jose.v1)
[godoc (v2)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)

Examples can be found in the Godoc reference for this package. The
[`jose-util`](https://github.com/go-jose/go-jose/tree/v2/jose-util)
subdirectory also contains a small command-line utility which might be useful
as an example; a signing sketch follows below.
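As a minimal signing sketch (hedged: the payload is arbitrary and the key pair
is generated on the spot), the following signs with RS256 and then verifies
the result using the v2 API:

    package main

    import (
        "crypto/rand"
        "crypto/rsa"
        "fmt"

        jose "gopkg.in/go-jose/go-jose.v2"
    )

    func main() {
        key, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            panic(err)
        }

        // RS256 = RSASSA-PKCS#1v1.5 with SHA-256 (see the signing table above).
        signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: key}, nil)
        if err != nil {
            panic(err)
        }

        object, err := signer.Sign([]byte("Lorem ipsum dolor sit amet"))
        if err != nil {
            panic(err)
        }

        // Full (JSON) serialization round-trip, then verify with the public key.
        serialized := object.FullSerialize()
        object, err = jose.ParseSigned(serialized)
        if err != nil {
            panic(err)
        }
        payload, err := object.Verify(&key.PublicKey)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(payload))
    }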
@@ -0,0 +1,592 @@
/*-
 * Copyright 2014 Square Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package jose

import (
	"crypto"
	"crypto/aes"
	"crypto/ecdsa"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha1"
	"crypto/sha256"
	"errors"
	"fmt"
	"math/big"

	"golang.org/x/crypto/ed25519"
	josecipher "gopkg.in/go-jose/go-jose.v2/cipher"
	"gopkg.in/go-jose/go-jose.v2/json"
)

// A generic RSA-based encrypter/verifier
type rsaEncrypterVerifier struct {
	publicKey *rsa.PublicKey
}

// A generic RSA-based decrypter/signer
type rsaDecrypterSigner struct {
	privateKey *rsa.PrivateKey
}

// A generic EC-based encrypter/verifier
type ecEncrypterVerifier struct {
	publicKey *ecdsa.PublicKey
}

// An Ed25519-based verifier
type edEncrypterVerifier struct {
	publicKey ed25519.PublicKey
}

// A key generator for ECDH-ES
type ecKeyGenerator struct {
	size      int
	algID     string
	publicKey *ecdsa.PublicKey
}

// A generic EC-based decrypter/signer
type ecDecrypterSigner struct {
	privateKey *ecdsa.PrivateKey
}

// An Ed25519-based signer
type edDecrypterSigner struct {
	privateKey ed25519.PrivateKey
}

// newRSARecipient creates recipientKeyInfo based on the given key.
func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
	// Verify that the key management algorithm is supported by this encrypter
	switch keyAlg {
	case RSA1_5, RSA_OAEP, RSA_OAEP_256:
	default:
		return recipientKeyInfo{}, ErrUnsupportedAlgorithm
	}

	if publicKey == nil {
		return recipientKeyInfo{}, errors.New("invalid public key")
	}

	return recipientKeyInfo{
		keyAlg: keyAlg,
		keyEncrypter: &rsaEncrypterVerifier{
			publicKey: publicKey,
		},
	}, nil
}

// newRSASigner creates a recipientSigInfo based on the given key.
func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
	// Verify that the signature algorithm is supported by this signer
	switch sigAlg {
	case RS256, RS384, RS512, PS256, PS384, PS512:
	default:
		return recipientSigInfo{}, ErrUnsupportedAlgorithm
	}

	if privateKey == nil {
		return recipientSigInfo{}, errors.New("invalid private key")
	}

	return recipientSigInfo{
		sigAlg: sigAlg,
		publicKey: staticPublicKey(&JSONWebKey{
			Key: privateKey.Public(),
		}),
		signer: &rsaDecrypterSigner{
			privateKey: privateKey,
		},
	}, nil
}

// newEd25519Signer creates a recipientSigInfo based on the given key.
func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) {
	if sigAlg != EdDSA {
		return recipientSigInfo{}, ErrUnsupportedAlgorithm
	}

	if privateKey == nil {
		return recipientSigInfo{}, errors.New("invalid private key")
	}
	return recipientSigInfo{
		sigAlg: sigAlg,
		publicKey: staticPublicKey(&JSONWebKey{
			Key: privateKey.Public(),
		}),
		signer: &edDecrypterSigner{
			privateKey: privateKey,
		},
	}, nil
}

// newECDHRecipient creates recipientKeyInfo based on the given key.
func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
	// Verify that the key management algorithm is supported by this encrypter
	switch keyAlg {
	case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
	default:
		return recipientKeyInfo{}, ErrUnsupportedAlgorithm
	}

	if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
		return recipientKeyInfo{}, errors.New("invalid public key")
	}

	return recipientKeyInfo{
		keyAlg: keyAlg,
		keyEncrypter: &ecEncrypterVerifier{
			publicKey: publicKey,
		},
	}, nil
}

// newECDSASigner creates a recipientSigInfo based on the given key.
func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
	// Verify that the signature algorithm is supported by this signer
	switch sigAlg {
	case ES256, ES384, ES512:
	default:
		return recipientSigInfo{}, ErrUnsupportedAlgorithm
	}

	if privateKey == nil {
		return recipientSigInfo{}, errors.New("invalid private key")
	}

	return recipientSigInfo{
		sigAlg: sigAlg,
		publicKey: staticPublicKey(&JSONWebKey{
			Key: privateKey.Public(),
		}),
		signer: &ecDecrypterSigner{
			privateKey: privateKey,
		},
	}, nil
}

// Encrypt the given payload and update the object.
func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
	encryptedKey, err := ctx.encrypt(cek, alg)
	if err != nil {
		return recipientInfo{}, err
	}

	return recipientInfo{
		encryptedKey: encryptedKey,
		header:       &rawHeader{},
	}, nil
}

// Encrypt the given payload. Based on the key encryption algorithm,
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
	switch alg {
	case RSA1_5:
		return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
	case RSA_OAEP:
		return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
	case RSA_OAEP_256:
		return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
	}

	return nil, ErrUnsupportedAlgorithm
}

// Decrypt the given payload and return the content encryption key.
func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
	return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
}

// Decrypt the given payload. Based on the key encryption algorithm,
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
	// Note: The random reader on decrypt operations is only used for blinding,
	// so stubbing is meaningless (hence the direct use of rand.Reader).
	switch alg {
	case RSA1_5:
		defer func() {
			// DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
			// because of an index out of bounds error, which we want to ignore.
			// This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
			// only exists for preventing crashes with unpatched versions.
			// See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
			// See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
			_ = recover()
		}()

		// Perform some input validation.
		keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
		if keyBytes != len(jek) {
			// Input size is incorrect, the encrypted payload should always match
			// the size of the public modulus (e.g. using a 2048 bit key will
			// produce 256 bytes of output). Reject this since it's invalid input.
			return nil, ErrCryptoFailure
		}

		cek, _, err := generator.genKey()
		if err != nil {
			return nil, ErrCryptoFailure
		}

		// When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
		// prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
		// the Million Message Attack on Cryptographic Message Syntax". We are
		// therefore deliberately ignoring errors here.
		_ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)

		return cek, nil
	case RSA_OAEP:
		// Use rand.Reader for RSA blinding
		return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
	case RSA_OAEP_256:
		// Use rand.Reader for RSA blinding
		return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
	}

	return nil, ErrUnsupportedAlgorithm
}

// Sign the given payload
func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
	var hash crypto.Hash

	switch alg {
	case RS256, PS256:
		hash = crypto.SHA256
	case RS384, PS384:
		hash = crypto.SHA384
	case RS512, PS512:
		hash = crypto.SHA512
	default:
		return Signature{}, ErrUnsupportedAlgorithm
	}

	hasher := hash.New()

	// According to documentation, Write() on hash never fails
	_, _ = hasher.Write(payload)
	hashed := hasher.Sum(nil)

	var out []byte
	var err error

	switch alg {
	case RS256, RS384, RS512:
		out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed)
	case PS256, PS384, PS512:
		out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
			SaltLength: rsa.PSSSaltLengthEqualsHash,
		})
	}

	if err != nil {
		return Signature{}, err
	}

	return Signature{
		Signature: out,
		protected: &rawHeader{},
	}, nil
}

// Verify the given payload
func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
	var hash crypto.Hash

	switch alg {
	case RS256, PS256:
		hash = crypto.SHA256
	case RS384, PS384:
		hash = crypto.SHA384
	case RS512, PS512:
		hash = crypto.SHA512
	default:
		return ErrUnsupportedAlgorithm
	}

	hasher := hash.New()

	// According to documentation, Write() on hash never fails
	_, _ = hasher.Write(payload)
	hashed := hasher.Sum(nil)

	switch alg {
	case RS256, RS384, RS512:
		return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
	case PS256, PS384, PS512:
		return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
	}

	return ErrUnsupportedAlgorithm
}

// Encrypt the given payload and update the object.
func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
	switch alg {
	case ECDH_ES:
		// ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key.
		return recipientInfo{
			header: &rawHeader{},
		}, nil
	case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
	default:
		return recipientInfo{}, ErrUnsupportedAlgorithm
	}

	generator := ecKeyGenerator{
		algID:     string(alg),
		publicKey: ctx.publicKey,
	}

	switch alg {
	case ECDH_ES_A128KW:
		generator.size = 16
	case ECDH_ES_A192KW:
		generator.size = 24
	case ECDH_ES_A256KW:
		generator.size = 32
	}

	kek, header, err := generator.genKey()
	if err != nil {
		return recipientInfo{}, err
	}

	block, err := aes.NewCipher(kek)
	if err != nil {
		return recipientInfo{}, err
	}

	jek, err := josecipher.KeyWrap(block, cek)
	if err != nil {
		return recipientInfo{}, err
	}

	return recipientInfo{
		encryptedKey: jek,
		header:       &header,
	}, nil
}

// Get key size for EC key generator
func (ctx ecKeyGenerator) keySize() int {
	return ctx.size
}

// Get a content encryption key for ECDH-ES
func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
	priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader)
	if err != nil {
		return nil, rawHeader{}, err
	}

	out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)

	b, err := json.Marshal(&JSONWebKey{
		Key: &priv.PublicKey,
	})
	if err != nil {
		return nil, nil, err
	}

	headers := rawHeader{
		headerEPK: makeRawMessage(b),
	}

	return out, headers, nil
}

// Decrypt the given payload and return the content encryption key.
func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
	epk, err := headers.getEPK()
	if err != nil {
		return nil, errors.New("go-jose/go-jose: invalid epk header")
	}
	if epk == nil {
		return nil, errors.New("go-jose/go-jose: missing epk header")
	}

	publicKey, ok := epk.Key.(*ecdsa.PublicKey)
	if publicKey == nil || !ok {
		return nil, errors.New("go-jose/go-jose: invalid epk header")
	}

	if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
		return nil, errors.New("go-jose/go-jose: invalid public key in epk header")
	}

	apuData, err := headers.getAPU()
	if err != nil {
		return nil, errors.New("go-jose/go-jose: invalid apu header")
	}
	apvData, err := headers.getAPV()
	if err != nil {
		return nil, errors.New("go-jose/go-jose: invalid apv header")
	}

	deriveKey := func(algID string, size int) []byte {
		return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size)
	}

	var keySize int

	algorithm := headers.getAlgorithm()
	switch algorithm {
	case ECDH_ES:
		// ECDH-ES uses direct key agreement, no key unwrapping necessary.
		return deriveKey(string(headers.getEncryption()), generator.keySize()), nil
	case ECDH_ES_A128KW:
		keySize = 16
	case ECDH_ES_A192KW:
		keySize = 24
	case ECDH_ES_A256KW:
		keySize = 32
	default:
		return nil, ErrUnsupportedAlgorithm
	}

	key := deriveKey(string(algorithm), keySize)
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}

	return josecipher.KeyUnwrap(block, recipient.encryptedKey)
}

// Sign the given payload with Ed25519. EdDSA signs the message directly,
// so no pre-hashing is involved (hence crypto.Hash(0)).
func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
	if alg != EdDSA {
		return Signature{}, ErrUnsupportedAlgorithm
	}

	sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0))
	if err != nil {
		return Signature{}, err
	}

	return Signature{
		Signature: sig,
		protected: &rawHeader{},
	}, nil
}

// Verify the given payload against an Ed25519 signature.
func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
	if alg != EdDSA {
		return ErrUnsupportedAlgorithm
	}
	ok := ed25519.Verify(ctx.publicKey, payload, signature)
	if !ok {
		return errors.New("go-jose/go-jose: ed25519 signature failed to verify")
	}
	return nil
}

// Sign the given payload
func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
	var expectedBitSize int
	var hash crypto.Hash

	switch alg {
	case ES256:
		expectedBitSize = 256
		hash = crypto.SHA256
	case ES384:
		expectedBitSize = 384
		hash = crypto.SHA384
	case ES512:
		expectedBitSize = 521
		hash = crypto.SHA512
	}

	curveBits := ctx.privateKey.Curve.Params().BitSize
	if expectedBitSize != curveBits {
		return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
	}

	hasher := hash.New()

	// According to documentation, Write() on hash never fails
	_, _ = hasher.Write(payload)
	hashed := hasher.Sum(nil)

	r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed)
	if err != nil {
		return Signature{}, err
	}

	keyBytes := curveBits / 8
	if curveBits%8 > 0 {
		keyBytes++
	}

	// We serialize the outputs (r and s) into big-endian byte arrays and pad
	// them with zeros on the left to make sure the sizes work out. Both arrays
	// must be keyBytes long, and the output must be 2*keyBytes long.
	rBytes := r.Bytes()
	rBytesPadded := make([]byte, keyBytes)
	copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)

	sBytes := s.Bytes()
	sBytesPadded := make([]byte, keyBytes)
	copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)

	out := append(rBytesPadded, sBytesPadded...)

	return Signature{
		Signature: out,
		protected: &rawHeader{},
	}, nil
}

// Verify the given payload
func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
	var keySize int
	var hash crypto.Hash

	switch alg {
	case ES256:
		keySize = 32
		hash = crypto.SHA256
	case ES384:
		keySize = 48
		hash = crypto.SHA384
	case ES512:
		keySize = 66
		hash = crypto.SHA512
	default:
		return ErrUnsupportedAlgorithm
	}

	if len(signature) != 2*keySize {
		return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
	}

	hasher := hash.New()

	// According to documentation, Write() on hash never fails
	_, _ = hasher.Write(payload)
	hashed := hasher.Sum(nil)

	r := big.NewInt(0).SetBytes(signature[:keySize])
	s := big.NewInt(0).SetBytes(signature[keySize:])

	match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
	if !match {
		return errors.New("go-jose/go-jose: ecdsa signature failed to verify")
	}

	return nil
}
Some files were not shown because too many files have changed in this diff.