Bump c/storage to v1.53.0, c/image to v5.30.0
As the title says. The third step in the vendor dance. Bumps c/storage to v1.53.0 and c/image to v5.30.0, all in preparation for Podman v5.0.
[NO NEW TESTS NEEDED]
Signed-off-by: tomsweeneyredhat <tsweeney@redhat.com>
This commit is contained in:
parent 27a4ef1396
commit 7ef6b56fd2
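For consumers of this repository, a quick way to confirm that a vendor bump like this one actually lands in a built binary is to read the module information embedded by the Go toolchain. A minimal sketch (not part of this commit; the program name is illustrative):

// versioncheck.go - prints the vendored c/image and c/storage versions recorded
// in the running binary's build info.
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info embedded in this binary")
		return
	}
	for _, dep := range info.Deps {
		switch dep.Path {
		case "github.com/containers/image/v5", "github.com/containers/storage":
			fmt.Printf("%s %s\n", dep.Path, dep.Version)
		}
	}
}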
go.mod
@@ -7,9 +7,9 @@ require (
 github.com/containerd/containerd v1.7.13
 github.com/containernetworking/cni v1.1.2
 github.com/containernetworking/plugins v1.4.0
-github.com/containers/image/v5 v5.29.3-0.20240227090231-5bef5e1e1506
+github.com/containers/image/v5 v5.30.0
 github.com/containers/ocicrypt v1.1.9
-github.com/containers/storage v1.52.1-0.20240227215008-a083950a778f
+github.com/containers/storage v1.53.0
 github.com/coreos/go-systemd/v22 v22.5.0
 github.com/cyphar/filepath-securejoin v0.2.4
 github.com/davecgh/go-spew v1.1.1
@@ -41,7 +41,7 @@ require (
 github.com/vishvananda/netlink v1.2.1-beta.2
 go.etcd.io/bbolt v1.3.9
 golang.org/x/crypto v0.21.0
-golang.org/x/exp v0.0.0-20240119083558-1b970713d09a
+golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
 golang.org/x/sync v0.6.0
 golang.org/x/sys v0.18.0
 golang.org/x/term v0.18.0
@@ -69,28 +69,28 @@ require (
 github.com/docker/docker-credential-helpers v0.8.1 // indirect
 github.com/docker/go-connections v0.5.0 // indirect
 github.com/felixge/httpsnoop v1.0.4 // indirect
-github.com/go-jose/go-jose/v3 v3.0.1 // indirect
+github.com/go-jose/go-jose/v3 v3.0.2 // indirect
 github.com/go-logr/logr v1.3.0 // indirect
 github.com/go-logr/stdr v1.2.2 // indirect
 github.com/go-openapi/analysis v0.21.4 // indirect
-github.com/go-openapi/errors v0.21.0 // indirect
+github.com/go-openapi/errors v0.21.1 // indirect
 github.com/go-openapi/jsonpointer v0.19.6 // indirect
 github.com/go-openapi/jsonreference v0.20.2 // indirect
 github.com/go-openapi/loads v0.21.2 // indirect
 github.com/go-openapi/runtime v0.26.0 // indirect
 github.com/go-openapi/spec v0.20.9 // indirect
-github.com/go-openapi/strfmt v0.22.0 // indirect
-github.com/go-openapi/swag v0.22.9 // indirect
+github.com/go-openapi/strfmt v0.22.2 // indirect
+github.com/go-openapi/swag v0.22.10 // indirect
 github.com/go-openapi/validate v0.22.1 // indirect
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 github.com/gogo/protobuf v1.3.2 // indirect
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 github.com/golang/protobuf v1.5.3 // indirect
 github.com/google/go-cmp v0.6.0 // indirect
-github.com/google/go-containerregistry v0.17.0 // indirect
+github.com/google/go-containerregistry v0.19.0 // indirect
 github.com/google/go-intervals v0.0.2 // indirect
 github.com/google/pprof v0.0.0-20230323073829-e72429f035bd // indirect
-github.com/google/uuid v1.5.0 // indirect
+github.com/google/uuid v1.6.0 // indirect
 github.com/gorilla/mux v1.8.1 // indirect
 github.com/hashicorp/errwrap v1.1.0 // indirect
 github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -120,7 +120,7 @@ require (
 github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
 github.com/sigstore/fulcio v1.4.3 // indirect
 github.com/sigstore/rekor v1.2.2 // indirect
-github.com/sigstore/sigstore v1.8.1 // indirect
+github.com/sigstore/sigstore v1.8.2 // indirect
 github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
 github.com/sylabs/sif/v2 v2.15.1 // indirect
 github.com/tchap/go-patricia/v2 v2.3.1 // indirect
@@ -129,17 +129,17 @@ require (
 github.com/vbatts/tar-split v0.11.5 // indirect
 github.com/vbauerster/mpb/v8 v8.7.2 // indirect
 github.com/vishvananda/netns v0.0.4 // indirect
-go.mongodb.org/mongo-driver v1.13.1 // indirect
+go.mongodb.org/mongo-driver v1.14.0 // indirect
 go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 go.opencensus.io v0.24.0 // indirect
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
 go.opentelemetry.io/otel v1.19.0 // indirect
 go.opentelemetry.io/otel/metric v1.19.0 // indirect
 go.opentelemetry.io/otel/trace v1.19.0 // indirect
-golang.org/x/mod v0.14.0 // indirect
-golang.org/x/net v0.21.0 // indirect
+golang.org/x/mod v0.15.0 // indirect
+golang.org/x/net v0.22.0 // indirect
 golang.org/x/text v0.14.0 // indirect
-golang.org/x/tools v0.17.0 // indirect
+golang.org/x/tools v0.18.0 // indirect
 google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect
 google.golang.org/grpc v1.59.0 // indirect
 google.golang.org/protobuf v1.31.0 // indirect
@ -53,14 +53,14 @@ github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl3
|
|||
github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
|
||||
github.com/containernetworking/plugins v1.4.0 h1:+w22VPYgk7nQHw7KT92lsRmuToHvb7wwSv9iTbXzzic=
|
||||
github.com/containernetworking/plugins v1.4.0/go.mod h1:UYhcOyjefnrQvKvmmyEKsUA+M9Nfn7tqULPpH0Pkcj0=
|
||||
github.com/containers/image/v5 v5.29.3-0.20240227090231-5bef5e1e1506 h1:Wdkl9fdxXYp2zaiw0GWC7fF8DSqD72B5jhILY0qK/sU=
|
||||
github.com/containers/image/v5 v5.29.3-0.20240227090231-5bef5e1e1506/go.mod h1:2/7sa5zJsx3Gl4v2MHkBrSMxsQePJcx9EDehbxmxlKE=
|
||||
github.com/containers/image/v5 v5.30.0 h1:CmHeSwI6W2kTRWnUsxATDFY5TEX4b58gPkaQcEyrLIA=
|
||||
github.com/containers/image/v5 v5.30.0/go.mod h1:gSD8MVOyqBspc0ynLsuiMR9qmt8UQ4jpVImjmK0uXfk=
|
||||
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
|
||||
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
|
||||
github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM=
|
||||
github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys=
|
||||
github.com/containers/storage v1.52.1-0.20240227215008-a083950a778f h1:1praFMNpAwJWq5usBh1T8FkXoRsHTJ4rhU/2NpNhvhI=
|
||||
github.com/containers/storage v1.52.1-0.20240227215008-a083950a778f/go.mod h1:mFA6QpUoT9qTa3q2DD1CvSo3Az3syNkw1P9X+4nUYdY=
|
||||
github.com/containers/storage v1.53.0 h1:VSES3C/u1pxjTJIXvLrSmyP7OBtDky04oGu07UvdTEA=
|
||||
github.com/containers/storage v1.53.0/go.mod h1:pujcoOSc+upx15Jirdkebhtd8uJiLwbSd/mYT6zDJK8=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
|
|
@ -99,8 +99,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
|
|||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
|
||||
github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
|
||||
github.com/go-jose/go-jose/v3 v3.0.2 h1:2Edjn8Nrb44UvTdp84KU0bBPs1cO7noRCybtS3eJEUQ=
|
||||
github.com/go-jose/go-jose/v3 v3.0.2/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
||||
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
|
|
@ -112,8 +112,8 @@ github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9Qy
|
|||
github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
|
||||
github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
|
||||
github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
|
||||
github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY=
|
||||
github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho=
|
||||
github.com/go-openapi/errors v0.21.1 h1:rVisxQPdETctjlYntm0Ek4dKf68nAQocCloCT50vWuI=
|
||||
github.com/go-openapi/errors v0.21.1/go.mod h1:LyiY9bgc7AVVh6wtVvMYEyoj3KJYNoRw92mmvnMWgj8=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||
|
|
@ -134,14 +134,14 @@ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6
|
|||
github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
|
||||
github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
|
||||
github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
|
||||
github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI=
|
||||
github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4=
|
||||
github.com/go-openapi/strfmt v0.22.2 h1:DPYOrm6gexCfZZfXUaXFS4+Jw6HAaIIG0SZ5630f8yw=
|
||||
github.com/go-openapi/strfmt v0.22.2/go.mod h1:HB/b7TCm91rno75Dembc1dFW/0FPLk5CEXsoF9ReNc4=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
|
||||
github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
|
||||
github.com/go-openapi/swag v0.22.10 h1:4y86NVn7Z2yYd6pfS4Z+Nyh3aAUL3Nul+LMbhFKy0gA=
|
||||
github.com/go-openapi/swag v0.22.10/go.mod h1:Cnn8BYtRlx6BNE3DPN86f/xkapGIcLWzh3CLEb4C1jI=
|
||||
github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
|
||||
github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
|
|
@ -209,8 +209,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-containerregistry v0.17.0 h1:5p+zYs/R4VGHkhyvgWurWrpJ2hW4Vv9fQI+GzdcwXLk=
|
||||
github.com/google/go-containerregistry v0.17.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
|
||||
github.com/google/go-containerregistry v0.19.0 h1:uIsMRBV7m/HDkDxE/nXMnv1q+lOOSPlQ/ywc5JbB8Ic=
|
||||
github.com/google/go-containerregistry v0.19.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
|
||||
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
|
||||
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
|
|
@ -220,8 +220,8 @@ github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHa
|
|||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
|
||||
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
||||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk=
|
||||
|
|
@ -367,8 +367,8 @@ github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ
|
|||
github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og=
|
||||
github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
|
||||
github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
|
||||
github.com/sigstore/sigstore v1.8.1 h1:mAVposMb14oplk2h/bayPmIVdzbq2IhCgy4g6R0ZSjo=
|
||||
github.com/sigstore/sigstore v1.8.1/go.mod h1:02SL1158BSj15bZyOFz7m+/nJzLZfFd9A8ab3Kz7w/E=
|
||||
github.com/sigstore/sigstore v1.8.2 h1:0Ttjcn3V0fVQXlYq7+oHaaHkGFIt3ywm7SF4JTU/l8c=
|
||||
github.com/sigstore/sigstore v1.8.2/go.mod h1:CHVcSyknCcjI4K2ZhS1SI28r0tcQyBlwtALG536x1DY=
|
||||
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
|
|
@ -419,10 +419,8 @@ github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZla
|
|||
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
||||
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
|
||||
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
|
||||
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
|
||||
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
|
||||
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
|
||||
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
|
||||
|
|
@ -435,8 +433,8 @@ go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
|
|||
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
|
||||
go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
|
||||
go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
|
||||
go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk=
|
||||
go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo=
|
||||
go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
|
||||
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
|
||||
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
|
||||
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
|
|
@ -456,26 +454,27 @@ go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lI
|
|||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
|
||||
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
|
||||
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
|
||||
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
|
||||
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
|
@ -493,8 +492,10 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT
|
|||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
|
||||
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
|
@ -505,6 +506,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
|
@ -536,11 +538,17 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
|
@ -548,9 +556,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
|
|
@ -568,8 +576,9 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
|
|||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
|
||||
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
|
||||
golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
|
|
|||
|
|
@@ -23,9 +23,9 @@ var (
 // compressionBufferSize is the buffer size used to compress a blob
 compressionBufferSize = 1048576

-// expectedCompressionFormats is used to check if a blob with a specified media type is compressed
+// expectedBaseCompressionFormats is used to check if a blob with a specified media type is compressed
 // using the algorithm that the media type says it should be compressed with
-expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
+expectedBaseCompressionFormats = map[string]*compressiontypes.Algorithm{
 imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip,
 imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd,
 manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
@@ -62,8 +62,8 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
 res.srcCompressorName = internalblobinfocache.Uncompressed
 }

-if expectedFormat, known := expectedCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.Name() != expectedFormat.Name() {
-logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedFormat.Name(), format.Name())
+if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() {
+logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedBaseFormat.Name(), format.Name())
 }
 return res, nil
 }
@@ -172,7 +172,8 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
 // bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so.
 func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
 if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed &&
-ic.compressionFormat != nil && ic.compressionFormat.Name() != detected.format.Name() {
+ic.compressionFormat != nil &&
+(ic.compressionFormat.Name() != detected.format.Name() && ic.compressionFormat.Name() != detected.format.BaseVariantName()) {
 // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
 // re-compressed using the desired format.
 logrus.Debugf("Blob will be converted")
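The BaseVariantName comparisons above treat a zstd:chunked blob as acceptable zstd data, so it no longer triggers a recompression or a "wrong compressor" warning. A minimal, self-contained sketch of the same idea (hypothetical names, not the c/image implementation):

package main

import "fmt"

// algo stands in for c/image's compression Algorithm type: a variant such as
// "zstd:chunked" reports "zstd" as its base variant, plain algorithms report themselves.
type algo struct {
	name        string
	baseVariant string
}

func (a algo) Name() string            { return a.name }
func (a algo) BaseVariantName() string { return a.baseVariant }

// matchesExpected mirrors the relaxed check: a detected compressor is acceptable
// when its base variant equals the format the media type promises.
func matchesExpected(detected, expected algo) bool {
	return detected.BaseVariantName() == expected.Name()
}

func main() {
	zstd := algo{name: "zstd", baseVariant: "zstd"}
	zstdChunked := algo{name: "zstd:chunked", baseVariant: "zstd"}
	gzip := algo{name: "gzip", baseVariant: "gzip"}

	fmt.Println(matchesExpected(zstdChunked, zstd)) // true: chunked data is valid zstd
	fmt.Println(matchesExpected(gzip, zstd))        // false: would trigger the debug warning
}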
@@ -23,11 +23,11 @@ type digestingReader struct {
 func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
 var digester digest.Digester
 if err := expectedDigest.Validate(); err != nil {
-return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest)
+return nil, fmt.Errorf("invalid digest specification %q: %w", expectedDigest, err)
 }
 digestAlgorithm := expectedDigest.Algorithm()
 if !digestAlgorithm.Available() {
-return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
+return nil, fmt.Errorf("invalid digest specification %q: unsupported digest algorithm %q", expectedDigest, digestAlgorithm)
 }
 digester = digestAlgorithm.Digester()

@@ -383,7 +383,11 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context,

 compressionAlgos := set.New[string]()
 for _, srcInfo := range ic.src.LayerInfos() {
-if _, c := compressionEditsFromMIMEType(srcInfo); c != nil {
+_, c, err := compressionEditsFromBlobInfo(srcInfo)
+if err != nil {
+return nil, err
+}
+if c != nil {
 compressionAlgos.Add(c.Name())
 }
 }
@@ -636,21 +640,28 @@ type diffIDResult struct {
 err error
 }

-// compressionEditsFromMIMEType returns a (CompressionOperation, CompressionAlgorithm) value pair suitable
-// for types.BlobInfo, based on a MIME type of srcInfo.
-func compressionEditsFromMIMEType(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm) {
+// compressionEditsFromBlobInfo returns a (CompressionOperation, CompressionAlgorithm) value pair suitable
+// for types.BlobInfo.
+func compressionEditsFromBlobInfo(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm, error) {
 // This MIME type → compression mapping belongs in manifest-specific code in our manifest
 // package (but we should preferably replace/change UpdatedImage instead of productizing
 // this workaround).
 switch srcInfo.MediaType {
 case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
-return types.PreserveOriginal, &compression.Gzip
+return types.PreserveOriginal, &compression.Gzip, nil
 case imgspecv1.MediaTypeImageLayerZstd:
-return types.PreserveOriginal, &compression.Zstd
+tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
+if err != nil {
+return types.PreserveOriginal, nil, err
+}
+if tocDigest != nil {
+return types.PreserveOriginal, &compression.ZstdChunked, nil
+}
+return types.PreserveOriginal, &compression.Zstd, nil
 case manifest.DockerV2SchemaLayerMediaTypeUncompressed, imgspecv1.MediaTypeImageLayer:
-return types.Decompress, nil
+return types.Decompress, nil, nil
 default:
-return types.PreserveOriginal, nil
+return types.PreserveOriginal, nil, nil
 }
 }

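The zstd case above now consults the layer's annotations: a plain zstd blob and a zstd:chunked blob share the same media type and differ only in whether a chunked-TOC digest annotation is present. A rough standalone illustration of that decision; the annotation key below is an assumption made for this sketch, the real lookup is delegated to chunkedToc.GetTOCDigest:

package main

import "fmt"

// tocAnnotationKey is assumed here for illustration only; c/image does not hard-code
// the key itself but asks the chunked-TOC helper in containers/storage.
const tocAnnotationKey = "io.github.containers.zstd-chunked.manifest-checksum"

// algorithmForZstdLayer picks "zstd:chunked" only when the blob's annotations
// carry a TOC digest, otherwise plain "zstd".
func algorithmForZstdLayer(annotations map[string]string) string {
	if _, ok := annotations[tocAnnotationKey]; ok {
		return "zstd:chunked"
	}
	return "zstd"
}

func main() {
	plain := map[string]string{}
	chunked := map[string]string{tocAnnotationKey: "sha256:..."}
	fmt.Println(algorithmForZstdLayer(plain))   // zstd
	fmt.Println(algorithmForZstdLayer(chunked)) // zstd:chunked
}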
@@ -666,7 +677,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 // (Sadly UpdatedImage() is documented to not update MediaTypes from
 // ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
 if srcInfo.CompressionOperation == types.PreserveOriginal && srcInfo.CompressionAlgorithm == nil {
-srcInfo.CompressionOperation, srcInfo.CompressionAlgorithm = compressionEditsFromMIMEType(srcInfo)
+op, algo, err := compressionEditsFromBlobInfo(srcInfo)
+if err != nil {
+return types.BlobInfo{}, "", err
+}
+srcInfo.CompressionOperation = op
+srcInfo.CompressionAlgorithm = algo
 }

 ic.c.printCopyInfo("blob", srcInfo)
@@ -978,13 +978,10 @@ func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, t
 // This function can return nil reader when no url is supported by this function. In this case, the caller
 // should fallback to fetch the non-external blob (i.e. pull from the registry).
 func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
-var (
-resp *http.Response
-err error
-)
 if len(urls) == 0 {
 return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
 }
+var remoteErrors []error
 for _, u := range urls {
 blobURL, err := url.Parse(u)
 if err != nil || (blobURL.Scheme != "http" && blobURL.Scheme != "https") {
@@ -993,24 +990,28 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R
 // NOTE: we must not authenticate on additional URLs as those
 // can be abused to leak credentials or tokens. Please
 // refer to CVE-2020-15157 for more information.
-resp, err = c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil)
-if err == nil {
+resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil)
+if err != nil {
+remoteErrors = append(remoteErrors, err)
+continue
+}
 if resp.StatusCode != http.StatusOK {
-err = fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
+err := fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
+remoteErrors = append(remoteErrors, err)
 logrus.Debug(err)
 resp.Body.Close()
 continue
 }
-break
+return resp.Body, getBlobSize(resp), nil
 }
 }
-if resp == nil && err == nil {
+if remoteErrors == nil {
 return nil, 0, nil // fallback to non-external blob
 }
-if err != nil {
-return nil, 0, err
+err := fmt.Errorf("failed fetching external blob from all urls: %w", remoteErrors[0])
+for _, e := range remoteErrors[1:] {
+err = fmt.Errorf("%s, %w", err, e)
 }
-return resp.Body, getBlobSize(resp), nil
+return nil, 0, err
 }

 func getBlobSize(resp *http.Response) int64 {
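The rewritten loop above returns the first successful response immediately and only builds an aggregate error once every URL has failed. A small standalone sketch of the same error-collection pattern (hypothetical helper, not the c/image code), using errors.Join from the standard library as an alternative to the manual fmt chaining shown in the diff:

package main

import (
	"errors"
	"fmt"
)

// fetchFirst tries each source in order, collects the per-source failures, and
// only reports them if no source succeeds.
func fetchFirst(sources []string, fetch func(string) (string, error)) (string, error) {
	var remoteErrors []error
	for _, s := range sources {
		body, err := fetch(s)
		if err != nil {
			remoteErrors = append(remoteErrors, err)
			continue
		}
		return body, nil // first success wins
	}
	if remoteErrors == nil {
		return "", nil // nothing was attempted; the caller falls back
	}
	return "", fmt.Errorf("failed fetching from all urls: %w", errors.Join(remoteErrors...))
}

func main() {
	fail := func(s string) (string, error) { return "", fmt.Errorf("fetch %s: unreachable", s) }
	_, err := fetchFirst([]string{"https://a.example", "https://b.example"}, fail)
	fmt.Println(err)
}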
@@ -17,7 +17,8 @@ func CandidateMatchesTryReusingBlobOptions(options private.TryReusingBlobOptions
 // The caller must re-compress to build those annotations.
 return false
 }
-if candidateCompression == nil || (options.RequiredCompression.Name() != candidateCompression.Name()) {
+if candidateCompression == nil ||
+(options.RequiredCompression.Name() != candidateCompression.Name() && options.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
 return false
 }
 }
@@ -169,7 +169,8 @@ func NormalizedMIMEType(input string) string {

 // CompressionAlgorithmIsUniversallySupported returns true if MIMETypeSupportsCompressionAlgorithm(mimeType, algo) returns true for all mimeType values.
 func CompressionAlgorithmIsUniversallySupported(algo compressiontypes.Algorithm) bool {
-switch algo.Name() { // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ?
+// Compare the discussion about BaseVariantName in MIMETypeSupportsCompressionAlgorithm().
+switch algo.Name() {
 case compressiontypes.GzipAlgorithmName:
 return true
 default:
@@ -182,7 +183,9 @@ func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes
 if CompressionAlgorithmIsUniversallySupported(algo) {
 return true
 }
-switch algo.Name() { // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ?
+// This does not use BaseVariantName: Plausibly a manifest format might support zstd but not have annotation fields.
+// The logic might have to be more complex (and more ad-hoc) if more manifest formats, with more capabilities, emerge.
+switch algo.Name() {
 case compressiontypes.ZstdAlgorithmName, compressiontypes.ZstdChunkedAlgorithmName:
 return mimeType == imgspecv1.MediaTypeImageManifest
 default: // Includes Bzip2AlgorithmName and XzAlgorithmName, which are defined names but are not supported anywhere
@@ -103,8 +103,8 @@ func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, an
 *annotationsMap = map[string]string{}
 }
 for _, algo := range compressionAlgorithms {
-switch algo.Name() {
-case compression.ZstdAlgorithmName, compression.ZstdChunkedAlgorithmName: // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ?
+switch algo.BaseVariantName() {
+case compression.ZstdAlgorithmName:
 (*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue
 default:
 continue
@@ -55,7 +55,7 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
 if variants != nil {
 name := mtsUncompressed
 if algorithm != nil {
-name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark()
+name = algorithm.BaseVariantName()
 }
 if res, ok := variants[name]; ok {
 if res != mtsUnsupportedMIMEType {
@@ -19,19 +19,19 @@ type Algorithm = types.Algorithm

 var (
 // Gzip compression.
-Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, types.GzipAlgorithmName,
+Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, "",
 []byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
 // Bzip2 compression.
-Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, types.Bzip2AlgorithmName,
+Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, "",
 []byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
 // Xz compression.
-Xz = internal.NewAlgorithm(types.XzAlgorithmName, types.XzAlgorithmName,
+Xz = internal.NewAlgorithm(types.XzAlgorithmName, "",
 []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
 // Zstd compression.
-Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName,
+Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, "",
 []byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
-// ZstdChunked is a Zstd compression with chunk metadta which allows random access to individual files.
-ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */
+// ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files.
+ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName,
 nil, ZstdDecompressor, compressor.ZstdCompressor)

 compressionAlgorithms = map[string]Algorithm{
common/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go (generated, vendored; 20 changed lines)
@@ -13,19 +13,24 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
 // Algorithm is a compression algorithm that can be used for CompressStream.
 type Algorithm struct {
 name string
-mime string
+baseVariantName string
 prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection.
 decompressor DecompressorFunc
 compressor CompressorFunc
 }

 // NewAlgorithm creates an Algorithm instance.
+// nontrivialBaseVariantName is typically "".
 // This function exists so that Algorithm instances can only be created by code that
 // is allowed to import this internal subpackage.
-func NewAlgorithm(name, mime string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
+func NewAlgorithm(name, nontrivialBaseVariantName string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
+baseVariantName := name
+if nontrivialBaseVariantName != "" {
+baseVariantName = nontrivialBaseVariantName
+}
 return Algorithm{
 name: name,
-mime: mime,
+baseVariantName: baseVariantName,
 prefix: prefix,
 decompressor: decompressor,
 compressor: compressor,
@@ -37,10 +42,11 @@ func (c Algorithm) Name() string {
 return c.name
 }

-// InternalUnstableUndocumentedMIMEQuestionMark ???
-// DO NOT USE THIS anywhere outside of c/image until it is properly documented.
-func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string {
-return c.mime
+// BaseVariantName returns the name of the “base variant” of the compression algorithm.
+// It is either equal to Name() of the same algorithm, or equal to Name() of some other Algorithm (the “base variant”).
+// This supports a single level of “is-a” relationship between compression algorithms, e.g. where "zstd:chunked" data is valid "zstd" data.
+func (c Algorithm) BaseVariantName() string {
+return c.baseVariantName
 }

 // AlgorithmCompressor returns the compressor field of algo.
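The new constructor defaults the base variant to the algorithm's own name when no nontrivial base variant is supplied. A trimmed-down sketch of just that defaulting rule (illustrative only; the real constructor also carries the prefix, compressor, and decompressor):

package main

import "fmt"

// newAlgorithmNames mirrors the defaulting introduced above: an empty
// nontrivialBaseVariantName means the algorithm is its own base variant.
func newAlgorithmNames(name, nontrivialBaseVariantName string) (string, string) {
	baseVariantName := name
	if nontrivialBaseVariantName != "" {
		baseVariantName = nontrivialBaseVariantName
	}
	return name, baseVariantName
}

func main() {
	fmt.Println(newAlgorithmNames("gzip", ""))              // gzip gzip
	fmt.Println(newAlgorithmNames("zstd:chunked", "zstd")) // zstd:chunked zstd
}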
@@ -11,7 +11,7 @@ const (
 VersionPatch = 0

 // VersionDev indicates development branch. Releases will be empty string.
-VersionDev = "-dev"
+VersionDev = ""
 )

 // Version is the specification version that the package types support.

@@ -1 +1 @@
-1.52.1-dev
+1.53.0
@ -310,16 +310,6 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
|
|||
// If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error.
|
||||
// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned.
|
||||
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
|
||||
// If custom --imagestore is selected never
|
||||
// ditch the original graphRoot, instead add it as
|
||||
// additionalImageStore so its images can still be
|
||||
// read and used.
|
||||
if options.ImageStore != "" {
|
||||
graphRootAsAdditionalStore := fmt.Sprintf("AdditionalImageStore=%s", options.ImageStore)
|
||||
options.DriverOptions = append(options.DriverOptions, graphRootAsAdditionalStore)
|
||||
// complete base name with driver name included
|
||||
options.ImageStore = filepath.Join(options.ImageStore, "overlay")
|
||||
}
|
||||
opts, err := parseOptions(options.DriverOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -863,22 +853,15 @@ func (d *Driver) Status() [][2]string {
|
|||
// Metadata returns meta data about the overlay driver such as
|
||||
// LowerDir, UpperDir, WorkDir and MergeDir used to store data.
|
||||
func (d *Driver) Metadata(id string) (map[string]string, error) {
|
||||
dir, imagestore, _ := d.dir2(id)
|
||||
dir := d.dir(id)
|
||||
if _, err := os.Stat(dir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
workDirBase := dir
|
||||
if imagestore != "" {
|
||||
if _, err := os.Stat(dir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
workDirBase = imagestore
|
||||
}
|
||||
|
||||
metadata := map[string]string{
|
||||
"WorkDir": path.Join(workDirBase, "work"),
|
||||
"MergedDir": path.Join(workDirBase, "merged"),
|
||||
"UpperDir": path.Join(workDirBase, "diff"),
|
||||
"WorkDir": path.Join(dir, "work"),
|
||||
"MergedDir": path.Join(dir, "merged"),
|
||||
"UpperDir": path.Join(dir, "diff"),
|
||||
}
|
||||
|
||||
lowerDirs, err := d.getLowerDirs(id)
|
||||
|
|
@ -896,7 +879,7 @@ func (d *Driver) Metadata(id string) (map[string]string, error) {
|
|||
// is being shutdown. For now, we just have to unmount the bind mounted
|
||||
// we had created.
|
||||
func (d *Driver) Cleanup() error {
|
||||
_ = os.RemoveAll(d.getStagingDir())
|
||||
_ = os.RemoveAll(filepath.Join(d.home, stagingDir))
|
||||
return mount.Unmount(d.home)
|
||||
}
|
||||
|
||||
|
|
@ -992,8 +975,10 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
|
|||
return d.create(id, parent, opts, true)
|
||||
}
|
||||
|
||||
func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) {
|
||||
dir, imageStore, _ := d.dir2(id)
|
||||
func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) {
|
||||
dir, homedir, _ := d.dir2(id, readOnly)
|
||||
|
||||
disableQuota := readOnly
|
||||
|
||||
uidMaps := d.uidMaps
|
||||
gidMaps := d.gidMaps
|
||||
|
|
@ -1004,7 +989,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
|||
}
|
||||
|
||||
// Make the link directory if it does not exist
|
||||
if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil {
|
||||
if err := idtools.MkdirAllAs(path.Join(homedir, linkDir), 0o755, 0, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -1021,20 +1006,8 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
|||
if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil {
|
||||
return err
|
||||
}
|
||||
workDirBase := dir
|
||||
if imageStore != "" {
|
||||
workDirBase = imageStore
|
||||
if err := idtools.MkdirAllAndChownNew(path.Dir(imageStore), 0o755, idPair); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if parent != "" {
|
||||
parentBase, parentImageStore, inAdditionalStore := d.dir2(parent)
|
||||
// If parentBase path is additional image store, select the image contained in parentBase.
|
||||
// See https://github.com/containers/podman/issues/19748
|
||||
if parentImageStore != "" && !inAdditionalStore {
|
||||
parentBase = parentImageStore
|
||||
}
|
||||
parentBase := d.dir(parent)
|
||||
st, err := system.Stat(filepath.Join(parentBase, "diff"))
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -1055,11 +1028,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
|||
if err := idtools.MkdirAllAndChownNew(dir, 0o700, idPair); err != nil {
|
||||
return err
|
||||
}
|
||||
if imageStore != "" {
|
||||
if err := idtools.MkdirAllAndChownNew(imageStore, 0o700, idPair); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// Clean up on failure
|
||||
|
|
@ -1067,11 +1035,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
|||
if err2 := os.RemoveAll(dir); err2 != nil {
|
||||
logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2)
|
||||
}
|
||||
if imageStore != "" {
|
||||
if err2 := os.RemoveAll(workDirBase); err2 != nil {
|
||||
logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", workDirBase, err2)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
|
|
@ -1094,11 +1057,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
|||
if err := d.quotaCtl.SetQuota(dir, quota); err != nil {
|
||||
return err
|
||||
}
|
||||
if imageStore != "" {
|
||||
if err := d.quotaCtl.SetQuota(imageStore, quota); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
perms := defaultPerms
|
||||
|
|
@ -1107,12 +1065,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
|||
}
|
||||
|
||||
if parent != "" {
|
||||
parentBase, parentImageStore, inAdditionalStore := d.dir2(parent)
|
||||
// If parentBase path is additional image store, select the image contained in parentBase.
|
||||
// See https://github.com/containers/podman/issues/19748
|
||||
if parentImageStore != "" && !inAdditionalStore {
|
||||
parentBase = parentImageStore
|
||||
}
|
||||
parentBase := d.dir(parent)
|
||||
st, err := system.Stat(filepath.Join(parentBase, "diff"))
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -1120,17 +1073,14 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
|||
perms = os.FileMode(st.Mode())
|
||||
}
|
||||
|
||||
if err := idtools.MkdirAs(path.Join(workDirBase, "diff"), perms, rootUID, rootGID); err != nil {
|
||||
if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lid := generateID(idLength)
|
||||
|
||||
linkBase := path.Join("..", id, "diff")
|
||||
if imageStore != "" {
|
||||
linkBase = path.Join(imageStore, "diff")
|
||||
}
|
||||
if err := os.Symlink(linkBase, path.Join(d.home, linkDir, lid)); err != nil {
|
||||
if err := os.Symlink(linkBase, path.Join(homedir, linkDir, lid)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -1139,10 +1089,10 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
|||
return err
|
||||
}
|
||||
|
||||
if err := idtools.MkdirAs(path.Join(workDirBase, "work"), 0o700, rootUID, rootGID); err != nil {
|
||||
if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, rootUID, rootGID); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := idtools.MkdirAs(path.Join(workDirBase, "merged"), 0o700, rootUID, rootGID); err != nil {
|
||||
if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -1224,26 +1174,39 @@ func (d *Driver) getLower(parent string) (string, error) {
|
|||
}
|
||||
|
||||
func (d *Driver) dir(id string) string {
|
||||
p, _, _ := d.dir2(id)
|
||||
p, _, _ := d.dir2(id, false)
|
||||
return p
|
||||
}
|
||||
|
||||
func (d *Driver) dir2(id string) (string, string, bool) {
|
||||
newpath := path.Join(d.home, id)
|
||||
imageStore := ""
|
||||
func (d *Driver) getAllImageStores() []string {
|
||||
additionalImageStores := d.AdditionalImageStores()
|
||||
if d.imageStore != "" {
|
||||
imageStore = path.Join(d.imageStore, id)
|
||||
additionalImageStores = append([]string{d.imageStore}, additionalImageStores...)
|
||||
}
|
||||
return additionalImageStores
|
||||
}
|
||||
|
||||
func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) {
|
||||
var homedir string
|
||||
|
||||
if useImageStore && d.imageStore != "" {
|
||||
homedir = path.Join(d.imageStore, d.name)
|
||||
} else {
|
||||
homedir = d.home
|
||||
}
|
||||
|
||||
newpath := path.Join(homedir, id)
|
||||
|
||||
if _, err := os.Stat(newpath); err != nil {
|
||||
for _, p := range d.AdditionalImageStores() {
|
||||
for _, p := range d.getAllImageStores() {
|
||||
l := path.Join(p, d.name, id)
|
||||
_, err = os.Stat(l)
|
||||
if err == nil {
|
||||
return l, imageStore, true
|
||||
return l, homedir, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return newpath, imageStore, false
|
||||
return newpath, homedir, false
|
||||
}
|
||||
|
||||
func (d *Driver) getLowerDirs(id string) ([]string, error) {
|
||||
|
|
@ -1453,14 +1416,11 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
|
|||
}
|
||||
|
||||
func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
|
||||
dir, imageStore, inAdditionalStore := d.dir2(id)
|
||||
dir, _, inAdditionalStore := d.dir2(id, false)
|
||||
if _, err := os.Stat(dir); err != nil {
|
||||
return "", err
|
||||
}
|
||||
workDirBase := dir
|
||||
if imageStore != "" {
|
||||
workDirBase = imageStore
|
||||
}
|
||||
|
||||
readWrite := !inAdditionalStore
|
||||
|
||||
if !d.SupportsShifting() || options.DisableShifting {
|
||||
|
|
@ -1565,7 +1525,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
|
|||
}()
|
||||
|
||||
composeFsLayers := []string{}
|
||||
composeFsLayersDir := filepath.Join(workDirBase, "composefs-layers")
|
||||
composeFsLayersDir := filepath.Join(dir, "composefs-layers")
|
||||
maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) {
|
||||
composefsBlob := d.getComposefsData(lowerID)
|
||||
_, err = os.Stat(composefsBlob)
|
||||
|
|
@ -1599,7 +1559,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
|
|||
return dest, nil
|
||||
}
|
||||
|
||||
diffDir := path.Join(workDirBase, "diff")
|
||||
diffDir := path.Join(dir, "diff")
|
||||
|
||||
if dest, err := maybeAddComposefsMount(id, 0, readWrite); err != nil {
|
||||
return "", err
|
||||
|
|
@ -1617,7 +1577,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
|
|||
lower := ""
|
||||
newpath := path.Join(d.home, l)
|
||||
if st, err := os.Stat(newpath); err != nil {
|
||||
for _, p := range d.AdditionalImageStores() {
|
||||
for _, p := range d.getAllImageStores() {
|
||||
lower = path.Join(p, d.name, l)
|
||||
if st2, err2 := os.Stat(lower); err2 == nil {
|
||||
if !permsKnown {
|
||||
|
|
@ -1685,16 +1645,16 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
|
|||
optsList = append(optsList, "metacopy=on", "redirect_dir=on")
|
||||
}
|
||||
|
||||
if len(absLowers) == 0 {
|
||||
absLowers = append(absLowers, path.Join(dir, "empty"))
|
||||
}
|
||||
|
||||
// user namespace requires this to move a directory from lower to upper.
|
||||
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(absLowers) == 0 {
|
||||
absLowers = append(absLowers, path.Join(dir, "empty"))
|
||||
}
|
||||
|
||||
if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil {
|
||||
if !inAdditionalStore {
|
||||
return "", err
|
||||
|
|
@ -1705,7 +1665,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
|
|||
}
|
||||
}
|
||||
|
||||
mergedDir := path.Join(workDirBase, "merged")
|
||||
mergedDir := path.Join(dir, "merged")
|
||||
// Create the driver merged dir
|
||||
if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) {
|
||||
return "", err
|
||||
}
|
||||
if count := d.ctr.Increment(mergedDir); count > 1 {
|
||||
return mergedDir, nil
|
||||
}
|
||||
|
|
@ -1719,7 +1683,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
|
|||
}
|
||||
}()
|
||||
|
||||
workdir := path.Join(workDirBase, "work")
|
||||
workdir := path.Join(dir, "work")
|
||||
|
||||
if d.options.mountProgram == "" && unshare.IsRootless() {
|
||||
optsList = append(optsList, "userxattr")
|
||||
|
|
@ -1869,7 +1833,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
|
|||
|
||||
// Put unmounts the mount path created for the give id.
|
||||
func (d *Driver) Put(id string) error {
|
||||
dir, _, inAdditionalStore := d.dir2(id)
|
||||
dir, _, inAdditionalStore := d.dir2(id, false)
|
||||
if _, err := os.Stat(dir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -2038,8 +2002,9 @@ func (g *overlayFileGetter) Close() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d *Driver) getStagingDir() string {
|
||||
return filepath.Join(d.home, stagingDir)
|
||||
func (d *Driver) getStagingDir(id string) string {
|
||||
_, homedir, _ := d.dir2(id, d.imageStore != "")
|
||||
return filepath.Join(homedir, stagingDir)
|
||||
}
|
||||
|
||||
// DiffGetter returns a FileGetCloser that can read files from the directory that
|
||||
|
|
@ -2096,11 +2061,12 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
|
|||
var applyDir string
|
||||
|
||||
if id == "" {
|
||||
err := os.MkdirAll(d.getStagingDir(), 0o700)
|
||||
stagingDir := d.getStagingDir(id)
|
||||
err := os.MkdirAll(stagingDir, 0o700)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
return graphdriver.DriverWithDifferOutput{}, err
|
||||
}
|
||||
applyDir, err = os.MkdirTemp(d.getStagingDir(), "")
|
||||
applyDir, err = os.MkdirTemp(stagingDir, "")
|
||||
if err != nil {
|
||||
return graphdriver.DriverWithDifferOutput{}, err
|
||||
}
|
||||
|
|
@ -2144,7 +2110,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
|
|||
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
|
||||
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
|
||||
stagingDirectory := diffOutput.Target
|
||||
if filepath.Dir(stagingDirectory) != d.getStagingDir() {
|
||||
if filepath.Dir(stagingDirectory) != d.getStagingDir(id) {
|
||||
return fmt.Errorf("%q is not a staging directory", stagingDirectory)
|
||||
}
|
||||
diffPath, err := d.getDiffPath(id)
|
||||
|
|
@ -2230,12 +2196,8 @@ func (d *Driver) getComposefsData(id string) string {
|
|||
}
|
||||
|
||||
func (d *Driver) getDiffPath(id string) (string, error) {
|
||||
dir, imagestore, _ := d.dir2(id)
|
||||
base := dir
|
||||
if imagestore != "" {
|
||||
base = imagestore
|
||||
}
|
||||
return redirectDiffIfAdditionalLayer(path.Join(base, "diff"))
|
||||
dir := d.dir(id)
|
||||
return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"))
|
||||
}
|
||||
|
||||
func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
|
||||
|
|
@ -2326,12 +2288,8 @@ func (d *Driver) AdditionalImageStores() []string {
|
|||
// by toContainer to those specified by toHost.
|
||||
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
|
||||
var err error
|
||||
dir, imagestore, _ := d.dir2(id)
|
||||
base := dir
|
||||
if imagestore != "" {
|
||||
base = imagestore
|
||||
}
|
||||
diffDir := filepath.Join(base, "diff")
|
||||
dir := d.dir(id)
|
||||
diffDir := filepath.Join(dir, "diff")
|
||||
|
||||
rootUID, rootGID := 0, 0
|
||||
if toHost != nil {
|
||||
|
|
|
|||
|
|
@ -31,8 +31,9 @@ func init() {
|
|||
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
|
||||
d := &Driver{
|
||||
name: "vfs",
|
||||
homes: []string{home},
|
||||
home: home,
|
||||
idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
|
||||
imageStore: options.ImageStore,
|
||||
}
|
||||
|
||||
rootIDs := d.idMappings.RootPair()
|
||||
|
|
@ -47,7 +48,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
|
|||
key = strings.ToLower(key)
|
||||
switch key {
|
||||
case "vfs.imagestore", ".imagestore":
|
||||
d.homes = append(d.homes, strings.Split(val, ",")...)
|
||||
d.additionalHomes = append(d.additionalHomes, strings.Split(val, ",")...)
|
||||
continue
|
||||
case "vfs.mountopt":
|
||||
return nil, fmt.Errorf("vfs driver does not support mount options")
|
||||
|
|
@ -62,12 +63,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
|
|||
return nil, fmt.Errorf("vfs driver does not support %s options", key)
|
||||
}
|
||||
}
|
||||
// If --imagestore is provided, lets add writable graphRoot
|
||||
// to vfs's additional image store, as it is done for
|
||||
// `overlay` driver.
|
||||
if options.ImageStore != "" {
|
||||
d.homes = append(d.homes, options.ImageStore)
|
||||
}
|
||||
|
||||
d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d)
|
||||
d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater)
|
||||
|
||||
|
|
@ -80,11 +76,13 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
|
|||
// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver
|
||||
type Driver struct {
|
||||
name string
|
||||
homes []string
|
||||
home string
|
||||
additionalHomes []string
|
||||
idMappings *idtools.IDMappings
|
||||
ignoreChownErrors bool
|
||||
naiveDiff graphdriver.DiffDriver
|
||||
updater graphdriver.LayerIDMapUpdater
|
||||
imageStore string
|
||||
}
|
||||
|
||||
func (d *Driver) String() string {
|
||||
|
|
@ -158,7 +156,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
|
|||
idMappings = opts.IDMappings
|
||||
}
|
||||
|
||||
dir := d.dir(id)
|
||||
dir := d.dir2(id, ro)
|
||||
rootIDs := idMappings.RootPair()
|
||||
if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil {
|
||||
return err
|
||||
|
|
@ -204,18 +202,32 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d *Driver) dir(id string) string {
|
||||
for i, home := range d.homes {
|
||||
if i > 0 {
|
||||
home = filepath.Join(home, d.String())
|
||||
func (d *Driver) dir2(id string, useImageStore bool) string {
|
||||
var homedir string
|
||||
|
||||
if useImageStore && d.imageStore != "" {
|
||||
homedir = filepath.Join(d.imageStore, d.String(), "dir", filepath.Base(id))
|
||||
} else {
|
||||
homedir = filepath.Join(d.home, "dir", filepath.Base(id))
|
||||
}
|
||||
candidate := filepath.Join(home, "dir", filepath.Base(id))
|
||||
if _, err := os.Stat(homedir); err != nil {
|
||||
additionalHomes := d.additionalHomes[:]
|
||||
if d.imageStore != "" {
|
||||
additionalHomes = append(additionalHomes, d.imageStore)
|
||||
}
|
||||
for _, home := range additionalHomes {
|
||||
candidate := filepath.Join(home, d.String(), "dir", filepath.Base(id))
|
||||
fi, err := os.Stat(candidate)
|
||||
if err == nil && fi.IsDir() {
|
||||
return candidate
|
||||
}
|
||||
}
|
||||
return filepath.Join(d.homes[0], "dir", filepath.Base(id))
|
||||
}
|
||||
return homedir
|
||||
}
|
||||
|
||||
func (d *Driver) dir(id string) string {
|
||||
return d.dir2(id, false)
|
||||
}
|
||||
|
||||
// Remove deletes the content from the directory for a given id.
|
||||
|
|
@ -263,7 +275,7 @@ func (d *Driver) Exists(id string) bool {
|
|||
|
||||
// List layers (not including additional image stores)
|
||||
func (d *Driver) ListLayers() ([]string, error) {
|
||||
entries, err := os.ReadDir(filepath.Join(d.homes[0], "dir"))
|
||||
entries, err := os.ReadDir(filepath.Join(d.home, "dir"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -285,8 +297,8 @@ func (d *Driver) ListLayers() ([]string, error) {
|
|||
|
||||
// AdditionalImageStores returns additional image stores supported by the driver
|
||||
func (d *Driver) AdditionalImageStores() []string {
|
||||
if len(d.homes) > 1 {
|
||||
return d.homes[1:]
|
||||
if len(d.additionalHomes) > 0 {
|
||||
return d.additionalHomes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -334,10 +334,71 @@ type rwLayerStore interface {
|
|||
GarbageCollect() error
|
||||
}
|
||||
|
||||
type multipleLockFile struct {
|
||||
lockfiles []*lockfile.LockFile
|
||||
}
|
||||
|
||||
func (l multipleLockFile) Lock() {
|
||||
for _, lock := range l.lockfiles {
|
||||
lock.Lock()
|
||||
}
|
||||
}
|
||||
|
||||
func (l multipleLockFile) RLock() {
|
||||
for _, lock := range l.lockfiles {
|
||||
lock.RLock()
|
||||
}
|
||||
}
|
||||
|
||||
func (l multipleLockFile) Unlock() {
|
||||
for _, lock := range l.lockfiles {
|
||||
lock.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (l multipleLockFile) ModifiedSince(lastWrite lockfile.LastWrite) (lockfile.LastWrite, bool, error) {
|
||||
// Look up only the first lockfile, since this is the value returned by RecordWrite().
|
||||
return l.lockfiles[0].ModifiedSince(lastWrite)
|
||||
}
|
||||
|
||||
func (l multipleLockFile) AssertLockedForWriting() {
|
||||
for _, lock := range l.lockfiles {
|
||||
lock.AssertLockedForWriting()
|
||||
}
|
||||
}
|
||||
|
||||
func (l multipleLockFile) GetLastWrite() (lockfile.LastWrite, error) {
|
||||
return l.lockfiles[0].GetLastWrite()
|
||||
}
|
||||
|
||||
func (l multipleLockFile) RecordWrite() (lockfile.LastWrite, error) {
|
||||
var lastWrite *lockfile.LastWrite
|
||||
for _, lock := range l.lockfiles {
|
||||
lw, err := lock.RecordWrite()
|
||||
if err != nil {
|
||||
return lw, err
|
||||
}
|
||||
// Return the first value we get so we know that
|
||||
// all the locks have a write time >= to this one.
|
||||
if lastWrite == nil {
|
||||
lastWrite = &lw
|
||||
}
|
||||
}
|
||||
return *lastWrite, nil
|
||||
}
|
||||
|
||||
func (l multipleLockFile) IsReadWrite() bool {
|
||||
return l.lockfiles[0].IsReadWrite()
|
||||
}
|
||||
|
||||
func newMultipleLockFile(l ...*lockfile.LockFile) *multipleLockFile {
|
||||
return &multipleLockFile{lockfiles: l}
|
||||
}
|
||||
|
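The composite lock takes every backing lockfile for writing but answers ModifiedSince and GetLastWrite from the first one, which is also the lock whose LastWrite value RecordWrite hands back. A minimal sketch of the intended usage pattern, built only from the exported lockfile API, with hypothetical paths standing in for layerdir and imagedir:

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/containers/storage/pkg/lockfile"
)

func main() {
	// Hypothetical directories standing in for layerdir and the optional imagedir.
	layerDir := "/var/lib/containers/storage/overlay-layers"
	imageDir := "/mnt/imagestore/overlay-layers"

	// One layers.lock per directory, mirroring what newLayerStore does when an
	// image store is configured.
	primary, err := lockfile.GetLockFile(filepath.Join(layerDir, "layers.lock"))
	if err != nil {
		panic(err)
	}
	secondary, err := lockfile.GetLockFile(filepath.Join(imageDir, "layers.lock"))
	if err != nil {
		panic(err)
	}

	// Write-lock both, always in the same order, then record the write on both.
	primary.Lock()
	secondary.Lock()
	defer secondary.Unlock()
	defer primary.Unlock()

	lastWrite, err := primary.RecordWrite() // the value callers keep, per RecordWrite above
	if err != nil {
		panic(err)
	}
	if _, err := secondary.RecordWrite(); err != nil {
		panic(err)
	}

	// Later reloads only consult the first lock, matching multipleLockFile.ModifiedSince.
	_, modified, err := primary.ModifiedSince(lastWrite)
	fmt.Println(modified, err)
}
```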
||||
type layerStore struct {
|
||||
// The following fields are only set when constructing layerStore, and must never be modified afterwards.
|
||||
// They are safe to access without any other locking.
|
||||
lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores.
|
||||
lockfile *multipleLockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores.
|
||||
mountsLockfile *lockfile.LockFile // Can _only_ be obtained with inProcessLock held.
|
||||
rundir string
|
||||
jsonPath [numLayerLocationIndex]string
|
||||
|
|
@ -1023,22 +1084,37 @@ func (r *layerStore) saveMounts() error {
|
|||
return r.loadMounts()
|
||||
}
|
||||
|
||||
func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
|
||||
func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
|
||||
if err := os.MkdirAll(rundir, 0o700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := os.MkdirAll(layerdir, 0o700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if imagedir != "" {
|
||||
if err := os.MkdirAll(imagedir, 0o700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Note: While the containers.lock file is in rundir for transient stores
|
||||
// we don't want to do this here, because the non-transient layers in
|
||||
// layers.json might be used externally as a read-only layer (using e.g.
|
||||
// additionalimagestores), and that would look for the lockfile in the
|
||||
// same directory
|
||||
var lockFiles []*lockfile.LockFile
|
||||
lockFile, err := lockfile.GetLockFile(filepath.Join(layerdir, "layers.lock"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lockFiles = append(lockFiles, lockFile)
|
||||
if imagedir != "" {
|
||||
lockFile, err := lockfile.GetLockFile(filepath.Join(imagedir, "layers.lock"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lockFiles = append(lockFiles, lockFile)
|
||||
}
|
||||
|
||||
mountsLockfile, err := lockfile.GetLockFile(filepath.Join(rundir, "mountpoints.lock"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -1048,7 +1124,7 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
|
|||
volatileDir = rundir
|
||||
}
|
||||
rlstore := layerStore{
|
||||
lockfile: lockFile,
|
||||
lockfile: newMultipleLockFile(lockFiles...),
|
||||
mountsLockfile: mountsLockfile,
|
||||
rundir: rundir,
|
||||
jsonPath: [numLayerLocationIndex]string{
|
||||
|
|
@ -1085,7 +1161,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
|
|||
return nil, err
|
||||
}
|
||||
rlstore := layerStore{
|
||||
lockfile: lockfile,
|
||||
lockfile: newMultipleLockFile(lockfile),
|
||||
mountsLockfile: nil,
|
||||
rundir: rundir,
|
||||
jsonPath: [numLayerLocationIndex]string{
|
||||
|
|
|
|||
|
|
@ -416,7 +416,7 @@ func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
|
|||
return err
|
||||
}
|
||||
for _, key := range xattrs {
|
||||
if strings.HasPrefix(key, "user.") {
|
||||
if strings.HasPrefix(key, "user.") && !strings.HasPrefix(key, "user.overlay.") {
|
||||
value, err := system.Lgetxattr(path, key)
|
||||
if err != nil {
|
||||
if errors.Is(err, system.E2BIG) {
|
||||
|
|
|
|||
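The tightened check keeps copying ordinary user.* extended attributes into the tar header while skipping overlay's own user.overlay.* bookkeeping attributes. A tiny standalone illustration of the predicate (the sample keys are just examples):

```go
package main

import (
	"fmt"
	"strings"
)

// keepUserXattr mirrors the filter above: plain "user." attributes are kept,
// while "user.overlay." ones (overlay-internal markers such as
// user.overlay.opaque in userxattr mode) are skipped.
func keepUserXattr(key string) bool {
	return strings.HasPrefix(key, "user.") && !strings.HasPrefix(key, "user.overlay.")
}

func main() {
	for _, key := range []string{"user.comment", "user.overlay.opaque", "security.capability"} {
		fmt.Printf("%-22s -> %v\n", key, keepUserXattr(key))
	}
}
```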
|
|
@ -41,6 +41,7 @@ import (
|
|||
|
||||
const (
|
||||
maxNumberMissingChunks = 1024
|
||||
autoMergePartsThreshold = 128 // if the gap between two ranges is below this threshold, automatically merge them.
|
||||
newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY)
|
||||
containersOverrideXattr = "user.containers.override_stat"
|
||||
bigDataKey = "zstd-chunked-manifest"
|
||||
|
|
@ -1180,22 +1181,12 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan
|
|||
}
|
||||
|
||||
func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
|
||||
getGap := func(missingParts []missingPart, i int) int {
|
||||
getGap := func(missingParts []missingPart, i int) uint64 {
|
||||
prev := missingParts[i-1].SourceChunk.Offset + missingParts[i-1].SourceChunk.Length
|
||||
return int(missingParts[i].SourceChunk.Offset - prev)
|
||||
}
|
||||
getCost := func(missingParts []missingPart, i int) int {
|
||||
cost := getGap(missingParts, i)
|
||||
if missingParts[i-1].OriginFile != nil {
|
||||
cost += int(missingParts[i-1].SourceChunk.Length)
|
||||
}
|
||||
if missingParts[i].OriginFile != nil {
|
||||
cost += int(missingParts[i].SourceChunk.Length)
|
||||
}
|
||||
return cost
|
||||
return missingParts[i].SourceChunk.Offset - prev
|
||||
}
|
||||
|
||||
// simple case: merge chunks from the same file.
|
||||
// simple case: merge chunks from the same file. Useful to reduce the number of parts to work with later.
|
||||
newMissingParts := missingParts[0:1]
|
||||
prevIndex := 0
|
||||
for i := 1; i < len(missingParts); i++ {
|
||||
|
|
@ -1215,28 +1206,50 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
|
|||
}
|
||||
missingParts = newMissingParts
|
||||
|
||||
if len(missingParts) <= target {
|
||||
return missingParts
|
||||
type gap struct {
|
||||
from int
|
||||
to int
|
||||
cost uint64
|
||||
}
|
||||
|
||||
// this implementation doesn't account for duplicates, so it could merge
|
||||
// more than necessary to reach the specified target. Since target itself
|
||||
// is a heuristic value, it doesn't matter.
|
||||
costs := make([]int, len(missingParts)-1)
|
||||
for i := 1; i < len(missingParts); i++ {
|
||||
costs[i-1] = getCost(missingParts, i)
|
||||
var requestGaps []gap
|
||||
lastOffset := int(-1)
|
||||
numberSourceChunks := 0
|
||||
for i, c := range missingParts {
|
||||
if c.OriginFile != nil || c.Hole {
|
||||
// it does not require a network request
|
||||
continue
|
||||
}
|
||||
sort.Ints(costs)
|
||||
|
||||
toShrink := len(missingParts) - target
|
||||
if toShrink >= len(costs) {
|
||||
toShrink = len(costs) - 1
|
||||
numberSourceChunks++
|
||||
if lastOffset >= 0 {
|
||||
prevEnd := missingParts[lastOffset].SourceChunk.Offset + missingParts[lastOffset].SourceChunk.Length
|
||||
cost := c.SourceChunk.Offset - prevEnd
|
||||
g := gap{
|
||||
from: lastOffset,
|
||||
to: i,
|
||||
cost: cost,
|
||||
}
|
||||
requestGaps = append(requestGaps, g)
|
||||
}
|
||||
lastOffset = i
|
||||
}
|
||||
sort.Slice(requestGaps, func(i, j int) bool {
|
||||
return requestGaps[i].cost < requestGaps[j].cost
|
||||
})
|
||||
toMergeMap := make([]bool, len(missingParts))
|
||||
remainingToMerge := numberSourceChunks - target
|
||||
for _, g := range requestGaps {
|
||||
if remainingToMerge < 0 && g.cost > autoMergePartsThreshold {
|
||||
continue
|
||||
}
|
||||
for i := g.from + 1; i <= g.to; i++ {
|
||||
toMergeMap[i] = true
|
||||
}
|
||||
remainingToMerge--
|
||||
}
|
||||
targetValue := costs[toShrink]
|
||||
|
||||
newMissingParts = missingParts[0:1]
|
||||
for i := 1; i < len(missingParts); i++ {
|
||||
if getCost(missingParts, i) > targetValue {
|
||||
if !toMergeMap[i] {
|
||||
newMissingParts = append(newMissingParts, missingParts[i])
|
||||
} else {
|
||||
gap := getGap(missingParts, i)
|
||||
|
|
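The rewritten merge logic only counts parts that actually need a network request, sorts the gaps between consecutive requests by cost, and merges the cheapest gaps first, always folding any gap below autoMergePartsThreshold. The following standalone sketch re-implements the idea on toy data; it is a simplified illustration, not the package's code, and its stopping condition differs slightly from the real function:

```go
package main

import (
	"fmt"
	"sort"
)

// chunk is a stand-in for missingPart.SourceChunk in the real code.
type chunk struct{ offset, length uint64 }

func main() {
	const target = 2              // pretend the request budget is 2 ranges
	const autoMergeThreshold = 128 // mirrors autoMergePartsThreshold

	parts := []chunk{{0, 100}, {150, 100}, {300, 100}, {10000, 100}}

	// The cost of fetching part i together with part i-1 is the gap between them:
	// those bytes would be downloaded and thrown away.
	type gap struct {
		from, to int
		cost     uint64
	}
	var gaps []gap
	for i := 1; i < len(parts); i++ {
		prevEnd := parts[i-1].offset + parts[i-1].length
		gaps = append(gaps, gap{from: i - 1, to: i, cost: parts[i].offset - prevEnd})
	}

	// Merge the cheapest gaps until the budget is met; gaps under the threshold
	// are merged regardless, because re-reading a few bytes is cheaper than
	// issuing another HTTP range.
	sort.Slice(gaps, func(i, j int) bool { return gaps[i].cost < gaps[j].cost })
	merge := make([]bool, len(parts))
	remaining := len(parts) - target
	for _, g := range gaps {
		if remaining <= 0 && g.cost > autoMergeThreshold {
			continue
		}
		merge[g.to] = true
		remaining--
	}
	fmt.Println(merge) // [false true true false]: the far-away 10000-offset chunk stays a separate request
}
```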
@ -1268,6 +1281,7 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st
|
|||
}
|
||||
}
|
||||
|
||||
missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks)
|
||||
calculateChunksToRequest()
|
||||
|
||||
// There are some missing files. Prepare a multirange request for the missing chunks.
|
||||
|
|
@ -1281,14 +1295,13 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st
|
|||
}
|
||||
|
||||
if _, ok := err.(ErrBadRequest); ok {
|
||||
requested := len(missingParts)
|
||||
// If the server cannot handle at least 64 chunks in a single request, just give up.
|
||||
if requested < 64 {
|
||||
if len(chunksToRequest) < 64 {
|
||||
return err
|
||||
}
|
||||
|
||||
// Merge more chunks to request
|
||||
missingParts = mergeMissingChunks(missingParts, requested/2)
|
||||
missingParts = mergeMissingChunks(missingParts, len(chunksToRequest)/2)
|
||||
calculateChunksToRequest()
|
||||
continue
|
||||
}
|
||||
|
|
@ -1999,7 +2012,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
|||
}
|
||||
// There are some missing files. Prepare a multirange request for the missing chunks.
|
||||
if len(missingParts) > 0 {
|
||||
missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks)
|
||||
if err := c.retrieveMissingFiles(stream, dest, dirfd, missingParts, options); err != nil {
|
||||
return output, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -972,11 +972,13 @@ func (s *store) load() error {
|
|||
if err := os.MkdirAll(gipath, 0o700); err != nil {
|
||||
return err
|
||||
}
|
||||
ris, err := newImageStore(gipath)
|
||||
imageStore, err := newImageStore(gipath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.imageStore = ris
|
||||
s.imageStore = imageStore
|
||||
|
||||
s.rwImageStores = []rwImageStore{imageStore}
|
||||
|
||||
gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers")
|
||||
if err := os.MkdirAll(gcpath, 0o700); err != nil {
|
||||
|
|
@ -994,13 +996,16 @@ func (s *store) load() error {
|
|||
|
||||
s.containerStore = rcs
|
||||
|
||||
for _, store := range driver.AdditionalImageStores() {
|
||||
additionalImageStores := s.graphDriver.AdditionalImageStores()
|
||||
if s.imageStoreDir != "" {
|
||||
additionalImageStores = append([]string{s.graphRoot}, additionalImageStores...)
|
||||
}
|
||||
|
||||
for _, store := range additionalImageStores {
|
||||
gipath := filepath.Join(store, driverPrefix+"images")
|
||||
var ris roImageStore
|
||||
if s.imageStoreDir != "" && store == s.graphRoot {
|
||||
// If --imagestore was set and current store
|
||||
// is `graphRoot` then mount it as a `rw` additional
|
||||
// store instead of `readonly` additional store.
|
||||
// both the graphdriver and the imagestore must be used read-write.
|
||||
if store == s.imageStoreDir || store == s.graphRoot {
|
||||
imageStore, err := newImageStore(gipath)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -1085,15 +1090,9 @@ func (s *store) stopUsingGraphDriver() {
|
|||
// Almost all users should use startUsingGraphDriver instead.
|
||||
// The caller must hold s.graphLock.
|
||||
func (s *store) createGraphDriverLocked() (drivers.Driver, error) {
|
||||
driverRoot := s.imageStoreDir
|
||||
imageStoreBase := s.graphRoot
|
||||
if driverRoot == "" {
|
||||
driverRoot = s.graphRoot
|
||||
imageStoreBase = ""
|
||||
}
|
||||
config := drivers.Options{
|
||||
Root: driverRoot,
|
||||
ImageStore: imageStoreBase,
|
||||
Root: s.graphRoot,
|
||||
ImageStore: s.imageStoreDir,
|
||||
RunRoot: s.runRoot,
|
||||
DriverPriority: s.graphDriverPriority,
|
||||
DriverOptions: s.graphOptions,
|
||||
|
|
@ -1123,15 +1122,15 @@ func (s *store) getLayerStoreLocked() (rwLayerStore, error) {
|
|||
if err := os.MkdirAll(rlpath, 0o700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
imgStoreRoot := s.imageStoreDir
|
||||
if imgStoreRoot == "" {
|
||||
imgStoreRoot = s.graphRoot
|
||||
}
|
||||
glpath := filepath.Join(imgStoreRoot, driverPrefix+"layers")
|
||||
glpath := filepath.Join(s.graphRoot, driverPrefix+"layers")
|
||||
if err := os.MkdirAll(glpath, 0o700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rls, err := s.newLayerStore(rlpath, glpath, s.graphDriver, s.transientStore)
|
||||
ilpath := ""
|
||||
if s.imageStoreDir != "" {
|
||||
ilpath = filepath.Join(s.imageStoreDir, driverPrefix+"layers")
|
||||
}
|
||||
rls, err := s.newLayerStore(rlpath, glpath, ilpath, s.graphDriver, s.transientStore)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -1162,8 +1161,10 @@ func (s *store) getROLayerStoresLocked() ([]roLayerStore, error) {
|
|||
if err := os.MkdirAll(rlpath, 0o700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, store := range s.graphDriver.AdditionalImageStores() {
|
||||
glpath := filepath.Join(store, driverPrefix+"layers")
|
||||
|
||||
rls, err := newROLayerStore(rlpath, glpath, s.graphDriver)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -2590,7 +2591,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
|
|||
if err := s.writeToAllStores(func(rlstore rwLayerStore) error {
|
||||
// Delete image from all available imagestores configured to be used.
|
||||
imageFound := false
|
||||
for _, is := range append([]rwImageStore{s.imageStore}, s.rwImageStores...) {
|
||||
for _, is := range s.rwImageStores {
|
||||
if is != s.imageStore {
|
||||
// This is an additional writeable image store
|
||||
// so we must perform lock
|
||||
|
|
|
|||
|
|
@ -1,10 +0,0 @@
|
|||
Serious about security
|
||||
======================
|
||||
|
||||
Square recognizes the important contributions the security research community
|
||||
can make. We therefore encourage reporting security issues with the code
|
||||
contained in this repository.
|
||||
|
||||
If you believe you have discovered a security vulnerability, please follow the
|
||||
guidelines at <https://bugcrowd.com/squareopensource>.
|
||||
|
||||
|
|
@ -1,6 +1,23 @@
|
|||
# v3.0.2
|
||||
|
||||
## Fixed
|
||||
|
||||
- DecryptMulti: handle decompression error (#19)
|
||||
|
||||
## Changed
|
||||
|
||||
- jwe/CompactSerialize: improve performance (#67)
|
||||
- Increase the default number of PBKDF2 iterations to 600k (#48)
|
||||
- Return the proper algorithm for ECDSA keys (#45)
|
||||
|
||||
## Added
|
||||
|
||||
- Add Thumbprint support for opaque signers (#38)
|
||||
|
||||
# v3.0.1
|
||||
|
||||
Fixed:
|
||||
## Fixed
|
||||
|
||||
- Security issue: an attacker specifying a large "p2c" value can cause
|
||||
JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large
|
||||
amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the
|
||||
|
|
|
|||
|
|
@ -1,15 +1,18 @@
|
|||
# Go JOSE
|
||||
|
||||
[](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
|
||||
[](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
|
||||
[](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
|
||||
[](https://travis-ci.org/go-jose/go-jose)
|
||||
[](https://coveralls.io/r/go-jose/go-jose)
|
||||
[](https://pkg.go.dev/github.com/go-jose/go-jose/v3)
|
||||
[](https://pkg.go.dev/github.com/go-jose/go-jose/v3/jwt)
|
||||
[](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
|
||||
[](https://github.com/go-jose/go-jose/actions)
|
||||
|
||||
Package jose aims to provide an implementation of the Javascript Object Signing
|
||||
and Encryption set of standards. This includes support for JSON Web Encryption,
|
||||
JSON Web Signature, and JSON Web Token standards.
|
||||
|
||||
**Help Wanted!** If you'd like to help us develop this library please reach
|
||||
out to css (at) css.bio. While I'm still working on keeping this maintained,
|
||||
I have limited time for in-depth development and could use some additional help.
|
||||
|
||||
**Disclaimer**: This library contains encryption software that is subject to
|
||||
the U.S. Export Administration Regulations. You may not export, re-export,
|
||||
transfer or download this code or any part of it in violation of any United
|
||||
|
|
@ -21,13 +24,13 @@ US maintained blocked list.
|
|||
## Overview
|
||||
|
||||
The implementation follows the
|
||||
[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516),
|
||||
[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
|
||||
[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
|
||||
[JSON Web Encryption](https://dx.doi.org/10.17487/RFC7516) (RFC 7516),
|
||||
[JSON Web Signature](https://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
|
||||
[JSON Web Token](https://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
|
||||
Tables of supported algorithms are shown below. The library supports both
|
||||
the compact and JWS/JWE JSON Serialization formats, and has optional support for
|
||||
multiple recipients. It also comes with a small command-line utility
|
||||
([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util))
|
||||
([`jose-util`](https://pkg.go.dev/github.com/go-jose/go-jose/jose-util))
|
||||
for dealing with JOSE messages in a shell.
|
||||
|
||||
**Note**: We use a forked version of the `encoding/json` package from the Go
|
||||
|
|
@ -38,29 +41,19 @@ libraries in other languages.
|
|||
|
||||
### Versions
|
||||
|
||||
[Version 2](https://gopkg.in/go-jose/go-jose.v2)
|
||||
([branch](https://github.com/go-jose/go-jose/tree/v2),
|
||||
[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version:
|
||||
|
||||
import "gopkg.in/go-jose/go-jose.v2"
|
||||
|
||||
[Version 3](https://github.com/go-jose/go-jose)
|
||||
([branch](https://github.com/go-jose/go-jose/tree/master),
|
||||
[doc](https://godoc.org/github.com/go-jose/go-jose)) is the under development/unstable version (not released yet):
|
||||
([branch](https://github.com/go-jose/go-jose/tree/v3),
|
||||
[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v3), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version:
|
||||
|
||||
import "github.com/go-jose/go-jose/v3"
|
||||
|
||||
All new feature development takes place on the `master` branch, which we are
|
||||
preparing to release as version 3 soon. Version 2 will continue to receive
|
||||
critical bug and security fixes. Note that starting with version 3 we are
|
||||
using Go modules for versioning instead of `gopkg.in` as before. Version 3 also will require Go version 1.13 or higher.
|
||||
|
||||
Version 1 (on the `v1` branch) is frozen and not supported anymore.
|
||||
The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which
|
||||
are still useable but not actively developed anymore.
|
||||
|
||||
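As a quick orientation for the v3 module path, here is a hedged, minimal HS256 sign/verify round trip against the public API (a sketch, not an excerpt from this repository):

```go
package main

import (
	"crypto/rand"
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	// 256-bit HMAC key; any []byte works as an HS256 key.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: key}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := signer.Sign([]byte("hello"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseSigned(compact)
	if err != nil {
		panic(err)
	}
	payload, err := parsed.Verify(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload)) // hello
}
```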
### Supported algorithms
|
||||
|
||||
See below for a table of supported algorithms. Algorithm identifiers match
|
||||
the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
|
||||
the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518)
|
||||
standard where possible. The Godoc reference has a list of constants.
|
||||
|
||||
Key encryption | Algorithm identifier(s)
|
||||
|
|
@ -103,20 +96,20 @@ allows attaching a key id.
|
|||
|
||||
Algorithm(s) | Corresponding types
|
||||
:------------------------- | -------------------------------
|
||||
RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
|
||||
ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
|
||||
EdDSA<sup>1</sup> | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey)
|
||||
RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey)
|
||||
ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey)
|
||||
EdDSA<sup>1</sup> | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey)
|
||||
AES, HMAC | []byte
|
||||
|
||||
<sup>1. Only available in version 2 or later of the package</sup>
|
||||
|
||||
## Examples
|
||||
|
||||
[](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
|
||||
[](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
|
||||
[](https://pkg.go.dev/github.com/go-jose/go-jose/v3)
|
||||
[](https://pkg.go.dev/github.com/go-jose/go-jose/v3/jwt)
|
||||
|
||||
Examples can be found in the Godoc
|
||||
reference for this package. The
|
||||
[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)
|
||||
[`jose-util`](https://github.com/go-jose/go-jose/tree/v3/jose-util)
|
||||
subdirectory also contains a small command-line utility which might be useful
|
||||
as an example as well.
|
||||
|
|
|
|||
|
|
@ -0,0 +1,13 @@
|
|||
# Security Policy
|
||||
This document explains how to contact the Let's Encrypt security team to report security vulnerabilities.
|
||||
|
||||
## Supported Versions
|
||||
| Version | Supported |
|
||||
| ------- | ----------|
|
||||
| >= v3 | ✓ |
|
||||
| v2 | ✗ |
|
||||
| v1 | ✗ |
|
||||
|
||||
## Reporting a vulnerability
|
||||
|
||||
Please see [https://letsencrypt.org/contact/#security](https://letsencrypt.org/contact/#security) for the email address to report a vulnerability. Ensure that the subject line for your report contains the word `vulnerability` and is descriptive. Your email should be acknowledged within 24 hours. If you do not receive a response within 24 hours, please follow-up again with another email.
|
||||
|
|
@ -285,6 +285,9 @@ func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm
|
|||
|
||||
switch alg {
|
||||
case RS256, RS384, RS512:
|
||||
// TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the
|
||||
// random parameter is legacy and ignored, and it can be nil.
|
||||
// https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1
|
||||
out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed)
|
||||
case PS256, PS384, PS512:
|
||||
out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
|
||||
|
|
|
|||
|
|
@ -21,7 +21,6 @@ import (
|
|||
"crypto/rsa"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/go-jose/go-jose/v3/json"
|
||||
)
|
||||
|
|
@ -76,14 +75,24 @@ type recipientKeyInfo struct {
|
|||
type EncrypterOptions struct {
|
||||
Compression CompressionAlgorithm
|
||||
|
||||
// Optional map of additional keys to be inserted into the protected header
|
||||
// of a JWS object. Some specifications which make use of JWS like to insert
|
||||
// additional values here. All values must be JSON-serializable.
|
||||
// Optional map of name/value pairs to be inserted into the protected
|
||||
// header of a JWS object. Some specifications which make use of
|
||||
// JWS require additional values here.
|
||||
//
|
||||
// Values will be serialized by [json.Marshal] and must be valid inputs to
|
||||
// that function.
|
||||
//
|
||||
// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
|
||||
ExtraHeaders map[HeaderKey]interface{}
|
||||
}
|
||||
|
||||
// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
|
||||
// if necessary. It returns itself and so can be used in a fluent style.
|
||||
// if necessary, and returns the updated EncrypterOptions.
|
||||
//
|
||||
// The v parameter will be serialized by [json.Marshal] and must be a valid
|
||||
// input to that function.
|
||||
//
|
||||
// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
|
||||
func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions {
|
||||
if eo.ExtraHeaders == nil {
|
||||
eo.ExtraHeaders = map[HeaderKey]interface{}{}
|
||||
|
|
@ -112,6 +121,16 @@ func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions {
|
|||
// be generated.
|
||||
type Recipient struct {
|
||||
Algorithm KeyAlgorithm
|
||||
// Key must have one of these types:
|
||||
// - ed25519.PublicKey
|
||||
// - *ecdsa.PublicKey
|
||||
// - *rsa.PublicKey
|
||||
// - *JSONWebKey
|
||||
// - JSONWebKey
|
||||
// - []byte (a symmetric key)
|
||||
// - Any type that satisfies the OpaqueKeyEncrypter interface
|
||||
//
|
||||
// The type of Key must match the value of Algorithm.
|
||||
Key interface{}
|
||||
KeyID string
|
||||
PBES2Count int
|
||||
|
|
@ -150,16 +169,17 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions)
|
|||
switch rcpt.Algorithm {
|
||||
case DIRECT:
|
||||
// Direct encryption mode must be treated differently
|
||||
if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) {
|
||||
keyBytes, ok := rawKey.([]byte)
|
||||
if !ok {
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
if encrypter.cipher.keySize() != len(rawKey.([]byte)) {
|
||||
if encrypter.cipher.keySize() != len(keyBytes) {
|
||||
return nil, ErrInvalidKeySize
|
||||
}
|
||||
encrypter.keyGenerator = staticKeyGenerator{
|
||||
key: rawKey.([]byte),
|
||||
key: keyBytes,
|
||||
}
|
||||
recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte))
|
||||
recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, keyBytes)
|
||||
recipientInfo.keyID = keyID
|
||||
if rcpt.KeyID != "" {
|
||||
recipientInfo.keyID = rcpt.KeyID
|
||||
|
|
@ -168,16 +188,16 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions)
|
|||
return encrypter, nil
|
||||
case ECDH_ES:
|
||||
// ECDH-ES (w/o key wrapping) is similar to DIRECT mode
|
||||
typeOf := reflect.TypeOf(rawKey)
|
||||
if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) {
|
||||
keyDSA, ok := rawKey.(*ecdsa.PublicKey)
|
||||
if !ok {
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
encrypter.keyGenerator = ecKeyGenerator{
|
||||
size: encrypter.cipher.keySize(),
|
||||
algID: string(enc),
|
||||
publicKey: rawKey.(*ecdsa.PublicKey),
|
||||
publicKey: keyDSA,
|
||||
}
|
||||
recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey))
|
||||
recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, keyDSA)
|
||||
recipientInfo.keyID = keyID
|
||||
if rcpt.KeyID != "" {
|
||||
recipientInfo.keyID = rcpt.KeyID
|
||||
|
|
@ -270,9 +290,8 @@ func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKey
|
|||
recipient, err := makeJWERecipient(alg, encryptionKey.Key)
|
||||
recipient.keyID = encryptionKey.KeyID
|
||||
return recipient, err
|
||||
}
|
||||
if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok {
|
||||
return newOpaqueKeyEncrypter(alg, encrypter)
|
||||
case OpaqueKeyEncrypter:
|
||||
return newOpaqueKeyEncrypter(alg, encryptionKey)
|
||||
}
|
||||
return recipientKeyInfo{}, ErrUnsupportedKeyType
|
||||
}
|
||||
|
|
@ -300,12 +319,12 @@ func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
|
|||
return newDecrypter(decryptionKey.Key)
|
||||
case *JSONWebKey:
|
||||
return newDecrypter(decryptionKey.Key)
|
||||
}
|
||||
if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok {
|
||||
return &opaqueKeyDecrypter{decrypter: okd}, nil
|
||||
}
|
||||
case OpaqueKeyDecrypter:
|
||||
return &opaqueKeyDecrypter{decrypter: decryptionKey}, nil
|
||||
default:
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
}
|
||||
|
||||
// Implementation of encrypt method producing a JWE object.
|
||||
func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) {
|
||||
|
|
@ -403,9 +422,24 @@ func (ctx *genericEncrypter) Options() EncrypterOptions {
|
|||
}
|
||||
}
|
||||
|
||||
// Decrypt and validate the object and return the plaintext. Note that this
|
||||
// function does not support multi-recipient, if you desire multi-recipient
|
||||
// Decrypt and validate the object and return the plaintext. This
|
||||
// function does not support multi-recipient. If you desire multi-recipient
|
||||
// decryption use DecryptMulti instead.
|
||||
//
|
||||
// The decryptionKey argument must contain a private or symmetric key
|
||||
// and must have one of these types:
|
||||
// - *ecdsa.PrivateKey
|
||||
// - *rsa.PrivateKey
|
||||
// - *JSONWebKey
|
||||
// - JSONWebKey
|
||||
// - *JSONWebKeySet
|
||||
// - JSONWebKeySet
|
||||
// - []byte (a symmetric key)
|
||||
// - string (a symmetric key)
|
||||
// - Any type that satisfies the OpaqueKeyDecrypter interface.
|
||||
//
|
||||
// Note that ed25519 is only available for signatures, not encryption, so is
|
||||
// not an option here.
|
||||
func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
|
||||
headers := obj.mergedHeaders(nil)
|
||||
|
||||
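The expanded doc comment enumerates the accepted decryption key types; a short sketch using one of them (*rsa.PrivateKey), assuming RSA-OAEP-256 with A128GCM purely for the sake of the example:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	enc, err := jose.NewEncrypter(
		jose.A128GCM,
		jose.Recipient{Algorithm: jose.RSA_OAEP_256, Key: &priv.PublicKey},
		nil,
	)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("secret"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseEncrypted(compact)
	if err != nil {
		panic(err)
	}
	// *rsa.PrivateKey is one of the types the doc comment above allows.
	plaintext, err := parsed.Decrypt(priv)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plaintext)) // secret
}
```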
|
|
@ -462,15 +496,21 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
|
|||
// The "zip" header parameter may only be present in the protected header.
|
||||
if comp := obj.protected.getCompression(); comp != "" {
|
||||
plaintext, err = decompress(comp, plaintext)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return plaintext, err
|
||||
return plaintext, nil
|
||||
}
|
||||
|
||||
// DecryptMulti decrypts and validates the object and returns the plaintexts,
|
||||
// with support for multiple recipients. It returns the index of the recipient
|
||||
// for which the decryption was successful, the merged headers for that recipient,
|
||||
// and the plaintext.
|
||||
//
|
||||
// The decryptionKey argument must have one of the types allowed for the
|
||||
// decryptionKey argument of Decrypt().
|
||||
func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
|
||||
globalHeaders := obj.mergedHeaders(nil)
|
||||
|
||||
|
|
@ -532,7 +572,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade
|
|||
|
||||
// The "zip" header parameter may only be present in the protected header.
|
||||
if comp := obj.protected.getCompression(); comp != "" {
|
||||
plaintext, _ = decompress(comp, plaintext)
|
||||
plaintext, err = decompress(comp, plaintext)
|
||||
if err != nil {
|
||||
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
sanitized, err := headers.sanitized()
|
||||
|
|
|
|||
|
|
@ -15,13 +15,11 @@
|
|||
*/
|
||||
|
||||
/*
|
||||
|
||||
Package jose aims to provide an implementation of the Javascript Object Signing
|
||||
and Encryption set of standards. It implements encryption and signing based on
|
||||
the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web
|
||||
Token support available in a sub-package. The library supports both the compact
|
||||
and JWS/JWE JSON Serialization formats, and has optional support for multiple
|
||||
recipients.
|
||||
|
||||
*/
|
||||
package jose
|
||||
|
|
|
|||
|
|
@ -189,3 +189,36 @@ func base64URLDecode(value string) ([]byte, error) {
|
|||
value = strings.TrimRight(value, "=")
|
||||
return base64.RawURLEncoding.DecodeString(value)
|
||||
}
|
||||
|
||||
func base64EncodeLen(sl []byte) int {
|
||||
return base64.RawURLEncoding.EncodedLen(len(sl))
|
||||
}
|
||||
|
||||
func base64JoinWithDots(inputs ...[]byte) string {
|
||||
if len(inputs) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Count of dots.
|
||||
totalCount := len(inputs) - 1
|
||||
|
||||
for _, input := range inputs {
|
||||
totalCount += base64EncodeLen(input)
|
||||
}
|
||||
|
||||
out := make([]byte, totalCount)
|
||||
startEncode := 0
|
||||
for i, input := range inputs {
|
||||
base64.RawURLEncoding.Encode(out[startEncode:], input)
|
||||
|
||||
if i == len(inputs)-1 {
|
||||
continue
|
||||
}
|
||||
|
||||
startEncode += base64EncodeLen(input)
|
||||
out[startEncode] = '.'
|
||||
startEncode++
|
||||
}
|
||||
|
||||
return string(out)
|
||||
}
|
||||
|
|
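base64JoinWithDots builds the familiar dotted compact form in a single allocation instead of going through fmt.Sprintf. For any inputs its result matches encoding each segment with base64.RawURLEncoding and joining with dots, as this standalone equivalence sketch (not part of the package) illustrates:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	parts := [][]byte{[]byte(`{"alg":"HS256"}`), []byte("payload"), {0x01, 0x02}}

	// Encode each segment separately, then join with '.': this is the string
	// the single-allocation helper above produces.
	encoded := make([]string, len(parts))
	for i, p := range parts {
		encoded[i] = base64.RawURLEncoding.EncodeToString(p)
	}
	fmt.Println(strings.Join(encoded, "."))
}
```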
|
|||
|
|
@ -75,14 +75,13 @@ import (
|
|||
//
|
||||
// The JSON null value unmarshals into an interface, map, pointer, or slice
|
||||
// by setting that Go value to nil. Because null is often used in JSON to mean
|
||||
// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
|
||||
// “not present,” unmarshaling a JSON null into any other Go type has no effect
|
||||
// on the value and produces no error.
|
||||
//
|
||||
// When unmarshaling quoted strings, invalid UTF-8 or
|
||||
// invalid UTF-16 surrogate pairs are not treated as an error.
|
||||
// Instead, they are replaced by the Unicode replacement
|
||||
// character U+FFFD.
|
||||
//
|
||||
func Unmarshal(data []byte, v interface{}) error {
|
||||
// Check for well-formedness.
|
||||
// Avoids filling out half a data structure
|
||||
|
|
|
|||
|
|
@ -58,6 +58,7 @@ import (
|
|||
// becomes a member of the object unless
|
||||
// - the field's tag is "-", or
|
||||
// - the field is empty and its tag specifies the "omitempty" option.
|
||||
//
|
||||
// The empty values are false, 0, any
|
||||
// nil pointer or interface value, and any array, slice, map, or string of
|
||||
// length zero. The object's default key string is the struct field name
|
||||
|
|
@ -133,7 +134,6 @@ import (
|
|||
// JSON cannot represent cyclic data structures and Marshal does not
|
||||
// handle them. Passing cyclic structures to Marshal will result in
|
||||
// an infinite recursion.
|
||||
//
|
||||
func Marshal(v interface{}) ([]byte, error) {
|
||||
e := &encodeState{}
|
||||
err := e.marshal(v)
|
||||
|
|
|
|||
|
|
@ -240,7 +240,6 @@ var _ Unmarshaler = (*RawMessage)(nil)
|
|||
// Number, for JSON numbers
|
||||
// string, for JSON string literals
|
||||
// nil, for JSON null
|
||||
//
|
||||
type Token interface{}
|
||||
|
||||
const (
|
||||
|
|
|
|||
|
|
@ -252,13 +252,13 @@ func (obj JSONWebEncryption) CompactSerialize() (string, error) {
|
|||
|
||||
serializedProtected := mustSerializeJSON(obj.protected)
|
||||
|
||||
return fmt.Sprintf(
|
||||
"%s.%s.%s.%s.%s",
|
||||
base64.RawURLEncoding.EncodeToString(serializedProtected),
|
||||
base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey),
|
||||
base64.RawURLEncoding.EncodeToString(obj.iv),
|
||||
base64.RawURLEncoding.EncodeToString(obj.ciphertext),
|
||||
base64.RawURLEncoding.EncodeToString(obj.tag)), nil
|
||||
return base64JoinWithDots(
|
||||
serializedProtected,
|
||||
obj.recipients[0].encryptedKey,
|
||||
obj.iv,
|
||||
obj.ciphertext,
|
||||
obj.tag,
|
||||
), nil
|
||||
}
|
||||
|
||||
// FullSerialize serializes an object using the full JSON serialization format.
|
||||
|
|
|
|||
|
|
@ -67,9 +67,21 @@ type rawJSONWebKey struct {
|
|||
X5tSHA256 string `json:"x5t#S256,omitempty"`
|
||||
}
|
||||
|
||||
// JSONWebKey represents a public or private key in JWK format.
|
||||
// JSONWebKey represents a public or private key in JWK format. It can be
|
||||
// marshaled into JSON and unmarshaled from JSON.
|
||||
type JSONWebKey struct {
|
||||
// Cryptographic key, can be a symmetric or asymmetric key.
|
||||
// Key is the Go in-memory representation of this key. It must have one
|
||||
// of these types:
|
||||
// - ed25519.PublicKey
|
||||
// - ed25519.PrivateKey
|
||||
// - *ecdsa.PublicKey
|
||||
// - *ecdsa.PrivateKey
|
||||
// - *rsa.PublicKey
|
||||
// - *rsa.PrivateKey
|
||||
// - []byte (a symmetric key)
|
||||
//
|
||||
// When marshaling this JSONWebKey into JSON, the "kty" header parameter
|
||||
// will be automatically set based on the type of this field.
|
||||
Key interface{}
|
||||
// Key identifier, parsed from `kid` header.
|
||||
KeyID string
|
||||
|
|
@ -389,6 +401,8 @@ func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
|
|||
input, err = rsaThumbprintInput(key.N, key.E)
|
||||
case ed25519.PrivateKey:
|
||||
input, err = edThumbprintInput(ed25519.PublicKey(key[32:]))
|
||||
case OpaqueSigner:
|
||||
return key.Public().Thumbprint(hash)
|
||||
default:
|
||||
return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key))
|
||||
}
|
||||
|
|
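The reworked JSONWebKey comment lists the Go key types the struct may wrap and notes that "kty" is derived from the key's type when marshaling. A small sketch with an ECDSA public key (the field values are illustrative):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"encoding/json"
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	jwk := jose.JSONWebKey{
		Key:       &priv.PublicKey, // *ecdsa.PublicKey, one of the allowed types
		KeyID:     "example-key-1",
		Algorithm: string(jose.ES256),
		Use:       "sig",
	}

	out, err := json.Marshal(jwk) // "kty" is filled in from the key's type
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```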
|
|||
|
|
@ -314,15 +314,18 @@ func (obj JSONWebSignature) compactSerialize(detached bool) (string, error) {
|
|||
return "", ErrNotSupported
|
||||
}
|
||||
|
||||
serializedProtected := base64.RawURLEncoding.EncodeToString(mustSerializeJSON(obj.Signatures[0].protected))
|
||||
payload := ""
|
||||
signature := base64.RawURLEncoding.EncodeToString(obj.Signatures[0].Signature)
|
||||
serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
|
||||
|
||||
var payload []byte
|
||||
if !detached {
|
||||
payload = base64.RawURLEncoding.EncodeToString(obj.payload)
|
||||
payload = obj.payload
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s.%s.%s", serializedProtected, payload, signature), nil
|
||||
return base64JoinWithDots(
|
||||
serializedProtected,
|
||||
payload,
|
||||
obj.Signatures[0].Signature,
|
||||
), nil
|
||||
}
|
||||
|
||||
// CompactSerialize serializes an object using the compact serialization format.
|
||||
|
|
|
|||
|
|
@ -183,8 +183,13 @@ type Header struct {
|
|||
// Unverified certificate chain parsed from x5c header.
|
||||
certificates []*x509.Certificate
|
||||
|
||||
// Any headers not recognised above get unmarshalled
|
||||
// from JSON in a generic manner and placed in this map.
|
||||
// At parse time, each header parameter with a name other than "kid",
|
||||
// "jwk", "alg", "nonce", or "x5c" will have its value passed to
|
||||
// [json.Unmarshal] to unmarshal it into an interface value.
|
||||
// The resulting value will be stored in this map, with the header
|
||||
// parameter name as the key.
|
||||
//
|
||||
// [json.Unmarshal]: https://pkg.go.dev/encoding/json#Unmarshal
|
||||
ExtraHeaders map[HeaderKey]interface{}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -40,6 +40,15 @@ type Signer interface {
|
|||
}
|
||||
|
||||
// SigningKey represents an algorithm/key used to sign a message.
|
||||
//
|
||||
// Key must have one of these types:
|
||||
// - ed25519.PrivateKey
|
||||
// - *ecdsa.PrivateKey
|
||||
// - *rsa.PrivateKey
|
||||
// - *JSONWebKey
|
||||
// - JSONWebKey
|
||||
// - []byte (an HMAC key)
|
||||
// - Any type that satisfies the OpaqueSigner interface
|
||||
type SigningKey struct {
|
||||
Algorithm SignatureAlgorithm
|
||||
Key interface{}
|
||||
|
|
@ -52,12 +61,22 @@ type SignerOptions struct {
|
|||
|
||||
// Optional map of additional keys to be inserted into the protected header
|
||||
// of a JWS object. Some specifications which make use of JWS like to insert
|
||||
// additional values here. All values must be JSON-serializable.
|
||||
// additional values here.
|
||||
//
|
||||
// Values will be serialized by [json.Marshal] and must be valid inputs to
|
||||
// that function.
|
||||
//
|
||||
// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
|
||||
ExtraHeaders map[HeaderKey]interface{}
|
||||
}
|
||||
|
||||
// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
|
||||
// if necessary. It returns itself and so can be used in a fluent style.
|
||||
// if necessary, and returns the updated SignerOptions.
|
||||
//
|
||||
// The v argument will be serialized by [json.Marshal] and must be a valid
|
||||
// input to that function.
|
||||
//
|
||||
// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
|
||||
func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions {
|
||||
if so.ExtraHeaders == nil {
|
||||
so.ExtraHeaders = map[HeaderKey]interface{}{}
|
||||
|
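The clarified WithHeader contract (values must be acceptable to json.Marshal) does not change the fluent construction style. For example:

```go
package main

import (
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	// Each value handed to WithHeader must be something json.Marshal accepts;
	// the key id here is purely illustrative.
	opts := (&jose.SignerOptions{}).
		WithType("JWT").
		WithHeader("kid", "example-key-1")

	fmt.Println(opts.ExtraHeaders)
}
```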
|
@ -173,12 +192,12 @@ func newVerifier(verificationKey interface{}) (payloadVerifier, error) {
|
|||
return newVerifier(verificationKey.Key)
|
||||
case *JSONWebKey:
|
||||
return newVerifier(verificationKey.Key)
|
||||
}
|
||||
if ov, ok := verificationKey.(OpaqueVerifier); ok {
|
||||
return &opaqueVerifier{verifier: ov}, nil
|
||||
}
|
||||
case OpaqueVerifier:
|
||||
return &opaqueVerifier{verifier: verificationKey}, nil
|
||||
default:
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error {
|
||||
recipient, err := makeJWSRecipient(alg, signingKey)
|
||||
|
|
@ -204,12 +223,12 @@ func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipient
|
|||
return newJWKSigner(alg, signingKey)
|
||||
case *JSONWebKey:
|
||||
return newJWKSigner(alg, *signingKey)
|
||||
}
|
||||
if signer, ok := signingKey.(OpaqueSigner); ok {
|
||||
return newOpaqueSigner(alg, signer)
|
||||
}
|
||||
case OpaqueSigner:
|
||||
return newOpaqueSigner(alg, signingKey)
|
||||
default:
|
||||
return recipientSigInfo{}, ErrUnsupportedKeyType
|
||||
}
|
||||
}
|
||||
|
||||
func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) {
|
||||
recipient, err := makeJWSRecipient(alg, signingKey.Key)
|
||||
|
|
@ -321,12 +340,21 @@ func (ctx *genericSigner) Options() SignerOptions {
|
|||
}
|
||||
|
||||
// Verify validates the signature on the object and returns the payload.
|
||||
// This function does not support multi-signature, if you desire multi-sig
|
||||
// This function does not support multi-signature. If you desire multi-signature
|
||||
// verification use VerifyMulti instead.
|
||||
//
|
||||
// Be careful when verifying signatures based on embedded JWKs inside the
|
||||
// payload header. You cannot assume that the key received in a payload is
|
||||
// trusted.
|
||||
//
|
||||
// The verificationKey argument must have one of these types:
|
||||
// - ed25519.PublicKey
|
||||
// - *ecdsa.PublicKey
|
||||
// - *rsa.PublicKey
|
||||
// - *JSONWebKey
|
||||
// - JSONWebKey
|
||||
// - []byte (an HMAC key)
|
||||
// - Any type that implements the OpaqueVerifier interface.
|
||||
func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) {
|
||||
err := obj.DetachedVerify(obj.payload, verificationKey)
|
||||
if err != nil {
|
||||
|
|
@ -346,6 +374,9 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte {
|
|||
// most cases, you will probably want to use Verify instead. DetachedVerify
|
||||
// is only useful if you have a payload and signature that are separated from
|
||||
// each other.
|
||||
//
|
||||
// The verificationKey argument must have one of the types allowed for the
|
||||
// verificationKey argument of JSONWebSignature.Verify().
|
||||
func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
|
||||
key := tryJWKS(verificationKey, obj.headers()...)
|
||||
verifier, err := newVerifier(key)
|
||||
|
|
@ -388,6 +419,9 @@ func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey inter
|
|||
// returns the index of the signature that was verified, along with the signature
|
||||
// object and the payload. We return the signature and index to guarantee that
|
||||
// callers are getting the verified value.
|
||||
//
|
||||
// The verificationKey argument must have one of the types allowed for the
|
||||
// verificationKey argument of JSONWebSignature.Verify().
|
||||
func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) {
|
||||
idx, sig, err := obj.DetachedVerifyMulti(obj.payload, verificationKey)
|
||||
if err != nil {
|
||||
|
|
@ -405,6 +439,9 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa
|
|||
// DetachedVerifyMulti is only useful if you have a payload and signature that are
|
||||
// separated from each other, and the signature can have multiple signers at the
|
||||
// same time.
|
||||
//
|
||||
// The verificationKey argument must have one of the types allowed for the
|
||||
// verificationKey argument of JSONWebSignature.Verify().
|
||||
func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) {
|
||||
key := tryJWKS(verificationKey, obj.headers()...)
|
||||
verifier, err := newVerifier(key)
|
||||
|
|
|
|||
|
|
@ -40,12 +40,17 @@ var RandReader = rand.Reader
|
|||
|
||||
const (
|
||||
// RFC7518 recommends a minimum of 1,000 iterations:
|
||||
// https://tools.ietf.org/html/rfc7518#section-4.8.1.2
|
||||
// - https://tools.ietf.org/html/rfc7518#section-4.8.1.2
|
||||
//
|
||||
// NIST recommends a minimum of 10,000:
|
||||
// https://pages.nist.gov/800-63-3/sp800-63b.html
|
||||
// 1Password uses 100,000:
|
||||
// https://support.1password.com/pbkdf2/
|
||||
defaultP2C = 100000
|
||||
// - https://pages.nist.gov/800-63-3/sp800-63b.html
|
||||
//
|
||||
// 1Password increased in 2023 from 100,000 to 650,000:
|
||||
// - https://support.1password.com/pbkdf2/
|
||||
//
|
||||
// OWASP recommended 600,000 in Dec 2022:
|
||||
// - https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2
|
||||
defaultP2C = 600000
|
||||
// Default salt size: 128 bits
|
||||
defaultP2SSize = 16
|
||||
)
|
||||
|
|
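With the default PBKDF2 iteration count raised from 100,000 to 600,000, callers that need a specific count can still pin it per recipient through the PBES2Count field shown earlier; leaving it at zero is expected to fall back to the library default. A hedged sketch:

```go
package main

import (
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	enc, err := jose.NewEncrypter(
		jose.A128GCM,
		jose.Recipient{
			Algorithm:  jose.PBES2_HS256_A128KW,
			Key:        []byte("correct horse battery staple"), // password for PBES2
			PBES2Count: 600000,                                 // explicit; leaving it 0 should use the default above
		},
		nil,
	)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("hello"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(compact) > 0)
}
```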
|
|||
|
|
@ -16,6 +16,7 @@ package strfmt
|
|||
|
||||
import (
|
||||
"encoding"
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
|
@ -117,7 +118,7 @@ func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc {
|
|||
case "datetime":
|
||||
input := data
|
||||
if len(input) == 0 {
|
||||
return nil, fmt.Errorf("empty string is an invalid datetime format")
|
||||
return nil, stderrors.New("empty string is an invalid datetime format")
|
||||
}
|
||||
return ParseDateTime(input)
|
||||
case "duration":
|
||||
|
|
|
|||
|
|
@ -176,7 +176,7 @@ func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
|
|||
func (m *indexOfInitialisms) sorted() (result []string) {
|
||||
m.sortMutex.Lock()
|
||||
defer m.sortMutex.Unlock()
|
||||
m.index.Range(func(key, value interface{}) bool {
|
||||
m.index.Range(func(key, _ interface{}) bool {
|
||||
k := key.(string)
|
||||
result = append(result, k)
|
||||
return true
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@ package swag
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
|
|
@ -50,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) {
|
|||
return nil, err
|
||||
}
|
||||
if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode {
|
||||
return nil, fmt.Errorf("only YAML documents that are objects are supported")
|
||||
return nil, errors.New("only YAML documents that are objects are supported")
|
||||
}
|
||||
return &document, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,5 +1,18 @@
|
|||
# Changelog
|
||||
|
||||
## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
|
||||
* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
|
||||
|
||||
## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -17,6 +17,12 @@ var (
|
|||
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
|
||||
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
|
||||
Nil UUID // empty UUID, all zeros
|
||||
|
||||
// The Max UUID is special form of UUID that is specified to have all 128 bits set to 1.
|
||||
Max = UUID{
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
|
||||
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
|
||||
}
|
||||
)
|
||||
|
||||
// NewHash returns a new UUID derived from the hash of space concatenated with
|
||||
|
|
|
|||
|
|
@ -52,7 +52,7 @@ func makeV7(uuid []byte) {
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms | ver | rand_a |
| unix_ts_ms | ver | rand_a (12 bit seq) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|var| rand_b |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

@ -61,7 +61,7 @@ func makeV7(uuid []byte) {
*/
_ = uuid[15] // bounds check

t := timeNow().UnixMilli()
t, s := getV7Time()

uuid[0] = byte(t >> 40)
uuid[1] = byte(t >> 32)

@ -70,6 +70,35 @@ func makeV7(uuid []byte) {
uuid[4] = byte(t >> 8)
uuid[5] = byte(t)

uuid[6] = 0x70 | (uuid[6] & 0x0F)
// uuid[8] has already has right version
uuid[6] = 0x70 | (0x0F & byte(s>>8))
uuid[7] = byte(s)
}

// lastV7time is the last time we returned stored as:
//
// 52 bits of time in milliseconds since epoch
// 12 bits of (fractional nanoseconds) >> 8
var lastV7time int64

const nanoPerMilli = 1000000

// getV7Time returns the time in milliseconds and nanoseconds / 256.
// The returned (milli << 12 + seq) is guarenteed to be greater than
// (milli << 12 + seq) returned by any previous call to getV7Time.
func getV7Time() (milli, seq int64) {
timeMu.Lock()
defer timeMu.Unlock()

nano := timeNow().UnixNano()
milli = nano / nanoPerMilli
// Sequence number is between 0 and 3906 (nanoPerMilli>>8)
seq = (nano - milli*nanoPerMilli) >> 8
now := milli<<12 + seq
if now <= lastV7time {
now = lastV7time + 1
milli = now >> 12
seq = now & 0xfff
}
lastV7time = now
return milli, seq
}
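For context, a minimal sketch (not part of this commit) of what the monotonicity fix above means for callers, assuming the vendored google/uuid v1.6.0; output comments are illustrative:

	package main

	import (
		"fmt"

		"github.com/google/uuid"
	)

	func main() {
		// Two V7 UUIDs generated back to back in the same millisecond still
		// compare as strictly increasing, because getV7Time bumps the 12-bit
		// sequence counter embedded in rand_a.
		a, _ := uuid.NewV7()
		b, _ := uuid.NewV7()
		fmt.Println(a.String() < b.String()) // true
		fmt.Println(uuid.Max)                // ffffffff-ffff-ffff-ffff-ffffffffffff
	}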
211 common/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go generated vendored Normal file
@ -0,0 +1,211 @@
|
|||
//
|
||||
// Copyright 2024 The Sigstore Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package signature
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ed25519"
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/sigstore/sigstore/pkg/signature/options"
|
||||
)
|
||||
|
||||
var ed25519phSupportedHashFuncs = []crypto.Hash{
|
||||
crypto.SHA512,
|
||||
}
|
||||
|
||||
// ED25519phSigner is a signature.Signer that uses the Ed25519 public-key signature system with pre-hashing
|
||||
type ED25519phSigner struct {
|
||||
priv ed25519.PrivateKey
|
||||
}
|
||||
|
||||
// LoadED25519phSigner calculates signatures using the specified private key.
|
||||
func LoadED25519phSigner(priv ed25519.PrivateKey) (*ED25519phSigner, error) {
|
||||
if priv == nil {
|
||||
return nil, errors.New("invalid ED25519 private key specified")
|
||||
}
|
||||
|
||||
return &ED25519phSigner{
|
||||
priv: priv,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ToED25519SignerVerifier creates a ED25519SignerVerifier from a ED25519phSignerVerifier
|
||||
//
|
||||
// Clients that use ED25519phSignerVerifier should use this method to get a
|
||||
// SignerVerifier that uses the same ED25519 private key, but with the Pure
|
||||
// Ed25519 algorithm. This might be necessary to interact with Fulcio, which
|
||||
// only supports the Pure Ed25519 algorithm.
|
||||
func (e ED25519phSignerVerifier) ToED25519SignerVerifier() (*ED25519SignerVerifier, error) {
|
||||
return LoadED25519SignerVerifier(e.priv)
|
||||
}
|
||||
|
||||
// SignMessage signs the provided message. If the message is provided,
|
||||
// this method will compute the digest according to the hash function specified
|
||||
// when the ED25519phSigner was created.
|
||||
//
|
||||
// This function recognizes the following Options listed in order of preference:
|
||||
//
|
||||
// - WithDigest()
|
||||
//
|
||||
// All other options are ignored if specified.
|
||||
func (e ED25519phSigner) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) {
|
||||
digest, _, err := ComputeDigestForSigning(message, crypto.SHA512, ed25519phSupportedHashFuncs, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return e.priv.Sign(nil, digest, crypto.SHA512)
|
||||
}
|
||||
|
||||
// Public returns the public key that can be used to verify signatures created by
|
||||
// this signer.
|
||||
func (e ED25519phSigner) Public() crypto.PublicKey {
|
||||
if e.priv == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return e.priv.Public()
|
||||
}
|
||||
|
||||
// PublicKey returns the public key that can be used to verify signatures created by
|
||||
// this signer. As this value is held in memory, all options provided in arguments
|
||||
// to this method are ignored.
|
||||
func (e ED25519phSigner) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
|
||||
return e.Public(), nil
|
||||
}
|
||||
|
||||
// Sign computes the signature for the specified message; the first and third arguments to this
|
||||
// function are ignored as they are not used by the ED25519ph algorithm.
|
||||
func (e ED25519phSigner) Sign(_ io.Reader, digest []byte, _ crypto.SignerOpts) ([]byte, error) {
|
||||
return e.SignMessage(nil, options.WithDigest(digest))
|
||||
}
|
||||
|
||||
// ED25519phVerifier is a signature.Verifier that uses the Ed25519 public-key signature system
|
||||
type ED25519phVerifier struct {
|
||||
publicKey ed25519.PublicKey
|
||||
}
|
||||
|
||||
// LoadED25519phVerifier returns a Verifier that verifies signatures using the
|
||||
// specified ED25519 public key.
|
||||
func LoadED25519phVerifier(pub ed25519.PublicKey) (*ED25519phVerifier, error) {
|
||||
if pub == nil {
|
||||
return nil, errors.New("invalid ED25519 public key specified")
|
||||
}
|
||||
|
||||
return &ED25519phVerifier{
|
||||
publicKey: pub,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PublicKey returns the public key that is used to verify signatures by
|
||||
// this verifier. As this value is held in memory, all options provided in arguments
|
||||
// to this method are ignored.
|
||||
func (e *ED25519phVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
|
||||
return e.publicKey, nil
|
||||
}
|
||||
|
||||
// VerifySignature verifies the signature for the given message. Unless provided
|
||||
// in an option, the digest of the message will be computed using the hash function specified
|
||||
// when the ED25519phVerifier was created.
|
||||
//
|
||||
// This function returns nil if the verification succeeded, and an error message otherwise.
|
||||
//
|
||||
// This function recognizes the following Options listed in order of preference:
|
||||
//
|
||||
// - WithDigest()
|
||||
//
|
||||
// All other options are ignored if specified.
|
||||
func (e *ED25519phVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error {
|
||||
if signature == nil {
|
||||
return errors.New("nil signature passed to VerifySignature")
|
||||
}
|
||||
|
||||
digest, _, err := ComputeDigestForVerifying(message, crypto.SHA512, ed25519phSupportedHashFuncs, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sigBytes, err := io.ReadAll(signature)
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading signature: %w", err)
|
||||
}
|
||||
|
||||
if err := ed25519.VerifyWithOptions(e.publicKey, digest, sigBytes, &ed25519.Options{Hash: crypto.SHA512}); err != nil {
|
||||
return fmt.Errorf("failed to verify signature: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ED25519phSignerVerifier is a signature.SignerVerifier that uses the Ed25519 public-key signature system
|
||||
type ED25519phSignerVerifier struct {
|
||||
*ED25519phSigner
|
||||
*ED25519phVerifier
|
||||
}
|
||||
|
||||
// LoadED25519phSignerVerifier creates a combined signer and verifier. This is
|
||||
// a convenience object that simply wraps an instance of ED25519phSigner and ED25519phVerifier.
|
||||
func LoadED25519phSignerVerifier(priv ed25519.PrivateKey) (*ED25519phSignerVerifier, error) {
|
||||
signer, err := LoadED25519phSigner(priv)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("initializing signer: %w", err)
|
||||
}
|
||||
pub, ok := priv.Public().(ed25519.PublicKey)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("given key is not ed25519.PublicKey")
|
||||
}
|
||||
verifier, err := LoadED25519phVerifier(pub)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("initializing verifier: %w", err)
|
||||
}
|
||||
|
||||
return &ED25519phSignerVerifier{
|
||||
ED25519phSigner: signer,
|
||||
ED25519phVerifier: verifier,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewDefaultED25519phSignerVerifier creates a combined signer and verifier using ED25519.
|
||||
// This creates a new ED25519 key using crypto/rand as an entropy source.
|
||||
func NewDefaultED25519phSignerVerifier() (*ED25519phSignerVerifier, ed25519.PrivateKey, error) {
|
||||
return NewED25519phSignerVerifier(rand.Reader)
|
||||
}
|
||||
|
||||
// NewED25519phSignerVerifier creates a combined signer and verifier using ED25519.
|
||||
// This creates a new ED25519 key using the specified entropy source.
|
||||
func NewED25519phSignerVerifier(rand io.Reader) (*ED25519phSignerVerifier, ed25519.PrivateKey, error) {
|
||||
_, priv, err := ed25519.GenerateKey(rand)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
sv, err := LoadED25519phSignerVerifier(priv)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return sv, priv, nil
|
||||
}
|
||||
|
||||
// PublicKey returns the public key that is used to verify signatures by
|
||||
// this verifier. As this value is held in memory, all options provided in arguments
|
||||
// to this method are ignored.
|
||||
func (e ED25519phSignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
|
||||
return e.publicKey, nil
|
||||
}
|
||||
|
|
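For reference, a hedged sketch (not part of this commit) of using the new pre-hashed Ed25519 API added in ed25519ph.go above, via the vendored github.com/sigstore/sigstore/pkg/signature package; the message is illustrative:

	package main

	import (
		"bytes"
		"fmt"

		"github.com/sigstore/sigstore/pkg/signature"
	)

	func main() {
		// Generates a fresh Ed25519 key and wraps it in the combined
		// signer/verifier defined above.
		sv, _, err := signature.NewDefaultED25519phSignerVerifier()
		if err != nil {
			panic(err)
		}
		msg := []byte("container image digest")
		// SignMessage computes the SHA-512 digest of msg and signs it (Ed25519ph).
		sig, err := sv.SignMessage(bytes.NewReader(msg))
		if err != nil {
			panic(err)
		}
		// VerifySignature recomputes the digest and checks the signature; nil means valid.
		fmt.Println(sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)))
	}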
@ -18,6 +18,7 @@ package signature
import (
"context"
"crypto"
"crypto/rsa"
"io"

"github.com/sigstore/sigstore/pkg/signature/options"

@ -55,3 +56,10 @@ type VerifyOption interface {
RPCOption
MessageOption
}

// LoadOption specifies options to be used when creating a Signer/Verifier
type LoadOption interface {
ApplyHash(*crypto.Hash)
ApplyED25519ph(*bool)
ApplyRSAPSS(**rsa.PSSOptions)
}
76 common/vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go generated vendored Normal file
@ -0,0 +1,76 @@
//
// Copyright 2024 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package options

import (
"crypto"
"crypto/rsa"
)

// RequestHash implements the functional option pattern for setting a Hash
// function when loading a signer or verifier
type RequestHash struct {
NoOpOptionImpl
hashFunc crypto.Hash
}

// ApplyHash sets the hash as requested by the functional option
func (r RequestHash) ApplyHash(hash *crypto.Hash) {
*hash = r.hashFunc
}

// WithHash specifies that the given hash function should be used when loading a signer or verifier
func WithHash(hash crypto.Hash) RequestHash {
return RequestHash{hashFunc: hash}
}

// RequestED25519ph implements the functional option pattern for specifying
// ED25519ph (pre-hashed) should be used when loading a signer or verifier and a
// ED25519 key is
type RequestED25519ph struct {
NoOpOptionImpl
useED25519ph bool
}

// ApplyED25519ph sets the ED25519ph flag as requested by the functional option
func (r RequestED25519ph) ApplyED25519ph(useED25519ph *bool) {
*useED25519ph = r.useED25519ph
}

// WithED25519ph specifies that the ED25519ph algorithm should be used when a ED25519 key is used
func WithED25519ph() RequestED25519ph {
return RequestED25519ph{useED25519ph: true}
}

// RequestPSSOptions implements the functional option pattern for specifying RSA
// PSS should be used when loading a signer or verifier and a RSA key is
// detected
type RequestPSSOptions struct {
NoOpOptionImpl
opts *rsa.PSSOptions
}

// ApplyRSAPSS sets the RSAPSS options as requested by the functional option
func (r RequestPSSOptions) ApplyRSAPSS(opts **rsa.PSSOptions) {
*opts = r.opts
}

// WithRSAPSS specifies that the RSAPSS algorithm should be used when a RSA key is used
// Note that the RSA PSSOptions contains an hash algorithm, which will override
// the hash function specified with WithHash.
func WithRSAPSS(opts *rsa.PSSOptions) RequestPSSOptions {
return RequestPSSOptions{opts: opts}
}
@ -18,6 +18,7 @@ package options
import (
"context"
"crypto"
"crypto/rsa"
"io"
)

@ -47,3 +48,12 @@ func (NoOpOptionImpl) ApplyKeyVersion(_ *string) {}

// ApplyKeyVersionUsed is a no-op required to fully implement the requisite interfaces
func (NoOpOptionImpl) ApplyKeyVersionUsed(_ **string) {}

// ApplyHash is a no-op required to fully implement the requisite interfaces
func (NoOpOptionImpl) ApplyHash(_ *crypto.Hash) {}

// ApplyED25519ph is a no-op required to fully implement the requisite interfaces
func (NoOpOptionImpl) ApplyED25519ph(_ *bool) {}

// ApplyRSAPSS is a no-op required to fully implement the requisite interfaces
func (NoOpOptionImpl) ApplyRSAPSS(_ **rsa.PSSOptions) {}
@ -30,6 +30,7 @@ import (
_ "crypto/sha512"

"github.com/sigstore/sigstore/pkg/cryptoutils"
"github.com/sigstore/sigstore/pkg/signature/options"

// these ensure we have the implementations loaded
_ "golang.org/x/crypto/sha3"

@ -59,12 +60,33 @@ func (s SignerOpts) HashFunc() crypto.Hash {
// If privateKey is an RSA key, a RSAPKCS1v15Signer will be returned. If a
// RSAPSSSigner is desired instead, use the LoadRSAPSSSigner() method directly.
func LoadSigner(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (Signer, error) {
return LoadSignerWithOpts(privateKey, options.WithHash(hashFunc))
}

// LoadSignerWithOpts returns a signature.Signer based on the algorithm of the private key
// provided.
func LoadSignerWithOpts(privateKey crypto.PrivateKey, opts ...LoadOption) (Signer, error) {
var rsaPSSOptions *rsa.PSSOptions
var useED25519ph bool
hashFunc := crypto.SHA256
for _, o := range opts {
o.ApplyED25519ph(&useED25519ph)
o.ApplyHash(&hashFunc)
o.ApplyRSAPSS(&rsaPSSOptions)
}

switch pk := privateKey.(type) {
case *rsa.PrivateKey:
if rsaPSSOptions != nil {
return LoadRSAPSSSigner(pk, hashFunc, rsaPSSOptions)
}
return LoadRSAPKCS1v15Signer(pk, hashFunc)
case *ecdsa.PrivateKey:
return LoadECDSASigner(pk, hashFunc)
case ed25519.PrivateKey:
if useED25519ph {
return LoadED25519phSigner(pk)
}
return LoadED25519Signer(pk)
}
return nil, errors.New("unsupported public key type")

@ -87,3 +109,17 @@ func LoadSignerFromPEMFile(path string, hashFunc crypto.Hash, pf cryptoutils.Pas
}
return LoadSigner(priv, hashFunc)
}

// LoadSignerFromPEMFileWithOpts returns a signature.Signer based on the algorithm of the private key
// in the file. The Signer will use the hash function specified in the options when computing digests.
func LoadSignerFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, opts ...LoadOption) (Signer, error) {
fileBytes, err := os.ReadFile(filepath.Clean(path))
if err != nil {
return nil, err
}
priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf)
if err != nil {
return nil, err
}
return LoadSignerWithOpts(priv, opts...)
}
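For reference, a hedged usage sketch (not part of this commit) of the new LoadSignerWithOpts entry point; the option constructors come from the loadoptions.go file added above, and the key is generated locally for illustration:

	package main

	import (
		"crypto/ed25519"
		"crypto/rand"
		"fmt"

		"github.com/sigstore/sigstore/pkg/signature"
		"github.com/sigstore/sigstore/pkg/signature/options"
	)

	func main() {
		_, priv, err := ed25519.GenerateKey(rand.Reader)
		if err != nil {
			panic(err)
		}
		// Without options this loads a pure Ed25519 signer; WithED25519ph
		// selects the pre-hashed variant instead.
		s, err := signature.LoadSignerWithOpts(priv, options.WithED25519ph())
		if err != nil {
			panic(err)
		}
		fmt.Printf("%T\n", s) // *signature.ED25519phSigner
	}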
@ -25,6 +25,7 @@ import (
"path/filepath"

"github.com/sigstore/sigstore/pkg/cryptoutils"
"github.com/sigstore/sigstore/pkg/signature/options"
)

// SignerVerifier creates and verifies digital signatures over a message using a specified key pair

@ -39,12 +40,33 @@ type SignerVerifier interface {
// If privateKey is an RSA key, a RSAPKCS1v15SignerVerifier will be returned. If a
// RSAPSSSignerVerifier is desired instead, use the LoadRSAPSSSignerVerifier() method directly.
func LoadSignerVerifier(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (SignerVerifier, error) {
return LoadSignerVerifierWithOpts(privateKey, options.WithHash(hashFunc))
}

// LoadSignerVerifierWithOpts returns a signature.SignerVerifier based on the
// algorithm of the private key provided and the user's choice.
func LoadSignerVerifierWithOpts(privateKey crypto.PrivateKey, opts ...LoadOption) (SignerVerifier, error) {
var rsaPSSOptions *rsa.PSSOptions
var useED25519ph bool
hashFunc := crypto.SHA256
for _, o := range opts {
o.ApplyED25519ph(&useED25519ph)
o.ApplyHash(&hashFunc)
o.ApplyRSAPSS(&rsaPSSOptions)
}

switch pk := privateKey.(type) {
case *rsa.PrivateKey:
if rsaPSSOptions != nil {
return LoadRSAPSSSignerVerifier(pk, hashFunc, rsaPSSOptions)
}
return LoadRSAPKCS1v15SignerVerifier(pk, hashFunc)
case *ecdsa.PrivateKey:
return LoadECDSASignerVerifier(pk, hashFunc)
case ed25519.PrivateKey:
if useED25519ph {
return LoadED25519phSignerVerifier(pk)
}
return LoadED25519SignerVerifier(pk)
}
return nil, errors.New("unsupported public key type")

@ -67,3 +89,17 @@ func LoadSignerVerifierFromPEMFile(path string, hashFunc crypto.Hash, pf cryptou
}
return LoadSignerVerifier(priv, hashFunc)
}

// LoadSignerVerifierFromPEMFileWithOpts returns a signature.SignerVerifier based on the algorithm of the private key
// in the file. The SignerVerifier will use the hash function specified in the options when computing digests.
func LoadSignerVerifierFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, opts ...LoadOption) (SignerVerifier, error) {
fileBytes, err := os.ReadFile(filepath.Clean(path))
if err != nil {
return nil, err
}
priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf)
if err != nil {
return nil, err
}
return LoadSignerVerifierWithOpts(priv, opts...)
}
@ -26,6 +26,7 @@ import (
"path/filepath"

"github.com/sigstore/sigstore/pkg/cryptoutils"
"github.com/sigstore/sigstore/pkg/signature/options"
)

// Verifier verifies the digital signature using a specified public key

@ -40,12 +41,33 @@ type Verifier interface {
// If publicKey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a
// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() method directly.
func LoadVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (Verifier, error) {
return LoadVerifierWithOpts(publicKey, options.WithHash(hashFunc))
}

// LoadVerifierWithOpts returns a signature.Verifier based on the algorithm of the public key
// provided that will use the hash function specified when computing digests.
func LoadVerifierWithOpts(publicKey crypto.PublicKey, opts ...LoadOption) (Verifier, error) {
var rsaPSSOptions *rsa.PSSOptions
var useED25519ph bool
hashFunc := crypto.SHA256
for _, o := range opts {
o.ApplyED25519ph(&useED25519ph)
o.ApplyHash(&hashFunc)
o.ApplyRSAPSS(&rsaPSSOptions)
}

switch pk := publicKey.(type) {
case *rsa.PublicKey:
if rsaPSSOptions != nil {
return LoadRSAPSSVerifier(pk, hashFunc, rsaPSSOptions)
}
return LoadRSAPKCS1v15Verifier(pk, hashFunc)
case *ecdsa.PublicKey:
return LoadECDSAVerifier(pk, hashFunc)
case ed25519.PublicKey:
if useED25519ph {
return LoadED25519phVerifier(pk)
}
return LoadED25519Verifier(pk)
}
return nil, errors.New("unsupported public key type")

@ -98,3 +120,19 @@ func LoadVerifierFromPEMFile(path string, hashFunc crypto.Hash) (Verifier, error

return LoadVerifier(pubKey, hashFunc)
}

// LoadVerifierFromPEMFileWithOpts returns a signature.Verifier based on the contents of a
// file located at path. The Verifier wil use the hash function specified in the options when computing digests.
func LoadVerifierFromPEMFileWithOpts(path string, opts ...LoadOption) (Verifier, error) {
fileBytes, err := os.ReadFile(filepath.Clean(path))
if err != nil {
return nil, err
}

pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(fileBytes)
if err != nil {
return nil, err
}

return LoadVerifierWithOpts(pubKey, opts...)
}
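For reference, a hedged sketch (not part of this commit) of loading an RSA-PSS verifier through the same options mechanism, assuming the vendored sigstore packages above; the key is generated locally for illustration:

	package main

	import (
		"crypto"
		"crypto/rand"
		"crypto/rsa"
		"fmt"

		"github.com/sigstore/sigstore/pkg/signature"
		"github.com/sigstore/sigstore/pkg/signature/options"
	)

	func main() {
		key, err := rsa.GenerateKey(rand.Reader, 2048)
		if err != nil {
			panic(err)
		}
		// WithRSAPSS switches the returned Verifier from PKCS#1 v1.5 to PSS;
		// a hash set in PSSOptions overrides WithHash, per the comment above.
		v, err := signature.LoadVerifierWithOpts(&key.PublicKey,
			options.WithHash(crypto.SHA256),
			options.WithRSAPSS(&rsa.PSSOptions{Hash: crypto.SHA256}))
		if err != nil {
			panic(err)
		}
		fmt.Printf("%T\n", v) // *signature.RSAPSSVerifier
	}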
@ -15,16 +15,15 @@ import (

// ArrayCodec is the Codec used for bsoncore.Array values.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// ArrayCodec registered.
// Deprecated: ArrayCodec will not be directly accessible in Go Driver 2.0.
type ArrayCodec struct{}

var defaultArrayCodec = NewArrayCodec()

// NewArrayCodec returns an ArrayCodec.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// ArrayCodec registered.
// Deprecated: NewArrayCodec will not be available in Go Driver 2.0. See
// [ArrayCodec] for more details.
func NewArrayCodec() *ArrayCodec {
return &ArrayCodec{}
}
@ -17,13 +17,28 @@ import (
|
|||
|
||||
// ByteSliceCodec is the Codec used for []byte values.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// ByteSliceCodec registered.
|
||||
// Deprecated: ByteSliceCodec will not be directly configurable in Go Driver
|
||||
// 2.0. To configure the byte slice encode and decode behavior, use the
|
||||
// configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
|
||||
// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the byte slice
|
||||
// encode and decode behavior for a mongo.Client, use
|
||||
// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
|
||||
//
|
||||
// For example, to configure a mongo.Client to encode nil byte slices as empty
|
||||
// BSON binary values, use:
|
||||
//
|
||||
// opt := options.Client().SetBSONOptions(&options.BSONOptions{
|
||||
// NilByteSliceAsEmpty: true,
|
||||
// })
|
||||
//
|
||||
// See the deprecation notice for each field in ByteSliceCodec for the
|
||||
// corresponding settings.
|
||||
type ByteSliceCodec struct {
|
||||
// EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values
|
||||
// instead of BSON null.
|
||||
//
|
||||
// Deprecated: Use bson.Encoder.NilByteSliceAsEmpty instead.
|
||||
// Deprecated: Use bson.Encoder.NilByteSliceAsEmpty or options.BSONOptions.NilByteSliceAsEmpty
|
||||
// instead.
|
||||
EncodeNilAsEmpty bool
|
||||
}
|
||||
|
||||
|
|
@ -38,8 +53,8 @@ var (
|
|||
|
||||
// NewByteSliceCodec returns a ByteSliceCodec with options opts.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// ByteSliceCodec registered.
|
||||
// Deprecated: NewByteSliceCodec will not be available in Go Driver 2.0. See
|
||||
// [ByteSliceCodec] for more details.
|
||||
func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec {
|
||||
byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...)
|
||||
codec := ByteSliceCodec{}
|
||||
|
|
|
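For reference, the deprecation notices above point at BSONOptions as the replacement configuration path; a hedged sketch (not part of this commit), assuming a reachable MongoDB instance at an illustrative URI:

	package main

	import (
		"context"

		"go.mongodb.org/mongo-driver/mongo"
		"go.mongodb.org/mongo-driver/mongo/options"
	)

	func main() {
		// Configure the behavior formerly set on ByteSliceCodec directly.
		opt := options.Client().
			ApplyURI("mongodb://localhost:27017"). // placeholder URI
			SetBSONOptions(&options.BSONOptions{
				NilByteSliceAsEmpty: true, // was ByteSliceCodec.EncodeNilAsEmpty
			})
		client, err := mongo.Connect(context.Background(), opt)
		if err != nil {
			panic(err)
		}
		defer client.Disconnect(context.Background())
	}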
|||
10 common/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go generated vendored
|
|
@ -41,7 +41,7 @@ func newDefaultStructCodec() *StructCodec {
|
|||
if err != nil {
|
||||
// This function is called from the codec registration path, so errors can't be propagated. If there's an error
|
||||
// constructing the StructCodec, we panic to avoid losing it.
|
||||
panic(fmt.Errorf("error creating default StructCodec: %v", err))
|
||||
panic(fmt.Errorf("error creating default StructCodec: %w", err))
|
||||
}
|
||||
return codec
|
||||
}
|
||||
|
|
@ -178,7 +178,7 @@ func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueRe
|
|||
|
||||
for {
|
||||
key, elemVr, err := dr.ReadElement()
|
||||
if err == bsonrw.ErrEOD {
|
||||
if errors.Is(err, bsonrw.ErrEOD) {
|
||||
break
|
||||
} else if err != nil {
|
||||
return err
|
||||
|
|
@ -1379,7 +1379,7 @@ func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.Value
|
|||
keyType := val.Type().Key()
|
||||
for {
|
||||
key, vr, err := dr.ReadElement()
|
||||
if err == bsonrw.ErrEOD {
|
||||
if errors.Is(err, bsonrw.ErrEOD) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
|
|
@ -1675,7 +1675,7 @@ func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueR
|
|||
idx := 0
|
||||
for {
|
||||
vr, err := ar.ReadValue()
|
||||
if err == bsonrw.ErrEOA {
|
||||
if errors.Is(err, bsonrw.ErrEOA) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
|
|
@ -1787,7 +1787,7 @@ func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr b
|
|||
elems := make([]reflect.Value, 0)
|
||||
for {
|
||||
key, vr, err := dr.ReadElement()
|
||||
if err == bsonrw.ErrEOD {
|
||||
if errors.Is(err, bsonrw.ErrEOD) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
|
|
|
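For context, the == to errors.Is rewrites above matter once sentinel errors get wrapped; a minimal illustration (not part of this commit) using only the standard library and the vendored bsonrw package:

	package main

	import (
		"errors"
		"fmt"

		"go.mongodb.org/mongo-driver/bson/bsonrw"
	)

	func main() {
		wrapped := fmt.Errorf("reading element: %w", bsonrw.ErrEOD)
		fmt.Println(wrapped == bsonrw.ErrEOD)          // false: wrapping breaks ==
		fmt.Println(errors.Is(wrapped, bsonrw.ErrEOD)) // true: errors.Is unwraps
	}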
|||
12 common/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go generated vendored
|
|
@ -343,7 +343,7 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum
|
|||
}
|
||||
|
||||
currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key))
|
||||
if lookupErr != nil && lookupErr != errInvalidValue {
|
||||
if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
|
||||
return lookupErr
|
||||
}
|
||||
|
||||
|
|
@ -352,7 +352,7 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum
|
|||
return err
|
||||
}
|
||||
|
||||
if lookupErr == errInvalidValue {
|
||||
if errors.Is(lookupErr, errInvalidValue) {
|
||||
err = vw.WriteNull()
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -418,7 +418,7 @@ func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.Val
|
|||
|
||||
for idx := 0; idx < val.Len(); idx++ {
|
||||
currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx))
|
||||
if lookupErr != nil && lookupErr != errInvalidValue {
|
||||
if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
|
||||
return lookupErr
|
||||
}
|
||||
|
||||
|
|
@ -427,7 +427,7 @@ func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.Val
|
|||
return err
|
||||
}
|
||||
|
||||
if lookupErr == errInvalidValue {
|
||||
if errors.Is(lookupErr, errInvalidValue) {
|
||||
err = vw.WriteNull()
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -487,7 +487,7 @@ func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.Val
|
|||
|
||||
for idx := 0; idx < val.Len(); idx++ {
|
||||
currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx))
|
||||
if lookupErr != nil && lookupErr != errInvalidValue {
|
||||
if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
|
||||
return lookupErr
|
||||
}
|
||||
|
||||
|
|
@ -496,7 +496,7 @@ func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.Val
|
|||
return err
|
||||
}
|
||||
|
||||
if lookupErr == errInvalidValue {
|
||||
if errors.Is(lookupErr, errInvalidValue) {
|
||||
err = vw.WriteNull()
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
|||
24 common/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go generated vendored
|
|
@ -17,13 +17,27 @@ import (
|
|||
|
||||
// EmptyInterfaceCodec is the Codec used for interface{} values.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// EmptyInterfaceCodec registered.
|
||||
// Deprecated: EmptyInterfaceCodec will not be directly configurable in Go
|
||||
// Driver 2.0. To configure the empty interface encode and decode behavior, use
|
||||
// the configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
|
||||
// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the empty interface
|
||||
// encode and decode behavior for a mongo.Client, use
|
||||
// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
|
||||
//
|
||||
// For example, to configure a mongo.Client to unmarshal BSON binary field
|
||||
// values as a Go byte slice, use:
|
||||
//
|
||||
// opt := options.Client().SetBSONOptions(&options.BSONOptions{
|
||||
// BinaryAsSlice: true,
|
||||
// })
|
||||
//
|
||||
// See the deprecation notice for each field in EmptyInterfaceCodec for the
|
||||
// corresponding settings.
|
||||
type EmptyInterfaceCodec struct {
|
||||
// DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the
|
||||
// "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
|
||||
//
|
||||
// Deprecated: Use bson.Decoder.BinaryAsSlice instead.
|
||||
// Deprecated: Use bson.Decoder.BinaryAsSlice or options.BSONOptions.BinaryAsSlice instead.
|
||||
DecodeBinaryAsSlice bool
|
||||
}
|
||||
|
||||
|
|
@ -38,8 +52,8 @@ var (
|
|||
|
||||
// NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// EmptyInterfaceCodec registered.
|
||||
// Deprecated: NewEmptyInterfaceCodec will not be available in Go Driver 2.0. See
|
||||
// [EmptyInterfaceCodec] for more details.
|
||||
func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec {
|
||||
interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...)
|
||||
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ package bsoncodec
|
|||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
|
@ -21,25 +22,40 @@ var defaultMapCodec = NewMapCodec()
|
|||
|
||||
// MapCodec is the Codec used for map values.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// MapCodec registered.
|
||||
// Deprecated: MapCodec will not be directly configurable in Go Driver 2.0. To
|
||||
// configure the map encode and decode behavior, use the configuration methods
|
||||
// on a [go.mongodb.org/mongo-driver/bson.Encoder] or
|
||||
// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the map encode and
|
||||
// decode behavior for a mongo.Client, use
|
||||
// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
|
||||
//
|
||||
// For example, to configure a mongo.Client to marshal nil Go maps as empty BSON
|
||||
// documents, use:
|
||||
//
|
||||
// opt := options.Client().SetBSONOptions(&options.BSONOptions{
|
||||
// NilMapAsEmpty: true,
|
||||
// })
|
||||
//
|
||||
// See the deprecation notice for each field in MapCodec for the corresponding
|
||||
// settings.
|
||||
type MapCodec struct {
|
||||
// DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination
|
||||
// value passed to Decode before unmarshaling BSON documents into them.
|
||||
//
|
||||
// Deprecated: Use bson.Decoder.ZeroMaps instead.
|
||||
// Deprecated: Use bson.Decoder.ZeroMaps or options.BSONOptions.ZeroMaps instead.
|
||||
DecodeZerosMap bool
|
||||
|
||||
// EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of
|
||||
// BSON null.
|
||||
//
|
||||
// Deprecated: Use bson.Encoder.NilMapAsEmpty instead.
|
||||
// Deprecated: Use bson.Encoder.NilMapAsEmpty or options.BSONOptions.NilMapAsEmpty instead.
|
||||
EncodeNilAsEmpty bool
|
||||
|
||||
// EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name
|
||||
// strings using fmt.Sprintf() instead of the default string conversion logic.
|
||||
//
|
||||
// Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt instead.
|
||||
// Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt or
|
||||
// options.BSONOptions.StringifyMapKeysWithFmt instead.
|
||||
EncodeKeysWithStringer bool
|
||||
}
|
||||
|
||||
|
|
@ -61,8 +77,8 @@ type KeyUnmarshaler interface {
|
|||
|
||||
// NewMapCodec returns a MapCodec with options opts.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// MapCodec registered.
|
||||
// Deprecated: NewMapCodec will not be available in Go Driver 2.0. See
|
||||
// [MapCodec] for more details.
|
||||
func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec {
|
||||
mapOpt := bsonoptions.MergeMapCodecOptions(opts...)
|
||||
|
||||
|
|
@ -128,7 +144,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v
|
|||
}
|
||||
|
||||
currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key))
|
||||
if lookupErr != nil && lookupErr != errInvalidValue {
|
||||
if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
|
||||
return lookupErr
|
||||
}
|
||||
|
||||
|
|
@ -137,7 +153,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v
|
|||
return err
|
||||
}
|
||||
|
||||
if lookupErr == errInvalidValue {
|
||||
if errors.Is(lookupErr, errInvalidValue) {
|
||||
err = vw.WriteNull()
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -200,7 +216,7 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref
|
|||
|
||||
for {
|
||||
key, vr, err := dr.ReadElement()
|
||||
if err == bsonrw.ErrEOD {
|
||||
if errors.Is(err, bsonrw.ErrEOD) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
|
|
@ -313,7 +329,7 @@ func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value,
|
|||
if mc.EncodeKeysWithStringer {
|
||||
parsed, err := strconv.ParseFloat(key, 64)
|
||||
if err != nil {
|
||||
return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyType.Kind(), err)
|
||||
return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %w", keyType.Kind(), err)
|
||||
}
|
||||
keyVal = reflect.ValueOf(parsed)
|
||||
break
|
||||
|
|
|
|||
|
|
@ -18,8 +18,16 @@ var _ ValueDecoder = &PointerCodec{}
|
|||
|
||||
// PointerCodec is the Codec used for pointers.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// PointerCodec registered.
|
||||
// Deprecated: PointerCodec will not be directly accessible in Go Driver 2.0. To
|
||||
// override the default pointer encode and decode behavior, create a new registry
|
||||
// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new
|
||||
// encoder and decoder for pointers.
|
||||
//
|
||||
// For example,
|
||||
//
|
||||
// reg := bson.NewRegistry()
|
||||
// reg.RegisterKindEncoder(reflect.Ptr, myPointerEncoder)
|
||||
// reg.RegisterKindDecoder(reflect.Ptr, myPointerDecoder)
|
||||
type PointerCodec struct {
|
||||
ecache typeEncoderCache
|
||||
dcache typeDecoderCache
|
||||
|
|
@ -27,8 +35,8 @@ type PointerCodec struct {
|
|||
|
||||
// NewPointerCodec returns a PointerCodec that has been initialized.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// PointerCodec registered.
|
||||
// Deprecated: NewPointerCodec will not be available in Go Driver 2.0. See
|
||||
// [PointerCodec] for more details.
|
||||
func NewPointerCodec() *PointerCodec {
|
||||
return &PointerCodec{}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@
|
|||
package bsoncodec
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
|
|
@ -20,8 +21,22 @@ var defaultSliceCodec = NewSliceCodec()
|
|||
|
||||
// SliceCodec is the Codec used for slice values.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// SliceCodec registered.
|
||||
// Deprecated: SliceCodec will not be directly configurable in Go Driver 2.0. To
|
||||
// configure the slice encode and decode behavior, use the configuration methods
|
||||
// on a [go.mongodb.org/mongo-driver/bson.Encoder] or
|
||||
// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the slice encode and
|
||||
// decode behavior for a mongo.Client, use
|
||||
// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
|
||||
//
|
||||
// For example, to configure a mongo.Client to marshal nil Go slices as empty
|
||||
// BSON arrays, use:
|
||||
//
|
||||
// opt := options.Client().SetBSONOptions(&options.BSONOptions{
|
||||
// NilSliceAsEmpty: true,
|
||||
// })
|
||||
//
|
||||
// See the deprecation notice for each field in SliceCodec for the corresponding
|
||||
// settings.
|
||||
type SliceCodec struct {
|
||||
// EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of
|
||||
// BSON null.
|
||||
|
|
@ -32,8 +47,8 @@ type SliceCodec struct {
|
|||
|
||||
// NewSliceCodec returns a MapCodec with options opts.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// SliceCodec registered.
|
||||
// Deprecated: NewSliceCodec will not be available in Go Driver 2.0. See
|
||||
// [SliceCodec] for more details.
|
||||
func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec {
|
||||
sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...)
|
||||
|
||||
|
|
@ -93,7 +108,7 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re
|
|||
|
||||
for idx := 0; idx < val.Len(); idx++ {
|
||||
currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx))
|
||||
if lookupErr != nil && lookupErr != errInvalidValue {
|
||||
if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) {
|
||||
return lookupErr
|
||||
}
|
||||
|
||||
|
|
@ -102,7 +117,7 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re
|
|||
return err
|
||||
}
|
||||
|
||||
if lookupErr == errInvalidValue {
|
||||
if errors.Is(lookupErr, errInvalidValue) {
|
||||
err = vw.WriteNull()
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
|||
|
|
@ -17,8 +17,16 @@ import (
|
|||
|
||||
// StringCodec is the Codec used for string values.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// StringCodec registered.
|
||||
// Deprecated: StringCodec will not be directly accessible in Go Driver 2.0. To
|
||||
// override the default string encode and decode behavior, create a new registry
|
||||
// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new
|
||||
// encoder and decoder for strings.
|
||||
//
|
||||
// For example,
|
||||
//
|
||||
// reg := bson.NewRegistry()
|
||||
// reg.RegisterKindEncoder(reflect.String, myStringEncoder)
|
||||
// reg.RegisterKindDecoder(reflect.String, myStringDecoder)
|
||||
type StringCodec struct {
|
||||
// DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation.
|
||||
// If false, a string made from the raw object ID bytes will be used. Defaults to true.
|
||||
|
|
@ -38,8 +46,8 @@ var (
|
|||
|
||||
// NewStringCodec returns a StringCodec with options opts.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// StringCodec registered.
|
||||
// Deprecated: NewStringCodec will not be available in Go Driver 2.0. See
|
||||
// [StringCodec] for more details.
|
||||
func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec {
|
||||
stringOpt := bsonoptions.MergeStringCodecOptions(opts...)
|
||||
return &StringCodec{*stringOpt.DecodeObjectIDAsHex}
|
||||
|
|
|
|||
|
|
@ -60,8 +60,22 @@ type Zeroer interface {
|
|||
|
||||
// StructCodec is the Codec used for struct values.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// StructCodec registered.
|
||||
// Deprecated: StructCodec will not be directly configurable in Go Driver 2.0.
|
||||
// To configure the struct encode and decode behavior, use the configuration
|
||||
// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
|
||||
// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the struct encode
|
||||
// and decode behavior for a mongo.Client, use
|
||||
// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
|
||||
//
|
||||
// For example, to configure a mongo.Client to omit zero-value structs when
|
||||
// using the "omitempty" struct tag, use:
|
||||
//
|
||||
// opt := options.Client().SetBSONOptions(&options.BSONOptions{
|
||||
// OmitZeroStruct: true,
|
||||
// })
|
||||
//
|
||||
// See the deprecation notice for each field in StructCodec for the corresponding
|
||||
// settings.
|
||||
type StructCodec struct {
|
||||
cache sync.Map // map[reflect.Type]*structDescription
|
||||
parser StructTagParser
|
||||
|
|
@ -69,7 +83,7 @@ type StructCodec struct {
|
|||
// DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the
|
||||
// destination value passed to Decode before unmarshaling BSON documents into them.
|
||||
//
|
||||
// Deprecated: Use bson.Decoder.ZeroStructs instead.
|
||||
// Deprecated: Use bson.Decoder.ZeroStructs or options.BSONOptions.ZeroStructs instead.
|
||||
DecodeZeroStruct bool
|
||||
|
||||
// DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the
|
||||
|
|
@ -82,7 +96,7 @@ type StructCodec struct {
|
|||
// MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag
|
||||
// option is set.
|
||||
//
|
||||
// Deprecated: Use bson.Encoder.OmitZeroStruct instead.
|
||||
// Deprecated: Use bson.Encoder.OmitZeroStruct or options.BSONOptions.OmitZeroStruct instead.
|
||||
EncodeOmitDefaultStruct bool
|
||||
|
||||
// AllowUnexportedFields allows encoding and decoding values from un-exported struct fields.
|
||||
|
|
@ -95,7 +109,8 @@ type StructCodec struct {
|
|||
// a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The
|
||||
// default value is true.
|
||||
//
|
||||
// Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates instead.
|
||||
// Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates or
|
||||
// options.BSONOptions.ErrorOnInlineDuplicates instead.
|
||||
OverwriteDuplicatedInlinedFields bool
|
||||
}
|
||||
|
||||
|
|
@ -104,8 +119,8 @@ var _ ValueDecoder = &StructCodec{}
|
|||
|
||||
// NewStructCodec returns a StructCodec that uses p for struct tag parsing.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// StructCodec registered.
|
||||
// Deprecated: NewStructCodec will not be available in Go Driver 2.0. See
|
||||
// [StructCodec] for more details.
|
||||
func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) {
|
||||
if p == nil {
|
||||
return nil, errors.New("a StructTagParser must be provided to NewStructCodec")
|
||||
|
|
@ -164,11 +179,11 @@ func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val
|
|||
|
||||
desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv)
|
||||
|
||||
if err != nil && err != errInvalidValue {
|
||||
if err != nil && !errors.Is(err, errInvalidValue) {
|
||||
return err
|
||||
}
|
||||
|
||||
if err == errInvalidValue {
|
||||
if errors.Is(err, errInvalidValue) {
|
||||
if desc.omitEmpty {
|
||||
continue
|
||||
}
|
||||
|
|
@ -189,17 +204,17 @@ func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val
|
|||
|
||||
encoder := desc.encoder
|
||||
|
||||
var zero bool
|
||||
var empty bool
|
||||
if cz, ok := encoder.(CodecZeroer); ok {
|
||||
zero = cz.IsTypeZero(rv.Interface())
|
||||
empty = cz.IsTypeZero(rv.Interface())
|
||||
} else if rv.Kind() == reflect.Interface {
|
||||
// isZero will not treat an interface rv as an interface, so we need to check for the
|
||||
// zero interface separately.
|
||||
zero = rv.IsNil()
|
||||
// isEmpty will not treat an interface rv as an interface, so we need to check for the
|
||||
// nil interface separately.
|
||||
empty = rv.IsNil()
|
||||
} else {
|
||||
zero = isZero(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct)
|
||||
empty = isEmpty(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct)
|
||||
}
|
||||
if desc.omitEmpty && zero {
|
||||
if desc.omitEmpty && empty {
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
@ -239,8 +254,8 @@ func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val
|
|||
}
|
||||
|
||||
func newDecodeError(key string, original error) error {
|
||||
de, ok := original.(*DecodeError)
|
||||
if !ok {
|
||||
var de *DecodeError
|
||||
if !errors.As(original, &de) {
|
||||
return &DecodeError{
|
||||
keys: []string{key},
|
||||
wrapped: original,
|
||||
|
|
@ -308,7 +323,7 @@ func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val
|
|||
|
||||
for {
|
||||
name, vr, err := dr.ReadElement()
|
||||
if err == bsonrw.ErrEOD {
|
||||
if errors.Is(err, bsonrw.ErrEOD) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
|
|
@ -391,12 +406,15 @@ func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val
|
|||
return nil
|
||||
}
|
||||
|
||||
func isZero(v reflect.Value, omitZeroStruct bool) bool {
|
||||
func isEmpty(v reflect.Value, omitZeroStruct bool) bool {
|
||||
kind := v.Kind()
|
||||
if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) {
|
||||
return v.Interface().(Zeroer).IsZero()
|
||||
}
|
||||
if kind == reflect.Struct {
|
||||
switch kind {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Struct:
|
||||
if !omitZeroStruct {
|
||||
return false
|
||||
}
|
||||
|
|
@ -410,7 +428,7 @@ func isZero(v reflect.Value, omitZeroStruct bool) bool {
|
|||
if ff.PkgPath != "" && !ff.Anonymous {
|
||||
continue // Private field
|
||||
}
|
||||
if !isZero(v.Field(i), omitZeroStruct) {
|
||||
if !isEmpty(v.Field(i), omitZeroStruct) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,12 +23,26 @@ const (
|
|||
|
||||
// TimeCodec is the Codec used for time.Time values.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// TimeCodec registered.
|
||||
// Deprecated: TimeCodec will not be directly configurable in Go Driver 2.0.
|
||||
// To configure the time.Time encode and decode behavior, use the configuration
|
||||
// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or
|
||||
// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the time.Time encode
|
||||
// and decode behavior for a mongo.Client, use
|
||||
// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
|
||||
//
|
||||
// For example, to configure a mongo.Client to ..., use:
|
||||
//
|
||||
// opt := options.Client().SetBSONOptions(&options.BSONOptions{
|
||||
// UseLocalTimeZone: true,
|
||||
// })
|
||||
//
|
||||
// See the deprecation notice for each field in TimeCodec for the corresponding
|
||||
// settings.
|
||||
type TimeCodec struct {
|
||||
// UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false.
|
||||
//
|
||||
// Deprecated: Use bson.Decoder.UseLocalTimeZone instead.
|
||||
// Deprecated: Use bson.Decoder.UseLocalTimeZone or options.BSONOptions.UseLocalTimeZone
|
||||
// instead.
|
||||
UseLocalTimeZone bool
|
||||
}
|
||||
|
||||
|
|
@ -42,8 +56,8 @@ var (
|
|||
|
||||
// NewTimeCodec returns a TimeCodec with options opts.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// TimeCodec registered.
|
||||
// Deprecated: NewTimeCodec will not be available in Go Driver 2.0. See
|
||||
// [TimeCodec] for more details.
|
||||
func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec {
|
||||
timeOpt := bsonoptions.MergeTimeCodecOptions(opts...)
|
||||
|
||||
|
|
|
|||
|
|
@ -18,13 +18,27 @@ import (
|
|||
|
||||
// UIntCodec is the Codec used for uint values.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// UIntCodec registered.
|
||||
// Deprecated: UIntCodec will not be directly configurable in Go Driver 2.0. To
|
||||
// configure the uint encode and decode behavior, use the configuration methods
|
||||
// on a [go.mongodb.org/mongo-driver/bson.Encoder] or
|
||||
// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the uint encode and
|
||||
// decode behavior for a mongo.Client, use
|
||||
// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions].
|
||||
//
|
||||
// For example, to configure a mongo.Client to marshal Go uint values as the
|
||||
// minimum BSON int size that can represent the value, use:
|
||||
//
|
||||
// opt := options.Client().SetBSONOptions(&options.BSONOptions{
|
||||
// IntMinSize: true,
|
||||
// })
|
||||
//
|
||||
// See the deprecation notice for each field in UIntCodec for the corresponding
|
||||
// settings.
|
||||
type UIntCodec struct {
|
||||
// EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the
|
||||
// minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value.
|
||||
//
|
||||
// Deprecated: Use bson.Encoder.IntMinSize instead.
|
||||
// Deprecated: Use bson.Encoder.IntMinSize or options.BSONOptions.IntMinSize instead.
|
||||
EncodeToMinSize bool
|
||||
}
|
||||
|
||||
|
|
@ -38,8 +52,8 @@ var (
|
|||
|
||||
// NewUIntCodec returns a UIntCodec with options opts.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// UIntCodec registered.
|
||||
// Deprecated: NewUIntCodec will not be available in Go Driver 2.0. See
|
||||
// [UIntCodec] for more details.
|
||||
func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec {
|
||||
uintOpt := bsonoptions.MergeUIntCodecOptions(opts...)
|
||||
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@
|
|||
package bsonrw
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
|
|
@ -442,7 +443,7 @@ func (c Copier) copyArray(dst ValueWriter, src ValueReader) error {
|
|||
|
||||
for {
|
||||
vr, err := ar.ReadValue()
|
||||
if err == ErrEOA {
|
||||
if errors.Is(err, ErrEOA) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
|
|
@ -466,7 +467,7 @@ func (c Copier) copyArray(dst ValueWriter, src ValueReader) error {
|
|||
func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error {
|
||||
for {
|
||||
key, vr, err := dr.ReadElement()
|
||||
if err == ErrEOD {
|
||||
if errors.Is(err, ErrEOD) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -313,7 +313,7 @@ func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) {
|
|||
// convert hex to bytes
|
||||
bytes, err := hex.DecodeString(uuidNoHyphens)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %v", err)
|
||||
return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %w", err)
|
||||
}
|
||||
|
||||
ejp.advanceState()
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@
|
|||
package bsonrw
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
|
@ -613,7 +614,7 @@ func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) {
|
|||
name, t, err := ejvr.p.readKey()
|
||||
|
||||
if err != nil {
|
||||
if err == ErrEOD {
|
||||
if errors.Is(err, ErrEOD) {
|
||||
if ejvr.stack[ejvr.frame].mode == mCodeWithScope {
|
||||
_, err := ejvr.p.peekType()
|
||||
if err != nil {
|
||||
|
|
@ -640,7 +641,7 @@ func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) {
|
|||
|
||||
t, err := ejvr.p.peekType()
|
||||
if err != nil {
|
||||
if err == ErrEOA {
|
||||
if errors.Is(err, ErrEOA) {
|
||||
ejvr.pop()
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -58,7 +58,7 @@ func (js *jsonScanner) nextToken() (*jsonToken, error) {
c, err = js.readNextByte()
}

if err == io.EOF {
if errors.Is(err, io.EOF) {
return &jsonToken{t: jttEOF}, nil
} else if err != nil {
return nil, err

@ -198,7 +198,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) {
for {
c, err = js.readNextByte()
if err != nil {
if err == io.EOF {
if errors.Is(err, io.EOF) {
return nil, errors.New("end of input in JSON string")
}
return nil, err

@ -209,7 +209,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) {
case '\\':
c, err = js.readNextByte()
if err != nil {
if err == io.EOF {
if errors.Is(err, io.EOF) {
return nil, errors.New("end of input in JSON string")
}
return nil, err

@ -248,7 +248,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) {
if utf16.IsSurrogate(rn) {
c, err = js.readNextByte()
if err != nil {
if err == io.EOF {
if errors.Is(err, io.EOF) {
return nil, errors.New("end of input in JSON string")
}
return nil, err

@ -264,7 +264,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) {

c, err = js.readNextByte()
if err != nil {
if err == io.EOF {
if errors.Is(err, io.EOF) {
return nil, errors.New("end of input in JSON string")
}
return nil, err

@ -325,17 +325,17 @@ func (js *jsonScanner) scanLiteral(first byte) (*jsonToken, error) {

c5, err := js.readNextByte()

if bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || err == io.EOF) {
if bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || errors.Is(err, io.EOF)) {
js.pos = int(math.Max(0, float64(js.pos-1)))
return &jsonToken{t: jttBool, v: true, p: p}, nil
} else if bytes.Equal([]byte("null"), lit) && (isValueTerminator(c5) || err == io.EOF) {
} else if bytes.Equal([]byte("null"), lit) && (isValueTerminator(c5) || errors.Is(err, io.EOF)) {
js.pos = int(math.Max(0, float64(js.pos-1)))
return &jsonToken{t: jttNull, v: nil, p: p}, nil
} else if bytes.Equal([]byte("fals"), lit) {
if c5 == 'e' {
c5, err = js.readNextByte()

if isValueTerminator(c5) || err == io.EOF {
if isValueTerminator(c5) || errors.Is(err, io.EOF) {
js.pos = int(math.Max(0, float64(js.pos-1)))
return &jsonToken{t: jttBool, v: false, p: p}, nil
}

@ -384,7 +384,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) {
for {
c, err = js.readNextByte()

if err != nil && err != io.EOF {
if err != nil && !errors.Is(err, io.EOF) {
return nil, err
}

@ -413,7 +413,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) {
case '}', ']', ',':
s = nssDone
default:
if isWhiteSpace(c) || err == io.EOF {
if isWhiteSpace(c) || errors.Is(err, io.EOF) {
s = nssDone
} else {
s = nssInvalid

@ -430,7 +430,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) {
case '}', ']', ',':
s = nssDone
default:
if isWhiteSpace(c) || err == io.EOF {
if isWhiteSpace(c) || errors.Is(err, io.EOF) {
s = nssDone
} else if isDigit(c) {
s = nssSawIntegerDigits

@ -455,7 +455,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) {
case '}', ']', ',':
s = nssDone
default:
if isWhiteSpace(c) || err == io.EOF {
if isWhiteSpace(c) || errors.Is(err, io.EOF) {
s = nssDone
} else if isDigit(c) {
s = nssSawFractionDigits

@ -490,7 +490,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) {
case '}', ']', ',':
s = nssDone
default:
if isWhiteSpace(c) || err == io.EOF {
if isWhiteSpace(c) || errors.Is(err, io.EOF) {
s = nssDone
} else if isDigit(c) {
s = nssSawExponentDigits
@ -6,9 +6,9 @@

// Package bson is a library for reading, writing, and manipulating BSON. BSON is a binary serialization format used to
// store documents and make remote procedure calls in MongoDB. The BSON specification is located at https://bsonspec.org.
// The BSON library handles marshalling and unmarshalling of values through a configurable codec system. For a description
// of the codec system and examples of registering custom codecs, see the bsoncodec package. For additional information and
// usage examples, check out the [Work with BSON] page in the Go Driver docs site.
// The BSON library handles marshaling and unmarshaling of values through a configurable codec system. For a description
// of the codec system and examples of registering custom codecs, see the bsoncodec package. For additional information
// and usage examples, check out the [Work with BSON] page in the Go Driver docs site.
//
// # Raw BSON
//

@ -38,7 +38,7 @@
// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
//
// When decoding BSON to a D or M, the following type mappings apply when unmarshalling:
// When decoding BSON to a D or M, the following type mappings apply when unmarshaling:
//
// 1. BSON int32 unmarshals to an int32.
// 2. BSON int64 unmarshals to an int64.
@ -62,83 +62,78 @@
// 20. BSON DBPointer unmarshals to a primitive.DBPointer.
// 21. BSON symbol unmarshals to a primitive.Symbol.
//
// The above mappings also apply when marshalling a D or M to BSON. Some other useful marshalling mappings are:
// The above mappings also apply when marshaling a D or M to BSON. Some other useful marshaling mappings are:
//
// 1. time.Time marshals to a BSON datetime.
// 2. int8, int16, and int32 marshal to a BSON int32.
// 3. int marshals to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, inclusive, and a BSON int64
// otherwise.
// 4. int64 marshals to BSON int64.
// 4. int64 marshals to BSON int64 (unless [Encoder.IntMinSize] is set).
// 5. uint8 and uint16 marshal to a BSON int32.
// 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32,
// inclusive, and BSON int64 otherwise.
// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or
// 6. uint, uint32, and uint64 marshal to a BSON int64 (unless [Encoder.IntMinSize] is set).
// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshaling a BSON null or
// undefined value into a string will yield the empty string.).
//
// # Structs
//
// Structs can be marshalled/unmarshalled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended
// Structs can be marshaled/unmarshaled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended
// JSON, the following rules apply:
//
// 1. Only exported fields in structs will be marshalled or unmarshalled.
// 1. Only exported fields in structs will be marshaled or unmarshaled.
//
// 2. When marshalling a struct, each field will be lowercased to generate the key for the corresponding BSON element.
// 2. When marshaling a struct, each field will be lowercased to generate the key for the corresponding BSON element.
// For example, a struct field named "Foo" will generate key "foo". This can be overridden via a struct tag (e.g.
// `bson:"fooField"` to generate key "fooField" instead).
//
// 3. An embedded struct field is marshalled as a subdocument. The key will be the lowercased name of the field's type.
// 3. An embedded struct field is marshaled as a subdocument. The key will be the lowercased name of the field's type.
//
// 4. A pointer field is marshalled as the underlying type if the pointer is non-nil. If the pointer is nil, it is
// marshalled as a BSON null value.
// 4. A pointer field is marshaled as the underlying type if the pointer is non-nil. If the pointer is nil, it is
// marshaled as a BSON null value.
//
// 5. When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents
// unmarshalled into an interface{} field will be unmarshalled as a D.
// 5. When unmarshaling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents
// unmarshaled into an interface{} field will be unmarshaled as a D.
//
// The encoding of each struct field can be customized by the "bson" struct tag.
//
// This tag behavior is configurable, and different struct tag behavior can be configured by initializing a new
// bsoncodec.StructCodec with the desired tag parser and registering that StructCodec onto the Registry. By default, JSON tags
// are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below:
// bsoncodec.StructCodec with the desired tag parser and registering that StructCodec onto the Registry. By default, JSON
// tags are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below:
//
// Example:
//
// structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser)
//
// The bson tag gives the name of the field, possibly followed by a comma-separated list of options.
// The name may be empty in order to specify options without overriding the default field name. The following options can be used
// to configure behavior:
// The name may be empty in order to specify options without overriding the default field name. The following options can
// be used to configure behavior:
//
// 1. omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to
// the zero value. Fields with language primitive types such as integers, booleans, and strings are considered empty if
// their value is equal to the zero value for the type (i.e. 0 for integers, false for booleans, and "" for strings).
// Slices, maps, and arrays are considered empty if they are of length zero. Interfaces and pointers are considered
// empty if their value is nil. By default, structs are only considered empty if the struct type implements the
// bsoncodec.Zeroer interface and the IsZero method returns true. Struct fields whose types do not implement Zeroer are
// never considered empty and will be marshalled as embedded documents.
// 1. omitempty: If the omitempty struct tag is specified on a field, the field will be omitted from the marshaling if
// the field has an empty value, defined as false, 0, a nil pointer, a nil interface value, and any empty array,
// slice, map, or string.
// NOTE: It is recommended that this tag be used for all slice and map fields.
//
// 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of
// the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For other
// types, this tag is ignored.
// the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For
// other types, this tag is ignored.
//
// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled
// into that field will be truncated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int,
// it will be unmarshalled as 3. If this tag is not specified, the decoder will throw an error if the value cannot be
// decoded without losing precision. For float64 or non-numeric types, this tag is ignored.
// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles
// unmarshaled into that field will be truncated at the decimal point. For example, if 3.14 is unmarshaled into a
// field of type int, it will be unmarshaled as 3. If this tag is not specified, the decoder will throw an error if
// the value cannot be decoded without losing precision. For float64 or non-numeric types, this tag is ignored.
//
// 4. inline: If the inline struct tag is specified for a struct or map field, the field will be "flattened" when
// marshalling and "un-flattened" when unmarshalling. This means that all of the fields in that struct/map will be
// pulled up one level and will become top-level fields rather than being fields in a nested document. For example, if a
// map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will be
// {"foo": "bar"} instead of {"map": {"foo": "bar"}}. There can only be one inlined map field in a struct. If there are
// duplicated fields in the resulting document when an inlined struct is marshalled, the inlined field will be overwritten.
// If there are duplicated fields in the resulting document when an inlined map is marshalled, an error will be returned.
// This tag can be used with fields that are pointers to structs. If an inlined pointer field is nil, it will not be
// marshalled. For fields that are not maps or structs, this tag is ignored.
// marshaling and "un-flattened" when unmarshaling. This means that all of the fields in that struct/map will be
// pulled up one level and will become top-level fields rather than being fields in a nested document. For example,
// if a map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will
// be {"foo": "bar"} instead of {"map": {"foo": "bar"}}. There can only be one inlined map field in a struct. If
// there are duplicated fields in the resulting document when an inlined struct is marshaled, the inlined field will
// be overwritten. If there are duplicated fields in the resulting document when an inlined map is marshaled, an
// error will be returned. This tag can be used with fields that are pointers to structs. If an inlined pointer field
// is nil, it will not be marshaled. For fields that are not maps or structs, this tag is ignored.
//
// # Marshalling and Unmarshalling
// # Marshaling and Unmarshaling
//
// Manually marshalling and unmarshalling can be done with the Marshal and Unmarshal family of functions.
// Manually marshaling and unmarshaling can be done with the Marshal and Unmarshal family of functions.
//
// [Work with BSON]: https://www.mongodb.com/docs/drivers/go/current/fundamentals/bson/
package bson
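For orientation only (not part of the diff): a small sketch of the struct-tag behavior described in the package documentation above, using the omitempty, minsize, and inline options; the type and field names are made up for illustration.

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

type Address struct {
	City string `bson:"city"`
}

type User struct {
	Name    string  `bson:"name"`
	Nick    string  `bson:"nick,omitempty"` // dropped from the document when empty
	Age     int64   `bson:"age,minsize"`    // stored as int32 when the value fits
	Address Address `bson:",inline"`        // fields flattened into the parent document
}

func main() {
	raw, err := bson.Marshal(User{Name: "a", Age: 1, Address: Address{City: "x"}})
	if err != nil {
		panic(err)
	}
	var m bson.M
	if err := bson.Unmarshal(raw, &m); err != nil {
		panic(err)
	}
	fmt.Println(m) // map[age:1 city:x name:a]
}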
@ -164,9 +164,6 @@ func (d Decimal128) BigInt() (*big.Int, int, error) {

// Would be handled by the logic below, but that's trivial and common.
if high == 0 && low == 0 && exp == 0 {
if posSign {
return new(big.Int), 0, nil
}
return new(big.Int), 0, nil
}

@ -183,7 +183,7 @@ func processUniqueBytes() [5]byte {
var b [5]byte
_, err := io.ReadFull(rand.Reader, b[:])
if err != nil {
panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err))
panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err))
}

return b

@ -193,7 +193,7 @@ func readRandomUint32() uint32 {
var b [4]byte
_, err := io.ReadFull(rand.Reader, b[:])
if err != nil {
panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err))
panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err))
}

return (uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
@ -2911,6 +2911,15 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
fl = &cs.flow
}
if !fl.add(int32(f.Increment)) {
// For stream, the sender sends RST_STREAM with an error code of FLOW_CONTROL_ERROR
if cs != nil {
rl.endStreamError(cs, StreamError{
StreamID: f.StreamID,
Code: ErrCodeFlowControl,
})
return nil
}

return ConnectionError(ErrCodeFlowControl)
}
cc.cond.Broadcast()
@ -15,22 +15,10 @@ Load passes most patterns directly to the underlying build tool.
The default build tool is the go command.
Its supported patterns are described at
https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns.
Other build systems may be supported by providing a "driver";
see [The driver protocol].

Load may be used in Go projects that use alternative build systems, by
installing an appropriate "driver" program for the build system and
specifying its location in the GOPACKAGESDRIVER environment variable.
For example,
https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration
explains how to use the driver for Bazel.
The driver program is responsible for interpreting patterns in its
preferred notation and reporting information about the packages that
they identify.
(See driverRequest and driverResponse types for the JSON
schema used by the protocol.
Though the protocol is supported, these types are currently unexported;
see #64608 for a proposal to publish them.)

Regardless of driver, all patterns with the prefix "query=", where query is a
All patterns with the prefix "query=", where query is a
non-empty string of letters from [a-z], are reserved and may be
interpreted as query operators.

@ -86,7 +74,29 @@ for details.
Most tools should pass their command-line arguments (after any flags)
uninterpreted to [Load], so that it can interpret them
according to the conventions of the underlying build system.

See the Example function for typical usage.

# The driver protocol

[Load] may be used to load Go packages even in Go projects that use
alternative build systems, by installing an appropriate "driver"
program for the build system and specifying its location in the
GOPACKAGESDRIVER environment variable.
For example,
https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration
explains how to use the driver for Bazel.

The driver program is responsible for interpreting patterns in its
preferred notation and reporting information about the packages that
those patterns identify. Drivers must also support the special "file="
and "pattern=" patterns described above.

The patterns are provided as positional command-line arguments. A
JSON-encoded [DriverRequest] message providing additional information
is written to the driver's standard input. The driver must write a
JSON-encoded [DriverResponse] message to its standard output. (This
message differs from the JSON schema produced by 'go list'.)
*/
package packages // import "golang.org/x/tools/go/packages"
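As a point of reference (not part of the diff): a typical caller of this package passes its patterns straight through to Load; whether 'go list' or an external GOPACKAGESDRIVER binary answers the query is decided as described in the documentation above. A minimal sketch:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles}
	// Patterns are forwarded uninterpreted to the underlying build tool or driver.
	pkgs, err := packages.Load(cfg, "fmt", "golang.org/x/tools/go/packages")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.PkgPath, len(p.GoFiles))
	}
}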
@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file enables an external tool to intercept package requests.
// If the tool is present then its results are used in preference to
// the go list command.

package packages

// This file defines the protocol that enables an external "driver"
// tool to supply package metadata in place of 'go list'.

import (
"bytes"
"encoding/json"

@ -17,31 +16,71 @@ import (
"strings"
)

// The Driver Protocol
// DriverRequest defines the schema of a request for package metadata
// from an external driver program. The JSON-encoded DriverRequest
// message is provided to the driver program's standard input. The
// query patterns are provided as command-line arguments.
//
// The driver, given the inputs to a call to Load, returns metadata about the packages specified.
// This allows for different build systems to support go/packages by telling go/packages how the
// packages' source is organized.
// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in
// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package
// documentation in doc.go for the full description of the patterns that need to be supported.
// A driver receives as a JSON-serialized driverRequest struct in standard input and will
// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output.

// driverRequest is used to provide the portion of Load's Config that is needed by a driver.
type driverRequest struct {
// See the package documentation for an overview.
type DriverRequest struct {
Mode LoadMode `json:"mode"`

// Env specifies the environment the underlying build system should be run in.
Env []string `json:"env"`

// BuildFlags are flags that should be passed to the underlying build system.
BuildFlags []string `json:"build_flags"`

// Tests specifies whether the patterns should also return test packages.
Tests bool `json:"tests"`

// Overlay maps file paths (relative to the driver's working directory) to the byte contents
// of overlay files.
Overlay map[string][]byte `json:"overlay"`
}

// DriverResponse defines the schema of a response from an external
// driver program, providing the results of a query for package
// metadata. The driver program must write a JSON-encoded
// DriverResponse message to its standard output.
//
// See the package documentation for an overview.
type DriverResponse struct {
// NotHandled is returned if the request can't be handled by the current
// driver. If an external driver returns a response with NotHandled, the
// rest of the DriverResponse is ignored, and go/packages will fallback
// to the next driver. If go/packages is extended in the future to support
// lists of multiple drivers, go/packages will fall back to the next driver.
NotHandled bool

// Compiler and Arch are the arguments pass of types.SizesFor
// to get a types.Sizes to use when type checking.
Compiler string
Arch string

// Roots is the set of package IDs that make up the root packages.
// We have to encode this separately because when we encode a single package
// we cannot know if it is one of the roots as that requires knowledge of the
// graph it is part of.
Roots []string `json:",omitempty"`

// Packages is the full set of packages in the graph.
// The packages are not connected into a graph.
// The Imports if populated will be stubs that only have their ID set.
// Imports will be connected and then type and syntax information added in a
// later pass (see refine).
Packages []*Package

// GoVersion is the minor version number used by the driver
// (e.g. the go command on the PATH) when selecting .go files.
// Zero means unknown.
GoVersion int
}

// driver is the type for functions that query the build system for the
// packages named by the patterns.
type driver func(cfg *Config, patterns ...string) (*DriverResponse, error)

// findExternalDriver returns the file path of a tool that supplies
// the build system package structure, or "" if not found."
// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its
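To illustrate the newly exported protocol types (a sketch, not part of the diff): a do-nothing GOPACKAGESDRIVER program that reads a DriverRequest from standard input and declines the query, which makes go/packages fall back to the 'go list' driver.

package main

import (
	"encoding/json"
	"log"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	// The query patterns arrive as command-line arguments; the request body on stdin.
	var req packages.DriverRequest
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		log.Fatal(err)
	}
	// NotHandled tells go/packages to ignore the rest of the response.
	resp := packages.DriverResponse{NotHandled: true}
	if err := json.NewEncoder(os.Stdout).Encode(&resp); err != nil {
		log.Fatal(err)
	}
}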
@ -64,8 +103,8 @@ func findExternalDriver(cfg *Config) driver {
return nil
}
}
return func(cfg *Config, words ...string) (*driverResponse, error) {
req, err := json.Marshal(driverRequest{
return func(cfg *Config, words ...string) (*DriverResponse, error) {
req, err := json.Marshal(DriverRequest{
Mode: cfg.Mode,
Env: cfg.Env,
BuildFlags: cfg.BuildFlags,

@ -92,7 +131,7 @@ func findExternalDriver(cfg *Config) driver {
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr)
}

var response driverResponse
var response DriverResponse
if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
return nil, err
}

@ -35,23 +35,23 @@ type goTooOldError struct {
error
}

// responseDeduper wraps a driverResponse, deduplicating its contents.
// responseDeduper wraps a DriverResponse, deduplicating its contents.
type responseDeduper struct {
seenRoots map[string]bool
seenPackages map[string]*Package
dr *driverResponse
dr *DriverResponse
}

func newDeduper() *responseDeduper {
return &responseDeduper{
dr: &driverResponse{},
dr: &DriverResponse{},
seenRoots: map[string]bool{},
seenPackages: map[string]*Package{},
}
}

// addAll fills in r with a driverResponse.
func (r *responseDeduper) addAll(dr *driverResponse) {
// addAll fills in r with a DriverResponse.
func (r *responseDeduper) addAll(dr *DriverResponse) {
for _, pkg := range dr.Packages {
r.addPackage(pkg)
}

@ -128,7 +128,7 @@ func (state *golistState) mustGetEnv() map[string]string {
// goListDriver uses the go list command to interpret the patterns and produce
// the build system package structure.
// See driver for more details.
func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) {
// Make sure that any asynchronous go commands are killed when we return.
parentCtx := cfg.Context
if parentCtx == nil {
@ -146,16 +146,18 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
}

// Fill in response.Sizes asynchronously if necessary.
var sizeserr error
var sizeswg sync.WaitGroup
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
sizeswg.Add(1)
errCh := make(chan error)
go func() {
compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner)
sizeserr = err
response.dr.Compiler = compiler
response.dr.Arch = arch
sizeswg.Done()
errCh <- err
}()
defer func() {
if sizesErr := <-errCh; sizesErr != nil {
err = sizesErr
}
}()
}
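Aside (not part of the diff): the hunk above replaces a WaitGroup plus shared error variable with an unbuffered channel drained in a defer, so the asynchronous error can override the function's named return value. A stripped-down sketch of that shape, with invented names:

package main

import (
	"errors"
	"fmt"
)

func load() (result string, err error) {
	errCh := make(chan error)
	go func() {
		// Stand-in for the asynchronous sizes query.
		errCh <- errors.New("sizes query failed")
	}()
	defer func() {
		// Receive exactly once; a non-nil value overrides the return error.
		if sizesErr := <-errCh; sizesErr != nil {
			err = sizesErr
		}
	}()
	return "packages", nil
}

func main() {
	fmt.Println(load())
}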
@ -208,10 +210,7 @@ extractQueries:
}
}

sizeswg.Wait()
if sizeserr != nil {
return nil, sizeserr
}
// (We may yet return an error due to defer.)
return response.dr, nil
}

@ -266,7 +265,7 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries

// adhocPackage attempts to load or construct an ad-hoc package for a given
// query, if the original call to the driver produced inadequate results.
func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) {
func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) {
response, err := state.createDriverResponse(query)
if err != nil {
return nil, err

@ -357,7 +356,7 @@ func otherFiles(p *jsonPackage) [][]string {

// createDriverResponse uses the "go list" command to expand the pattern
// words and return a response for the specified packages.
func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) {
func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) {
// go list uses the following identifiers in ImportPath and Imports:
//
// "p" -- importable package or main (command)

@ -384,7 +383,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
pkgs := make(map[string]*Package)
additionalErrors := make(map[string][]Error)
// Decode the JSON and convert it to Package form.
response := &driverResponse{
response := &DriverResponse{
GoVersion: goVersion,
}
for dec := json.NewDecoder(buf); dec.More(); {

@ -206,43 +206,6 @@ type Config struct {
Overlay map[string][]byte
}

// driver is the type for functions that query the build system for the
// packages named by the patterns.
type driver func(cfg *Config, patterns ...string) (*driverResponse, error)

// driverResponse contains the results for a driver query.
type driverResponse struct {
// NotHandled is returned if the request can't be handled by the current
// driver. If an external driver returns a response with NotHandled, the
// rest of the driverResponse is ignored, and go/packages will fallback
// to the next driver. If go/packages is extended in the future to support
// lists of multiple drivers, go/packages will fall back to the next driver.
NotHandled bool

// Compiler and Arch are the arguments pass of types.SizesFor
// to get a types.Sizes to use when type checking.
Compiler string
Arch string

// Roots is the set of package IDs that make up the root packages.
// We have to encode this separately because when we encode a single package
// we cannot know if it is one of the roots as that requires knowledge of the
// graph it is part of.
Roots []string `json:",omitempty"`

// Packages is the full set of packages in the graph.
// The packages are not connected into a graph.
// The Imports if populated will be stubs that only have their ID set.
// Imports will be connected and then type and syntax information added in a
// later pass (see refine).
Packages []*Package

// GoVersion is the minor version number used by the driver
// (e.g. the go command on the PATH) when selecting .go files.
// Zero means unknown.
GoVersion int
}

// Load loads and returns the Go packages named by the given patterns.
//
// Config specifies loading options;

@ -291,7 +254,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) {
// no external driver, or the driver returns a response with NotHandled set,
// defaultDriver will fall back to the go list driver.
// The boolean result indicates that an external driver handled the request.
func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, error) {
func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) {
if driver := findExternalDriver(cfg); driver != nil {
response, err := driver(cfg, patterns...)
if err != nil {

@ -303,7 +266,10 @@ func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, erro
}

response, err := goListDriver(cfg, patterns...)
return response, false, err
if err != nil {
return nil, false, err
}
return response, false, nil
}

// A Package describes a loaded Go package.

@ -648,7 +614,7 @@ func newLoader(cfg *Config) *loader {

// refine connects the supplied packages into a graph and then adds type
// and syntax information as requested by the LoadMode.
func (ld *loader) refine(response *driverResponse) ([]*Package, error) {
func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
roots := response.Roots
rootMap := make(map[string]int, len(roots))
for i, root := range roots {

@ -224,6 +224,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte

// Gather the relevant packages from the manifest.
items := make([]GetPackagesItem, r.uint64())
uniquePkgPaths := make(map[string]bool)
for i := range items {
pkgPathOff := r.uint64()
pkgPath := p.stringAt(pkgPathOff)

@ -248,6 +249,12 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte
}

items[i].nameIndex = nameIndex

uniquePkgPaths[pkgPath] = true
}
// Debugging #63822; hypothesis: there are duplicate PkgPaths.
if len(uniquePkgPaths) != len(items) {
reportf("found duplicate PkgPaths while reading export data manifest: %v", items)
}

// Request packages all at once from the client,
@ -97,7 +97,7 @@ github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v1.4.0
## explicit; go 1.20
github.com/containernetworking/plugins/pkg/ns
# github.com/containers/image/v5 v5.29.3-0.20240227090231-5bef5e1e1506
# github.com/containers/image/v5 v5.30.0
## explicit; go 1.19
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory

@ -182,7 +182,7 @@ github.com/containers/ocicrypt/keywrap/pkcs7
github.com/containers/ocicrypt/spec
github.com/containers/ocicrypt/utils
github.com/containers/ocicrypt/utils/keyprovider
# github.com/containers/storage v1.52.1-0.20240227215008-a083950a778f
# github.com/containers/storage v1.53.0
## explicit; go 1.20
github.com/containers/storage
github.com/containers/storage/drivers

@ -299,7 +299,7 @@ github.com/felixge/httpsnoop
# github.com/fsnotify/fsnotify v1.7.0
## explicit; go 1.17
github.com/fsnotify/fsnotify
# github.com/go-jose/go-jose/v3 v3.0.1
# github.com/go-jose/go-jose/v3 v3.0.2
## explicit; go 1.12
github.com/go-jose/go-jose/v3
github.com/go-jose/go-jose/v3/cipher

@ -320,7 +320,7 @@ github.com/go-openapi/analysis/internal/flatten/operations
github.com/go-openapi/analysis/internal/flatten/replace
github.com/go-openapi/analysis/internal/flatten/schutils
github.com/go-openapi/analysis/internal/flatten/sortref
# github.com/go-openapi/errors v0.21.0
# github.com/go-openapi/errors v0.21.1
## explicit; go 1.19
github.com/go-openapi/errors
# github.com/go-openapi/jsonpointer v0.19.6

@ -339,10 +339,10 @@ github.com/go-openapi/runtime
# github.com/go-openapi/spec v0.20.9
## explicit; go 1.13
github.com/go-openapi/spec
# github.com/go-openapi/strfmt v0.22.0
# github.com/go-openapi/strfmt v0.22.2
## explicit; go 1.19
github.com/go-openapi/strfmt
# github.com/go-openapi/swag v0.22.9
# github.com/go-openapi/swag v0.22.10
## explicit; go 1.19
github.com/go-openapi/swag
# github.com/go-openapi/validate v0.22.1

@ -375,7 +375,7 @@ github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
# github.com/google/go-containerregistry v0.17.0
# github.com/google/go-containerregistry v0.19.0
## explicit; go 1.18
github.com/google/go-containerregistry/pkg/name
github.com/google/go-containerregistry/pkg/v1

@ -386,7 +386,7 @@ github.com/google/go-intervals/intervalset
# github.com/google/pprof v0.0.0-20230323073829-e72429f035bd
## explicit; go 1.19
github.com/google/pprof/profile
# github.com/google/uuid v1.5.0
# github.com/google/uuid v1.6.0
## explicit
github.com/google/uuid
# github.com/gorilla/mux v1.8.1

@ -581,7 +581,7 @@ github.com/sigstore/fulcio/pkg/certificate
# github.com/sigstore/rekor v1.2.2
## explicit; go 1.19
github.com/sigstore/rekor/pkg/generated/models
# github.com/sigstore/sigstore v1.8.1
# github.com/sigstore/sigstore v1.8.2
## explicit; go 1.20
github.com/sigstore/sigstore/pkg/cryptoutils
github.com/sigstore/sigstore/pkg/signature

@ -642,8 +642,8 @@ github.com/vishvananda/netns
# go.etcd.io/bbolt v1.3.9
## explicit; go 1.17
go.etcd.io/bbolt
# go.mongodb.org/mongo-driver v1.13.1
## explicit; go 1.13
# go.mongodb.org/mongo-driver v1.14.0
## explicit; go 1.18
go.mongodb.org/mongo-driver/bson
go.mongodb.org/mongo-driver/bson/bsoncodec
go.mongodb.org/mongo-driver/bson/bsonoptions

@ -710,15 +710,15 @@ golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
# golang.org/x/exp v0.0.0-20240119083558-1b970713d09a
# golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
## explicit; go 1.20
golang.org/x/exp/constraints
golang.org/x/exp/maps
golang.org/x/exp/slices
# golang.org/x/mod v0.14.0
# golang.org/x/mod v0.15.0
## explicit; go 1.18
golang.org/x/mod/semver
# golang.org/x/net v0.21.0
# golang.org/x/net v0.22.0
## explicit; go 1.18
golang.org/x/net/context
golang.org/x/net/html

@ -766,7 +766,7 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# golang.org/x/tools v0.17.0
# golang.org/x/tools v0.18.0
## explicit; go 1.18
golang.org/x/tools/cmd/stringer
golang.org/x/tools/go/ast/inspector