fix(deps): update github.com/containers/image/v5 digest to 1b221d4
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
parent 64b69126bd
commit a10ef86dd9
@@ -7,9 +7,9 @@ require (
 	github.com/containerd/containerd v1.7.11
 	github.com/containernetworking/cni v1.1.2
 	github.com/containernetworking/plugins v1.4.0
-	github.com/containers/image/v5 v5.29.1-0.20231120202631-293b00ba7166
+	github.com/containers/image/v5 v5.29.1-0.20231221164234-1b221d4a9c28
 	github.com/containers/ocicrypt v1.1.9
-	github.com/containers/storage v1.51.1-0.20231120144510-2cf61989a5bc
+	github.com/containers/storage v1.51.1-0.20231205203947-fe005407c7d5
 	github.com/coreos/go-systemd/v22 v22.5.0
 	github.com/cyphar/filepath-securejoin v0.2.4
 	github.com/davecgh/go-spew v1.1.1
@@ -69,37 +69,37 @@ require (
 	github.com/go-jose/go-jose/v3 v3.0.1 // indirect
 	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-openapi/analysis v0.21.4 // indirect
-	github.com/go-openapi/errors v0.20.4 // indirect
+	github.com/go-openapi/errors v0.21.0 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/loads v0.21.2 // indirect
 	github.com/go-openapi/runtime v0.26.0 // indirect
 	github.com/go-openapi/spec v0.20.9 // indirect
-	github.com/go-openapi/strfmt v0.21.7 // indirect
-	github.com/go-openapi/swag v0.22.4 // indirect
+	github.com/go-openapi/strfmt v0.21.10 // indirect
+	github.com/go-openapi/swag v0.22.5 // indirect
 	github.com/go-openapi/validate v0.22.1 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
-	github.com/google/go-containerregistry v0.16.1 // indirect
+	github.com/google/go-containerregistry v0.17.0 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/pprof v0.0.0-20230323073829-e72429f035bd // indirect
-	github.com/google/uuid v1.3.1 // indirect
+	github.com/google/uuid v1.5.0 // indirect
 	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
-	github.com/klauspost/compress v1.17.3 // indirect
+	github.com/klauspost/compress v1.17.4 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/kr/fs v0.1.0 // indirect
-	github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
+	github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/manifoldco/promptui v0.9.0 // indirect
 	github.com/mattn/go-runewidth v0.0.15 // indirect
 	github.com/mattn/go-shellwords v1.0.12 // indirect
-	github.com/mattn/go-sqlite3 v1.14.18 // indirect
+	github.com/mattn/go-sqlite3 v1.14.19 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -112,24 +112,24 @@ require (
 	github.com/proglottis/gpgme v0.1.3 // indirect
 	github.com/rivo/uniseg v0.4.4 // indirect
 	github.com/rogpeppe/go-internal v1.11.0 // indirect
-	github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
 	github.com/sigstore/fulcio v1.4.3 // indirect
 	github.com/sigstore/rekor v1.2.2 // indirect
-	github.com/sigstore/sigstore v1.7.5 // indirect
+	github.com/sigstore/sigstore v1.8.0 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-	github.com/sylabs/sif/v2 v2.15.0 // indirect
+	github.com/sylabs/sif/v2 v2.15.1 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/ulikunitz/xz v0.5.11 // indirect
 	github.com/vbatts/tar-split v0.11.5 // indirect
-	github.com/vbauerster/mpb/v8 v8.6.2 // indirect
+	github.com/vbauerster/mpb/v8 v8.7.1 // indirect
 	github.com/vishvananda/netns v0.0.4 // indirect
-	go.mongodb.org/mongo-driver v1.11.3 // indirect
+	go.mongodb.org/mongo-driver v1.13.1 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect
 	golang.org/x/mod v0.13.0 // indirect
-	golang.org/x/net v0.18.0 // indirect
+	golang.org/x/net v0.19.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/tools v0.14.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
@@ -50,14 +50,14 @@ github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl3
github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
github.com/containernetworking/plugins v1.4.0 h1:+w22VPYgk7nQHw7KT92lsRmuToHvb7wwSv9iTbXzzic=
github.com/containernetworking/plugins v1.4.0/go.mod h1:UYhcOyjefnrQvKvmmyEKsUA+M9Nfn7tqULPpH0Pkcj0=
github.com/containers/image/v5 v5.29.1-0.20231120202631-293b00ba7166 h1:Dz4ryT8VDKn6U+oWPtsihAV2eG7uFc+LYS7UjHjLcwk=
github.com/containers/image/v5 v5.29.1-0.20231120202631-293b00ba7166/go.mod h1:0uOgAiVgmF8+VCXltRYmncWjkDYc+jFma49NKNz0cS4=
github.com/containers/image/v5 v5.29.1-0.20231221164234-1b221d4a9c28 h1:dI4/9x4Oh8SWEKIP8KcwoCFUWDO8jHbbfLhaFr20R/Y=
github.com/containers/image/v5 v5.29.1-0.20231221164234-1b221d4a9c28/go.mod h1:LC9m+8ED9+Vuw2WSd/mgvrHbi/44WJj/XBDNdiZC0AY=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM=
github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys=
github.com/containers/storage v1.51.1-0.20231120144510-2cf61989a5bc h1:K+fKkKkqwwY3YYM+RejJ6OcbCRZfDRZLoKsMMBAT2Bw=
github.com/containers/storage v1.51.1-0.20231120144510-2cf61989a5bc/go.mod h1:oz9n9uia9xtxDQhw7nnlpMID5YKbHmMZsVFy4rR+5+s=
github.com/containers/storage v1.51.1-0.20231205203947-fe005407c7d5 h1:eiCkAt+i9BYRjR7KEKPI3iORCSABhY+spM/w8BkI2lo=
github.com/containers/storage v1.51.1-0.20231205203947-fe005407c7d5/go.mod h1:pMhG1O3eMGlQKpuEuv7ves+K3BsK8/UJs8ctV5fEaoI=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
@@ -89,9 +89,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@@ -107,8 +104,8 @@ github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9Qy
github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M=
github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY=
github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
@@ -129,14 +126,14 @@ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6
github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k=
github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew=
github.com/go-openapi/strfmt v0.21.10 h1:JIsly3KXZB/Qf4UzvzJpg4OELH/0ASDQsyk//TTBDDk=
github.com/go-openapi/strfmt v0.21.10/go.mod h1:vNDMwbilnl7xKiO/Ve/8H8Bb2JIInBnH+lqiw6QWgis=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.5 h1:fVS63IE3M0lsuWRzuom3RLwUMVI2peDH01s6M70ugys=
github.com/go-openapi/swag v0.22.5/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0=
github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -204,8 +201,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ=
github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
github.com/google/go-containerregistry v0.17.0 h1:5p+zYs/R4VGHkhyvgWurWrpJ2hW4Vv9fQI+GzdcwXLk=
github.com/google/go-containerregistry v0.17.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -215,8 +212,8 @@ github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHa
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -224,8 +221,6 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc=
github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
@@ -233,7 +228,7 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4=
github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
@@ -244,8 +239,8 @@ github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0Lh
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -259,8 +254,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k85TKy+mvKzjW5R9rIC+Lv4KGbE7uNu0I=
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0=
github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e h1:RLTpX495BXToqxpM90Ws4hXEo4Wfh81jr9DX1n/4WOo=
github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e/go.mod h1:EAuqr9VFWxBi9nD5jc/EA2MT1RFty9288TF6zdtYoCU=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -274,8 +269,8 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI=
github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI=
github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
@@ -354,15 +349,15 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ=
github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og=
github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
github.com/sigstore/sigstore v1.7.5 h1:ij55dBhLwjICmLTBJZm7SqoQLdsu/oowDanACcJNs48=
github.com/sigstore/sigstore v1.7.5/go.mod h1:9OCmYWhzuq/G4e1cy9m297tuMRJ1LExyrXY3ZC3Zt/s=
github.com/sigstore/sigstore v1.8.0 h1:sSRWXv1JiDsK4T2wNWVYcvKCgxcSrhQ/QUJxsfCO4OM=
github.com/sigstore/sigstore v1.8.0/go.mod h1:l12B1gFlLIpBIVeqk/q1Lb+6YSOGNuN3xLExIjYH+qc=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -390,34 +385,33 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/sylabs/sif/v2 v2.15.0 h1:Nv0tzksFnoQiQ2eUwpAis9nVqEu4c3RcNSxX8P3Cecw=
github.com/sylabs/sif/v2 v2.15.0/go.mod h1:X1H7eaPz6BAxA84POMESXoXfTqgAnLQkujyF/CQFWTc=
github.com/sylabs/sif/v2 v2.15.1 h1:75BcunPOY11fVhe02/WHuNLTfDd3OHH0ex0MuuNMYX0=
github.com/sylabs/sif/v2 v2.15.1/go.mod h1:YiwCUdZOhiohnPbyxuxvCZa+03HwAaiC+vfAKZPR8nQ=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
github.com/vbauerster/mpb/v8 v8.6.2 h1:9EhnJGQRtvgDVCychJgR96EDCOqgg2NsMuk5JUcX4DA=
github.com/vbauerster/mpb/v8 v8.6.2/go.mod h1:oVJ7T+dib99kZ/VBjoBaC8aPXiSAihnzuKmotuihyFo=
github.com/vbauerster/mpb/v8 v8.7.1 h1:bQoSMMTFAg/gjsLrBYmO8gbRcZt7aDq6WI2IMa9BTqM=
github.com/vbauerster/mpb/v8 v8.7.1/go.mod h1:fWgXcAu4W+0cBSUh4ZlaKJyC2KtgU27ZSTaiIk0QNsQ=
github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
@@ -430,12 +424,14 @@ go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y=
go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk=
go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo=
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
@@ -476,8 +472,8 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -531,7 +527,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
@@ -582,7 +580,6 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -6,8 +6,10 @@ import (
	"fmt"
	"strings"

	internalManifest "github.com/containers/image/v5/internal/manifest"
	"github.com/containers/image/v5/internal/set"
	"github.com/containers/image/v5/manifest"
	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
	"github.com/containers/image/v5/types"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/sirupsen/logrus"
@@ -19,8 +21,8 @@ import (
// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType}

// ociEncryptionMIMETypes lists manifest MIME types that are known to support OCI encryption.
var ociEncryptionMIMETypes = []string{v1.MediaTypeImageManifest}
// allManifestMIMETypes lists all possible manifest MIME types.
var allManifestMIMETypes = []string{v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType}

// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
type orderedSet struct {
@@ -52,6 +54,7 @@ type determineManifestConversionInputs struct {
	destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes()

	forceManifestMIMEType string // User’s choice of forced manifest MIME type
	requestedCompressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user _explictily_ requested one.
	requiresOCIEncryption bool // Restrict to manifest formats that can support OCI encryption
	cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
}
@@ -80,41 +83,74 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
		destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType}
	}

	restrictiveCompressionRequired := in.requestedCompressionFormat != nil && !internalManifest.CompressionAlgorithmIsUniversallySupported(*in.requestedCompressionFormat)
	if len(destSupportedManifestMIMETypes) == 0 {
		if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType) {
		if (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) &&
			(!restrictiveCompressionRequired || internalManifest.MIMETypeSupportsCompressionAlgorithm(srcType, *in.requestedCompressionFormat)) {
			return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
				preferredMIMEType: srcType,
				otherMIMETypeCandidates: []string{},
			}, nil
		}
		destSupportedManifestMIMETypes = ociEncryptionMIMETypes
		destSupportedManifestMIMETypes = allManifestMIMETypes
	}
	supportedByDest := set.New[string]()
	for _, t := range destSupportedManifestMIMETypes {
		if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) {
		if in.requiresOCIEncryption && !manifest.MIMETypeSupportsEncryption(t) {
			continue
		}
		if restrictiveCompressionRequired && !internalManifest.MIMETypeSupportsCompressionAlgorithm(t, *in.requestedCompressionFormat) {
			continue
		}
		supportedByDest.Add(t)
	}
	}
	if supportedByDest.Empty() {
		if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by ociEncryptionMIMETypes
		if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by allManifestMIMETypes
			return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty")
		}
		// We know, and have verified, that destSupportedManifestMIMETypes is not empty, so encryption must have been involved.
		if !in.requiresOCIEncryption { // Coverage: This should never happen, destSupportedManifestMIMETypes was not empty, so we should have filtered for encryption.
			return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and not encrypting")
		}
		// We know, and have verified, that destSupportedManifestMIMETypes is not empty, so some filtering of supported MIME types must have been involved.

		// destSupportedManifestMIMETypes has three possible origins:
		if in.forceManifestMIMEType != "" { // 1. forceManifestType specified
			switch {
			case in.requiresOCIEncryption && restrictiveCompressionRequired:
				return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required together with format %s, which does not support both",
					in.requestedCompressionFormat.Name(), in.forceManifestMIMEType)
			case in.requiresOCIEncryption:
				return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption",
					in.forceManifestMIMEType)
			case restrictiveCompressionRequired:
				return manifestConversionPlan{}, fmt.Errorf("compression using %s required together with format %s, which does not support it",
					in.requestedCompressionFormat.Name(), in.forceManifestMIMEType)
			default:
				return manifestConversionPlan{}, errors.New("internal error: forceManifestMIMEType was rejected for an unknown reason")
			}
		if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen ociEncryptionMIMETypes
			// Coverage: This should never happen, ociEncryptionMIMETypes all support encryption
		}
		if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen allManifestTypes
			if !restrictiveCompressionRequired {
				// Coverage: This should never happen.
				// If we have not rejected for encryption reasons, we must have rejected due to encryption, but
				// allManifestTypes includes OCI, which supports encryption.
				return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well")
			}
			// 3. destination does not support encryption.
			// This can legitimately happen when the user asks for completely unsupported formats like Bzip2 or Xz.
			return manifestConversionPlan{}, fmt.Errorf("compression using %s required, but none of the known manifest formats support it", in.requestedCompressionFormat.Name())
		}
		// 3. destination accepts a restricted list of mime types
		destMIMEList := strings.Join(destSupportedManifestMIMETypes, ", ")
		switch {
		case in.requiresOCIEncryption && restrictiveCompressionRequired:
			return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required but the destination only supports MIME types [%s], none of which support both",
				in.requestedCompressionFormat.Name(), destMIMEList)
		case in.requiresOCIEncryption:
			return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption",
				strings.Join(destSupportedManifestMIMETypes, ", "))
				destMIMEList)
		case restrictiveCompressionRequired:
			return manifestConversionPlan{}, fmt.Errorf("compression using %s required but the destination only supports MIME types [%s], none of which support it",
				in.requestedCompressionFormat.Name(), destMIMEList)
		default: // Coverage: This should never happen, we only filter for in.requiresOCIEncryption || restrictiveCompressionRequired
			return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and we are neither encrypting nor requiring a restrictive compression algorithm")
		}
	}

	// destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types.
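Aside (not part of the diff): the filtering added above amounts to a small decision table over the requested compression and encryption. A minimal, self-contained Go sketch of that rule follows; it does not use the vendored packages, and the MIME-type strings are written out literally.

package main

import "fmt"

// supports reports whether a manifest MIME type can carry the requested
// encryption and compression, mirroring the filtering logic in the hunk above.
func supports(mimeType string, needsEncryption bool, compression string) bool {
	const ociManifest = "application/vnd.oci.image.manifest.v1+json"
	if needsEncryption && mimeType != ociManifest {
		return false // only the OCI manifest format carries encrypted layers
	}
	switch compression {
	case "", "gzip":
		return true // gzip is universally supported, so it never restricts the candidates
	case "zstd", "zstd:chunked":
		return mimeType == ociManifest // zstd layer media types only exist in OCI manifests
	default:
		return false // e.g. bzip2 and xz are defined names but unsupported everywhere
	}
}

func main() {
	fmt.Println(supports("application/vnd.docker.distribution.manifest.v2+json", false, "zstd")) // false
	fmt.Println(supports("application/vnd.oci.image.manifest.v1+json", false, "zstd"))           // true
}

In other words, explicitly requesting zstd now steers the conversion plan toward the OCI manifest format, while gzip leaves the candidate list untouched.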
@@ -20,6 +20,7 @@ import (
	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/types"
	chunkedToc "github.com/containers/storage/pkg/chunked/toc"
	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/sirupsen/logrus"
@@ -167,6 +168,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
		srcMIMEType: ic.src.ManifestMIMEType,
		destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
		forceManifestMIMEType: c.options.ForceManifestMIMEType,
		requestedCompressionFormat: ic.compressionFormat,
		requiresOCIEncryption: destRequiresOciEncryption,
		cannotModifyManifestReason: ic.cannotModifyManifestReason,
	})
@@ -693,6 +695,13 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
		requiredCompression = ic.compressionFormat
		originalCompression = srcInfo.CompressionAlgorithm
	}

	// Check if we have a chunked layer in storage that's based on that blob. These layers are stored by their TOC digest.
	tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
	if err != nil {
		return types.BlobInfo{}, "", err
	}

	reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
		Cache: ic.c.blobInfoCache,
		CanSubstitute: canSubstitute,
@@ -701,6 +710,7 @@
		SrcRef: srcRef,
		RequiredCompression: requiredCompression,
		OriginalCompression: originalCompression,
		TOCDigest: tocDigest,
	})
	if err != nil {
		return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
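Aside (not part of the diff): a sketch of how the TOC lookup used above behaves. The annotation key shown is assumed for illustration, and the digest value is a syntactically valid dummy; a nil result means the blob is a regular, non-chunked layer.

package main

import (
	"fmt"
	"strings"

	chunkedToc "github.com/containers/storage/pkg/chunked/toc"
)

func main() {
	// zstd:chunked layers advertise their table-of-contents digest in a blob annotation.
	annotations := map[string]string{
		"io.github.containers.zstd-chunked.manifest-checksum": "sha256:" + strings.Repeat("a", 64), // dummy value
	}
	toc, err := chunkedToc.GetTOCDigest(annotations)
	if err != nil {
		fmt.Println("invalid TOC annotation:", err)
		return
	}
	if toc == nil {
		fmt.Println("not a chunked layer")
		return
	}
	fmt.Println("TOC digest:", toc.String())
}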
@@ -123,6 +123,9 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
	if !ok {
		return "", errors.New("ref must be a dockerReference")
	}
	if dr.isUnknownDigest {
		return "", fmt.Errorf("docker: reference %q is for unknown digest case; cannot get digest", dr.StringWithinTransport())
	}

	tagOrDigest, err := dr.tagOrDigest()
	if err != nil {
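Aside (not part of the diff): a usage sketch of the exported docker.GetDigest helper that this hunk hardens. The image name is just an example and the call needs network access.

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/docker"
)

func main() {
	// GetDigest queries the registry for the manifest digest of a tagged reference;
	// with the change above it now rejects "unknown digest" references up front.
	ref, err := docker.ParseReference("//quay.io/libpod/alpine:latest")
	if err != nil {
		panic(err)
	}
	dgst, err := docker.GetDigest(context.Background(), nil, ref)
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println(dgst)
}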
@@ -452,7 +452,15 @@
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
	var refTail string
	if instanceDigest != nil {
	// If d.ref.isUnknownDigest=true, then we push without a tag, so get the
	// digest that will be used
	if d.ref.isUnknownDigest {
		digest, err := manifest.Digest(m)
		if err != nil {
			return err
		}
		refTail = digest.String()
	} else if instanceDigest != nil {
		// If the instanceDigest is provided, then use it as the refTail, because the reference,
		// whether it includes a tag or a digest, refers to the list as a whole, and not this
		// particular instance.
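Aside (not part of the diff): for an "unknown digest" push, the upload path component becomes the digest of the manifest bytes themselves. A sketch using the exported manifest.Digest helper; the manifest JSON below is a placeholder.

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

func main() {
	// The same computation PutManifest performs above: digest the raw manifest bytes.
	raw := []byte(`{"schemaVersion":2,"mediaType":"application/vnd.oci.image.manifest.v1+json","config":{},"layers":[]}`)
	refTail, err := manifest.Digest(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println("would push to .../manifests/" + refTail.String())
}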
@@ -38,8 +38,8 @@ type dockerImageSource struct {
	impl.DoesNotAffectLayerInfosForCopy
	stubs.ImplementsGetBlobAt

	logicalRef dockerReference // The reference the user requested.
	physicalRef dockerReference // The actual reference we are accessing (possibly a mirror)
	logicalRef dockerReference // The reference the user requested. This must satisfy !isUnknownDigest
	physicalRef dockerReference // The actual reference we are accessing (possibly a mirror). This must satisfy !isUnknownDigest
	c *dockerClient
	// State
	cachedManifest []byte // nil if not loaded yet
@@ -48,7 +48,12 @@ type dockerImageSource struct {

// newImageSource creates a new ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
// The caller must ensure !ref.isUnknownDigest.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
	if ref.isUnknownDigest {
		return nil, fmt.Errorf("reading images from docker: reference %q without a tag or digest is not supported", ref.StringWithinTransport())
	}

	registryConfig, err := loadRegistryConfiguration(sys)
	if err != nil {
		return nil, err
@@ -121,7 +126,7 @@
// The caller must call .Close() on the returned ImageSource.
func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource,
	registryConfig *registryConfiguration) (*dockerImageSource, error) {
	physicalRef, err := newReference(pullSource.Reference)
	physicalRef, err := newReference(pullSource.Reference, false)
	if err != nil {
		return nil, err
	}
@@ -591,6 +596,10 @@ func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Con

// deleteImage deletes the named image from the registry, if supported.
func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error {
	if ref.isUnknownDigest {
		return fmt.Errorf("Docker reference without a tag or digest cannot be deleted")
	}

	registryConfig, err := loadRegistryConfiguration(sys)
	if err != nil {
		return err
@@ -12,6 +12,11 @@ import (
	"github.com/containers/image/v5/types"
)

// UnknownDigestSuffix can be appended to a reference when the caller
// wants to push an image without a tag or digest.
// NewReferenceUnknownDigest() is called when this const is detected.
const UnknownDigestSuffix = "@@unknown-digest@@"

func init() {
	transports.Register(Transport)
}
@@ -43,7 +48,8 @@ func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {

// dockerReference is an ImageReference for Docker images.
type dockerReference struct {
	ref reference.Named // By construction we know that !reference.IsNameOnly(ref)
	ref reference.Named // By construction we know that !reference.IsNameOnly(ref) unless isUnknownDigest=true
	isUnknownDigest bool
}

// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
@@ -51,23 +57,46 @@ func ParseReference(refString string) (types.ImageReference, error) {
	if !strings.HasPrefix(refString, "//") {
		return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
	}
	// Check if ref has UnknownDigestSuffix suffixed to it
	unknownDigest := false
	if strings.HasSuffix(refString, UnknownDigestSuffix) {
		unknownDigest = true
		refString = strings.TrimSuffix(refString, UnknownDigestSuffix)
	}
	ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
	if err != nil {
		return nil, err
	}

	if unknownDigest {
		if !reference.IsNameOnly(ref) {
			return nil, fmt.Errorf("docker: image reference %q has unknown digest set but it contains either a tag or digest", ref.String()+UnknownDigestSuffix)
		}
		return NewReferenceUnknownDigest(ref)
	}

	ref = reference.TagNameOnly(ref)
	return NewReference(ref)
}

// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
func NewReference(ref reference.Named) (types.ImageReference, error) {
	return newReference(ref)
	return newReference(ref, false)
}

// NewReferenceUnknownDigest returns a Docker reference for a named reference, which can be used to write images without setting
// a tag on the registry. The reference must satisfy reference.IsNameOnly()
func NewReferenceUnknownDigest(ref reference.Named) (types.ImageReference, error) {
	return newReference(ref, true)
}

// newReference returns a dockerReference for a named reference.
func newReference(ref reference.Named) (dockerReference, error) {
	if reference.IsNameOnly(ref) {
		return dockerReference{}, fmt.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
func newReference(ref reference.Named, unknownDigest bool) (dockerReference, error) {
	if reference.IsNameOnly(ref) && !unknownDigest {
		return dockerReference{}, fmt.Errorf("Docker reference %s is not for an unknown digest case; tag or digest is needed", reference.FamiliarString(ref))
	}
	if !reference.IsNameOnly(ref) && unknownDigest {
		return dockerReference{}, fmt.Errorf("Docker reference %s is for an unknown digest case but reference has a tag or digest", reference.FamiliarString(ref))
	}
	// A github.com/distribution/reference value can have a tag and a digest at the same time!
	// The docker/distribution API does not really support that (we can’t ask for an image with a specific
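Aside (not part of the diff): a usage sketch of the new suffix with the exported API shown in this file; registry.example.com/myrepo is a placeholder.

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker"
)

func main() {
	// A name-only reference plus the suffix parses into a reference that is pushed
	// without a tag; the digest is only known after the upload.
	ref, err := docker.ParseReference("//registry.example.com/myrepo" + docker.UnknownDigestSuffix)
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.StringWithinTransport()) // //registry.example.com/myrepo@@unknown-digest@@
	fmt.Println(ref.PolicyConfigurationIdentity())
}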
@@ -82,6 +111,7 @@ func newReference(ref reference.Named) (dockerReference, error) {

	return dockerReference{
		ref: ref,
		isUnknownDigest: unknownDigest,
	}, nil
}

@@ -95,7 +125,11 @@ func (ref dockerReference) Transport() types.ImageTransport {
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref dockerReference) StringWithinTransport() string {
	return "//" + reference.FamiliarString(ref.ref)
	famString := "//" + reference.FamiliarString(ref.ref)
	if ref.isUnknownDigest {
		return famString + UnknownDigestSuffix
	}
	return famString
}

// DockerReference returns a Docker reference associated with this reference
@@ -113,6 +147,9 @@ func (ref dockerReference) DockerReference() reference.Named {
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
// Returns "" if configuration identities for these references are not supported.
func (ref dockerReference) PolicyConfigurationIdentity() string {
	if ref.isUnknownDigest {
		return ref.ref.Name()
	}
	res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
	if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
		panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
@@ -126,7 +163,13 @@ func (ref dockerReference) PolicyConfigurationIdentity() string {
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref dockerReference) PolicyConfigurationNamespaces() []string {
	return policyconfiguration.DockerReferenceNamespaces(ref.ref)
	namespaces := policyconfiguration.DockerReferenceNamespaces(ref.ref)
	if ref.isUnknownDigest {
		if len(namespaces) != 0 && namespaces[0] == ref.ref.Name() {
			namespaces = namespaces[1:]
		}
	}
	return namespaces
}

// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
@@ -163,6 +206,10 @@ func (ref dockerReference) tagOrDigest() (string, error) {
	if ref, ok := ref.ref.(reference.NamedTagged); ok {
		return ref.Tag(), nil
	}

	if ref.isUnknownDigest {
		return "", fmt.Errorf("Docker reference %q is for an unknown digest case, has neither a digest nor a tag", reference.FamiliarString(ref.ref))
	}
	// This should not happen, NewReference above refuses reference.IsNameOnly values.
	return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
}
@@ -36,7 +36,7 @@ type BlobInfoCache2 interface {
	// that could possibly be reused within the specified (transport scope) (if they still
	// exist, which is not guaranteed).
	//
	// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if
	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
	// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
	// up variants of the blob which have the same uncompressed digest.
	//
common/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go (generated, vendored)
@@ -28,7 +28,7 @@ type wrapped struct {
//
// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
// with public methods, or perhaps a private interface), so that we can add methods
// without breaking any external implementors of a public interface.
// without breaking any external implementers of a public interface.
func FromPublic(dest types.ImageDestination) private.ImageDestination {
	if dest2, ok := dest.(private.ImageDestination); ok {
		return dest2
@@ -27,7 +27,7 @@ type wrapped struct {
//
// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
// with public methods, or perhaps a private interface), so that we can add methods
// without breaking any external implementors of a public interface.
// without breaking any external implementers of a public interface.
func FromPublic(src types.ImageSource) private.ImageSource {
	if src2, ok := src.(private.ImageSource); ok {
		return src2
@@ -3,6 +3,7 @@ package manifest
import (
	"encoding/json"

	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
	"github.com/containers/libtrust"
	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -14,7 +15,7 @@ import (
const (
	// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
	DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
	// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature
	// DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
	DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
	// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
	DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
@@ -165,3 +166,26 @@ func NormalizedMIMEType(input string) string {
		return DockerV2Schema1SignedMediaType
	}
}

// CompressionAlgorithmIsUniversallySupported returns true if MIMETypeSupportsCompressionAlgorithm(mimeType, algo) returns true for all mimeType values.
func CompressionAlgorithmIsUniversallySupported(algo compressiontypes.Algorithm) bool {
	switch algo.Name() { // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ?
	case compressiontypes.GzipAlgorithmName:
		return true
	default:
		return false
	}
}

// MIMETypeSupportsCompressionAlgorithm returns true if mimeType can represent algo.
func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes.Algorithm) bool {
	if CompressionAlgorithmIsUniversallySupported(algo) {
		return true
	}
	switch algo.Name() { // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ?
	case compressiontypes.ZstdAlgorithmName, compressiontypes.ZstdChunkedAlgorithmName:
		return mimeType == imgspecv1.MediaTypeImageManifest
	default: // Includes Bzip2AlgorithmName and XzAlgorithmName, which are defined names but are not supported anywhere
		return false
	}
}
@@ -117,6 +117,7 @@ type TryReusingBlobOptions struct {
	EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
	LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
	SrcRef reference.Named // A reference to the source image that contains the input blob.
	TOCDigest *digest.Digest // If specified, the blob can be looked up in the destination also by its TOC digest.
}

// ReusedBlob is information about a blob reused in a destination.
@@ -10,6 +10,7 @@ import (
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/internal/manifest"
	"github.com/containers/image/v5/internal/set"
	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
	"github.com/containers/image/v5/types"
	"github.com/containers/storage/pkg/regexp"
	"github.com/docker/docker/api/types/versions"
@@ -142,6 +143,15 @@ func (m *Schema1) LayerInfos() []LayerInfo {
	return layers
}

const fakeSchema1MIMEType = DockerV2Schema2LayerMediaType // Used only in schema1CompressionMIMETypeSets
var schema1CompressionMIMETypeSets = []compressionMIMETypeSet{
	{
		mtsUncompressed: fakeSchema1MIMEType,
		compressiontypes.GzipAlgorithmName: fakeSchema1MIMEType,
		compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
	},
}

// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
	// Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well.
@@ -150,6 +160,11 @@ func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
 	}
 	m.FSLayers = make([]Schema1FSLayers, len(layerInfos))
 	for i, info := range layerInfos {
+		// There are no MIME types in schema1, but we do a “conversion” here to reject unsupported compression algorithms,
+		// in a way that is consistent with the other schema implementations.
+		if _, err := updatedMIMEType(schema1CompressionMIMETypeSets, fakeSchema1MIMEType, info); err != nil {
+			return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err)
+		}
 		// (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest,
 		// but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
 		// So, we don't bother recomputing the IDs in m.History.V1Compatibility.

@@ -16,7 +16,7 @@ import (
 const (
 	// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
 	DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType
-	// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature
+	// DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
 	DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType
 	// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
 	DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType

@@ -9,6 +9,7 @@ import (
 	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
 	ociencspec "github.com/containers/ocicrypt/spec"
+	chunkedToc "github.com/containers/storage/pkg/chunked/toc"
 	"github.com/opencontainers/go-digest"
 	"github.com/opencontainers/image-spec/specs-go"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"

@@ -235,7 +236,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
 }

 // ImageID computes an ID which can uniquely identify this image by its contents.
-func (m *OCI1) ImageID([]digest.Digest) (string, error) {
+func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) {
 	// The way m.Config.Digest “uniquely identifies” an image is
 	// by containing RootFS.DiffIDs, which identify the layers of the image.
 	// For non-image artifacts, the we can’t expect the config to change

@@ -259,9 +260,44 @@ func (m *OCI1) ImageID([]digest.Digest) (string, error) {
 	if err := m.Config.Digest.Validate(); err != nil {
 		return "", err
 	}

+	// If there is any layer that is using partial content, we calculate the image ID
+	// in a different way since the diffID cannot be validated as for regular pulled images.
+	for _, layer := range m.Layers {
+		toc, err := chunkedToc.GetTOCDigest(layer.Annotations)
+		if err != nil {
+			return "", fmt.Errorf("error looking up annotation for layer %q: %w", layer.Digest, err)
+		}
+		if toc != nil {
+			return m.calculateImageIDForPartialImage(diffIDs)
+		}
+	}
+
 	return m.Config.Digest.Hex(), nil
 }

+func (m *OCI1) calculateImageIDForPartialImage(diffIDs []digest.Digest) (string, error) {
+	newID := digest.Canonical.Digester()
+	for i, layer := range m.Layers {
+		diffID := diffIDs[i]
+		_, err := newID.Hash().Write([]byte(diffID.Hex()))
+		if err != nil {
+			return "", fmt.Errorf("error writing diffID %q: %w", diffID, err)
+		}
+		toc, err := chunkedToc.GetTOCDigest(layer.Annotations)
+		if err != nil {
+			return "", fmt.Errorf("error looking up annotation for layer %q: %w", layer.Digest, err)
+		}
+		if toc != nil {
+			_, err = newID.Hash().Write([]byte(toc.Hex()))
+			if err != nil {
+				return "", fmt.Errorf("error writing TOC %q: %w", toc, err)
+			}
+		}
+	}
+	return newID.Digest().Hex(), nil
+}
+
 // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
 // (and the code can handle that).
 // NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted

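For layers pulled by their TOC (partial pulls), the config digest alone no longer pins the image contents, so the ID above is derived from the per-layer diffIDs plus the TOC digests. A standalone sketch of that scheme — illustrative only; partialImageID is a hypothetical helper, not part of the library:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// partialImageID hashes every layer's diffID and, for layers that were pulled
// by their table of contents, the TOC digest as well.
func partialImageID(diffIDs []digest.Digest, tocDigests []*digest.Digest) string {
	d := digest.Canonical.Digester()
	for i, diffID := range diffIDs {
		d.Hash().Write([]byte(diffID.Hex()))
		if tocDigests[i] != nil { // layer was pulled by TOC
			d.Hash().Write([]byte(tocDigests[i].Hex()))
		}
	}
	return d.Digest().Hex()
}

func main() {
	layer := digest.FromString("layer-0 uncompressed contents")
	toc := digest.FromString("layer-0 TOC")
	fmt.Println(partialImageID([]digest.Digest{layer}, []*digest.Digest{&toc}))
}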
@@ -13,6 +13,7 @@ import (
 	"github.com/containers/image/v5/internal/signature"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/idtools"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/sirupsen/logrus"
 )

@@ -169,10 +170,15 @@ func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedTopleve
 // tar converts the directory at src and saves it to dst
 func tarDirectory(src, dst string) error {
 	// input is a stream of bytes from the archive of the directory at path
-	input, err := archive.Tar(src, archive.Uncompressed)
+	input, err := archive.TarWithOptions(src, &archive.TarOptions{
+		Compression: archive.Uncompressed,
+		// Don’t include the data about the user account this code is running under.
+		ChownOpts: &idtools.IDPair{UID: 0, GID: 0},
+	})
 	if err != nil {
 		return fmt.Errorf("retrieving stream of bytes from %q: %w", src, err)
 	}
 	defer input.Close()

 	// creates the tar file
 	outFile, err := os.Create(dst)

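The switch to archive.TarWithOptions lets the caller force root ownership on every entry, so the archive does not leak the UID/GID of whoever ran the copy. A minimal sketch of the same call pattern, with hypothetical paths and error handling reduced to panics:

package main

import (
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Stream an uncompressed tar of ./layout with every entry owned by 0:0.
	input, err := archive.TarWithOptions("./layout", &archive.TarOptions{
		Compression: archive.Uncompressed,
		ChownOpts:   &idtools.IDPair{UID: 0, GID: 0},
	})
	if err != nil {
		panic(err)
	}
	defer input.Close()

	out, err := os.Create("layout.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, input); err != nil {
		panic(err)
	}
}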
@@ -91,11 +91,11 @@ func min(a, b int) int {

 // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
 // number of entries to limit for known and unknown location separately, only to make testing simpler.
-// TODO: following function is not destructive any more in the nature instead priortized result is actually copies of the original
+// TODO: following function is not destructive any more in the nature instead prioritized result is actually copies of the original
 // candidate set, so In future we might wanna re-name this public API and remove the destructive prefix.
 func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 {
 	// split unknown candidates and known candidates
-	// and limit them seperately.
+	// and limit them separately.
 	var knownLocationCandidates []CandidateWithTime
 	var unknownLocationCandidates []CandidateWithTime
 	// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should

common/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go (2 changed lines; generated, vendored)
@@ -184,7 +184,7 @@ func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types
 // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused
 // within the specified (transport scope) (if they still exist, which is not guaranteed).
 //
-// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
 // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
 // uncompressed digest.
 func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {

common/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go (4 changed lines; generated, vendored)
@@ -171,7 +171,7 @@ func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {

 // dbTransaction calls fn within a read-write transaction in db.
 func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
-	// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive dicussion.
+	// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.

 	var zeroRes T // A zero value of T

@@ -496,7 +496,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
 // that could possibly be reused within the specified (transport scope) (if they still
 // exist, which is not guaranteed).
 //
-// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
 // canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
 // up variants of the blob which have the same uncompressed digest.
 //

@@ -1,3 +1,6 @@
+//go:build !containers_image_fulcio_stub
+// +build !containers_image_fulcio_stub
+
 package signature

 import (

common/vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go (new file, 28 lines; generated, vendored)
@@ -0,0 +1,28 @@
+//go:build containers_image_fulcio_stub
+// +build containers_image_fulcio_stub
+
+package signature
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/x509"
+	"errors"
+)
+
+type fulcioTrustRoot struct {
+	caCertificates *x509.CertPool
+	oidcIssuer     string
+	subjectEmail   string
+}
+
+func (f *fulcioTrustRoot) validate() error {
+	return errors.New("fulcio disabled at compile-time")
+}
+
+func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
+	untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
+	untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
+	return nil, errors.New("fulcio disabled at compile-time")
+
+}

@@ -1,3 +1,6 @@
+//go:build !containers_image_rekor_stub
+// +build !containers_image_rekor_stub
+
 package internal

 import (

common/vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go (new file, 15 lines; generated, vendored)
@@ -0,0 +1,15 @@
+//go:build containers_image_rekor_stub
+// +build containers_image_rekor_stub
+
+package internal
+
+import (
+	"crypto/ecdsa"
+	"time"
+)
+
+// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
+// Returns bundle upload time on success.
+func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
+	return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time")
+}

@@ -77,7 +77,7 @@ type storageImageDestination struct {
 	indexToStorageID map[int]*string
 	// All accesses to below data are protected by `lock` which is made
 	// *explicit* in the code.
-	blobDiffIDs             map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
+	uncompressedOrTocDigest map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs or TOC IDs.
 	fileSizes               map[digest.Digest]int64         // Mapping from layer blobsums to their sizes
 	filenames               map[digest.Digest]string        // Mapping from layer blobsums to names of files we used to hold them
 	currentIndex            int                             // The index of the layer to be committed (i.e., lower indices have already been committed)

@ -120,7 +120,7 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
|
|||
imageRef: imageRef,
|
||||
directory: directory,
|
||||
signatureses: make(map[digest.Digest][]byte),
|
||||
blobDiffIDs: make(map[digest.Digest]digest.Digest),
|
||||
uncompressedOrTocDigest: make(map[digest.Digest]digest.Digest),
|
||||
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
|
||||
fileSizes: make(map[digest.Digest]int64),
|
||||
filenames: make(map[digest.Digest]string),
|
||||
|
|
@ -227,7 +227,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
|
|||
|
||||
// Record information about the blob.
|
||||
s.lock.Lock()
|
||||
s.blobDiffIDs[blobDigest] = diffID.Digest()
|
||||
s.uncompressedOrTocDigest[blobDigest] = diffID.Digest()
|
||||
s.fileSizes[blobDigest] = counter.Count
|
||||
s.filenames[blobDigest] = filename
|
||||
s.lock.Unlock()
|
||||
|
|
@ -289,7 +289,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
|
|||
blobDigest := srcInfo.Digest
|
||||
|
||||
s.lock.Lock()
|
||||
s.blobDiffIDs[blobDigest] = blobDigest
|
||||
s.uncompressedOrTocDigest[blobDigest] = blobDigest
|
||||
s.fileSizes[blobDigest] = 0
|
||||
s.filenames[blobDigest] = ""
|
||||
s.diffOutputs[blobDigest] = out
|
||||
|
|
@ -321,7 +321,7 @@ func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
|||
})
|
||||
}
|
||||
|
||||
// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.blobDiffIDs and other metadata.
|
||||
// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.uncompressedOrTocDigest and other metadata.
|
||||
// The caller must arrange the blob to be eventually committed using s.commitLayer().
|
||||
func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
|
||||
// lock the entire method as it executes fairly quickly
|
||||
|
|
@ -335,7 +335,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
|
|||
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err)
|
||||
} else if err == nil {
|
||||
// Record the uncompressed value so that we can use it to calculate layer IDs.
|
||||
s.blobDiffIDs[digest] = aLayer.UncompressedDigest()
|
||||
s.uncompressedOrTocDigest[digest] = aLayer.UncompressedDigest()
|
||||
s.blobAdditionalLayer[digest] = aLayer
|
||||
return true, private.ReusedBlob{
|
||||
Digest: digest,
|
||||
|
|
@ -366,7 +366,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
|
|||
}
|
||||
if len(layers) > 0 {
|
||||
// Save this for completeness.
|
||||
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
|
||||
s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest
|
||||
return true, private.ReusedBlob{
|
||||
Digest: digest,
|
||||
Size: layers[0].UncompressedSize,
|
||||
|
|
@ -380,7 +380,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
|
|||
}
|
||||
if len(layers) > 0 {
|
||||
// Record the uncompressed value so that we can use it to calculate layer IDs.
|
||||
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
|
||||
s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest
|
||||
return true, private.ReusedBlob{
|
||||
Digest: digest,
|
||||
Size: layers[0].CompressedSize,
|
||||
|
|
@ -398,7 +398,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
|
|||
}
|
||||
if len(layers) > 0 {
|
||||
if size != -1 {
|
||||
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
|
||||
s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest
|
||||
return true, private.ReusedBlob{
|
||||
Digest: digest,
|
||||
Size: size,
|
||||
|
|
@ -407,7 +407,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
|
|||
if !options.CanSubstitute {
|
||||
return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest)
|
||||
}
|
||||
s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
|
||||
s.uncompressedOrTocDigest[uncompressedDigest] = layers[0].UncompressedDigest
|
||||
return true, private.ReusedBlob{
|
||||
Digest: uncompressedDigest,
|
||||
Size: layers[0].UncompressedSize,
|
||||
|
|
@@ -416,6 +416,25 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
 		}
 	}

+	tocDigest := digest
+	if options.TOCDigest != nil {
+		tocDigest = *options.TOCDigest
+	}
+
+	// Check if we have a chunked layer in storage with the same TOC digest.
+	layers, err = s.imageRef.transport.store.LayersByTOCDigest(tocDigest)
+	if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
+		return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, tocDigest, err)
+	}
+	if len(layers) > 0 {
+		// Save this for completeness.
+		s.uncompressedOrTocDigest[digest] = layers[0].TOCDigest
+		return true, private.ReusedBlob{
+			Digest: layers[0].TOCDigest,
+			Size:   layers[0].UncompressedSize,
+		}, nil
+	}
+
 	// Nope, we don't have it.
 	return false, private.ReusedBlob{}, nil
 }

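With the lookup above, a blob can count as already present when the store holds a layer whose TOC digest matches, even if the compressed blob digest differs. A rough sketch of the same query against a containers/storage store — illustrative only; it assumes the usual GetStore/StoreOptions entry points and the LayersByTOCDigest method this update introduces:

package main

import (
	"fmt"

	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Sketch only: open the default store and ask for layers matching a TOC digest.
	store, err := storage.GetStore(storage.StoreOptions{})
	if err != nil {
		panic(err)
	}
	tocDigest := digest.FromString("example TOC") // hypothetical digest
	layers, err := store.LayersByTOCDigest(tocDigest)
	if err == nil && len(layers) > 0 {
		fmt.Printf("layer %s can be reused for TOC %s\n", layers[0].ID, tocDigest)
	}
}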
@ -438,16 +457,20 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string {
|
|||
continue
|
||||
}
|
||||
blobSum := m.FSLayers[i].BlobSum
|
||||
diffID, ok := s.blobDiffIDs[blobSum]
|
||||
diffID, ok := s.uncompressedOrTocDigest[blobSum]
|
||||
if !ok {
|
||||
logrus.Infof("error looking up diffID for layer %q", blobSum.String())
|
||||
return ""
|
||||
}
|
||||
diffIDs = append([]digest.Digest{diffID}, diffIDs...)
|
||||
}
|
||||
case *manifest.Schema2, *manifest.OCI1:
|
||||
// We know the ID calculation for these formats doesn't actually use the diffIDs,
|
||||
// so we don't need to populate the diffID list.
|
||||
case *manifest.Schema2:
|
||||
// We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate
|
||||
// the diffID list.
|
||||
case *manifest.OCI1:
|
||||
for _, l := range m.Layers {
|
||||
diffIDs = append(diffIDs, l.Digest)
|
||||
}
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
|
|
@ -518,7 +541,7 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
|
|||
}
|
||||
s.lock.Unlock()
|
||||
// Note: commitLayer locks on-demand.
|
||||
if err := s.commitLayer(index, info, -1); err != nil {
|
||||
if stopQueue, err := s.commitLayer(index, info, -1); stopQueue || err != nil {
|
||||
return err
|
||||
}
|
||||
s.lock.Lock()
|
||||
|
|
@@ -532,18 +555,32 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
 	return nil
 }

+// getDiffIDOrTOCDigest returns the diffID for the specified digest or the digest for the TOC, if known.
+func (s *storageImageDestination) getDiffIDOrTOCDigest(uncompressedDigest digest.Digest) (digest.Digest, bool) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if d, found := s.diffOutputs[uncompressedDigest]; found {
+		return d.TOCDigest, found
+	}
+	d, found := s.uncompressedOrTocDigest[uncompressedDigest]
+	return d, found
+}
+
 // commitLayer commits the specified layer with the given index to the storage.
-// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
+// size can usually be -1; it can be provided if the layer is not known to be already present in uncompressedOrTocDigest.
 //
+// If the layer cannot be committed yet, the function returns (true, nil).
+//
 // Note that the previous layer is expected to already be committed.
 //
 // Caution: this function must be called without holding `s.lock`. Callers
 // must guarantee that, at any given time, at most one goroutine may execute
 // `commitLayer()`.
-func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) error {
+func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) {
 	// Already committed? Return early.
 	if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
-		return nil
+		return false, nil
 	}

 	// Start with an empty string or the previous layer ID. Note that

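commitLayer now reports, besides an error, whether the commit had to be deferred (for example because the manifest is not yet available for a TOC-based layer). A small generic sketch of driving such a two-result step from a queue — hypothetical types, not the library's code:

package main

import "fmt"

// commitFn mirrors the (stopQueue, error) shape used above: it either handles
// the item, asks the caller to stop and retry later (true, nil), or fails.
type commitFn func(index int) (stopQueue bool, err error)

func drain(pending []int, commit commitFn) ([]int, error) {
	for len(pending) > 0 {
		stop, err := commit(pending[0])
		if err != nil {
			return pending, err
		}
		if stop {
			// Not ready yet; keep the remaining items queued for a later pass.
			return pending, nil
		}
		pending = pending[1:]
	}
	return nil, nil
}

func main() {
	left, err := drain([]int{0, 1, 2}, func(i int) (bool, error) {
		if i == 2 {
			return true, nil // defer layer 2 until more data (e.g. the manifest) arrives
		}
		fmt.Println("committed layer", i)
		return false, nil
	})
	fmt.Println("left queued:", left, "err:", err)
}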
@ -557,68 +594,96 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
|
|||
// Carry over the previous ID for empty non-base layers.
|
||||
if info.emptyLayer {
|
||||
s.indexToStorageID[index] = &lastLayer
|
||||
return nil
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Check if there's already a layer with the ID that we'd give to the result of applying
|
||||
// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
|
||||
s.lock.Lock()
|
||||
diffID, haveDiffID := s.blobDiffIDs[info.digest]
|
||||
s.lock.Unlock()
|
||||
if !haveDiffID {
|
||||
// The diffIDOrTOCDigest refers either to the DiffID or the digest of the TOC.
|
||||
diffIDOrTOCDigest, haveDiffIDOrTOCDigest := s.getDiffIDOrTOCDigest(info.digest)
|
||||
if !haveDiffIDOrTOCDigest {
|
||||
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
|
||||
// or to even check if we had it.
|
||||
// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
|
||||
// that relies on using a blob digest that has never been seen by the store had better call
|
||||
// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
|
||||
// so far we are going to accommodate that (if we should be doing that at all).
|
||||
logrus.Debugf("looking for diffID for blob %+v", info.digest)
|
||||
logrus.Debugf("looking for diffID or TOC digest for blob %+v", info.digest)
|
||||
// Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit.
|
||||
has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{
|
||||
Cache: none.NoCache,
|
||||
CanSubstitute: false,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
|
||||
return false, fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
|
||||
}
|
||||
if !has {
|
||||
return fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
|
||||
return false, fmt.Errorf("error determining uncompressed digest or TOC digest for blob %q", info.digest.String())
|
||||
}
|
||||
diffID, haveDiffID = s.blobDiffIDs[info.digest]
|
||||
if !haveDiffID {
|
||||
return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", info.digest.String())
|
||||
diffIDOrTOCDigest, haveDiffIDOrTOCDigest = s.getDiffIDOrTOCDigest(info.digest)
|
||||
if !haveDiffIDOrTOCDigest {
|
||||
return false, fmt.Errorf("we have blob %q, but don't know its uncompressed or TOC digest", info.digest.String())
|
||||
}
|
||||
}
|
||||
id := diffID.Hex()
|
||||
id := diffIDOrTOCDigest.Hex()
|
||||
if lastLayer != "" {
|
||||
id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
|
||||
id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffIDOrTOCDigest.Hex())).Hex()
|
||||
}
|
||||
if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
|
||||
// There's already a layer that should have the right contents, just reuse it.
|
||||
lastLayer = layer.ID
|
||||
s.indexToStorageID[index] = &lastLayer
|
||||
return nil
|
||||
return false, nil
|
||||
}
|
||||
|
||||
s.lock.Lock()
|
||||
diffOutput, ok := s.diffOutputs[info.digest]
|
||||
s.lock.Unlock()
|
||||
if ok {
|
||||
layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
if s.manifest == nil {
|
||||
logrus.Debugf("Skipping commit for TOC=%q, manifest not yet available", id)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// FIXME: what to do with the uncompressed digest?
|
||||
diffOutput.UncompressedDigest = info.digest
|
||||
man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("parsing manifest: %w", err)
|
||||
}
|
||||
|
||||
if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil {
|
||||
cb, err := s.getConfigBlob(man.ConfigInfo())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// retrieve the expected uncompressed digest from the config blob.
|
||||
configOCI := &imgspecv1.Image{}
|
||||
if err := json.Unmarshal(cb, configOCI); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if index >= len(configOCI.RootFS.DiffIDs) {
|
||||
return false, fmt.Errorf("index %d out of range for configOCI.RootFS.DiffIDs", index)
|
||||
}
|
||||
|
||||
layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// let the storage layer know what was the original uncompressed layer.
|
||||
flags := make(map[string]interface{})
|
||||
flags[expectedLayerDiffIDFlag] = configOCI.RootFS.DiffIDs[index]
|
||||
logrus.Debugf("Setting uncompressed digest to %q for layer %q", configOCI.RootFS.DiffIDs[index], id)
|
||||
options := &graphdriver.ApplyDiffWithDifferOpts{
|
||||
Flags: flags,
|
||||
}
|
||||
|
||||
if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, options); err != nil {
|
||||
_ = s.imageRef.transport.store.Delete(layer.ID)
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
|
||||
s.indexToStorageID[index] = &layer.ID
|
||||
return nil
|
||||
return false, nil
|
||||
}
|
||||
|
||||
s.lock.Lock()
|
||||
|
|
@ -627,11 +692,11 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
|
|||
if ok {
|
||||
layer, err := al.PutAs(id, lastLayer, nil)
|
||||
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
|
||||
return fmt.Errorf("failed to put layer from digest and labels: %w", err)
|
||||
return false, fmt.Errorf("failed to put layer from digest and labels: %w", err)
|
||||
}
|
||||
lastLayer = layer.ID
|
||||
s.indexToStorageID[index] = &lastLayer
|
||||
return nil
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Check if we previously cached a file with that blob's contents. If we didn't,
|
||||
|
|
@ -642,7 +707,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
|
|||
if !ok {
|
||||
// Try to find the layer with contents matching that blobsum.
|
||||
layer := ""
|
||||
layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
|
||||
layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffIDOrTOCDigest)
|
||||
if err2 == nil && len(layers) > 0 {
|
||||
layer = layers[0].ID
|
||||
} else {
|
||||
|
|
@ -652,7 +717,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
|
|||
}
|
||||
}
|
||||
if layer == "" {
|
||||
return fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
|
||||
return false, fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
|
||||
}
|
||||
// Read the layer's contents.
|
||||
noCompression := archive.Uncompressed
|
||||
|
|
@ -661,17 +726,17 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
|
|||
}
|
||||
diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
|
||||
if err2 != nil {
|
||||
return fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
|
||||
return false, fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
|
||||
}
|
||||
// Copy the layer diff to a file. Diff() takes a lock that it holds
|
||||
// until the ReadCloser that it returns is closed, and PutLayer() wants
|
||||
// the same lock, so the diff can't just be directly streamed from one
|
||||
// to the other.
|
||||
filename = s.computeNextBlobCacheFile()
|
||||
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
|
||||
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0o600)
|
||||
if err != nil {
|
||||
diff.Close()
|
||||
return fmt.Errorf("creating temporary file %q: %w", filename, err)
|
||||
return false, fmt.Errorf("creating temporary file %q: %w", filename, err)
|
||||
}
|
||||
// Copy the data to the file.
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using
|
||||
|
|
@ -680,7 +745,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
|
|||
diff.Close()
|
||||
file.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("storing blob to file %q: %w", filename, err)
|
||||
return false, fmt.Errorf("storing blob to file %q: %w", filename, err)
|
||||
}
|
||||
// Make sure that we can find this file later, should we need the layer's
|
||||
// contents again.
|
||||
|
|
@ -691,21 +756,21 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
|
|||
// Read the cached blob and use it as a diff.
|
||||
file, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening file %q: %w", filename, err)
|
||||
return false, fmt.Errorf("opening file %q: %w", filename, err)
|
||||
}
|
||||
defer file.Close()
|
||||
// Build the new layer using the diff, regardless of where it came from.
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
|
||||
layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
|
||||
OriginalDigest: info.digest,
|
||||
UncompressedDigest: diffID,
|
||||
UncompressedDigest: diffIDOrTOCDigest,
|
||||
}, file)
|
||||
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
|
||||
return fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
|
||||
return false, fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
|
||||
}
|
||||
|
||||
s.indexToStorageID[index] = &layer.ID
|
||||
return nil
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
|
||||
|
|
@ -752,11 +817,13 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
|
|||
|
||||
// Extract, commit, or find the layers.
|
||||
for i, blob := range layerBlobs {
|
||||
if err := s.commitLayer(i, addedLayerInfo{
|
||||
if stopQueue, err := s.commitLayer(i, addedLayerInfo{
|
||||
digest: blob.Digest,
|
||||
emptyLayer: blob.EmptyLayer,
|
||||
}, blob.Size); err != nil {
|
||||
return err
|
||||
} else if stopQueue {
|
||||
return fmt.Errorf("Internal error: storageImageDestination.Commit(): commitLayer() not ready to commit for layer %q", blob.Digest)
|
||||
}
|
||||
}
|
||||
var lastLayer string
|
||||
|
|
|
|||
|
|
@ -29,6 +29,16 @@ import (
|
|||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// getBlobMutexProtected is a struct to hold the state of the getBlobMutex mutex.
|
||||
type getBlobMutexProtected struct {
|
||||
// digestToLayerID is a lookup map from the layer digest (either the uncompressed digest or the TOC digest) to the
|
||||
// layer ID in the store.
|
||||
digestToLayerID map[digest.Digest]string
|
||||
|
||||
// layerPosition stores where we are in reading a blob's layers
|
||||
layerPosition map[digest.Digest]int
|
||||
}
|
||||
|
||||
type storageImageSource struct {
|
||||
impl.Compat
|
||||
impl.PropertyMethodsInitialize
|
||||
|
|
@ -37,13 +47,15 @@ type storageImageSource struct {
|
|||
imageRef storageReference
|
||||
image *storage.Image
|
||||
systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
|
||||
layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
|
||||
cachedManifest []byte // A cached copy of the manifest, if already known, or nil
|
||||
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
|
||||
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions (it guards layerPosition and digestToLayerID)
|
||||
getBlobMutexProtected getBlobMutexProtected
|
||||
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
|
||||
SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice
|
||||
}
|
||||
|
||||
const expectedLayerDiffIDFlag = "expected-layer-diffid"
|
||||
|
||||
// newImageSource sets up an image for reading.
|
||||
func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
|
||||
// First, locate the image.
|
||||
|
|
@ -62,9 +74,12 @@ func newImageSource(sys *types.SystemContext, imageRef storageReference) (*stora
|
|||
imageRef: imageRef,
|
||||
systemContext: sys,
|
||||
image: img,
|
||||
layerPosition: make(map[digest.Digest]int),
|
||||
SignatureSizes: []int{},
|
||||
SignaturesSizes: make(map[digest.Digest][]int),
|
||||
getBlobMutexProtected: getBlobMutexProtected{
|
||||
digestToLayerID: make(map[digest.Digest]string),
|
||||
layerPosition: make(map[digest.Digest]int),
|
||||
},
|
||||
}
|
||||
image.Compat = impl.AddCompat(image)
|
||||
if img.Metadata != "" {
|
||||
|
|
@ -91,6 +106,7 @@ func (s *storageImageSource) Close() error {
|
|||
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
|
||||
// We need a valid digest value.
|
||||
digest := info.Digest
|
||||
|
||||
err = digest.Validate()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
|
|
@ -100,10 +116,24 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
|
|||
return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
|
||||
}
|
||||
|
||||
var layers []storage.Layer
|
||||
|
||||
// If the digest was overridden by LayerInfosForCopy, then we need to use the TOC digest
|
||||
// to retrieve it from the storage.
|
||||
s.getBlobMutex.Lock()
|
||||
layerID, found := s.getBlobMutexProtected.digestToLayerID[digest]
|
||||
s.getBlobMutex.Unlock()
|
||||
|
||||
if found {
|
||||
if layer, err := s.imageRef.transport.store.Layer(layerID); err == nil {
|
||||
layers = []storage.Layer{*layer}
|
||||
}
|
||||
} else {
|
||||
// Check if the blob corresponds to a diff that was used to initialize any layers. Our
|
||||
// callers should try to retrieve layers using their uncompressed digests, so no need to
|
||||
// check if they're using one of the compressed digests, which we can't reproduce anyway.
|
||||
layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(digest)
|
||||
layers, _ = s.imageRef.transport.store.LayersByUncompressedDigest(digest)
|
||||
}
|
||||
|
||||
// If it's not a layer, then it must be a data item.
|
||||
if len(layers) == 0 {
|
||||
|
|
@ -174,8 +204,8 @@ func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []st
|
|||
// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
|
||||
// just go ahead and use the first one every time.
|
||||
s.getBlobMutex.Lock()
|
||||
i := s.layerPosition[digest]
|
||||
s.layerPosition[digest] = i + 1
|
||||
i := s.getBlobMutexProtected.layerPosition[digest]
|
||||
s.getBlobMutexProtected.layerPosition[digest] = i + 1
|
||||
s.getBlobMutex.Unlock()
|
||||
if len(layers) > 0 {
|
||||
layer = layers[i%len(layers)]
|
||||
|
|
@ -267,14 +297,35 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err)
|
||||
}
|
||||
if layer.UncompressedDigest == "" {
|
||||
return nil, fmt.Errorf("uncompressed digest for layer %q is unknown", layerID)
|
||||
if layer.UncompressedDigest == "" && layer.TOCDigest == "" {
|
||||
return nil, fmt.Errorf("uncompressed digest and TOC digest for layer %q is unknown", layerID)
|
||||
}
|
||||
if layer.UncompressedSize < 0 {
|
||||
return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID)
|
||||
}
|
||||
|
||||
blobDigest := layer.UncompressedDigest
|
||||
|
||||
if layer.TOCDigest != "" {
|
||||
if layer.Flags == nil || layer.Flags[expectedLayerDiffIDFlag] == nil {
|
||||
return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not set", layer.TOCDigest, layerID, expectedLayerDiffIDFlag)
|
||||
}
|
||||
if expectedDigest, ok := layer.Flags[expectedLayerDiffIDFlag].(string); ok {
|
||||
// if the layer is stored by its TOC, report the expected diffID as the layer Digest
|
||||
// but store the TOC digest so we can later retrieve it from the storage.
|
||||
blobDigest, err = digest.Parse(expectedDigest)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err)
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not a string", layer.TOCDigest, layerID, expectedLayerDiffIDFlag)
|
||||
}
|
||||
}
|
||||
s.getBlobMutex.Lock()
|
||||
s.getBlobMutexProtected.digestToLayerID[blobDigest] = layer.ID
|
||||
s.getBlobMutex.Unlock()
|
||||
blobInfo := types.BlobInfo{
|
||||
Digest: layer.UncompressedDigest,
|
||||
Digest: blobDigest,
|
||||
Size: layer.UncompressedSize,
|
||||
MediaType: uncompressedLayerType,
|
||||
}
|
||||
|
|
@ -384,7 +435,7 @@ func (s *storageImageSource) getSize() (int64, error) {
|
|||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 {
|
||||
if (layer.TOCDigest == "" && layer.UncompressedDigest == "") || layer.UncompressedSize < 0 {
|
||||
return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
|
||||
}
|
||||
sum += layer.UncompressedSize
|
||||
|
|
|
|||
|
|
@@ -73,6 +73,13 @@ type ApplyDiffOpts struct {
 	ForceMask *os.FileMode
 }

+// ApplyDiffWithDifferOpts contains optional arguments for ApplyDiffWithDiffer methods.
+type ApplyDiffWithDifferOpts struct {
+	ApplyDiffOpts
+
+	Flags map[string]interface{}
+}
+
 // InitFunc initializes the storage driver.
 type InitFunc func(homedir string, options Options) (Driver, error)

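The new options struct embeds ApplyDiffOpts and adds free-form Flags, which the storage image destination uses above to record the expected layer diffID next to a TOC-applied layer. A construction sketch — illustrative only; the label and flag value are made up:

package main

import (
	graphdriver "github.com/containers/storage/drivers"
)

func exampleOpts() *graphdriver.ApplyDiffWithDifferOpts {
	return &graphdriver.ApplyDiffWithDifferOpts{
		ApplyDiffOpts: graphdriver.ApplyDiffOpts{
			MountLabel: "system_u:object_r:container_file_t:s0", // hypothetical label
		},
		Flags: map[string]interface{}{
			"expected-layer-diffid": "sha256:...", // as set by the image copy code above
		},
	}
}

func main() { _ = exampleOpts() }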
@ -223,9 +230,9 @@ type DriverWithDiffer interface {
|
|||
Driver
|
||||
// ApplyDiffWithDiffer applies the changes using the callback function.
|
||||
// If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
|
||||
ApplyDiffWithDiffer(id, parent string, options *ApplyDiffOpts, differ Differ) (output DriverWithDifferOutput, err error)
|
||||
ApplyDiffWithDiffer(id, parent string, options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
|
||||
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
|
||||
ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffOpts) error
|
||||
ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error
|
||||
// CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors
|
||||
CleanupStagingDirectory(stagingDirectory string) error
|
||||
// DifferTarget gets the location where files are stored for the layer.
|
||||
|
|
|
|||
|
|
@ -2049,7 +2049,7 @@ func (d *Driver) useComposeFs() bool {
|
|||
}
|
||||
|
||||
// ApplyDiff applies the changes in the new layer using the specified function
|
||||
func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) {
|
||||
func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) {
|
||||
var idMappings *idtools.IDMappings
|
||||
if options != nil {
|
||||
idMappings = options.Mappings
|
||||
|
|
@ -2100,7 +2100,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
|
|||
}
|
||||
|
||||
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
|
||||
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffOpts) error {
|
||||
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
|
||||
if filepath.Dir(stagingDirectory) != d.getStagingDir() {
|
||||
return fmt.Errorf("%q is not a staging directory", stagingDirectory)
|
||||
}
|
||||
|
|
@ -2125,8 +2125,6 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory stri
|
|||
return err
|
||||
}
|
||||
|
||||
diffOutput.UncompressedDigest = diffOutput.TOCDigest
|
||||
|
||||
return os.Rename(stagingDirectory, diffPath)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -126,6 +126,13 @@ type Layer struct {
|
|||
// as a DiffID.
|
||||
UncompressedDigest digest.Digest `json:"diff-digest,omitempty"`
|
||||
|
||||
// TOCDigest represents the digest of the Table of Contents (TOC) of the blob.
|
||||
// This digest is utilized when the UncompressedDigest is not
|
||||
// validated during the partial image pull process, but the
|
||||
// TOC itself is validated.
|
||||
// It serves as an alternative reference under these specific conditions.
|
||||
TOCDigest digest.Digest `json:"toc-digest,omitempty"`
|
||||
|
||||
// UncompressedSize is the length of the blob that was last passed to
|
||||
// ApplyDiff() or create(), after we decompressed it. If
|
||||
// UncompressedDigest is not set, this should be treated as if it were
|
||||
|
|
@ -228,6 +235,10 @@ type roLayerStore interface {
|
|||
// specified uncompressed digest value recorded for them.
|
||||
LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
|
||||
|
||||
// LayersByTOCDigest returns a slice of the layers with the
|
||||
// specified uncompressed digest value recorded for them.
|
||||
LayersByTOCDigest(d digest.Digest) ([]Layer, error)
|
||||
|
||||
// Layers returns a slice of the known layers.
|
||||
Layers() ([]Layer, error)
|
||||
}
|
||||
|
|
@ -296,13 +307,13 @@ type rwLayerStore interface {
|
|||
|
||||
// ApplyDiffWithDiffer applies the changes through the differ callback function.
|
||||
// If to is the empty string, then a staging directory is created by the driver.
|
||||
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
|
||||
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
|
||||
|
||||
// CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors
|
||||
CleanupStagingDirectory(stagingDirectory string) error
|
||||
|
||||
// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
|
||||
ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
|
||||
ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error
|
||||
|
||||
// DifferTarget gets the location where files are stored for the layer.
|
||||
DifferTarget(id string) (string, error)
|
||||
|
|
@ -337,6 +348,7 @@ type layerStore struct {
|
|||
bymount map[string]*Layer
|
||||
bycompressedsum map[digest.Digest][]string
|
||||
byuncompressedsum map[digest.Digest][]string
|
||||
bytocsum map[digest.Digest][]string
|
||||
layerspathsModified [numLayerLocationIndex]time.Time
|
||||
|
||||
// FIXME: This field is only set when constructing layerStore, but locking rules of the driver
|
||||
|
|
@ -366,6 +378,7 @@ func copyLayer(l *Layer) *Layer {
|
|||
CompressedSize: l.CompressedSize,
|
||||
UncompressedDigest: l.UncompressedDigest,
|
||||
UncompressedSize: l.UncompressedSize,
|
||||
TOCDigest: l.TOCDigest,
|
||||
CompressionType: l.CompressionType,
|
||||
ReadOnly: l.ReadOnly,
|
||||
volatileStore: l.volatileStore,
|
||||
|
|
@ -745,6 +758,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
|
|||
names := make(map[string]*Layer)
|
||||
compressedsums := make(map[digest.Digest][]string)
|
||||
uncompressedsums := make(map[digest.Digest][]string)
|
||||
tocsums := make(map[digest.Digest][]string)
|
||||
var errorToResolveBySaving error // == nil; if there are multiple errors, this is one of them.
|
||||
if r.lockfile.IsReadWrite() {
|
||||
selinux.ClearLabels()
|
||||
|
|
@ -765,6 +779,9 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
|
|||
if layer.UncompressedDigest != "" {
|
||||
uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
|
||||
}
|
||||
if layer.TOCDigest != "" {
|
||||
tocsums[layer.TOCDigest] = append(tocsums[layer.TOCDigest], layer.ID)
|
||||
}
|
||||
if layer.MountLabel != "" {
|
||||
selinux.ReserveLabel(layer.MountLabel)
|
||||
}
|
||||
|
|
@ -792,6 +809,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
|
|||
r.byname = names
|
||||
r.bycompressedsum = compressedsums
|
||||
r.byuncompressedsum = uncompressedsums
|
||||
r.bytocsum = tocsums
|
||||
|
||||
// Load and merge information about which layers are mounted, and where.
|
||||
if r.lockfile.IsReadWrite() {
|
||||
|
|
@ -1112,7 +1130,7 @@ func (r *layerStore) Size(name string) (int64, error) {
|
|||
// We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that
|
||||
// a zero value is not just present because it was never set to anything else (which can happen if the layer was
|
||||
// created by a version of this library that didn't keep track of digest and size information).
|
||||
if layer.UncompressedDigest != "" {
|
||||
if layer.TOCDigest != "" || layer.UncompressedDigest != "" {
|
||||
return layer.UncompressedSize, nil
|
||||
}
|
||||
return -1, nil
|
||||
|
|
@ -1201,6 +1219,9 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
|
|||
if layer.UncompressedDigest != "" {
|
||||
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
|
||||
}
|
||||
if layer.TOCDigest != "" {
|
||||
r.bytocsum[layer.TOCDigest] = append(r.bytocsum[layer.TOCDigest], layer.ID)
|
||||
}
|
||||
if err := r.saveFor(layer); err != nil {
|
||||
if e := r.Delete(layer.ID); e != nil {
|
||||
logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, e)
|
||||
|
|
@ -1251,6 +1272,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
|
|||
templateCompressedDigest digest.Digest
|
||||
templateCompressedSize int64
|
||||
templateUncompressedDigest digest.Digest
|
||||
templateTOCDigest digest.Digest
|
||||
templateUncompressedSize int64
|
||||
templateCompressionType archive.Compression
|
||||
templateUIDs, templateGIDs []uint32
|
||||
|
|
@ -1263,6 +1285,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
|
|||
}
|
||||
templateMetadata = templateLayer.Metadata
|
||||
templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap)
|
||||
templateTOCDigest = templateLayer.TOCDigest
|
||||
templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize
|
||||
templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
|
||||
templateCompressionType = templateLayer.CompressionType
|
||||
|
|
@ -1291,6 +1314,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
|
|||
CompressedDigest: templateCompressedDigest,
|
||||
CompressedSize: templateCompressedSize,
|
||||
UncompressedDigest: templateUncompressedDigest,
|
||||
TOCDigest: templateTOCDigest,
|
||||
UncompressedSize: templateUncompressedSize,
|
||||
CompressionType: templateCompressionType,
|
||||
UIDs: templateUIDs,
|
||||
|
|
@ -1413,6 +1437,9 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
|
|||
if layer.UncompressedDigest != "" {
|
||||
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
|
||||
}
|
||||
if layer.TOCDigest != "" {
|
||||
r.bytocsum[layer.TOCDigest] = append(r.bytocsum[layer.TOCDigest], layer.ID)
|
||||
}
|
||||
}
|
||||
|
||||
delete(layer.Flags, incompleteFlag)
|
||||
|
|
@@ -2197,6 +2224,25 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
 	return r.driver.DiffSize(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel)
 }

+func updateDigestMap(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
+	var newList []string
+	if oldvalue != "" {
+		for _, value := range (*m)[oldvalue] {
+			if value != id {
+				newList = append(newList, value)
+			}
+		}
+		if len(newList) > 0 {
+			(*m)[oldvalue] = newList
+		} else {
+			delete(*m, oldvalue)
+		}
+	}
+	if newvalue != "" {
+		(*m)[newvalue] = append((*m)[newvalue], id)
+	}
+}
+
 // Requires startWriting.
 func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) {
 	return r.applyDiffWithOptions(to, nil, diff)

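updateDigestMap is now a package-level helper (it was previously an inline closure in applyDiffWithOptions, removed further below): it moves a layer ID from the list kept under its old digest to the list kept under the new one, dropping empty buckets. A tiny standalone illustration of that behavior, with the helper reproduced only for the sake of the example:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// Same shape as the helper above, copied here only to make the example runnable.
func updateDigestMap(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
	var newList []string
	if oldvalue != "" {
		for _, value := range (*m)[oldvalue] {
			if value != id {
				newList = append(newList, value)
			}
		}
		if len(newList) > 0 {
			(*m)[oldvalue] = newList
		} else {
			delete(*m, oldvalue)
		}
	}
	if newvalue != "" {
		(*m)[newvalue] = append((*m)[newvalue], id)
	}
}

func main() {
	oldD := digest.FromString("old uncompressed contents")
	newD := digest.FromString("new uncompressed contents")
	byDigest := map[digest.Digest][]string{oldD: {"layer-1"}}
	updateDigestMap(&byDigest, oldD, newD, "layer-1")
	fmt.Println(byDigest) // only the new digest maps to layer-1; the old bucket is gone
}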
@ -2313,24 +2359,6 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
|
|||
uncompressedDigest = uncompressedDigester.Digest()
|
||||
}
|
||||
|
||||
updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
|
||||
var newList []string
|
||||
if oldvalue != "" {
|
||||
for _, value := range (*m)[oldvalue] {
|
||||
if value != id {
|
||||
newList = append(newList, value)
|
||||
}
|
||||
}
|
||||
if len(newList) > 0 {
|
||||
(*m)[oldvalue] = newList
|
||||
} else {
|
||||
delete(*m, oldvalue)
|
||||
}
|
||||
}
|
||||
if newvalue != "" {
|
||||
(*m)[newvalue] = append((*m)[newvalue], id)
|
||||
}
|
||||
}
|
||||
updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID)
|
||||
layer.CompressedDigest = compressedDigest
|
||||
layer.CompressedSize = compressedCounter.Count
|
||||
|
|
@ -2372,7 +2400,7 @@ func (r *layerStore) DifferTarget(id string) (string, error) {
|
|||
}
|
||||
|
||||
// Requires startWriting.
|
||||
func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error {
|
||||
func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
|
||||
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
|
||||
if !ok {
|
||||
return ErrNotSupported
|
||||
|
|
@ -2382,20 +2410,35 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
|
|||
return ErrLayerUnknown
|
||||
}
|
||||
if options == nil {
|
||||
options = &drivers.ApplyDiffOpts{
|
||||
options = &drivers.ApplyDiffWithDifferOpts{
|
||||
ApplyDiffOpts: drivers.ApplyDiffOpts{
|
||||
Mappings: r.layerMappings(layer),
|
||||
MountLabel: layer.MountLabel,
|
||||
},
|
||||
Flags: nil,
|
||||
}
|
||||
}
|
||||
|
||||
err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, stagingDirectory, diffOutput, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
layer.UIDs = diffOutput.UIDs
|
||||
layer.GIDs = diffOutput.GIDs
|
||||
updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, diffOutput.UncompressedDigest, layer.ID)
|
||||
layer.UncompressedDigest = diffOutput.UncompressedDigest
|
||||
updateDigestMap(&r.bytocsum, diffOutput.TOCDigest, diffOutput.TOCDigest, layer.ID)
|
||||
layer.TOCDigest = diffOutput.TOCDigest
|
||||
layer.UncompressedSize = diffOutput.Size
|
||||
layer.Metadata = diffOutput.Metadata
|
||||
if options != nil && options.Flags != nil {
|
||||
if layer.Flags == nil {
|
||||
layer.Flags = make(map[string]interface{})
|
||||
}
|
||||
for k, v := range options.Flags {
|
||||
layer.Flags[k] = v
|
||||
}
|
||||
}
|
||||
if len(diffOutput.TarSplit) != 0 {
|
||||
tsdata := bytes.Buffer{}
|
||||
compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
|
||||
|
|
@ -2432,7 +2475,7 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
|
|||
}
|
||||
|
||||
// Requires startWriting.
|
||||
func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
|
||||
func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
|
||||
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
|
||||
if !ok {
|
||||
return nil, ErrNotSupported
|
||||
|
|
@ -2448,9 +2491,11 @@ func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOp
|
|||
return nil, ErrLayerUnknown
|
||||
}
|
||||
if options == nil {
|
||||
options = &drivers.ApplyDiffOpts{
|
||||
options = &drivers.ApplyDiffWithDifferOpts{
|
||||
ApplyDiffOpts: drivers.ApplyDiffOpts{
|
||||
Mappings: r.layerMappings(layer),
|
||||
MountLabel: layer.MountLabel,
|
||||
},
|
||||
}
|
||||
}
|
||||
output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ)
|
||||
|
|
@ -2494,6 +2539,11 @@ func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error
|
|||
return r.layersByDigestMap(r.byuncompressedsum, d)
|
||||
}
|
||||
|
||||
// Requires startReading or startWriting.
|
||||
func (r *layerStore) LayersByTOCDigest(d digest.Digest) ([]Layer, error) {
|
||||
return r.layersByDigestMap(r.bytocsum, d)
|
||||
}
|
||||
|
||||
func closeAll(closes ...func() error) (rErr error) {
|
||||
for _, f := range closes {
|
||||
if err := f(); err != nil {
|
||||
|
|
|
|||
|
|
@@ -534,6 +534,10 @@ func (ta *tarAppender) addTarFile(path, name string) error {
 	if ta.ChownOpts != nil {
 		hdr.Uid = ta.ChownOpts.UID
 		hdr.Gid = ta.ChownOpts.GID
+		// Don’t expose the user names from the local system; they probably don’t match the ta.ChownOpts value anyway,
+		// and they unnecessarily give recipients of the tar file potentially private data.
+		hdr.Uname = ""
+		hdr.Gname = ""
 	}

 	maybeTruncateHeaderModTime(hdr)

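The same concern applies to any tar writer: once numeric ownership is forced, the textual Uname/Gname fields should be cleared too, or the local account names still leak into the archive. A minimal sketch with the standard library's archive/tar:

package main

import (
	"archive/tar"
	"fmt"
	"os"
)

// anonymizeOwner forces root ownership and drops local user/group names
// from a tar header before it is written.
func anonymizeOwner(hdr *tar.Header) {
	hdr.Uid = 0
	hdr.Gid = 0
	hdr.Uname = ""
	hdr.Gname = ""
}

func main() {
	fi, err := os.Stat("go.mod") // any existing file
	if err != nil {
		panic(err)
	}
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		panic(err)
	}
	anonymizeOwner(hdr)
	fmt.Println(hdr.Uid, hdr.Gid, hdr.Uname, hdr.Gname) // 0 0 and empty names
}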
@@ -578,7 +578,10 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
 		return byteSliceAsString(buf.Bytes()[from:to])
 	}

-	iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
+	pool := iter.Pool()
+	pool.ReturnIterator(iter)
+	iter = pool.BorrowIterator(manifest)
+
 	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
 		if strings.ToLower(field) == "version" {
 			toc.Version = iter.ReadInt()

@ -657,8 +660,17 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
|
|||
}
|
||||
toc.Entries = append(toc.Entries, m)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// validate there is no extra data in the provided input. This is a security measure to avoid
|
||||
// that the digest we calculate for the TOC refers to the entire document.
|
||||
if iter.Error != nil && iter.Error != io.EOF {
|
||||
return nil, iter.Error
|
||||
}
|
||||
if iter.WhatIsNext() != jsoniter.InvalidValue || !errors.Is(iter.Error, io.EOF) {
|
||||
return nil, fmt.Errorf("unexpected data after manifest")
|
||||
}
|
||||
|
||||
toc.StringsBuf = buf
|
||||
return &toc, nil
|
||||
}
|
||||
|
|
|
|||
common/vendor/github.com/containers/storage/pkg/chunked/toc/toc.go (generated, vendored, new file, 34 lines)
@@ -0,0 +1,34 @@
package toc

import (
    "github.com/containers/storage/pkg/chunked/internal"
    digest "github.com/opencontainers/go-digest"
)

// tocJSONDigestAnnotation is the annotation key for the digest of the estargz
// TOC JSON.
// It is defined in github.com/containerd/stargz-snapshotter/estargz as TOCJSONDigestAnnotation
// Duplicate it here to avoid a dependency on the package.
const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"

// GetTOCDigest returns the digest of the TOC as recorded in the annotations.
// This function retrieves a digest that represents the content of a
// table of contents (TOC) from the image's annotations.
// This is an experimental feature and may be changed/removed in the future.
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
    if contentDigest, ok := annotations[tocJSONDigestAnnotation]; ok {
        d, err := digest.Parse(contentDigest)
        if err != nil {
            return nil, err
        }
        return &d, nil
    }
    if contentDigest, ok := annotations[internal.ManifestChecksumKey]; ok {
        d, err := digest.Parse(contentDigest)
        if err != nil {
            return nil, err
        }
        return &d, nil
    }
    return nil, nil
}
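A minimal usage sketch of the new GetTOCDigest helper above, assuming the vendored import path; the annotation key is the estargz one duplicated in this file, and the digest value is a syntactically valid placeholder, not real data.

package main

import (
    "fmt"

    "github.com/containers/storage/pkg/chunked/toc"
)

func main() {
    // Annotations as they might appear on an estargz layer descriptor (placeholder digest).
    annotations := map[string]string{
        "containerd.io/snapshot/stargz/toc.digest": "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
    }
    d, err := toc.GetTOCDigest(annotations)
    if err != nil {
        fmt.Println("malformed TOC digest:", err)
        return
    }
    if d == nil {
        // No TOC annotation: the layer has to be applied through the regular diff path.
        fmt.Println("no TOC digest recorded")
        return
    }
    fmt.Println("TOC digest:", d.String())
}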
@@ -11,6 +11,7 @@ import (
    "reflect"
    "strings"
    "sync"
    "syscall"
    "time"

    // register all of the built-in drivers
@@ -314,10 +315,10 @@ type Store interface
    // ApplyDiffer applies a diff to a layer.
    // It is the caller responsibility to clean the staging directory if it is not
    // successfully applied with ApplyDiffFromStagingDirectory.
    ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
    ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)

    // ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
    ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
    ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error

    // CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors
    CleanupStagingDirectory(stagingDirectory string) error
@@ -333,6 +334,10 @@ type Store interface {
    // specified uncompressed digest value recorded for them.
    LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)

    // LayersByTOCDigest returns a slice of the layers with the
    // specified TOC digest value recorded for them.
    LayersByTOCDigest(d digest.Digest) ([]Layer, error)

    // LayerSize returns a cached approximation of the layer's size, or -1
    // if we don't have a value on hand.
    LayerSize(id string) (int64, error)
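A rough sketch of the call sequence the interface above implies, using the renamed options type. It is not code from this commit: `s`, `differ`, and the assumption that the staging directory path is carried in `out.Target` all come from the caller's side, and error handling is abbreviated.

import (
    storage "github.com/containers/storage"
    drivers "github.com/containers/storage/drivers"
)

// applyStaged stages a diff with a differ and then either commits it to the
// target layer or cleans the staging directory up, as the interface contract requires.
func applyStaged(s storage.Store, layerID string, differ drivers.Differ) error {
    out, err := s.ApplyDiffWithDiffer("", nil, differ)
    if err != nil {
        return err
    }
    if err := s.ApplyDiffFromStagingDirectory(layerID, out.Target, out, nil); err != nil {
        // Cleanup is the caller's responsibility when the apply fails.
        _ = s.CleanupStagingDirectory(out.Target)
        return err
    }
    return nil
}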
@@ -961,6 +966,10 @@ func (s *store) load() error {
    } else {
        ris, err = newROImageStore(gipath)
        if err != nil {
            if errors.Is(err, syscall.EROFS) {
                logrus.Debugf("Ignoring creation of lockfiles on read-only file systems %q, %v", gipath, err)
                continue
            }
            return err
        }
    }
@@ -2922,7 +2931,7 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
    return nil, ErrLayerUnknown
}

func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error {
func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
    _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
        if !rlstore.Exists(to) {
            return struct{}{}, ErrLayerUnknown
@@ -2939,7 +2948,7 @@ func (s *store) CleanupStagingDirectory(stagingDirectory string) error {
    return err
}

func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
    return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) {
        if to != "" && !rlstore.Exists(to) {
            return nil, ErrLayerUnknown
@@ -3001,6 +3010,13 @@ func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
    return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d)
}

func (s *store) LayersByTOCDigest(d digest.Digest) ([]Layer, error) {
    if err := d.Validate(); err != nil {
        return nil, fmt.Errorf("looking for TOC matching digest %q: %w", d, err)
    }
    return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByTOCDigest(d) }, d)
}

func (s *store) LayerSize(id string) (int64, error) {
    if res, done, err := readAllLayerStores(s, func(store roLayerStore) (int64, bool, error) {
        if store.Exists(id) {
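For completeness, a hedged example of driving the new TOC lookup added above; `reusableLayers` is a hypothetical caller-side helper, not part of this commit.

import (
    storage "github.com/containers/storage"
    digest "github.com/opencontainers/go-digest"
)

// reusableLayers returns local layers whose recorded TOC digest matches the
// blob we are about to pull, so the pull can potentially be skipped.
func reusableLayers(s storage.Store, d digest.Digest) ([]storage.Layer, error) {
    // LayersByTOCDigest validates the digest as well, but checking up front
    // gives the caller a clearer error message.
    if err := d.Validate(); err != nil {
        return nil, err
    }
    return s.LayersByTOCDigest(d)
}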
|||
|
|
@ -4,45 +4,59 @@ linters-settings:
|
|||
golint:
|
||||
min-confidence: 0
|
||||
gocyclo:
|
||||
min-complexity: 30
|
||||
min-complexity: 45
|
||||
maligned:
|
||||
suggest-new: true
|
||||
dupl:
|
||||
threshold: 100
|
||||
threshold: 200
|
||||
goconst:
|
||||
min-len: 2
|
||||
min-occurrences: 4
|
||||
min-occurrences: 3
|
||||
|
||||
linters:
|
||||
enable-all: true
|
||||
disable:
|
||||
- errname # this repo doesn't follow the convention advised by this linter
|
||||
- maligned
|
||||
- unparam
|
||||
- lll
|
||||
- gochecknoinits
|
||||
- gochecknoglobals
|
||||
- funlen
|
||||
- godox
|
||||
- gocognit
|
||||
- whitespace
|
||||
- wsl
|
||||
- funlen
|
||||
- gochecknoglobals
|
||||
- gochecknoinits
|
||||
- scopelint
|
||||
- wrapcheck
|
||||
- exhaustivestruct
|
||||
- exhaustive
|
||||
- nlreturn
|
||||
- testpackage
|
||||
- gci
|
||||
- gofumpt
|
||||
- goerr113
|
||||
- nlreturn
|
||||
- gomnd
|
||||
- tparallel
|
||||
- exhaustivestruct
|
||||
- goerr113
|
||||
- errorlint
|
||||
- nestif
|
||||
- godot
|
||||
- errorlint
|
||||
- gofumpt
|
||||
- paralleltest
|
||||
- tparallel
|
||||
- cyclop
|
||||
- errname
|
||||
- varnamelen
|
||||
- thelper
|
||||
- ifshort
|
||||
- exhaustruct
|
||||
- maintidx
|
||||
- varnamelen
|
||||
- gci
|
||||
- depguard
|
||||
- errchkjson
|
||||
- inamedparam
|
||||
- nonamedreturns
|
||||
- musttag
|
||||
- ireturn
|
||||
- forcetypeassert
|
||||
- cyclop
|
||||
# deprecated linters
|
||||
- deadcode
|
||||
- interfacer
|
||||
- scopelint
|
||||
- varcheck
|
||||
- structcheck
|
||||
- golint
|
||||
- nosnakecase
|
||||
|
|
|
|||
|
|
@@ -1,11 +1,8 @@
# OpenAPI errors
# OpenAPI errors [](https://github.com/go-openapi/errors/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/errors)

[](https://travis-ci.org/go-openapi/errors)
[](https://codecov.io/gh/go-openapi/errors)
[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE)
[](https://pkg.go.dev/github.com/go-openapi/errors)
[](https://golangci.com)
[](https://goreportcard.com/report/github.com/go-openapi/errors)

Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit.
@@ -55,9 +55,15 @@ func (a apiError) MarshalJSON() ([]byte, error) {
// New creates a new API error with a code and a message
func New(code int32, message string, args ...interface{}) Error {
    if len(args) > 0 {
        return &apiError{code, fmt.Sprintf(message, args...)}
        return &apiError{
            code:    code,
            message: fmt.Sprintf(message, args...),
        }
    }
    return &apiError{
        code:    code,
        message: message,
    }
    return &apiError{code, message}
}

// NotFound creates a new not found error
@@ -130,10 +136,14 @@ func flattenComposite(errs *CompositeError) *CompositeError {
// MethodNotAllowed creates a new method not allowed error
func MethodNotAllowed(requested string, allow []string) Error {
    msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ","))
    return &MethodNotAllowedError{code: http.StatusMethodNotAllowed, Allowed: allow, message: msg}
    return &MethodNotAllowedError{
        code:    http.StatusMethodNotAllowed,
        Allowed: allow,
        message: msg,
    }
}

// ServeError the error handler interface implementation
// ServeError implements the http error handler interface
func ServeError(rw http.ResponseWriter, r *http.Request, err error) {
    rw.Header().Set("Content-Type", "application/json")
    switch e := err.(type) {
@@ -120,6 +120,10 @@ func (c *CompositeError) Error() string {
    return c.message
}

func (c *CompositeError) Unwrap() []error {
    return c.Errors
}

// MarshalJSON implements the JSON encoding interface
func (c CompositeError) MarshalJSON() ([]byte, error) {
    return json.Marshal(map[string]interface{}{
@@ -133,7 +137,7 @@ func (c CompositeError) MarshalJSON() ([]byte, error) {
func CompositeValidationError(errors ...error) *CompositeError {
    return &CompositeError{
        code:    CompositeErrorCode,
        Errors:  append([]error{}, errors...),
        Errors:  append(make([]error, 0, len(errors)), errors...),
        message: "validation failure list",
    }
}
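The new Unwrap() []error method lets the standard library's errors.Is and errors.As (Go 1.20+) look inside a CompositeError. A small, hedged illustration; the sentinel error is made up for the example.

package main

import (
    "errors"
    "fmt"

    apierrors "github.com/go-openapi/errors"
)

var errMissingName = errors.New("name is required")

func main() {
    composite := apierrors.CompositeValidationError(errMissingName, errors.New("age must be positive"))
    // Without Unwrap() []error, this check could not see through the wrapper.
    if errors.Is(composite, errMissingName) {
        fmt.Println("the composite contains the missing-name error")
    }
}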
|||
|
|
@ -4,56 +4,58 @@ linters-settings:
|
|||
golint:
|
||||
min-confidence: 0
|
||||
gocyclo:
|
||||
min-complexity: 31
|
||||
min-complexity: 45
|
||||
maligned:
|
||||
suggest-new: true
|
||||
dupl:
|
||||
threshold: 100
|
||||
threshold: 200
|
||||
goconst:
|
||||
min-len: 2
|
||||
min-occurrences: 4
|
||||
min-occurrences: 3
|
||||
|
||||
linters:
|
||||
enable:
|
||||
- revive
|
||||
- goimports
|
||||
- gosec
|
||||
enable-all: true
|
||||
disable:
|
||||
- maligned
|
||||
- unparam
|
||||
- unconvert
|
||||
- predeclared
|
||||
- prealloc
|
||||
- misspell
|
||||
|
||||
# disable:
|
||||
# - maligned
|
||||
# - lll
|
||||
# - gochecknoinits
|
||||
# - gochecknoglobals
|
||||
# - godox
|
||||
# - gocognit
|
||||
# - whitespace
|
||||
# - wsl
|
||||
# - funlen
|
||||
# - wrapcheck
|
||||
# - testpackage
|
||||
# - nlreturn
|
||||
# - gofumpt
|
||||
# - goerr113
|
||||
# - gci
|
||||
# - gomnd
|
||||
# - godot
|
||||
# - exhaustivestruct
|
||||
# - paralleltest
|
||||
# - varnamelen
|
||||
# - ireturn
|
||||
# - exhaustruct
|
||||
# #- thelper
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
- path: bson.go
|
||||
text: "should be .*ObjectID"
|
||||
linters:
|
||||
- lll
|
||||
- gochecknoinits
|
||||
- gochecknoglobals
|
||||
- funlen
|
||||
- godox
|
||||
- gocognit
|
||||
- whitespace
|
||||
- wsl
|
||||
- wrapcheck
|
||||
- testpackage
|
||||
- nlreturn
|
||||
- gomnd
|
||||
- exhaustivestruct
|
||||
- goerr113
|
||||
- errorlint
|
||||
- nestif
|
||||
- godot
|
||||
- gofumpt
|
||||
- paralleltest
|
||||
- tparallel
|
||||
- thelper
|
||||
- ifshort
|
||||
- exhaustruct
|
||||
- varnamelen
|
||||
- gci
|
||||
- depguard
|
||||
- errchkjson
|
||||
- inamedparam
|
||||
- nonamedreturns
|
||||
- musttag
|
||||
- ireturn
|
||||
- forcetypeassert
|
||||
- cyclop
|
||||
# deprecated linters
|
||||
- deadcode
|
||||
- interfacer
|
||||
- scopelint
|
||||
- varcheck
|
||||
- structcheck
|
||||
- golint
|
||||
- stylecheck
|
||||
|
||||
- nosnakecase
|
||||
|
|
|
|||
|
|
@ -1,8 +1,7 @@
|
|||
# Strfmt [](https://travis-ci.org/go-openapi/strfmt) [](https://codecov.io/gh/go-openapi/strfmt) [](https://slackin.goswagger.io)
|
||||
|
||||
# Strfmt [](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/strfmt)
|
||||
[](https://slackin.goswagger.io)
|
||||
[](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE)
|
||||
[](http://godoc.org/github.com/go-openapi/strfmt)
|
||||
[](https://golangci.com)
|
||||
[](https://goreportcard.com/report/github.com/go-openapi/strfmt)
|
||||
|
||||
This package exposes a registry of data types to support string formats in the go-openapi toolkit.
|
||||
|
|
|
|||
|
|
@ -39,10 +39,10 @@ func IsBSONObjectID(str string) bool {
|
|||
// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID)
|
||||
//
|
||||
// swagger:strfmt bsonobjectid
|
||||
type ObjectId bsonprim.ObjectID //nolint:revive
|
||||
type ObjectId bsonprim.ObjectID //nolint:revive,stylecheck
|
||||
|
||||
// NewObjectId creates a ObjectId from a Hex String
|
||||
func NewObjectId(hex string) ObjectId { //nolint:revive
|
||||
func NewObjectId(hex string) ObjectId { //nolint:revive,stylecheck
|
||||
oid, err := bsonprim.ObjectIDFromHex(hex)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
|
@ -135,7 +135,7 @@ func (id *ObjectId) UnmarshalBSON(data []byte) error {
|
|||
// BSON document if the error is nil.
|
||||
func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) {
|
||||
oid := bsonprim.ObjectID(id)
|
||||
return bsontype.ObjectID, oid[:], nil
|
||||
return bson.TypeObjectID, oid[:], nil
|
||||
}
|
||||
|
||||
// UnmarshalBSONValue is an interface implemented by types that can unmarshal a
|
||||
|
|
|
|||
|
|
@ -94,7 +94,7 @@ func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry {
|
|||
}
|
||||
|
||||
// MapStructureHookFunc is a decode hook function for mapstructure
|
||||
func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //nolint:gocyclo,cyclop
|
||||
func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc {
|
||||
return func(from reflect.Type, to reflect.Type, obj interface{}) (interface{}, error) {
|
||||
if from.Kind() != reflect.String {
|
||||
return obj, nil
|
||||
|
|
|
|||
|
|
@ -76,6 +76,8 @@ const (
|
|||
ISO8601TimeWithReducedPrecisionLocaltime = "2006-01-02T15:04"
|
||||
// ISO8601TimeUniversalSortableDateTimePattern represents a ISO8601 universal sortable date time pattern.
|
||||
ISO8601TimeUniversalSortableDateTimePattern = "2006-01-02 15:04:05"
|
||||
// short form of ISO8601TimeUniversalSortableDateTimePattern
|
||||
ISO8601TimeUniversalSortableDateTimePatternShortForm = "2006-01-02"
|
||||
// DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6
|
||||
DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$`
|
||||
)
|
||||
|
|
@ -84,7 +86,7 @@ var (
|
|||
rxDateTime = regexp.MustCompile(DateTimePattern)
|
||||
|
||||
// DateTimeFormats is the collection of formats used by ParseDateTime()
|
||||
DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern}
|
||||
DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern, ISO8601TimeUniversalSortableDateTimePatternShortForm}
|
||||
|
||||
// MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds)
|
||||
MarshalFormat = RFC3339Millis
|
||||
|
|
@ -245,7 +247,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) {
|
|||
buf := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(buf, uint64(i64))
|
||||
|
||||
return bsontype.DateTime, buf, nil
|
||||
return bson.TypeDateTime, buf, nil
|
||||
}
|
||||
|
||||
// UnmarshalBSONValue is an interface implemented by types that can unmarshal a
|
||||
|
|
@ -253,7 +255,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) {
|
|||
// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
|
||||
// wishes to retain the data after returning.
|
||||
func (t *DateTime) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error {
|
||||
if tpe == bsontype.Null {
|
||||
if tpe == bson.TypeNull {
|
||||
*t = DateTime{}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,3 +2,4 @@ secrets.yml
|
|||
vendor
|
||||
Godeps
|
||||
.idea
|
||||
*.out
|
||||
|
|
|
|||
|
|
@ -4,14 +4,14 @@ linters-settings:
|
|||
golint:
|
||||
min-confidence: 0
|
||||
gocyclo:
|
||||
min-complexity: 25
|
||||
min-complexity: 45
|
||||
maligned:
|
||||
suggest-new: true
|
||||
dupl:
|
||||
threshold: 100
|
||||
threshold: 200
|
||||
goconst:
|
||||
min-len: 3
|
||||
min-occurrences: 2
|
||||
min-occurrences: 3
|
||||
|
||||
linters:
|
||||
enable-all: true
|
||||
|
|
@ -20,35 +20,41 @@ linters:
|
|||
- lll
|
||||
- gochecknoinits
|
||||
- gochecknoglobals
|
||||
- nlreturn
|
||||
- testpackage
|
||||
- funlen
|
||||
- godox
|
||||
- gocognit
|
||||
- whitespace
|
||||
- wsl
|
||||
- wrapcheck
|
||||
- testpackage
|
||||
- nlreturn
|
||||
- gomnd
|
||||
- exhaustive
|
||||
- exhaustivestruct
|
||||
- goerr113
|
||||
- wsl
|
||||
- whitespace
|
||||
- gofumpt
|
||||
- godot
|
||||
- errorlint
|
||||
- nestif
|
||||
- godox
|
||||
- funlen
|
||||
- gci
|
||||
- gocognit
|
||||
- godot
|
||||
- gofumpt
|
||||
- paralleltest
|
||||
- tparallel
|
||||
- thelper
|
||||
- ifshort
|
||||
- gomoddirectives
|
||||
- cyclop
|
||||
- forcetypeassert
|
||||
- ireturn
|
||||
- tagliatelle
|
||||
- varnamelen
|
||||
- goimports
|
||||
- tenv
|
||||
- golint
|
||||
- exhaustruct
|
||||
- nilnil
|
||||
- varnamelen
|
||||
- gci
|
||||
- depguard
|
||||
- errchkjson
|
||||
- inamedparam
|
||||
- nonamedreturns
|
||||
- musttag
|
||||
- ireturn
|
||||
- forcetypeassert
|
||||
- cyclop
|
||||
# deprecated linters
|
||||
- deadcode
|
||||
- interfacer
|
||||
- scopelint
|
||||
- varcheck
|
||||
- structcheck
|
||||
- golint
|
||||
- nosnakecase
|
||||
|
|
|
|||
|
|
@ -1,7 +1,8 @@
|
|||
# Swag [](https://travis-ci.org/go-openapi/swag) [](https://codecov.io/gh/go-openapi/swag) [](https://slackin.goswagger.io)
|
||||
# Swag [](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/swag)
|
||||
|
||||
[](https://slackin.goswagger.io)
|
||||
[](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE)
|
||||
[](http://godoc.org/github.com/go-openapi/swag)
|
||||
[](https://pkg.go.dev/github.com/go-openapi/swag)
|
||||
[](https://goreportcard.com/report/github.com/go-openapi/swag)
|
||||
|
||||
Contains a bunch of helper functions for go-openapi and go-swagger projects.
|
||||
|
|
@ -18,4 +19,5 @@ You may also use it standalone for your projects.
|
|||
|
||||
This repo has only few dependencies outside of the standard library:
|
||||
|
||||
* YAML utilities depend on gopkg.in/yaml.v2
|
||||
* YAML utilities depend on `gopkg.in/yaml.v3`
|
||||
* `github.com/mailru/easyjson v0.7.7`
|
||||
|
|
|
|||
|
|
@ -12,9 +12,6 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.9
|
||||
// +build go1.9
|
||||
|
||||
package swag
|
||||
|
||||
import (
|
||||
|
|
@ -21,6 +21,7 @@ import (
|
|||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
|
@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = ""
|
|||
var LoadHTTPCustomHeaders = map[string]string{}
|
||||
|
||||
// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
|
||||
func LoadFromFileOrHTTP(path string) ([]byte, error) {
|
||||
return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path)
|
||||
func LoadFromFileOrHTTP(pth string) ([]byte, error) {
|
||||
return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth)
|
||||
}
|
||||
|
||||
// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in
|
||||
// timeout arg allows for per request overriding of the request timeout
|
||||
func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) {
|
||||
return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path)
|
||||
func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) {
|
||||
return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth)
|
||||
}
|
||||
|
||||
// LoadStrategy returns a loader function for a given path or uri
|
||||
func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
|
||||
if strings.HasPrefix(path, "http") {
|
||||
// LoadStrategy returns a loader function for a given path or URI.
|
||||
//
|
||||
// The load strategy returns the remote load for any path starting with `http`.
|
||||
// So this works for any URI with a scheme `http` or `https`.
|
||||
//
|
||||
// The fallback strategy is to call the local loader.
|
||||
//
|
||||
// The local loader takes a local file system path (absolute or relative) as argument,
|
||||
// or alternatively a `file://...` URI, **without host** (see also below for windows).
|
||||
//
|
||||
// There are a few liberalities, initially intended to be tolerant regarding the URI syntax,
|
||||
// especially on windows.
|
||||
//
|
||||
// Before the local loader is called, the given path is transformed:
|
||||
// - percent-encoded characters are unescaped
|
||||
// - simple paths (e.g. `./folder/file`) are passed as-is
|
||||
// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such a `folder/file` works too.
|
||||
//
|
||||
// For paths provided as URIs with the "file" scheme, please note that:
|
||||
// - `file://` is simply stripped.
|
||||
// This means that the host part of the URI is not parsed at all.
|
||||
// For example, `file:///folder/file" becomes "/folder/file`,
|
||||
// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems.
|
||||
// Similarly, `file://./folder/file` yields `./folder/file`.
|
||||
// - on windows, `file://...` can take a host so as to specify an UNC share location.
|
||||
//
|
||||
// Reminder about windows-specifics:
|
||||
// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported)
|
||||
// - `file:///c:/folder/file` becomes `C:\folder\file`
|
||||
// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file`
|
||||
func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
|
||||
if strings.HasPrefix(pth, "http") {
|
||||
return remote
|
||||
}
|
||||
return func(pth string) ([]byte, error) {
|
||||
upth, err := pathUnescape(pth)
|
||||
|
||||
return func(p string) ([]byte, error) {
|
||||
upth, err := url.PathUnescape(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if strings.HasPrefix(pth, `file://`) {
|
||||
if runtime.GOOS == "windows" {
|
||||
if !strings.HasPrefix(p, `file://`) {
|
||||
// regular file path provided: just normalize slashes
|
||||
return local(filepath.FromSlash(upth))
|
||||
}
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
// crude processing: this leaves full URIs with a host with a (mostly) unexpected result
|
||||
upth = strings.TrimPrefix(upth, `file://`)
|
||||
|
||||
return local(filepath.FromSlash(upth))
|
||||
}
|
||||
|
||||
// windows-only pre-processing of file://... URIs
|
||||
|
||||
// support for canonical file URIs on windows.
|
||||
// Zero tolerance here for dodgy URIs.
|
||||
u, _ := url.Parse(upth)
|
||||
u, err := url.Parse(filepath.ToSlash(upth))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if u.Host != "" {
|
||||
// assume UNC name (volume share)
|
||||
// file://host/share/folder\... ==> \\host\share\path\folder
|
||||
// NOTE: UNC port not yet supported
|
||||
upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`)
|
||||
} else {
|
||||
// file:///c:/folder/... ==> just remove the leading slash
|
||||
upth = strings.TrimPrefix(upth, `file:///`)
|
||||
|
||||
// when the "host" segment is a drive letter:
|
||||
// file://C:/folder/... => C:\folder
|
||||
upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`))
|
||||
if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' {
|
||||
// tolerance: if we have a leading dot, this can't be a host
|
||||
// file://host/share/folder\... ==> \\host\share\path\folder
|
||||
upth = "//" + upth
|
||||
}
|
||||
} else {
|
||||
// no host, let's figure out if this is a drive letter
|
||||
upth = strings.TrimPrefix(upth, `file://`)
|
||||
first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/")
|
||||
if strings.HasSuffix(first, ":") {
|
||||
// drive letter in the first segment:
|
||||
// file:///c:/folder/... ==> strip the leading slash
|
||||
upth = strings.TrimPrefix(upth, `/`)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,24 +0,0 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.8
|
||||
// +build go1.8
|
||||
|
||||
package swag
|
||||
|
||||
import "net/url"
|
||||
|
||||
func pathUnescape(path string) (string, error) {
|
||||
return url.PathUnescape(path)
|
||||
}
|
||||
|
|
@ -1,24 +0,0 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !go1.8
|
||||
// +build !go1.8
|
||||
|
||||
package swag
|
||||
|
||||
import "net/url"
|
||||
|
||||
func pathUnescape(path string) (string, error) {
|
||||
return url.QueryUnescape(path)
|
||||
}
|
||||
|
|
@ -1,70 +0,0 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !go1.9
|
||||
// +build !go1.9
|
||||
|
||||
package swag
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
|
||||
// Before go1.9, this may be implemented with a mutex on the map.
|
||||
type indexOfInitialisms struct {
|
||||
getMutex *sync.Mutex
|
||||
index map[string]bool
|
||||
}
|
||||
|
||||
func newIndexOfInitialisms() *indexOfInitialisms {
|
||||
return &indexOfInitialisms{
|
||||
getMutex: new(sync.Mutex),
|
||||
index: make(map[string]bool, 50),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
|
||||
m.getMutex.Lock()
|
||||
defer m.getMutex.Unlock()
|
||||
for k, v := range initial {
|
||||
m.index[k] = v
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) isInitialism(key string) bool {
|
||||
m.getMutex.Lock()
|
||||
defer m.getMutex.Unlock()
|
||||
_, ok := m.index[key]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
|
||||
m.getMutex.Lock()
|
||||
defer m.getMutex.Unlock()
|
||||
m.index[key] = true
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *indexOfInitialisms) sorted() (result []string) {
|
||||
m.getMutex.Lock()
|
||||
defer m.getMutex.Unlock()
|
||||
for k := range m.index {
|
||||
result = append(result, k)
|
||||
}
|
||||
sort.Sort(sort.Reverse(byInitialism(result)))
|
||||
return
|
||||
}
|
||||
|
|
@ -343,7 +343,7 @@ type zeroable interface {
|
|||
func IsZero(data interface{}) bool {
|
||||
v := reflect.ValueOf(data)
|
||||
// check for nil data
|
||||
switch v.Kind() {
|
||||
switch v.Kind() { //nolint:exhaustive
|
||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
if v.IsNil() {
|
||||
return true
|
||||
|
|
@ -356,7 +356,7 @@ func IsZero(data interface{}) bool {
|
|||
}
|
||||
|
||||
// continue with slightly more complex reflection
|
||||
switch v.Kind() {
|
||||
switch v.Kind() { //nolint:exhaustive
|
||||
case reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
|
|
|
|||
|
|
@ -147,7 +147,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) {
|
|||
case yamlTimestamp:
|
||||
return node.Value, nil
|
||||
case yamlNull:
|
||||
return nil, nil
|
||||
return nil, nil //nolint:nilnil
|
||||
default:
|
||||
return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag())
|
||||
}
|
||||
|
|
@ -319,7 +319,8 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
|
|||
Value: strconv.FormatBool(val),
|
||||
}, nil
|
||||
}
|
||||
return nil, nil
|
||||
|
||||
return nil, nil //nolint:nilnil
|
||||
}
|
||||
|
||||
// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice
|
||||
|
|
|
|||
|
|
@ -1,5 +1,23 @@
|
|||
# Changelog
|
||||
|
||||
## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
|
||||
|
||||
## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
|
||||
|
||||
### Fixes
|
||||
|
||||
* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
|
||||
|
||||
## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ please explain why in the pull request description.
|
|||
|
||||
### Releasing
|
||||
|
||||
Commits that would precipitate a SemVer change, as desrcibed in the Conventional
|
||||
Commits that would precipitate a SemVer change, as described in the Conventional
|
||||
Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
|
||||
to create a release candidate pull request. Once submitted, `release-please`
|
||||
will create a release.
|
||||
|
|
|
|||
|
|
@@ -108,12 +108,23 @@ func setClockSequence(seq int) {
}

// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. The time is only defined for version 1 and 2 UUIDs.
// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
func (uuid UUID) Time() Time {
    var t Time
    switch uuid.Version() {
    case 6:
        time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
        t = Time(time)
    case 7:
        time := binary.BigEndian.Uint64(uuid[:8])
        t = Time((time>>16)*10000 + g1582ns100)
    default: // forward compatible
        time := int64(binary.BigEndian.Uint32(uuid[0:4]))
        time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
        time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
        return Time(time)
        t = Time(time)
    }
    return t
}
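Given the extended Time() above, a rough example of recovering the embedded wall-clock timestamp from a v7 UUID; the UnixTime helper on Time is assumed to behave as in earlier releases of the package.

package main

import (
    "fmt"
    "time"

    "github.com/google/uuid"
)

func main() {
    u, err := uuid.NewV7()
    if err != nil {
        panic(err)
    }
    // Convert from 100ns ticks since 15 Oct 1582 to Unix seconds/nanoseconds.
    sec, nsec := u.Time().UnixTime()
    fmt.Println("embedded timestamp:", time.Unix(sec, nsec).UTC())
}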
|
||||
// ClockSequence returns the clock sequence encoded in uuid.
|
||||
|
|
|
|||
|
|
@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool {
|
|||
return ok
|
||||
}
|
||||
|
||||
// Parse decodes s into a UUID or returns an error. Both the standard UUID
|
||||
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
|
||||
// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
|
||||
// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
|
||||
// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
|
||||
// the standard UUID forms defined in RFC 4122
|
||||
// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
|
||||
// Parse accepts non-standard strings such as the raw hex encoding
|
||||
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
|
||||
// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
|
||||
// examined in the latter case. Parse should not be used to validate strings as
|
||||
// it parses non-standard encodings as indicated above.
|
||||
func Parse(s string) (UUID, error) {
|
||||
var uuid UUID
|
||||
switch len(s) {
|
||||
|
|
@ -182,6 +186,59 @@ func Must(uuid UUID, err error) UUID {
|
|||
return uuid
|
||||
}
|
||||
|
||||
// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
//   xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
//   urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
//   xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//   {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
// It returns an error if the format is invalid, otherwise nil.
func Validate(s string) error {
    switch len(s) {
    // Standard UUID format
    case 36:

    // UUID with "urn:uuid:" prefix
    case 36 + 9:
        if !strings.EqualFold(s[:9], "urn:uuid:") {
            return fmt.Errorf("invalid urn prefix: %q", s[:9])
        }
        s = s[9:]

    // UUID enclosed in braces
    case 36 + 2:
        if s[0] != '{' || s[len(s)-1] != '}' {
            return fmt.Errorf("invalid bracketed UUID format")
        }
        s = s[1 : len(s)-1]

    // UUID without hyphens
    case 32:
        for i := 0; i < len(s); i += 2 {
            _, ok := xtob(s[i], s[i+1])
            if !ok {
                return errors.New("invalid UUID format")
            }
        }

    default:
        return invalidLengthError{len(s)}
    }

    // Check for standard UUID format
    if len(s) == 36 {
        if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
            return errors.New("invalid UUID format")
        }
        for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
            if _, ok := xtob(s[x], s[x+1]); !ok {
                return errors.New("invalid UUID format")
            }
        }
    }

    return nil
}
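A short sketch of the new Validate helper, which checks format without allocating a UUID (unlike Parse); the sample strings are arbitrary.

package main

import (
    "fmt"

    "github.com/google/uuid"
)

func main() {
    candidates := []string{
        "f47ac10b-58cc-0372-8567-0e02b2c3d479",          // canonical form
        "urn:uuid:f47ac10b-58cc-0372-8567-0e02b2c3d479", // URN form
        "{f47ac10b-58cc-0372-8567-0e02b2c3d479}",        // braced form
        "not-a-uuid",
    }
    for _, c := range candidates {
        if err := uuid.Validate(c); err != nil {
            fmt.Printf("%q rejected: %v\n", c, err)
            continue
        }
        fmt.Printf("%q is well formed\n", c)
    }
}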
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid.
func (uuid UUID) String() string {
@@ -294,3 +351,15 @@ func DisableRandPool() {
    poolMu.Lock()
    poolPos = randPoolSize
}

// UUIDs is a slice of UUID types.
type UUIDs []UUID

// Strings returns a string slice containing the string form of each UUID in uuids.
func (uuids UUIDs) Strings() []string {
    var uuidStrs = make([]string, len(uuids))
    for i, uuid := range uuids {
        uuidStrs[i] = uuid.String()
    }
    return uuidStrs
}
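And a quick example of the UUIDs.Strings convenience added above, e.g. for logging a batch of identifiers in one line; using uuid.New() here is just an assumption about how the IDs are produced.

package main

import (
    "fmt"
    "strings"

    "github.com/google/uuid"
)

func main() {
    // Collect a few random UUIDs and print them as one comma-separated line.
    ids := uuid.UUIDs{uuid.New(), uuid.New(), uuid.New()}
    fmt.Println(strings.Join(ids.Strings(), ", "))
}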
|
|
|
|||
|
|
@ -0,0 +1,56 @@
|
|||
// Copyright 2023 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
|
||||
// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
|
||||
// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
|
||||
//
|
||||
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
|
||||
//
|
||||
// NewV6 returns a Version 6 UUID based on the current NodeID and clock
|
||||
// sequence, and the current time. If the NodeID has not been set by SetNodeID
|
||||
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
|
||||
// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by
|
||||
// SetClockSequence then it will be set automatically. If GetTime fails to
|
||||
// return the current NewV6 returns Nil and an error.
|
||||
func NewV6() (UUID, error) {
|
||||
var uuid UUID
|
||||
now, seq, err := GetTime()
|
||||
if err != nil {
|
||||
return uuid, err
|
||||
}
|
||||
|
||||
/*
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| time_high |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| time_mid | time_low_and_version |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|clk_seq_hi_res | clk_seq_low | node (0-1) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| node (2-5) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
*/
|
||||
|
||||
binary.BigEndian.PutUint64(uuid[0:], uint64(now))
|
||||
binary.BigEndian.PutUint16(uuid[8:], seq)
|
||||
|
||||
uuid[6] = 0x60 | (uuid[6] & 0x0F)
|
||||
uuid[8] = 0x80 | (uuid[8] & 0x3F)
|
||||
|
||||
nodeMu.Lock()
|
||||
if nodeID == zeroID {
|
||||
setNodeInterface("")
|
||||
}
|
||||
copy(uuid[10:], nodeID[:])
|
||||
nodeMu.Unlock()
|
||||
|
||||
return uuid, nil
|
||||
}
|
||||
|
|
@ -0,0 +1,75 @@
|
|||
// Copyright 2023 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// UUID version 7 features a time-ordered value field derived from the widely
// implemented and well known Unix Epoch timestamp source,
// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded.
// As well as improved entropy characteristics over versions 1 or 6.
//
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
//
// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
//
// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch).
// Uses the randomness pool if it was enabled with EnableRandPool.
// On error, NewV7 returns Nil and an error
func NewV7() (UUID, error) {
    uuid, err := NewRandom()
    if err != nil {
        return uuid, err
    }
    makeV7(uuid[:])
    return uuid, nil
}

// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch).
// it use NewRandomFromReader fill random bits.
// On error, NewV7FromReader returns Nil and an error.
func NewV7FromReader(r io.Reader) (UUID, error) {
    uuid, err := NewRandomFromReader(r)
    if err != nil {
        return uuid, err
    }

    makeV7(uuid[:])
    return uuid, nil
}
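A hedged sketch of generating v7 UUIDs and relying on their millisecond time ordering; nothing here is specific to this repository.

package main

import (
    "fmt"

    "github.com/google/uuid"
)

func main() {
    a, err := uuid.NewV7()
    if err != nil {
        panic(err)
    }
    b, err := uuid.NewV7()
    if err != nil {
        panic(err)
    }
    // A v7 UUID generated later compares greater at millisecond granularity,
    // which makes these IDs friendly to index locality and range scans.
    // The first 13 characters of the string form (12 hex digits) cover the
    // 48-bit unix_ts_ms field, so within the same millisecond ordering is random.
    fmt.Println(a.String() < b.String() || a.String()[:13] == b.String()[:13])
}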
// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6])
|
||||
// uuid[8] already has the right version number (Variant is 10)
|
||||
// see function NewV7 and NewV7FromReader
|
||||
func makeV7(uuid []byte) {
|
||||
/*
|
||||
0 1 2 3
|
||||
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| unix_ts_ms |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| unix_ts_ms | ver | rand_a |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|var| rand_b |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| rand_b |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
*/
|
||||
_ = uuid[15] // bounds check
|
||||
|
||||
t := timeNow().UnixMilli()
|
||||
|
||||
uuid[0] = byte(t >> 40)
|
||||
uuid[1] = byte(t >> 32)
|
||||
uuid[2] = byte(t >> 24)
|
||||
uuid[3] = byte(t >> 16)
|
||||
uuid[4] = byte(t >> 8)
|
||||
uuid[5] = byte(t)
|
||||
|
||||
uuid[6] = 0x70 | (uuid[6] & 0x0F)
|
||||
// uuid[8] has already has right version
|
||||
}
|
||||
|
|
@ -1,44 +0,0 @@
|
|||
// Copyright 2018 Klaus Post. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
|
||||
|
||||
package huff0
|
||||
|
||||
// byteReader provides a byte reader that reads
|
||||
// little endian values from a byte stream.
|
||||
// The input stream is manually advanced.
|
||||
// The reader performs no bounds checks.
|
||||
type byteReader struct {
|
||||
b []byte
|
||||
off int
|
||||
}
|
||||
|
||||
// init will initialize the reader and set the input.
|
||||
func (b *byteReader) init(in []byte) {
|
||||
b.b = in
|
||||
b.off = 0
|
||||
}
|
||||
|
||||
// Int32 returns a little endian int32 starting at current offset.
|
||||
func (b byteReader) Int32() int32 {
|
||||
v3 := int32(b.b[b.off+3])
|
||||
v2 := int32(b.b[b.off+2])
|
||||
v1 := int32(b.b[b.off+1])
|
||||
v0 := int32(b.b[b.off])
|
||||
return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
|
||||
}
|
||||
|
||||
// Uint32 returns a little endian uint32 starting at current offset.
|
||||
func (b byteReader) Uint32() uint32 {
|
||||
v3 := uint32(b.b[b.off+3])
|
||||
v2 := uint32(b.b[b.off+2])
|
||||
v1 := uint32(b.b[b.off+1])
|
||||
v0 := uint32(b.b[b.off])
|
||||
return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
|
||||
}
|
||||
|
||||
// remain will return the number of bytes remaining.
|
||||
func (b byteReader) remain() int {
|
||||
return len(b.b) - b.off
|
||||
}
|
||||
|
|
@ -350,6 +350,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
|
|||
// Does not update s.clearCount.
|
||||
func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
|
||||
reuse = true
|
||||
_ = s.count // Assert that s != nil to speed up the following loop.
|
||||
for _, v := range in {
|
||||
s.count[v]++
|
||||
}
|
||||
|
|
@ -415,7 +416,7 @@ func (s *Scratch) validateTable(c cTable) bool {
|
|||
|
||||
// minTableLog provides the minimum logSize to safely represent a distribution.
|
||||
func (s *Scratch) minTableLog() uint8 {
|
||||
minBitsSrc := highBit32(uint32(s.br.remain())) + 1
|
||||
minBitsSrc := highBit32(uint32(s.srcLen)) + 1
|
||||
minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
|
||||
if minBitsSrc < minBitsSymbols {
|
||||
return uint8(minBitsSrc)
|
||||
|
|
@ -427,7 +428,7 @@ func (s *Scratch) minTableLog() uint8 {
|
|||
func (s *Scratch) optimalTableLog() {
|
||||
tableLog := s.TableLog
|
||||
minBits := s.minTableLog()
|
||||
maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
|
||||
maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1
|
||||
if maxBitsSrc < tableLog {
|
||||
// Accuracy can be reduced
|
||||
tableLog = maxBitsSrc
|
||||
|
|
|
|||
|
|
@ -88,7 +88,7 @@ type Scratch struct {
|
|||
// Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded.
|
||||
MaxDecodedSize int
|
||||
|
||||
br byteReader
|
||||
srcLen int
|
||||
|
||||
// MaxSymbolValue will override the maximum symbol value of the next block.
|
||||
MaxSymbolValue uint8
|
||||
|
|
@ -170,7 +170,7 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) {
|
|||
if s.fse == nil {
|
||||
s.fse = &fse.Scratch{}
|
||||
}
|
||||
s.br.init(in)
|
||||
s.srcLen = len(in)
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -259,7 +259,7 @@ nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
|
|||
|
||||
## Decompressor
|
||||
|
||||
Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
|
||||
Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
|
||||
|
||||
This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
|
||||
kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
|
||||
|
|
|
|||
|
|
@ -75,11 +75,16 @@ type OCSPStatus string
|
|||
const (
|
||||
OCSPStatusGood = OCSPStatus("good")
|
||||
OCSPStatusRevoked = OCSPStatus("revoked")
|
||||
// Not a real OCSP status. This is a placeholder we write before the
|
||||
// actual precertificate is issued, to ensure we never return "good" before
|
||||
// issuance succeeds, for BR compliance reasons.
|
||||
OCSPStatusNotReady = OCSPStatus("wait")
|
||||
)
|
||||
|
||||
var OCSPStatusToInt = map[OCSPStatus]int{
|
||||
OCSPStatusGood: ocsp.Good,
|
||||
OCSPStatusRevoked: ocsp.Revoked,
|
||||
OCSPStatusNotReady: -1,
|
||||
}
|
||||
|
||||
// DNSPrefix is attached to DNS names in DNS challenges
|
||||
|
|
@ -120,7 +125,7 @@ type ValidationRecord struct {
|
|||
URL string `json:"url,omitempty"`
|
||||
|
||||
// Shared
|
||||
Hostname string `json:"hostname"`
|
||||
Hostname string `json:"hostname,omitempty"`
|
||||
Port string `json:"port,omitempty"`
|
||||
AddressesResolved []net.IP `json:"addressesResolved,omitempty"`
|
||||
AddressUsed net.IP `json:"addressUsed,omitempty"`
|
||||
|
|
@ -337,11 +342,18 @@ type Authorization struct {
|
|||
// slice and the order of these challenges may not be predictable.
|
||||
Challenges []Challenge `json:"challenges,omitempty" db:"-"`
|
||||
|
||||
// Wildcard is a Boulder-specific Authorization field that indicates the
|
||||
// authorization was created as a result of an order containing a name with
|
||||
// a `*.`wildcard prefix. This will help convey to users that an
|
||||
// Authorization with the identifier `example.com` and one DNS-01 challenge
|
||||
// corresponds to a name `*.example.com` from an associated order.
|
||||
// https://datatracker.ietf.org/doc/html/rfc8555#page-29
|
||||
//
|
||||
// wildcard (optional, boolean): This field MUST be present and true
|
||||
// for authorizations created as a result of a newOrder request
|
||||
// containing a DNS identifier with a value that was a wildcard
|
||||
// domain name. For other authorizations, it MUST be absent.
|
||||
// Wildcard domain names are described in Section 7.1.3.
|
||||
//
|
||||
// This is not represented in the database because we calculate it from
|
||||
// the identifier stored in the database. Unlike the identifier returned
|
||||
// as part of the authorization, the identifier we store in the database
|
||||
// can contain an asterisk.
|
||||
Wildcard bool `json:"wildcard,omitempty" db:"-"`
|
||||
}
|
||||
|
||||
|
|
@ -406,8 +418,8 @@ type Certificate struct {
|
|||
}
|
||||
|
||||
// CertificateStatus structs are internal to the server. They represent the
|
||||
// latest data about the status of the certificate, required for OCSP updating
|
||||
// and for validating that the subscriber has accepted the certificate.
|
||||
// latest data about the status of the certificate, required for generating new
|
||||
// OCSP responses and determining if a certificate has been revoked.
|
||||
type CertificateStatus struct {
|
||||
ID int64 `db:"id"`
|
||||
|
||||
|
|
@ -433,26 +445,19 @@ type CertificateStatus struct {
|
|||
|
||||
LastExpirationNagSent time.Time `db:"lastExpirationNagSent"`
|
||||
|
||||
// The encoded and signed OCSP response.
|
||||
OCSPResponse []byte `db:"ocspResponse"`
|
||||
|
||||
// For performance reasons[0] we duplicate the `Expires` field of the
|
||||
// `Certificates` object/table in `CertificateStatus` to avoid a costly `JOIN`
|
||||
// later on just to retrieve this `Time` value. This helps both the OCSP
|
||||
// updater and the expiration-mailer stay performant.
|
||||
//
|
||||
// Similarly, we add an explicit `IsExpired` boolean to `CertificateStatus`
|
||||
// table that the OCSP updater so that the database can create a meaningful
|
||||
// index on `(isExpired, ocspLastUpdated)` without a `JOIN` on `certificates`.
|
||||
// For more detail see Boulder #1864[0].
|
||||
//
|
||||
// [0]: https://github.com/letsencrypt/boulder/issues/1864
|
||||
// NotAfter and IsExpired are convenience columns which allow expensive
|
||||
// queries to quickly filter out certificates that we don't need to care about
|
||||
// anymore. These are particularly useful for the expiration mailer and CRL
|
||||
// updater. See https://github.com/letsencrypt/boulder/issues/1864.
|
||||
NotAfter time.Time `db:"notAfter"`
|
||||
IsExpired bool `db:"isExpired"`
|
||||
|
||||
// TODO(#5152): Change this to an issuance.Issuer(Name)ID after it no longer
|
||||
// has to support both IssuerNameIDs and IssuerIDs.
|
||||
IssuerID int64
|
||||
// Note: this is not an issuance.IssuerNameID because that would create an
|
||||
// import cycle between core and issuance.
|
||||
// Note2: This field used to be called `issuerID`. We keep the old name in
|
||||
// the DB, but update the Go field name to be clear which type of ID this
|
||||
// is.
|
||||
IssuerNameID int64 `db:"issuerID"`
|
||||
}
|
||||
|
||||
// FQDNSet contains the SHA256 hash of the lowercased, comma joined dNSNames
|
||||
|
|
@ -501,7 +506,7 @@ func RenewalInfoSimple(issued time.Time, expires time.Time) RenewalInfo {
|
|||
}
|
||||
|
||||
// RenewalInfoImmediate constructs a `RenewalInfo` object with a suggested
|
||||
// window in the past. Per the draft-ietf-acme-ari-00 spec, clients should
|
||||
// window in the past. Per the draft-ietf-acme-ari-01 spec, clients should
|
||||
// attempt to renew immediately if the suggested window is in the past. The
|
||||
// passed `now` is assumed to be a timestamp representing the current moment in
|
||||
// time.
|
||||
|
|
|
|||
|
|
@ -1,9 +1,10 @@
|
|||
package core
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
|
|
@ -16,6 +17,7 @@ import (
|
|||
"math/big"
|
||||
mrand "math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
|
|
@ -23,7 +25,7 @@ import (
|
|||
"time"
|
||||
"unicode"
|
||||
|
||||
jose "gopkg.in/go-jose/go-jose.v2"
|
||||
"gopkg.in/go-jose/go-jose.v2"
|
||||
)
|
||||
|
||||
const Unspecified = "Unspecified"
|
||||
|
|
@ -96,7 +98,7 @@ func KeyDigest(key crypto.PublicKey) (Sha256Digest, error) {
|
|||
switch t := key.(type) {
|
||||
case *jose.JSONWebKey:
|
||||
if t == nil {
|
||||
return Sha256Digest{}, fmt.Errorf("Cannot compute digest of nil key")
|
||||
return Sha256Digest{}, errors.New("cannot compute digest of nil key")
|
||||
}
|
||||
return KeyDigest(t.Key)
|
||||
case jose.JSONWebKey:
|
||||
|
|
@ -132,21 +134,16 @@ func KeyDigestEquals(j, k crypto.PublicKey) bool {
|
|||
return digestJ == digestK
|
||||
}
|
||||
|
||||
// PublicKeysEqual determines whether two public keys have the same marshalled
// bytes as one another
func PublicKeysEqual(a, b interface{}) (bool, error) {
    if a == nil || b == nil {
        return false, errors.New("One or more nil arguments to PublicKeysEqual")
// PublicKeysEqual determines whether two public keys are identical.
func PublicKeysEqual(a, b crypto.PublicKey) (bool, error) {
    switch ak := a.(type) {
    case *rsa.PublicKey:
        return ak.Equal(b), nil
    case *ecdsa.PublicKey:
        return ak.Equal(b), nil
    default:
        return false, fmt.Errorf("unsupported public key type %T", ak)
    }
    aBytes, err := x509.MarshalPKIXPublicKey(a)
    if err != nil {
        return false, err
    }
    bBytes, err := x509.MarshalPKIXPublicKey(b)
    if err != nil {
        return false, err
    }
    return bytes.Equal(aBytes, bBytes), nil
}
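A brief illustration of the approach the rewritten comparison above takes: it delegates to the keys' own Equal methods instead of re-marshalling to PKIX bytes. This is a local stand-in with arbitrary key parameters, not the Boulder function itself.

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "fmt"
)

// publicKeysEqual mirrors the type-switch style used by the new core.PublicKeysEqual,
// specialized here to ECDSA keys for brevity.
func publicKeysEqual(a, b *ecdsa.PublicKey) bool {
    return a.Equal(b)
}

func main() {
    k1, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    k2, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    fmt.Println(publicKeysEqual(&k1.PublicKey, &k1.PublicKey)) // true
    fmt.Println(publicKeysEqual(&k1.PublicKey, &k2.PublicKey)) // false
}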
|
||||
// SerialToString converts a certificate serial number (big.Int) to a String
|
||||
|
|
@ -160,7 +157,7 @@ func SerialToString(serial *big.Int) string {
|
|||
func StringToSerial(serial string) (*big.Int, error) {
|
||||
var serialNum big.Int
|
||||
if !ValidSerial(serial) {
|
||||
return &serialNum, errors.New("Invalid serial number")
|
||||
return &serialNum, fmt.Errorf("invalid serial number %q", serial)
|
||||
}
|
||||
_, err := fmt.Sscanf(serial, "%036x", &serialNum)
|
||||
return &serialNum, err
|
||||
|
|
@ -245,6 +242,14 @@ func UniqueLowerNames(names []string) (unique []string) {
|
|||
return
|
||||
}
|
||||
|
||||
// HashNames returns a hash of the names requested. This is intended for use
// when interacting with the orderFqdnSets table and rate limiting.
func HashNames(names []string) []byte {
    names = UniqueLowerNames(names)
    hash := sha256.Sum256([]byte(strings.Join(names, ",")))
    return hash[:]
}
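A small worked example of the idea behind HashNames: the same FQDN set should hash identically regardless of case and ordering. This is a self-contained stand-in that assumes UniqueLowerNames lower-cases, de-duplicates, and sorts its input before joining with commas; it is not the Boulder code itself.

package main

import (
    "crypto/sha256"
    "fmt"
    "sort"
    "strings"
)

// hashNames is a local approximation of core.HashNames under the assumption above.
func hashNames(names []string) []byte {
    seen := map[string]bool{}
    var unique []string
    for _, n := range names {
        n = strings.ToLower(n)
        if !seen[n] {
            seen[n] = true
            unique = append(unique, n)
        }
    }
    sort.Strings(unique)
    h := sha256.Sum256([]byte(strings.Join(unique, ",")))
    return h[:]
}

func main() {
    // Different order and case, same name set: both digests should match.
    a := hashNames([]string{"Example.COM", "www.example.com"})
    b := hashNames([]string{"www.example.com", "example.com"})
    fmt.Printf("%x\n%x\n", a, b)
}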
|
||||
// LoadCert loads a PEM certificate specified by filename or returns an error
|
||||
func LoadCert(filename string) (*x509.Certificate, error) {
|
||||
certPEM, err := os.ReadFile(filename)
|
||||
|
|
@ -253,7 +258,7 @@ func LoadCert(filename string) (*x509.Certificate, error) {
|
|||
}
|
||||
block, _ := pem.Decode(certPEM)
|
||||
if block == nil {
|
||||
return nil, fmt.Errorf("No data in cert PEM file %s", filename)
|
||||
return nil, fmt.Errorf("no data in cert PEM file %q", filename)
|
||||
}
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
|
|
@ -298,3 +303,7 @@ func IsASCII(str string) bool {
|
|||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func Command() string {
|
||||
return path.Base(os.Args[0])
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,194 +0,0 @@
|
|||
// Package errors provides internal-facing error types for use in Boulder. Many
|
||||
// of these are transformed directly into Problem Details documents by the WFE.
|
||||
// Some, like NotFound, may be handled internally. We avoid using Problem
|
||||
// Details documents as part of our internal error system to avoid layering
|
||||
// confusions.
|
||||
//
|
||||
// These errors are specifically for use in errors that cross RPC boundaries.
|
||||
// An error type that does not need to be passed through an RPC can use a plain
|
||||
// Go type locally. Our gRPC code is aware of these error types and will
|
||||
// serialize and deserialize them automatically.
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/letsencrypt/boulder/identifier"
|
||||
)
|
||||
|
||||
// ErrorType provides a coarse category for BoulderErrors.
|
||||
// Objects of type ErrorType should never be directly returned by other
|
||||
// functions; instead use the methods below to create an appropriate
|
||||
// BoulderError wrapping one of these types.
|
||||
type ErrorType int
|
||||
|
||||
// These numeric constants are used when sending berrors through gRPC.
|
||||
const (
|
||||
// InternalServer is deprecated. Instead, pass a plain Go error. That will get
|
||||
// turned into a probs.InternalServerError by the WFE.
|
||||
InternalServer ErrorType = iota
|
||||
_
|
||||
Malformed
|
||||
Unauthorized
|
||||
NotFound
|
||||
RateLimit
|
||||
RejectedIdentifier
|
||||
InvalidEmail
|
||||
ConnectionFailure
|
||||
_ // Reserved, previously WrongAuthorizationState
|
||||
CAA
|
||||
MissingSCTs
|
||||
Duplicate
|
||||
OrderNotReady
|
||||
DNS
|
||||
BadPublicKey
|
||||
BadCSR
|
||||
AlreadyRevoked
|
||||
BadRevocationReason
|
||||
)
|
||||
|
||||
func (ErrorType) Error() string {
|
||||
return "urn:ietf:params:acme:error"
|
||||
}
|
||||
|
||||
// BoulderError represents internal Boulder errors
|
||||
type BoulderError struct {
|
||||
Type ErrorType
|
||||
Detail string
|
||||
SubErrors []SubBoulderError
|
||||
|
||||
// RetryAfter the duration a client should wait before retrying the request
|
||||
// which resulted in this error.
|
||||
RetryAfter time.Duration
|
||||
}
|
||||
|
||||
// SubBoulderError represents sub-errors specific to an identifier that are
|
||||
// related to a top-level internal Boulder error.
|
||||
type SubBoulderError struct {
|
||||
*BoulderError
|
||||
Identifier identifier.ACMEIdentifier
|
||||
}
|
||||
|
||||
func (be *BoulderError) Error() string {
|
||||
return be.Detail
|
||||
}
|
||||
|
||||
func (be *BoulderError) Unwrap() error {
|
||||
return be.Type
|
||||
}
|
||||
|
||||
// WithSubErrors returns a new BoulderError instance created by adding the
|
||||
// provided subErrs to the existing BoulderError.
|
||||
func (be *BoulderError) WithSubErrors(subErrs []SubBoulderError) *BoulderError {
|
||||
return &BoulderError{
|
||||
Type: be.Type,
|
||||
Detail: be.Detail,
|
||||
SubErrors: append(be.SubErrors, subErrs...),
|
||||
RetryAfter: be.RetryAfter,
|
||||
}
|
||||
}
|
||||
|
||||
// New is a convenience function for creating a new BoulderError
|
||||
func New(errType ErrorType, msg string, args ...interface{}) error {
|
||||
return &BoulderError{
|
||||
Type: errType,
|
||||
Detail: fmt.Sprintf(msg, args...),
|
||||
}
|
||||
}
|
||||
|
||||
func InternalServerError(msg string, args ...interface{}) error {
|
||||
return New(InternalServer, msg, args...)
|
||||
}
|
||||
|
||||
func MalformedError(msg string, args ...interface{}) error {
|
||||
return New(Malformed, msg, args...)
|
||||
}
|
||||
|
||||
func UnauthorizedError(msg string, args ...interface{}) error {
|
||||
return New(Unauthorized, msg, args...)
|
||||
}
|
||||
|
||||
func NotFoundError(msg string, args ...interface{}) error {
|
||||
return New(NotFound, msg, args...)
|
||||
}
|
||||
|
||||
func RateLimitError(retryAfter time.Duration, msg string, args ...interface{}) error {
|
||||
return &BoulderError{
|
||||
Type: RateLimit,
|
||||
Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/rate-limits/", args...),
|
||||
RetryAfter: retryAfter,
|
||||
}
|
||||
}
|
||||
|
||||
func DuplicateCertificateError(retryAfter time.Duration, msg string, args ...interface{}) error {
|
||||
return &BoulderError{
|
||||
Type: RateLimit,
|
||||
Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/duplicate-certificate-limit/", args...),
|
||||
RetryAfter: retryAfter,
|
||||
}
|
||||
}
|
||||
|
||||
func FailedValidationError(retryAfter time.Duration, msg string, args ...interface{}) error {
|
||||
return &BoulderError{
|
||||
Type: RateLimit,
|
||||
Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/failed-validation-limit/", args...),
|
||||
RetryAfter: retryAfter,
|
||||
}
|
||||
}
|
||||
|
||||
func RegistrationsPerIPError(retryAfter time.Duration, msg string, args ...interface{}) error {
|
||||
return &BoulderError{
|
||||
Type: RateLimit,
|
||||
Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/too-many-registrations-for-this-ip/", args...),
|
||||
RetryAfter: retryAfter,
|
||||
}
|
||||
}
|
||||
|
||||
func RejectedIdentifierError(msg string, args ...interface{}) error {
|
||||
return New(RejectedIdentifier, msg, args...)
|
||||
}
|
||||
|
||||
func InvalidEmailError(msg string, args ...interface{}) error {
|
||||
return New(InvalidEmail, msg, args...)
|
||||
}
|
||||
|
||||
func ConnectionFailureError(msg string, args ...interface{}) error {
|
||||
return New(ConnectionFailure, msg, args...)
|
||||
}
|
||||
|
||||
func CAAError(msg string, args ...interface{}) error {
|
||||
return New(CAA, msg, args...)
|
||||
}
|
||||
|
||||
func MissingSCTsError(msg string, args ...interface{}) error {
|
||||
return New(MissingSCTs, msg, args...)
|
||||
}
|
||||
|
||||
func DuplicateError(msg string, args ...interface{}) error {
|
||||
return New(Duplicate, msg, args...)
|
||||
}
|
||||
|
||||
func OrderNotReadyError(msg string, args ...interface{}) error {
|
||||
return New(OrderNotReady, msg, args...)
|
||||
}
|
||||
|
||||
func DNSError(msg string, args ...interface{}) error {
|
||||
return New(DNS, msg, args...)
|
||||
}
|
||||
|
||||
func BadPublicKeyError(msg string, args ...interface{}) error {
|
||||
return New(BadPublicKey, msg, args...)
|
||||
}
|
||||
|
||||
func BadCSRError(msg string, args ...interface{}) error {
|
||||
return New(BadCSR, msg, args...)
|
||||
}
|
||||
|
||||
func AlreadyRevokedError(msg string, args ...interface{}) error {
|
||||
return New(AlreadyRevoked, msg, args...)
|
||||
}
|
||||
|
||||
func BadRevocationReasonError(reason int64) error {
|
||||
return New(BadRevocationReason, "disallowed revocation reason: %d", reason)
|
||||
}
|
||||
|
|
@ -9,8 +9,7 @@ import (
|
|||
"os"
|
||||
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
|
||||
yaml "gopkg.in/yaml.v3"
|
||||
"github.com/letsencrypt/boulder/strictyaml"
|
||||
)
|
||||
|
||||
// blockedKeys is a type for maintaining a map of SHA256 hashes
|
||||
|
|
@ -58,7 +57,7 @@ func loadBlockedKeysList(filename string) (*blockedKeys, error) {
|
|||
BlockedHashes []string `yaml:"blocked"`
|
||||
BlockedHashesHex []string `yaml:"blockedHashesHex"`
|
||||
}
|
||||
err = yaml.Unmarshal(yamlBytes, &list)
|
||||
err = strictyaml.Unmarshal(yamlBytes, &list)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -12,7 +12,6 @@ import (
|
|||
"sync"
|
||||
|
||||
"github.com/letsencrypt/boulder/core"
|
||||
berrors "github.com/letsencrypt/boulder/errors"
|
||||
|
||||
"github.com/titanous/rocacheck"
|
||||
)
|
||||
|
|
@ -136,7 +135,7 @@ func (policy *KeyPolicy) GoodKey(ctx context.Context, key crypto.PublicKey) erro
|
|||
// that has been administratively blocked.
|
||||
if policy.blockedList != nil {
|
||||
if blocked, err := policy.blockedList.blocked(key); err != nil {
|
||||
return berrors.InternalServerError("error checking blocklist for key: %v", key)
|
||||
return fmt.Errorf("error checking blocklist for key: %v", key)
|
||||
} else if blocked {
|
||||
return badKey("public key is forbidden")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,29 +7,33 @@ import (
|
|||
"github.com/letsencrypt/boulder/identifier"
|
||||
)
|
||||
|
||||
// Error types that can be used in ACME payloads
|
||||
const (
|
||||
// Error types that can be used in ACME payloads. These are sorted in the
|
||||
// same order as they are defined in RFC8555 Section 6.7. We do not implement
|
||||
// the `compound`, `externalAccountRequired`, or `userActionRequired` errors,
|
||||
// because we have no path that would return them.
|
||||
AccountDoesNotExistProblem = ProblemType("accountDoesNotExist")
|
||||
AlreadyRevokedProblem = ProblemType("alreadyRevoked")
|
||||
BadCSRProblem = ProblemType("badCSR")
|
||||
BadNonceProblem = ProblemType("badNonce")
|
||||
BadPublicKeyProblem = ProblemType("badPublicKey")
|
||||
BadRevocationReasonProblem = ProblemType("badRevocationReason")
|
||||
BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm")
|
||||
CAAProblem = ProblemType("caa")
|
||||
ConnectionProblem = ProblemType("connection")
|
||||
DNSProblem = ProblemType("dns")
|
||||
InvalidContactProblem = ProblemType("invalidContact")
|
||||
MalformedProblem = ProblemType("malformed")
|
||||
OrderNotReadyProblem = ProblemType("orderNotReady")
|
||||
RateLimitedProblem = ProblemType("rateLimited")
|
||||
RejectedIdentifierProblem = ProblemType("rejectedIdentifier")
|
||||
ServerInternalProblem = ProblemType("serverInternal")
|
||||
TLSProblem = ProblemType("tls")
|
||||
UnauthorizedProblem = ProblemType("unauthorized")
|
||||
RateLimitedProblem = ProblemType("rateLimited")
|
||||
BadNonceProblem = ProblemType("badNonce")
|
||||
InvalidEmailProblem = ProblemType("invalidEmail")
|
||||
RejectedIdentifierProblem = ProblemType("rejectedIdentifier")
|
||||
AccountDoesNotExistProblem = ProblemType("accountDoesNotExist")
|
||||
CAAProblem = ProblemType("caa")
|
||||
DNSProblem = ProblemType("dns")
|
||||
AlreadyRevokedProblem = ProblemType("alreadyRevoked")
|
||||
OrderNotReadyProblem = ProblemType("orderNotReady")
|
||||
BadSignatureAlgorithmProblem = ProblemType("badSignatureAlgorithm")
|
||||
BadPublicKeyProblem = ProblemType("badPublicKey")
|
||||
BadRevocationReasonProblem = ProblemType("badRevocationReason")
|
||||
BadCSRProblem = ProblemType("badCSR")
|
||||
UnsupportedContactProblem = ProblemType("unsupportedContact")
|
||||
UnsupportedIdentifierProblem = ProblemType("unsupportedIdentifier")
|
||||
|
||||
V1ErrorNS = "urn:acme:error:"
|
||||
V2ErrorNS = "urn:ietf:params:acme:error:"
|
||||
ErrorNS = "urn:ietf:params:acme:error:"
|
||||
)
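The constants above are consumed by the constructor helpers later in this file (Malformed, RateLimited, and so on), each of which pairs a ProblemType with a fixed HTTP status. A self-contained sketch of that pattern, with lower-cased names invented for illustration rather than taken from the boulder/probs API:

package main

import (
	"fmt"
	"net/http"
)

type problemType string

type problemDetails struct {
	Type       problemType
	Detail     string
	HTTPStatus int
}

const malformedProblem = problemType("malformed")

// malformed mirrors the shape of the helpers defined later in this diff.
func malformed(detail string) *problemDetails {
	return &problemDetails{Type: malformedProblem, Detail: detail, HTTPStatus: http.StatusBadRequest}
}

func main() {
	p := malformed("request body was not valid JSON")
	fmt.Println(p.Type, p.HTTPStatus)
}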
|
||||
|
||||
// ProblemType defines the error types in the ACME protocol
|
||||
|
|
@ -71,40 +75,35 @@ func (pd *ProblemDetails) WithSubProblems(subProbs []SubProblemDetails) *Problem
|
|||
}
|
||||
}
|
||||
|
||||
// statusTooManyRequests is the HTTP status code meant for rate limiting
|
||||
// errors. It's not currently in the net/http library so we add it here.
|
||||
const statusTooManyRequests = 429
|
||||
// Helper functions which construct the basic RFC8555 Problem Documents, with
|
||||
// the Type already set and the Details supplied by the caller.
|
||||
|
||||
// ProblemDetailsToStatusCode inspects the given ProblemDetails to figure out
|
||||
// what HTTP status code it should represent. It should only be used by the WFE
|
||||
// but is included in this package because of its reliance on ProblemTypes.
|
||||
func ProblemDetailsToStatusCode(prob *ProblemDetails) int {
|
||||
if prob.HTTPStatus != 0 {
|
||||
return prob.HTTPStatus
|
||||
// AccountDoesNotExist returns a ProblemDetails representing an
|
||||
// AccountDoesNotExistProblem error
|
||||
func AccountDoesNotExist(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: AccountDoesNotExistProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
switch prob.Type {
|
||||
case
|
||||
ConnectionProblem,
|
||||
MalformedProblem,
|
||||
BadSignatureAlgorithmProblem,
|
||||
BadPublicKeyProblem,
|
||||
TLSProblem,
|
||||
BadNonceProblem,
|
||||
InvalidEmailProblem,
|
||||
RejectedIdentifierProblem,
|
||||
AccountDoesNotExistProblem,
|
||||
BadRevocationReasonProblem:
|
||||
return http.StatusBadRequest
|
||||
case ServerInternalProblem:
|
||||
return http.StatusInternalServerError
|
||||
case
|
||||
UnauthorizedProblem,
|
||||
CAAProblem:
|
||||
return http.StatusForbidden
|
||||
case RateLimitedProblem:
|
||||
return statusTooManyRequests
|
||||
default:
|
||||
return http.StatusInternalServerError
|
||||
}
|
||||
|
||||
// AlreadyRevoked returns a ProblemDetails with a AlreadyRevokedProblem and a 400 Bad
|
||||
// Request status code.
|
||||
func AlreadyRevoked(detail string, a ...any) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: AlreadyRevokedProblem,
|
||||
Detail: fmt.Sprintf(detail, a...),
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// BadCSR returns a ProblemDetails representing a BadCSRProblem.
|
||||
func BadCSR(detail string, a ...any) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: BadCSRProblem,
|
||||
Detail: fmt.Sprintf(detail, a...),
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -118,75 +117,9 @@ func BadNonce(detail string) *ProblemDetails {
|
|||
}
|
||||
}
|
||||
|
||||
// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad
|
||||
// Request status code.
|
||||
func RejectedIdentifier(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: RejectedIdentifierProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict
|
||||
// status code.
|
||||
func Conflict(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: MalformedProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusConflict,
|
||||
}
|
||||
}
|
||||
|
||||
// AlreadyRevoked returns a ProblemDetails with a AlreadyRevokedProblem and a 400 Bad
|
||||
// Request status code.
|
||||
func AlreadyRevoked(detail string, a ...interface{}) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: AlreadyRevokedProblem,
|
||||
Detail: fmt.Sprintf(detail, a...),
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad
|
||||
// Request status code.
|
||||
func Malformed(detail string, args ...interface{}) *ProblemDetails {
|
||||
if len(args) > 0 {
|
||||
detail = fmt.Sprintf(detail, args...)
|
||||
}
|
||||
return &ProblemDetails{
|
||||
Type: MalformedProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request
|
||||
// Timeout status code.
|
||||
func Canceled(detail string, args ...interface{}) *ProblemDetails {
|
||||
if len(args) > 0 {
|
||||
detail = fmt.Sprintf(detail, args...)
|
||||
}
|
||||
return &ProblemDetails{
|
||||
Type: MalformedProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusRequestTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
// BadSignatureAlgorithm returns a ProblemDetails with a BadSignatureAlgorithmProblem
|
||||
// and a 400 Bad Request status code.
|
||||
func BadSignatureAlgorithm(detail string, a ...interface{}) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: BadSignatureAlgorithmProblem,
|
||||
Detail: fmt.Sprintf(detail, a...),
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// BadPublicKey returns a ProblemDetails with a BadPublicKeyProblem and a 400 Bad
|
||||
// Request status code.
|
||||
func BadPublicKey(detail string, a ...interface{}) *ProblemDetails {
|
||||
func BadPublicKey(detail string, a ...any) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: BadPublicKeyProblem,
|
||||
Detail: fmt.Sprintf(detail, a...),
|
||||
|
|
@ -194,13 +127,101 @@ func BadPublicKey(detail string, a ...interface{}) *ProblemDetails {
|
|||
}
|
||||
}
|
||||
|
||||
// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found
|
||||
// status code.
|
||||
func NotFound(detail string) *ProblemDetails {
|
||||
// BadRevocationReason returns a ProblemDetails representing
|
||||
// a BadRevocationReasonProblem
|
||||
func BadRevocationReason(detail string, a ...any) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: BadRevocationReasonProblem,
|
||||
Detail: fmt.Sprintf(detail, a...),
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// BadSignatureAlgorithm returns a ProblemDetails with a BadSignatureAlgorithmProblem
|
||||
// and a 400 Bad Request status code.
|
||||
func BadSignatureAlgorithm(detail string, a ...any) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: BadSignatureAlgorithmProblem,
|
||||
Detail: fmt.Sprintf(detail, a...),
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// CAA returns a ProblemDetails representing a CAAProblem
|
||||
func CAA(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: CAAProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusForbidden,
|
||||
}
|
||||
}
|
||||
|
||||
// Connection returns a ProblemDetails representing a ConnectionProblem
|
||||
// error
|
||||
func Connection(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: ConnectionProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// DNS returns a ProblemDetails representing a DNSProblem
|
||||
func DNS(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: DNSProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// InvalidContact returns a ProblemDetails representing an InvalidContactProblem.
|
||||
func InvalidContact(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: InvalidContactProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// Malformed returns a ProblemDetails with a MalformedProblem and a 400 Bad
|
||||
// Request status code.
|
||||
func Malformed(detail string, a ...any) *ProblemDetails {
|
||||
if len(a) > 0 {
|
||||
detail = fmt.Sprintf(detail, a...)
|
||||
}
|
||||
return &ProblemDetails{
|
||||
Type: MalformedProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusNotFound,
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// OrderNotReady returns a ProblemDetails representing a OrderNotReadyProblem
|
||||
func OrderNotReady(detail string, a ...any) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: OrderNotReadyProblem,
|
||||
Detail: fmt.Sprintf(detail, a...),
|
||||
HTTPStatus: http.StatusForbidden,
|
||||
}
|
||||
}
|
||||
|
||||
// RateLimited returns a ProblemDetails representing a RateLimitedProblem error
|
||||
func RateLimited(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: RateLimitedProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusTooManyRequests,
|
||||
}
|
||||
}
|
||||
|
||||
// RejectedIdentifier returns a ProblemDetails with a RejectedIdentifierProblem and a 400 Bad
|
||||
// Request status code.
|
||||
func RejectedIdentifier(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: RejectedIdentifierProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -214,6 +235,15 @@ func ServerInternal(detail string) *ProblemDetails {
|
|||
}
|
||||
}
|
||||
|
||||
// TLS returns a ProblemDetails representing a TLSProblem error
|
||||
func TLS(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: TLSProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// Unauthorized returns a ProblemDetails with an UnauthorizedProblem and a 403
|
||||
// Forbidden status code.
|
||||
func Unauthorized(detail string) *ProblemDetails {
|
||||
|
|
@ -224,13 +254,49 @@ func Unauthorized(detail string) *ProblemDetails {
|
|||
}
|
||||
}
|
||||
|
||||
// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP
|
||||
// method error.
|
||||
func MethodNotAllowed() *ProblemDetails {
|
||||
// UnsupportedContact returns a ProblemDetails representing an
|
||||
// UnsupportedContactProblem
|
||||
func UnsupportedContact(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: UnsupportedContactProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// UnsupportedIdentifier returns a ProblemDetails representing an
|
||||
// UnsupportedIdentifierProblem
|
||||
func UnsupportedIdentifier(detail string, a ...any) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: UnsupportedIdentifierProblem,
|
||||
Detail: fmt.Sprintf(detail, a...),
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// Additional helper functions that return variations on MalformedProblem with
|
||||
// different HTTP status codes set.
|
||||
|
||||
// Canceled returns a ProblemDetails with a MalformedProblem and a 408 Request
|
||||
// Timeout status code.
|
||||
func Canceled(detail string, a ...any) *ProblemDetails {
|
||||
if len(a) > 0 {
|
||||
detail = fmt.Sprintf(detail, a...)
|
||||
}
|
||||
return &ProblemDetails{
|
||||
Type: MalformedProblem,
|
||||
Detail: "Method not allowed",
|
||||
HTTPStatus: http.StatusMethodNotAllowed,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusRequestTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
// Conflict returns a ProblemDetails with a MalformedProblem and a 409 Conflict
|
||||
// status code.
|
||||
func Conflict(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: MalformedProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusConflict,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -254,96 +320,22 @@ func InvalidContentType(detail string) *ProblemDetails {
|
|||
}
|
||||
}
|
||||
|
||||
// InvalidEmail returns a ProblemDetails representing an invalid email address
|
||||
// error
|
||||
func InvalidEmail(detail string) *ProblemDetails {
|
||||
// MethodNotAllowed returns a ProblemDetails representing a disallowed HTTP
|
||||
// method error.
|
||||
func MethodNotAllowed() *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: InvalidEmailProblem,
|
||||
Type: MalformedProblem,
|
||||
Detail: "Method not allowed",
|
||||
HTTPStatus: http.StatusMethodNotAllowed,
|
||||
}
|
||||
}
|
||||
|
||||
// NotFound returns a ProblemDetails with a MalformedProblem and a 404 Not Found
|
||||
// status code.
|
||||
func NotFound(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: MalformedProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// ConnectionFailure returns a ProblemDetails representing a ConnectionProblem
|
||||
// error
|
||||
func ConnectionFailure(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: ConnectionProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// RateLimited returns a ProblemDetails representing a RateLimitedProblem error
|
||||
func RateLimited(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: RateLimitedProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: statusTooManyRequests,
|
||||
}
|
||||
}
|
||||
|
||||
// TLSError returns a ProblemDetails representing a TLSProblem error
|
||||
func TLSError(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: TLSProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// AccountDoesNotExist returns a ProblemDetails representing an
|
||||
// AccountDoesNotExistProblem error
|
||||
func AccountDoesNotExist(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: AccountDoesNotExistProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// CAA returns a ProblemDetails representing a CAAProblem
|
||||
func CAA(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: CAAProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusForbidden,
|
||||
}
|
||||
}
|
||||
|
||||
// DNS returns a ProblemDetails representing a DNSProblem
|
||||
func DNS(detail string) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: DNSProblem,
|
||||
Detail: detail,
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// OrderNotReady returns a ProblemDetails representing a OrderNotReadyProblem
|
||||
func OrderNotReady(detail string, a ...interface{}) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: OrderNotReadyProblem,
|
||||
Detail: fmt.Sprintf(detail, a...),
|
||||
HTTPStatus: http.StatusForbidden,
|
||||
}
|
||||
}
|
||||
|
||||
// BadRevocationReason returns a ProblemDetails representing
|
||||
// a BadRevocationReasonProblem
|
||||
func BadRevocationReason(detail string, a ...interface{}) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: BadRevocationReasonProblem,
|
||||
Detail: fmt.Sprintf(detail, a...),
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
}
|
||||
}
|
||||
|
||||
// BadCSR returns a ProblemDetails representing a BadCSRProblem.
|
||||
func BadCSR(detail string, a ...interface{}) *ProblemDetails {
|
||||
return &ProblemDetails{
|
||||
Type: BadCSRProblem,
|
||||
Detail: fmt.Sprintf(detail, a...),
|
||||
HTTPStatus: http.StatusBadRequest,
|
||||
HTTPStatus: http.StatusNotFound,
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,46 @@
|
|||
// Package strictyaml provides a strict YAML unmarshaller based on `go-yaml/yaml`
|
||||
package strictyaml
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// Unmarshal takes a byte array and an interface passed by reference. The
|
||||
// d.Decode will read the next YAML-encoded value from its input and store it in
|
||||
// the value pointed to by yamlObj. Any config keys from the incoming YAML
|
||||
// document which do not correspond to expected keys in the config struct will
|
||||
// result in errors.
|
||||
//
|
||||
// TODO(https://github.com/go-yaml/yaml/issues/639): Replace this function with
|
||||
// yaml.Unmarshal once a more ergonomic way to set unmarshal options is added
|
||||
// upstream.
|
||||
func Unmarshal(b []byte, yamlObj interface{}) error {
|
||||
r := bytes.NewReader(b)
|
||||
|
||||
d := yaml.NewDecoder(r)
|
||||
d.KnownFields(true)
|
||||
|
||||
// d.Decode will mutate yamlObj
|
||||
err := d.Decode(yamlObj)
|
||||
|
||||
if err != nil {
|
||||
// io.EOF is returned when the YAML document is empty.
|
||||
if errors.Is(err, io.EOF) {
|
||||
return fmt.Errorf("unmarshalling YAML, bytes cannot be nil: %w", err)
|
||||
}
|
||||
return fmt.Errorf("unmarshalling YAML: %w", err)
|
||||
}
|
||||
|
||||
// As bytes are read by the decoder, the length of the byte buffer should
|
||||
// decrease. If it doesn't, there's a problem.
|
||||
if r.Len() != 0 {
|
||||
return fmt.Errorf("yaml object of size %d bytes had %d bytes of unexpected unconsumed trailers", r.Size(), r.Len())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
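The wrapper above is essentially yaml.v3's decoder with KnownFields(true), plus checks for empty input and unconsumed trailing bytes. A small sketch of the strict behaviour it enables, using gopkg.in/yaml.v3 directly (assumes that module is available):

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v3"
)

type config struct {
	Blocked []string `yaml:"blocked"`
}

func main() {
	doc := []byte("blocked: [abc123]\nunexpected: true\n")

	var c config
	d := yaml.NewDecoder(bytes.NewReader(doc))
	d.KnownFields(true) // the same switch strictyaml.Unmarshal turns on
	if err := d.Decode(&c); err != nil {
		fmt.Println("rejected:", err) // the unknown "unexpected" key is an error
	}
}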
|
||||
|
|
@ -21,7 +21,6 @@ package sqlite3
|
|||
#cgo CFLAGS: -DSQLITE_DEFAULT_WAL_SYNCHRONOUS=1
|
||||
#cgo CFLAGS: -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT
|
||||
#cgo CFLAGS: -Wno-deprecated-declarations
|
||||
#cgo linux,!android CFLAGS: -DHAVE_PREAD64=1 -DHAVE_PWRITE64=1
|
||||
#cgo openbsd CFLAGS: -I/usr/local/include
|
||||
#cgo openbsd LDFLAGS: -L/usr/local/lib
|
||||
#ifndef USE_LIBSQLITE3
|
||||
|
|
@ -48,6 +47,18 @@ package sqlite3
|
|||
# define SQLITE_DETERMINISTIC 0
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_PREAD64) && defined(HAVE_PWRITE64)
|
||||
# undef USE_PREAD
|
||||
# undef USE_PWRITE
|
||||
# define USE_PREAD64 1
|
||||
# define USE_PWRITE64 1
|
||||
#elif defined(HAVE_PREAD) && defined(HAVE_PWRITE)
|
||||
# undef USE_PREAD
|
||||
# undef USE_PWRITE
|
||||
# define USE_PREAD64 1
|
||||
# define USE_PWRITE64 1
|
||||
#endif
|
||||
|
||||
static int
|
||||
_sqlite3_open_v2(const char *filename, sqlite3 **ppDb, int flags, const char *zVfs) {
|
||||
#ifdef SQLITE_OPEN_URI
|
||||
|
|
|
|||
|
|
@ -42,11 +42,9 @@ func main() {
|
|||
mpb.BarStyle().Lbound("╢").Filler("▌").Tip("▌").Padding("░").Rbound("╟"),
|
||||
mpb.PrependDecorators(
|
||||
// display our name with one space on the right
|
||||
decor.Name(name, decor.WC{W: len(name) + 1, C: decor.DidentRight}),
|
||||
decor.Name(name, decor.WC{C: decor.DindentRight | decor.DextraSpace}),
|
||||
// replace ETA decorator with "done" message, OnComplete event
|
||||
decor.OnComplete(
|
||||
decor.AverageETA(decor.ET_STYLE_GO, decor.WC{W: 4}), "done",
|
||||
),
|
||||
decor.OnComplete(decor.AverageETA(decor.ET_STYLE_GO), "done"),
|
||||
),
|
||||
mpb.AppendDecorators(decor.Percentage()),
|
||||
)
|
||||
|
|
|
|||
|
|
@ -429,13 +429,11 @@ func (b *Bar) render(tw int) {
|
|||
return
|
||||
}
|
||||
}
|
||||
frame := &renderFrame{
|
||||
rows: rows,
|
||||
shutdown: s.shutdown,
|
||||
rmOnComplete: s.rmOnComplete,
|
||||
noPop: s.noPop,
|
||||
}
|
||||
frame := &renderFrame{rows: rows}
|
||||
if s.completed || s.aborted {
|
||||
frame.shutdown = s.shutdown
|
||||
frame.rmOnComplete = s.rmOnComplete
|
||||
frame.noPop = s.noPop
|
||||
// post increment makes sure OnComplete decorators are rendered
|
||||
s.shutdown++
|
||||
}
|
||||
|
|
@ -460,12 +458,15 @@ func (b *Bar) triggerCompletion(s *bState) {
|
|||
}
|
||||
|
||||
func (b *Bar) tryEarlyRefresh(renderReq chan<- time.Time) {
|
||||
var anyOtherRunning bool
|
||||
var otherRunning int
|
||||
b.container.traverseBars(func(bar *Bar) bool {
|
||||
anyOtherRunning = b != bar && bar.IsRunning()
|
||||
return anyOtherRunning
|
||||
if b != bar && bar.IsRunning() {
|
||||
otherRunning++
|
||||
return false // stop traverse
|
||||
}
|
||||
return true // continue traverse
|
||||
})
|
||||
if !anyOtherRunning {
|
||||
if otherRunning == 0 {
|
||||
for {
|
||||
select {
|
||||
case renderReq <- time.Now():
|
||||
|
|
|
|||
|
|
@ -8,29 +8,27 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
// DidentRight bit specifies identation direction.
|
||||
// DindentRight sets indentation from right to left.
|
||||
//
|
||||
// |foo |b | With DidentRight
|
||||
// | foo| b| Without DidentRight
|
||||
DidentRight = 1 << iota
|
||||
// |foo |b | DindentRight is set
|
||||
// | foo| b| DindentRight is not set
|
||||
DindentRight = 1 << iota
|
||||
|
||||
// DextraSpace bit adds extra space, makes sense with DSyncWidth only.
|
||||
// When DidentRight bit set, the space will be added to the right,
|
||||
// otherwise to the left.
|
||||
// DextraSpace bit adds extra indentation space.
|
||||
DextraSpace
|
||||
|
||||
// DSyncWidth bit enables same column width synchronization.
|
||||
// Effective with multiple bars only.
|
||||
DSyncWidth
|
||||
|
||||
// DSyncWidthR is shortcut for DSyncWidth|DidentRight
|
||||
DSyncWidthR = DSyncWidth | DidentRight
|
||||
// DSyncWidthR is shortcut for DSyncWidth|DindentRight
|
||||
DSyncWidthR = DSyncWidth | DindentRight
|
||||
|
||||
// DSyncSpace is shortcut for DSyncWidth|DextraSpace
|
||||
DSyncSpace = DSyncWidth | DextraSpace
|
||||
|
||||
// DSyncSpaceR is shortcut for DSyncWidth|DextraSpace|DidentRight
|
||||
DSyncSpaceR = DSyncWidth | DextraSpace | DidentRight
|
||||
// DSyncSpaceR is shortcut for DSyncWidth|DextraSpace|DindentRight
|
||||
DSyncSpaceR = DSyncWidth | DextraSpace | DindentRight
|
||||
)
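These are plain bit flags, so the shorthand constants are just OR-ed combinations. A hedged sketch of combining them in a decor.WC (the mpb/v8 import path is assumed from this vendor tree):

package main

import (
	"fmt"

	"github.com/vbauerster/mpb/v8/decor"
)

func main() {
	// Right-indent the column and synchronize its width across bars.
	wc := decor.WC{W: 8, C: decor.DSyncWidth | decor.DindentRight}
	fmt.Println(wc.C == decor.DSyncWidthR) // true: DSyncWidthR is the same two bits
}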
|
||||
|
||||
// TimeStyle enum.
|
||||
|
|
@ -143,11 +141,10 @@ func (wc WC) Format(str string) (string, int) {
|
|||
viewWidth := runewidth.StringWidth(str)
|
||||
if wc.W > viewWidth {
|
||||
viewWidth = wc.W
|
||||
}
|
||||
if (wc.C & DSyncWidth) != 0 {
|
||||
if (wc.C & DextraSpace) != 0 {
|
||||
} else if (wc.C & DextraSpace) != 0 {
|
||||
viewWidth++
|
||||
}
|
||||
if (wc.C & DSyncWidth) != 0 {
|
||||
wc.wsync <- viewWidth
|
||||
viewWidth = <-wc.wsync
|
||||
}
|
||||
|
|
@ -156,7 +153,7 @@ func (wc WC) Format(str string) (string, int) {
|
|||
|
||||
// Init initializes width related config.
|
||||
func (wc *WC) Init() WC {
|
||||
if (wc.C & DidentRight) != 0 {
|
||||
if (wc.C & DindentRight) != 0 {
|
||||
wc.fill = runewidth.FillRight
|
||||
} else {
|
||||
wc.fill = runewidth.FillLeft
|
||||
|
|
|
|||
|
|
@ -68,14 +68,17 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
|
|||
ctx = context.Background()
|
||||
}
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
delayRC := make(chan struct{}, 1)
|
||||
delayRC <- struct{}{}
|
||||
s := &pState{
|
||||
ctx: ctx,
|
||||
hm: make(heapManager),
|
||||
dropS: make(chan struct{}),
|
||||
dropD: make(chan struct{}),
|
||||
renderReq: make(chan time.Time),
|
||||
refreshRate: defaultRefreshRate,
|
||||
popPriority: math.MinInt32,
|
||||
refreshRate: defaultRefreshRate,
|
||||
delayRC: delayRC,
|
||||
queueBars: make(map[*Bar]*Bar),
|
||||
output: os.Stdout,
|
||||
debugOut: io.Discard,
|
||||
|
|
@ -191,7 +194,7 @@ func (p *Progress) traverseBars(cb func(b *Bar) bool) {
|
|||
select {
|
||||
case p.operateState <- func(s *pState) { s.hm.iter(iter, drop) }:
|
||||
for b := range iter {
|
||||
if cb(b) {
|
||||
if !cb(b) {
|
||||
close(drop)
|
||||
break
|
||||
}
|
||||
|
|
@ -258,34 +261,52 @@ func (p *Progress) Shutdown() {
|
|||
|
||||
func (p *Progress) serve(s *pState, cw *cwriter.Writer) {
|
||||
defer p.pwg.Done()
|
||||
render := func() error { return s.render(cw) }
|
||||
var err error
|
||||
w := cwriter.New(io.Discard)
|
||||
renderReq := s.renderReq
|
||||
operateState := p.operateState
|
||||
interceptIO := p.interceptIO
|
||||
|
||||
for {
|
||||
select {
|
||||
case op := <-p.operateState:
|
||||
case <-s.delayRC:
|
||||
w, cw = cw, nil
|
||||
s.delayRC = nil
|
||||
case op := <-operateState:
|
||||
op(s)
|
||||
case fn := <-p.interceptIO:
|
||||
fn(cw)
|
||||
case fn := <-interceptIO:
|
||||
fn(w)
|
||||
case <-renderReq:
|
||||
err = s.render(w)
|
||||
if err != nil {
|
||||
// (*pState).(autoRefreshListener|manualRefreshListener) may block
|
||||
// if not launching following short lived goroutine
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-s.renderReq:
|
||||
e := render()
|
||||
if e != nil {
|
||||
case <-p.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
p.cancel() // cancel all bars
|
||||
render = func() error { return nil }
|
||||
err = e
|
||||
renderReq = nil
|
||||
operateState = nil
|
||||
interceptIO = nil
|
||||
}
|
||||
case <-p.done:
|
||||
update := make(chan bool)
|
||||
for s.autoRefresh && err == nil {
|
||||
s.hm.state(update)
|
||||
if <-update {
|
||||
err = render()
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
_, _ = fmt.Fprintln(s.debugOut, err.Error())
|
||||
} else if s.autoRefresh {
|
||||
update := make(chan bool)
|
||||
for i := 0; i == 0 || <-update; i++ {
|
||||
if err := s.render(w); err != nil {
|
||||
_, _ = fmt.Fprintln(s.debugOut, err.Error())
|
||||
break
|
||||
}
|
||||
s.hm.state(update)
|
||||
}
|
||||
}
|
||||
s.hm.end(s.shutdownNotifier)
|
||||
return
|
||||
|
|
@ -293,10 +314,7 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) {
|
|||
}
|
||||
}
|
||||
|
||||
func (s pState) autoRefreshListener(done chan struct{}) {
|
||||
if s.delayRC != nil {
|
||||
<-s.delayRC
|
||||
}
|
||||
func (s *pState) autoRefreshListener(done chan struct{}) {
|
||||
ticker := time.NewTicker(s.refreshRate)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
|
|
@ -310,7 +328,7 @@ func (s pState) autoRefreshListener(done chan struct{}) {
|
|||
}
|
||||
}
|
||||
|
||||
func (s pState) manualRefreshListener(done chan struct{}) {
|
||||
func (s *pState) manualRefreshListener(done chan struct{}) {
|
||||
for {
|
||||
select {
|
||||
case x := <-s.manualRC:
|
||||
|
|
@ -342,9 +360,9 @@ func (s *pState) render(cw *cwriter.Writer) (err error) {
|
|||
if s.reqWidth > 0 {
|
||||
width = s.reqWidth
|
||||
} else {
|
||||
width = 100
|
||||
width = 80
|
||||
}
|
||||
height = 100
|
||||
height = width
|
||||
}
|
||||
|
||||
for b := range iter {
|
||||
|
|
@ -420,7 +438,7 @@ func (s *pState) flush(cw *cwriter.Writer, height int) error {
|
|||
return cw.Flush(len(rows) - popCount)
|
||||
}
|
||||
|
||||
func (s pState) push(wg *sync.WaitGroup, b *Bar, sync bool) {
|
||||
func (s *pState) push(wg *sync.WaitGroup, b *Bar, sync bool) {
|
||||
s.hm.push(b, sync)
|
||||
wg.Done()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -14,17 +14,23 @@ import (
|
|||
)
|
||||
|
||||
// ArrayCodec is the Codec used for bsoncore.Array values.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// ArrayCodec registered.
|
||||
type ArrayCodec struct{}
|
||||
|
||||
var defaultArrayCodec = NewArrayCodec()
|
||||
|
||||
// NewArrayCodec returns an ArrayCodec.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// ArrayCodec registered.
|
||||
func NewArrayCodec() *ArrayCodec {
|
||||
return &ArrayCodec{}
|
||||
}
|
||||
|
||||
// EncodeValue is the ValueEncoder for bsoncore.Array values.
|
||||
func (ac *ArrayCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
func (ac *ArrayCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tCoreArray {
|
||||
return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val}
|
||||
}
|
||||
|
|
@ -34,7 +40,7 @@ func (ac *ArrayCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val r
|
|||
}
|
||||
|
||||
// DecodeValue is the ValueDecoder for bsoncore.Array values.
|
||||
func (ac *ArrayCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
func (ac *ArrayCodec) DecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tCoreArray {
|
||||
return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,6 +23,8 @@ var (
|
|||
// Marshaler is an interface implemented by types that can marshal themselves
|
||||
// into a BSON document represented as bytes. The bytes returned must be a valid
|
||||
// BSON document if the error is nil.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Marshaler] instead.
|
||||
type Marshaler interface {
|
||||
MarshalBSON() ([]byte, error)
|
||||
}
|
||||
|
|
@ -31,6 +33,8 @@ type Marshaler interface {
|
|||
// themselves into a BSON value as bytes. The type must be the valid type for
|
||||
// the bytes returned. The bytes and byte type together must be valid if the
|
||||
// error is nil.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueMarshaler] instead.
|
||||
type ValueMarshaler interface {
|
||||
MarshalBSONValue() (bsontype.Type, []byte, error)
|
||||
}
|
||||
|
|
@ -39,6 +43,8 @@ type ValueMarshaler interface {
|
|||
// document representation of themselves. The BSON bytes can be assumed to be
|
||||
// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data
|
||||
// after returning.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Unmarshaler] instead.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalBSON([]byte) error
|
||||
}
|
||||
|
|
@ -47,6 +53,8 @@ type Unmarshaler interface {
|
|||
// BSON value representation of themselves. The BSON bytes and type can be
|
||||
// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
|
||||
// wishes to retain the data after returning.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueUnmarshaler] instead.
|
||||
type ValueUnmarshaler interface {
|
||||
UnmarshalBSONValue(bsontype.Type, []byte) error
|
||||
}
|
||||
|
|
@ -111,13 +119,93 @@ func (vde ValueDecoderError) Error() string {
|
|||
// value.
|
||||
type EncodeContext struct {
|
||||
*Registry
|
||||
|
||||
// MinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64,
|
||||
// uint, uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits)
|
||||
// that can represent the integer value.
|
||||
//
|
||||
// Deprecated: Use bson.Encoder.IntMinSize instead.
|
||||
MinSize bool
|
||||
|
||||
errorOnInlineDuplicates bool
|
||||
stringifyMapKeysWithFmt bool
|
||||
nilMapAsEmpty bool
|
||||
nilSliceAsEmpty bool
|
||||
nilByteSliceAsEmpty bool
|
||||
omitZeroStruct bool
|
||||
useJSONStructTags bool
|
||||
}
|
||||
|
||||
// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in
|
||||
// the marshaled BSON when the "inline" struct tag option is set.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead.
|
||||
func (ec *EncodeContext) ErrorOnInlineDuplicates() {
|
||||
ec.errorOnInlineDuplicates = true
|
||||
}
|
||||
|
||||
// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name
|
||||
// strings using fmt.Sprintf() instead of the default string conversion logic.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead.
|
||||
func (ec *EncodeContext) StringifyMapKeysWithFmt() {
|
||||
ec.stringifyMapKeysWithFmt = true
|
||||
}
|
||||
|
||||
// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON
|
||||
// null.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead.
|
||||
func (ec *EncodeContext) NilMapAsEmpty() {
|
||||
ec.nilMapAsEmpty = true
|
||||
}
|
||||
|
||||
// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON
|
||||
// null.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead.
|
||||
func (ec *EncodeContext) NilSliceAsEmpty() {
|
||||
ec.nilSliceAsEmpty = true
|
||||
}
|
||||
|
||||
// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values
|
||||
// instead of BSON null.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead.
|
||||
func (ec *EncodeContext) NilByteSliceAsEmpty() {
|
||||
ec.nilByteSliceAsEmpty = true
|
||||
}
|
||||
|
||||
// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{})
|
||||
// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set.
|
||||
//
|
||||
// Note that the Encoder only examines exported struct fields when determining if a struct is the
|
||||
// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead.
|
||||
func (ec *EncodeContext) OmitZeroStruct() {
|
||||
ec.omitZeroStruct = true
|
||||
}
|
||||
|
||||
// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson"
|
||||
// struct tag is not specified.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] instead.
|
||||
func (ec *EncodeContext) UseJSONStructTags() {
|
||||
ec.useJSONStructTags = true
|
||||
}
|
||||
|
||||
// DecodeContext is the contextual information required for a Codec to decode a
|
||||
// value.
|
||||
type DecodeContext struct {
|
||||
*Registry
|
||||
|
||||
// Truncate, if true, instructs decoders to truncate the fractional part of BSON "double"

|
||||
// values when attempting to unmarshal them into a Go integer (int, int8, int16, int32, int64,
|
||||
// uint, uint8, uint16, uint32, or uint64) struct field. The truncation logic does not apply to
|
||||
// BSON "decimal128" values.
|
||||
//
|
||||
// Deprecated: Use bson.Decoder.AllowTruncatingDoubles instead.
|
||||
Truncate bool
|
||||
|
||||
// Ancestor is the type of a containing document. This is mainly used to determine what type
|
||||
|
|
@ -125,7 +213,7 @@ type DecodeContext struct {
|
|||
// Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface
|
||||
// will be decoded into a bson.M.
|
||||
//
|
||||
// Deprecated: Use DefaultDocumentM or DefaultDocumentD instead.
|
||||
// Deprecated: Use bson.Decoder.DefaultDocumentM or bson.Decoder.DefaultDocumentD instead.
|
||||
Ancestor reflect.Type
|
||||
|
||||
// defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the
|
||||
|
|
@ -133,22 +221,74 @@ type DecodeContext struct {
|
|||
// set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an
|
||||
// error. DocumentType overrides the Ancestor field.
|
||||
defaultDocumentType reflect.Type
|
||||
|
||||
binaryAsSlice bool
|
||||
useJSONStructTags bool
|
||||
useLocalTimeZone bool
|
||||
zeroMaps bool
|
||||
zeroStructs bool
|
||||
}
|
||||
|
||||
// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as
|
||||
// "interface{}" or "map[string]interface{}".
|
||||
// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or
|
||||
// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead.
|
||||
func (dc *DecodeContext) BinaryAsSlice() {
|
||||
dc.binaryAsSlice = true
|
||||
}
|
||||
|
||||
// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson"
|
||||
// struct tag is not specified.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead.
|
||||
func (dc *DecodeContext) UseJSONStructTags() {
|
||||
dc.useJSONStructTags = true
|
||||
}
|
||||
|
||||
// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead
|
||||
// of the UTC timezone.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead.
|
||||
func (dc *DecodeContext) UseLocalTimeZone() {
|
||||
dc.useLocalTimeZone = true
|
||||
}
|
||||
|
||||
// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value
|
||||
// passed to Decode before unmarshaling BSON documents into them.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead.
|
||||
func (dc *DecodeContext) ZeroMaps() {
|
||||
dc.zeroMaps = true
|
||||
}
|
||||
|
||||
// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination
|
||||
// value passed to Decode before unmarshaling BSON documents into them.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead.
|
||||
func (dc *DecodeContext) ZeroStructs() {
|
||||
dc.zeroStructs = true
|
||||
}
|
||||
|
||||
// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This
|
||||
// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentM] instead.
|
||||
func (dc *DecodeContext) DefaultDocumentM() {
|
||||
dc.defaultDocumentType = reflect.TypeOf(primitive.M{})
|
||||
}
|
||||
|
||||
// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as
|
||||
// "interface{}" or "map[string]interface{}".
|
||||
// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This
|
||||
// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentD] instead.
|
||||
func (dc *DecodeContext) DefaultDocumentD() {
|
||||
dc.defaultDocumentType = reflect.TypeOf(primitive.D{})
|
||||
}
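The deprecation notes above point at the equivalent methods on bson.Decoder. A hedged sketch of that replacement path (API names taken from the deprecation comments; exact signatures assumed):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	raw, _ := bson.Marshal(bson.M{"outer": bson.M{"inner": int32(1)}})

	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(raw))
	if err != nil {
		panic(err)
	}
	dec.DefaultDocumentM() // nested documents decode into primitive.M

	var v interface{}
	if err := dec.Decode(&v); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", v)
}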
|
||||
|
||||
// ValueCodec is the interface that groups the methods to encode and decode
|
||||
// ValueCodec is an interface for encoding and decoding a reflect.Value.
|
||||
// values.
|
||||
//
|
||||
// Deprecated: Use [ValueEncoder] and [ValueDecoder] instead.
|
||||
type ValueCodec interface {
|
||||
ValueEncoder
|
||||
ValueDecoder
|
||||
|
|
@ -233,6 +373,10 @@ func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext
|
|||
|
||||
// CodecZeroer is the interface implemented by Codecs that can also determine if
|
||||
// a value of the type that would be encoded is zero.
|
||||
//
|
||||
// Deprecated: Defining custom rules for the zero/empty value will not be supported in Go Driver
|
||||
// 2.0. Users who want to omit empty complex values should use a pointer field and set the value to
|
||||
// nil instead.
|
||||
type CodecZeroer interface {
|
||||
IsTypeZero(interface{}) bool
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,18 +16,30 @@ import (
|
|||
)
|
||||
|
||||
// ByteSliceCodec is the Codec used for []byte values.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// ByteSliceCodec registered.
|
||||
type ByteSliceCodec struct {
|
||||
// EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values
|
||||
// instead of BSON null.
|
||||
//
|
||||
// Deprecated: Use bson.Encoder.NilByteSliceAsEmpty instead.
|
||||
EncodeNilAsEmpty bool
|
||||
}
|
||||
|
||||
var (
|
||||
defaultByteSliceCodec = NewByteSliceCodec()
|
||||
|
||||
_ ValueCodec = defaultByteSliceCodec
|
||||
// Assert that defaultByteSliceCodec satisfies the typeDecoder interface, which allows it to be
|
||||
// used by collection type decoders (e.g. map, slice, etc) to set individual values in a
|
||||
// collection.
|
||||
_ typeDecoder = defaultByteSliceCodec
|
||||
)
|
||||
|
||||
// NewByteSliceCodec returns a StringCodec with options opts.
|
||||
// NewByteSliceCodec returns a ByteSliceCodec with options opts.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// ByteSliceCodec registered.
|
||||
func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec {
|
||||
byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...)
|
||||
codec := ByteSliceCodec{}
|
||||
|
|
@ -42,13 +54,13 @@ func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter,
|
|||
if !val.IsValid() || val.Type() != tByteSlice {
|
||||
return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
|
||||
}
|
||||
if val.IsNil() && !bsc.EncodeNilAsEmpty {
|
||||
if val.IsNil() && !bsc.EncodeNilAsEmpty && !ec.nilByteSliceAsEmpty {
|
||||
return vw.WriteNull()
|
||||
}
|
||||
return vw.WriteBinary(val.Interface().([]byte))
|
||||
}
|
||||
|
||||
func (bsc *ByteSliceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (bsc *ByteSliceCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t != tByteSlice {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "ByteSliceDecodeValue",
|
||||
|
|
|
|||
166 common/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/codec_cache.go (generated, vendored, new file)
|
|
@ -0,0 +1,166 @@
|
|||
// Copyright (C) MongoDB, Inc. 2017-present.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
package bsoncodec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Runtime check that the kind encoder and decoder caches can store any valid
|
||||
// reflect.Kind constant.
|
||||
func init() {
|
||||
if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" {
|
||||
panic("The capacity of kindEncoderCache is too small.\n" +
|
||||
"This is due to a new type being added to reflect.Kind.")
|
||||
}
|
||||
}
|
||||
|
||||
// statically assert array size
|
||||
var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer]
|
||||
var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer]
|
||||
|
||||
type typeEncoderCache struct {
|
||||
cache sync.Map // map[reflect.Type]ValueEncoder
|
||||
}
|
||||
|
||||
func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) {
|
||||
c.cache.Store(rt, enc)
|
||||
}
|
||||
|
||||
func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) {
|
||||
if v, _ := c.cache.Load(rt); v != nil {
|
||||
return v.(ValueEncoder), true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder {
|
||||
if v, loaded := c.cache.LoadOrStore(rt, enc); loaded {
|
||||
enc = v.(ValueEncoder)
|
||||
}
|
||||
return enc
|
||||
}
|
||||
|
||||
func (c *typeEncoderCache) Clone() *typeEncoderCache {
|
||||
cc := new(typeEncoderCache)
|
||||
c.cache.Range(func(k, v interface{}) bool {
|
||||
if k != nil && v != nil {
|
||||
cc.cache.Store(k, v)
|
||||
}
|
||||
return true
|
||||
})
|
||||
return cc
|
||||
}
|
||||
|
||||
type typeDecoderCache struct {
|
||||
cache sync.Map // map[reflect.Type]ValueDecoder
|
||||
}
|
||||
|
||||
func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) {
|
||||
c.cache.Store(rt, dec)
|
||||
}
|
||||
|
||||
func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) {
|
||||
if v, _ := c.cache.Load(rt); v != nil {
|
||||
return v.(ValueDecoder), true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder {
|
||||
if v, loaded := c.cache.LoadOrStore(rt, dec); loaded {
|
||||
dec = v.(ValueDecoder)
|
||||
}
|
||||
return dec
|
||||
}
|
||||
|
||||
func (c *typeDecoderCache) Clone() *typeDecoderCache {
|
||||
cc := new(typeDecoderCache)
|
||||
c.cache.Range(func(k, v interface{}) bool {
|
||||
if k != nil && v != nil {
|
||||
cc.cache.Store(k, v)
|
||||
}
|
||||
return true
|
||||
})
|
||||
return cc
|
||||
}
|
||||
|
||||
// atomic.Value requires that all calls to Store() have the same concrete type
|
||||
// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type
|
||||
// is always the same (since different concrete types may implement the
|
||||
// ValueEncoder interface).
|
||||
type kindEncoderCacheEntry struct {
|
||||
enc ValueEncoder
|
||||
}
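A standalone illustration of the atomic.Value constraint described above: every Store must use the same concrete type, which is why the cache wraps differing ValueEncoder implementations in a single entry struct (illustrative only, not part of the vendored file):

package main

import (
	"fmt"
	"sync/atomic"
)

type wrapper struct{ v interface{} }

func main() {
	var raw atomic.Value
	raw.Store("a string")
	// raw.Store(42) would panic: "store of inconsistently typed value into Value".

	// Wrapping every value in one concrete type keeps Store calls consistent.
	var wrapped atomic.Value
	wrapped.Store(&wrapper{v: "a string"})
	wrapped.Store(&wrapper{v: 42})
	fmt.Println(wrapped.Load().(*wrapper).v) // 42
}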
|
||||
|
||||
type kindEncoderCache struct {
|
||||
entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry
|
||||
}
|
||||
|
||||
func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) {
|
||||
if enc != nil && rt < reflect.Kind(len(c.entries)) {
|
||||
c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc})
|
||||
}
|
||||
}
|
||||
|
||||
func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) {
|
||||
if rt < reflect.Kind(len(c.entries)) {
|
||||
if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok {
|
||||
return ent.enc, ent.enc != nil
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (c *kindEncoderCache) Clone() *kindEncoderCache {
|
||||
cc := new(kindEncoderCache)
|
||||
for i, v := range c.entries {
|
||||
if val := v.Load(); val != nil {
|
||||
cc.entries[i].Store(val)
|
||||
}
|
||||
}
|
||||
return cc
|
||||
}
|
||||
|
||||
// atomic.Value requires that all calls to Store() have the same concrete type
|
||||
// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type
|
||||
// is always the same (since different concrete types may implement the
|
||||
// ValueDecoder interface).
|
||||
type kindDecoderCacheEntry struct {
|
||||
dec ValueDecoder
|
||||
}
|
||||
|
||||
type kindDecoderCache struct {
|
||||
entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry
|
||||
}
|
||||
|
||||
func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) {
|
||||
if rt < reflect.Kind(len(c.entries)) {
|
||||
c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec})
|
||||
}
|
||||
}
|
||||
|
||||
func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) {
|
||||
if rt < reflect.Kind(len(c.entries)) {
|
||||
if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok {
|
||||
return ent.dec, ent.dec != nil
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (c *kindDecoderCache) Clone() *kindDecoderCache {
|
||||
cc := new(kindDecoderCache)
|
||||
for i, v := range c.entries {
|
||||
if val := v.Load(); val != nil {
|
||||
cc.entries[i].Store(val)
|
||||
}
|
||||
}
|
||||
return cc
|
||||
}
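
Note on the pattern above: sync/atomic.Value panics if successive Store calls pass values of different concrete types, which is exactly why every encoder and decoder is wrapped in a dedicated entry struct before being stored. A minimal standalone sketch of the same idea (the greeter types below are invented for illustration and are not part of the driver):

package main

import (
	"fmt"
	"sync/atomic"
)

// Two different concrete types satisfying the same interface.
type greeter interface{ Greet() string }

type english struct{}

func (english) Greet() string { return "hello" }

type french struct{}

func (french) Greet() string { return "bonjour" }

// entry mirrors kindEncoderCacheEntry: every Store call receives the same
// concrete type (*entry), so atomic.Value never sees a type change even though
// the wrapped values have different concrete types.
type entry struct{ g greeter }

func main() {
	var slot atomic.Value

	slot.Store(&entry{g: english{}})
	slot.Store(&entry{g: french{}}) // fine: still storing *entry

	fmt.Println(slot.Load().(*entry).g.Greet()) // bonjour

	// Storing the interface values directly would panic on the second call,
	// because english{} and french{} are different concrete types:
	//
	//	var bad atomic.Value
	//	bad.Store(english{})
	//	bad.Store(french{}) // panic: store of inconsistently typed value
}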
148
common/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go
generated
vendored
|
|
@ -24,7 +24,7 @@ import (
|
|||
|
||||
var (
|
||||
defaultValueDecoders DefaultValueDecoders
|
||||
errCannotTruncate = errors.New("float64 can only be truncated to an integer type when truncation is enabled")
|
||||
errCannotTruncate = errors.New("float64 can only be truncated to a lower precision type when truncation is enabled")
|
||||
)
|
||||
|
||||
type decodeBinaryError struct {
|
||||
|
|
@ -48,6 +48,9 @@ func newDefaultStructCodec() *StructCodec {
|
|||
|
||||
// DefaultValueDecoders is a namespace type for the default ValueDecoders used
|
||||
// when creating a registry.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
type DefaultValueDecoders struct{}
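
The deprecation notices added throughout this file all point at bson.NewRegistry, which already has every default encoder and decoder registered. A rough sketch of that replacement path, under the assumption that bson.NewRegistry and bson.UnmarshalWithRegistry are available in the vendored driver version (the type map tweak mirrors the example from the package documentation):

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

func main() {
	// NewRegistry replaces the DefaultValueDecoders/RegistryBuilder dance:
	// all default codecs are already registered on the returned registry.
	reg := bson.NewRegistry()

	// Optional: decode BSON int32/int64 into Go int when the target is
	// interface{} (bson.D / bson.M values), as described in the package docs.
	intType := reflect.TypeOf(int(0))
	reg.RegisterTypeMapEntry(bsontype.Int32, intType)
	reg.RegisterTypeMapEntry(bsontype.Int64, intType)

	doc, err := bson.Marshal(bson.M{"n": int32(7)})
	if err != nil {
		panic(err)
	}

	var out bson.M
	if err := bson.UnmarshalWithRegistry(reg, doc, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", out["n"], out["n"]) // int 7
}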
|
||||
|
||||
// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with
|
||||
|
|
@ -56,6 +59,9 @@ type DefaultValueDecoders struct{}
|
|||
// There is no support for decoding map[string]interface{} because there is no decoder for
|
||||
// interface{}, so users must either register this decoder themselves or use the
|
||||
// EmptyInterfaceDecoder available in the bson package.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) {
|
||||
if rb == nil {
|
||||
panic(errors.New("argument to RegisterDefaultDecoders must not be nil"))
|
||||
|
|
@ -132,6 +138,9 @@ func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) {
|
|||
}
|
||||
|
||||
// DDecodeValue is the ValueDecoderFunc for primitive.D instances.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.IsValid() || !val.CanSet() || val.Type() != tD {
|
||||
return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
|
||||
|
|
@ -188,7 +197,7 @@ func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueRe
|
|||
return nil
|
||||
}
|
||||
|
||||
func (dvd DefaultValueDecoders) booleanDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (dvd DefaultValueDecoders) booleanDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t.Kind() != reflect.Bool {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "BooleanDecodeValue",
|
||||
|
|
@ -235,6 +244,9 @@ func (dvd DefaultValueDecoders) booleanDecodeType(dctx DecodeContext, vr bsonrw.
|
|||
}
|
||||
|
||||
// BooleanDecodeValue is the ValueDecoderFunc for bool types.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool {
|
||||
return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val}
|
||||
|
|
@ -333,6 +345,9 @@ func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReade
|
|||
}
|
||||
|
||||
// IntDecodeValue is the ValueDecoderFunc for int types.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() {
|
||||
return ValueDecoderError{
|
||||
|
|
@ -434,7 +449,7 @@ func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.Valu
|
|||
return nil
|
||||
}
|
||||
|
||||
func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (dvd DefaultValueDecoders) floatDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
var f float64
|
||||
var err error
|
||||
switch vrType := vr.Type(); vrType {
|
||||
|
|
@ -477,7 +492,7 @@ func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.Valu
|
|||
|
||||
switch t.Kind() {
|
||||
case reflect.Float32:
|
||||
if !ec.Truncate && float64(float32(f)) != f {
|
||||
if !dc.Truncate && float64(float32(f)) != f {
|
||||
return emptyValue, errCannotTruncate
|
||||
}
|
||||
|
||||
|
|
@ -494,6 +509,9 @@ func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.Valu
|
|||
}
|
||||
|
||||
// FloatDecodeValue is the ValueDecoderFunc for float types.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() {
|
||||
return ValueDecoderError{
|
||||
|
|
@ -515,7 +533,7 @@ func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.Val
|
|||
// StringDecodeValue is the ValueDecoderFunc for string types.
|
||||
//
|
||||
// Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead.
|
||||
func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
func (dvd DefaultValueDecoders) StringDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
var str string
|
||||
var err error
|
||||
switch vr.Type() {
|
||||
|
|
@ -536,7 +554,7 @@ func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.
|
|||
return nil
|
||||
}
|
||||
|
||||
func (DefaultValueDecoders) javaScriptDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (DefaultValueDecoders) javaScriptDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t != tJavaScript {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "JavaScriptDecodeValue",
|
||||
|
|
@ -565,6 +583,9 @@ func (DefaultValueDecoders) javaScriptDecodeType(dctx DecodeContext, vr bsonrw.V
|
|||
}
|
||||
|
||||
// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tJavaScript {
|
||||
return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val}
|
||||
|
|
@ -579,7 +600,7 @@ func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bso
|
|||
return nil
|
||||
}
|
||||
|
||||
func (DefaultValueDecoders) symbolDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (DefaultValueDecoders) symbolDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t != tSymbol {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "SymbolDecodeValue",
|
||||
|
|
@ -620,6 +641,9 @@ func (DefaultValueDecoders) symbolDecodeType(dctx DecodeContext, vr bsonrw.Value
|
|||
}
|
||||
|
||||
// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tSymbol {
|
||||
return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val}
|
||||
|
|
@ -634,7 +658,7 @@ func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.
|
|||
return nil
|
||||
}
|
||||
|
||||
func (DefaultValueDecoders) binaryDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (DefaultValueDecoders) binaryDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t != tBinary {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "BinaryDecodeValue",
|
||||
|
|
@ -664,6 +688,9 @@ func (DefaultValueDecoders) binaryDecodeType(dc DecodeContext, vr bsonrw.ValueRe
|
|||
}
|
||||
|
||||
// BinaryDecodeValue is the ValueDecoderFunc for Binary.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tBinary {
|
||||
return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val}
|
||||
|
|
@ -678,7 +705,7 @@ func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.Va
|
|||
return nil
|
||||
}
|
||||
|
||||
func (DefaultValueDecoders) undefinedDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (DefaultValueDecoders) undefinedDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t != tUndefined {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "UndefinedDecodeValue",
|
||||
|
|
@ -704,6 +731,9 @@ func (DefaultValueDecoders) undefinedDecodeType(dc DecodeContext, vr bsonrw.Valu
|
|||
}
|
||||
|
||||
// UndefinedDecodeValue is the ValueDecoderFunc for Undefined.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tUndefined {
|
||||
return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val}
|
||||
|
|
@ -719,7 +749,7 @@ func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw
|
|||
}
|
||||
|
||||
// Accept both 12-byte string and pretty-printed 24-byte hex string formats.
|
||||
func (dvd DefaultValueDecoders) objectIDDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (dvd DefaultValueDecoders) objectIDDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t != tOID {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "ObjectIDDecodeValue",
|
||||
|
|
@ -765,6 +795,9 @@ func (dvd DefaultValueDecoders) objectIDDecodeType(dc DecodeContext, vr bsonrw.V
|
|||
}
|
||||
|
||||
// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tOID {
|
||||
return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val}
|
||||
|
|
@ -779,7 +812,7 @@ func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.
|
|||
return nil
|
||||
}
|
||||
|
||||
func (DefaultValueDecoders) dateTimeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (DefaultValueDecoders) dateTimeDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t != tDateTime {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "DateTimeDecodeValue",
|
||||
|
|
@ -808,6 +841,9 @@ func (DefaultValueDecoders) dateTimeDecodeType(dc DecodeContext, vr bsonrw.Value
|
|||
}
|
||||
|
||||
// DateTimeDecodeValue is the ValueDecoderFunc for DateTime.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tDateTime {
|
||||
return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val}
|
||||
|
|
@ -822,7 +858,7 @@ func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.
|
|||
return nil
|
||||
}
|
||||
|
||||
func (DefaultValueDecoders) nullDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (DefaultValueDecoders) nullDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t != tNull {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "NullDecodeValue",
|
||||
|
|
@ -848,6 +884,9 @@ func (DefaultValueDecoders) nullDecodeType(dc DecodeContext, vr bsonrw.ValueRead
|
|||
}
|
||||
|
||||
// NullDecodeValue is the ValueDecoderFunc for Null.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tNull {
|
||||
return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val}
|
||||
|
|
@ -862,7 +901,7 @@ func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.Valu
|
|||
return nil
|
||||
}
|
||||
|
||||
func (DefaultValueDecoders) regexDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (DefaultValueDecoders) regexDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t != tRegex {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "RegexDecodeValue",
|
||||
|
|
@ -891,6 +930,9 @@ func (DefaultValueDecoders) regexDecodeType(dc DecodeContext, vr bsonrw.ValueRea
|
|||
}
|
||||
|
||||
// RegexDecodeValue is the ValueDecoderFunc for Regex.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tRegex {
|
||||
return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val}
|
||||
|
|
@ -905,7 +947,7 @@ func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.Val
|
|||
return nil
|
||||
}
|
||||
|
||||
func (DefaultValueDecoders) dBPointerDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (DefaultValueDecoders) dBPointerDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t != tDBPointer {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "DBPointerDecodeValue",
|
||||
|
|
@ -935,6 +977,9 @@ func (DefaultValueDecoders) dBPointerDecodeType(dc DecodeContext, vr bsonrw.Valu
|
|||
}
|
||||
|
||||
// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tDBPointer {
|
||||
return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val}
|
||||
|
|
@ -949,7 +994,7 @@ func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw
|
|||
return nil
|
||||
}
|
||||
|
||||
func (DefaultValueDecoders) timestampDecodeType(dc DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) {
|
||||
func (DefaultValueDecoders) timestampDecodeType(_ DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) {
|
||||
if reflectType != tTimestamp {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "TimestampDecodeValue",
|
||||
|
|
@ -978,6 +1023,9 @@ func (DefaultValueDecoders) timestampDecodeType(dc DecodeContext, vr bsonrw.Valu
|
|||
}
|
||||
|
||||
// TimestampDecodeValue is the ValueDecoderFunc for Timestamp.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tTimestamp {
|
||||
return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val}
|
||||
|
|
@ -992,7 +1040,7 @@ func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw
|
|||
return nil
|
||||
}
|
||||
|
||||
func (DefaultValueDecoders) minKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (DefaultValueDecoders) minKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t != tMinKey {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "MinKeyDecodeValue",
|
||||
|
|
@ -1020,6 +1068,9 @@ func (DefaultValueDecoders) minKeyDecodeType(dc DecodeContext, vr bsonrw.ValueRe
|
|||
}
|
||||
|
||||
// MinKeyDecodeValue is the ValueDecoderFunc for MinKey.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tMinKey {
|
||||
return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val}
|
||||
|
|
@ -1034,7 +1085,7 @@ func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.Va
|
|||
return nil
|
||||
}
|
||||
|
||||
func (DefaultValueDecoders) maxKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (DefaultValueDecoders) maxKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t != tMaxKey {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "MaxKeyDecodeValue",
|
||||
|
|
@ -1062,6 +1113,9 @@ func (DefaultValueDecoders) maxKeyDecodeType(dc DecodeContext, vr bsonrw.ValueRe
|
|||
}
|
||||
|
||||
// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tMaxKey {
|
||||
return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val}
|
||||
|
|
@ -1076,7 +1130,7 @@ func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.Va
|
|||
return nil
|
||||
}
|
||||
|
||||
func (dvd DefaultValueDecoders) decimal128DecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (dvd DefaultValueDecoders) decimal128DecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t != tDecimal {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "Decimal128DecodeValue",
|
||||
|
|
@ -1105,6 +1159,9 @@ func (dvd DefaultValueDecoders) decimal128DecodeType(dctx DecodeContext, vr bson
|
|||
}
|
||||
|
||||
// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tDecimal {
|
||||
return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val}
|
||||
|
|
@ -1119,7 +1176,7 @@ func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bso
|
|||
return nil
|
||||
}
|
||||
|
||||
func (dvd DefaultValueDecoders) jsonNumberDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (dvd DefaultValueDecoders) jsonNumberDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t != tJSONNumber {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "JSONNumberDecodeValue",
|
||||
|
|
@ -1164,6 +1221,9 @@ func (dvd DefaultValueDecoders) jsonNumberDecodeType(dc DecodeContext, vr bsonrw
|
|||
}
|
||||
|
||||
// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tJSONNumber {
|
||||
return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
|
||||
|
|
@ -1178,7 +1238,7 @@ func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonr
|
|||
return nil
|
||||
}
|
||||
|
||||
func (dvd DefaultValueDecoders) urlDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (dvd DefaultValueDecoders) urlDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t != tURL {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "URLDecodeValue",
|
||||
|
|
@ -1213,6 +1273,9 @@ func (dvd DefaultValueDecoders) urlDecodeType(dc DecodeContext, vr bsonrw.ValueR
|
|||
}
|
||||
|
||||
// URLDecodeValue is the ValueDecoderFunc for url.URL.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tURL {
|
||||
return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val}
|
||||
|
|
@ -1230,7 +1293,7 @@ func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.Value
|
|||
// TimeDecodeValue is the ValueDecoderFunc for time.Time.
|
||||
//
|
||||
// Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead.
|
||||
func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
func (dvd DefaultValueDecoders) TimeDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if vr.Type() != bsontype.DateTime {
|
||||
return fmt.Errorf("cannot decode %v into a time.Time", vr.Type())
|
||||
}
|
||||
|
|
@ -1251,7 +1314,7 @@ func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.Valu
|
|||
// ByteSliceDecodeValue is the ValueDecoderFunc for []byte.
|
||||
//
|
||||
// Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead.
|
||||
func (dvd DefaultValueDecoders) ByteSliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
func (dvd DefaultValueDecoders) ByteSliceDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null {
|
||||
return fmt.Errorf("cannot decode %v into a []byte", vr.Type())
|
||||
}
|
||||
|
|
@ -1336,6 +1399,9 @@ func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.Value
|
|||
}
|
||||
|
||||
// ArrayDecodeValue is the ValueDecoderFunc for array types.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Kind() != reflect.Array {
|
||||
return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
|
||||
|
|
@ -1447,7 +1513,10 @@ func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.Val
|
|||
}
|
||||
|
||||
// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations.
|
||||
func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) {
|
||||
return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
|
||||
}
|
||||
|
|
@ -1471,16 +1540,19 @@ func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr
|
|||
return err
|
||||
}
|
||||
|
||||
fn := val.Convert(tValueUnmarshaler).MethodByName("UnmarshalBSONValue")
|
||||
errVal := fn.Call([]reflect.Value{reflect.ValueOf(t), reflect.ValueOf(src)})[0]
|
||||
if !errVal.IsNil() {
|
||||
return errVal.Interface().(error)
|
||||
m, ok := val.Interface().(ValueUnmarshaler)
|
||||
if !ok {
|
||||
// NB: this error should be unreachable due to the above checks
|
||||
return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
|
||||
}
|
||||
return nil
|
||||
return m.UnmarshalBSONValue(t, src)
|
||||
}
|
||||
|
||||
// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations.
|
||||
func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) {
|
||||
return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
|
||||
}
|
||||
|
|
@ -1516,12 +1588,12 @@ func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bson
|
|||
val = val.Addr() // If the type doesn't implement the interface, a pointer to it must.
|
||||
}
|
||||
|
||||
fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON")
|
||||
errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0]
|
||||
if !errVal.IsNil() {
|
||||
return errVal.Interface().(error)
|
||||
m, ok := val.Interface().(Unmarshaler)
|
||||
if !ok {
|
||||
// NB: this error should be unreachable due to the above checks
|
||||
return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
|
||||
}
|
||||
return nil
|
||||
return m.UnmarshalBSON(src)
|
||||
}
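
The two hunks above replace reflect MethodByName calls with plain interface assertions: the decoder now asserts to ValueUnmarshaler (UnmarshalBSONValue) or Unmarshaler (UnmarshalBSON) and calls the method directly. As a rough illustration of a type that plugs into the ValueUnmarshaler path, not taken from the vendored code, the lowerString type and sample document below are invented:

package main

import (
	"fmt"
	"strings"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

// lowerString stores every decoded BSON string in lower case by implementing
// the UnmarshalBSONValue hook that ValueUnmarshalerDecodeValue asserts against.
type lowerString string

func (s *lowerString) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
	if t != bsontype.String {
		return fmt.Errorf("cannot decode %v into a lowerString", t)
	}
	rv := bson.RawValue{Type: t, Value: data}
	str, ok := rv.StringValueOK()
	if !ok {
		return fmt.Errorf("invalid BSON string payload")
	}
	*s = lowerString(strings.ToLower(str))
	return nil
}

func main() {
	doc, err := bson.Marshal(bson.M{"name": "Ada Lovelace"})
	if err != nil {
		panic(err)
	}

	var out struct {
		Name lowerString `bson:"name"`
	}
	if err := bson.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Name) // ada lovelace
}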
|
||||
|
||||
// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}.
|
||||
|
|
@ -1565,7 +1637,10 @@ func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr b
|
|||
}
|
||||
|
||||
// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document.
|
||||
func (DefaultValueDecoders) CoreDocumentDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (DefaultValueDecoders) CoreDocumentDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tCoreDocument {
|
||||
return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val}
|
||||
}
|
||||
|
|
@ -1671,6 +1746,9 @@ func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bso
|
|||
}
|
||||
|
||||
// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value decoders registered.
|
||||
func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
|
||||
if !val.CanSet() || val.Type() != tCodeWithScope {
|
||||
return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val}
|
||||
|
|
180
common/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go
generated
vendored
|
|
@ -58,10 +58,16 @@ func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) er
|
|||
|
||||
// DefaultValueEncoders is a namespace type for the default ValueEncoders used
|
||||
// when creating a registry.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
type DefaultValueEncoders struct{}
|
||||
|
||||
// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with
|
||||
// the provided RegistryBuilder.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) {
|
||||
if rb == nil {
|
||||
panic(errors.New("argument to RegisterDefaultEncoders must not be nil"))
|
||||
|
|
@ -113,7 +119,10 @@ func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) {
|
|||
}
|
||||
|
||||
// BooleanEncodeValue is the ValueEncoderFunc for bool types.
|
||||
func (dve DefaultValueEncoders) BooleanEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (dve DefaultValueEncoders) BooleanEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Kind() != reflect.Bool {
|
||||
return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val}
|
||||
}
|
||||
|
|
@ -125,6 +134,9 @@ func fitsIn32Bits(i int64) bool {
|
|||
}
|
||||
|
||||
// IntEncodeValue is the ValueEncoderFunc for int types.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
switch val.Kind() {
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32:
|
||||
|
|
@ -176,7 +188,10 @@ func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.Valu
|
|||
}
|
||||
|
||||
// FloatEncodeValue is the ValueEncoderFunc for float types.
|
||||
func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (dve DefaultValueEncoders) FloatEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
switch val.Kind() {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return vw.WriteDouble(val.Float())
|
||||
|
|
@ -188,7 +203,7 @@ func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.Val
|
|||
// StringEncodeValue is the ValueEncoderFunc for string types.
|
||||
//
|
||||
// Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead.
|
||||
func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
func (dve DefaultValueEncoders) StringEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if val.Kind() != reflect.String {
|
||||
return ValueEncoderError{
|
||||
Name: "StringEncodeValue",
|
||||
|
|
@ -201,7 +216,10 @@ func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.
|
|||
}
|
||||
|
||||
// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID.
|
||||
func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (dve DefaultValueEncoders) ObjectIDEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tOID {
|
||||
return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val}
|
||||
}
|
||||
|
|
@ -209,7 +227,10 @@ func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.
|
|||
}
|
||||
|
||||
// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128.
|
||||
func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (dve DefaultValueEncoders) Decimal128EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tDecimal {
|
||||
return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val}
|
||||
}
|
||||
|
|
@ -217,6 +238,9 @@ func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonr
|
|||
}
|
||||
|
||||
// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tJSONNumber {
|
||||
return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
|
||||
|
|
@ -237,7 +261,10 @@ func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonr
|
|||
}
|
||||
|
||||
// URLEncodeValue is the ValueEncoderFunc for url.URL.
|
||||
func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (dve DefaultValueEncoders) URLEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tURL {
|
||||
return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val}
|
||||
}
|
||||
|
|
@ -248,7 +275,7 @@ func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.Value
|
|||
// TimeEncodeValue is the ValueEncoderFunc for time.Time.
|
||||
//
|
||||
// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead.
|
||||
func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
func (dve DefaultValueEncoders) TimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tTime {
|
||||
return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
|
||||
}
|
||||
|
|
@ -260,7 +287,7 @@ func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.Valu
|
|||
// ByteSliceEncodeValue is the ValueEncoderFunc for []byte.
|
||||
//
|
||||
// Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead.
|
||||
func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
func (dve DefaultValueEncoders) ByteSliceEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tByteSlice {
|
||||
return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
|
||||
}
|
||||
|
|
@ -343,6 +370,9 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum
|
|||
}
|
||||
|
||||
// ArrayEncodeValue is the ValueEncoderFunc for array types.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Kind() != reflect.Array {
|
||||
return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
|
||||
|
|
@ -515,7 +545,10 @@ func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw b
|
|||
}
|
||||
|
||||
// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations.
|
||||
func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
// Either val or a pointer to val must implement ValueMarshaler
|
||||
switch {
|
||||
case !val.IsValid():
|
||||
|
|
@ -531,17 +564,22 @@ func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw b
|
|||
return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val}
|
||||
}
|
||||
|
||||
fn := val.Convert(tValueMarshaler).MethodByName("MarshalBSONValue")
|
||||
returns := fn.Call(nil)
|
||||
if !returns[2].IsNil() {
|
||||
return returns[2].Interface().(error)
|
||||
m, ok := val.Interface().(ValueMarshaler)
|
||||
if !ok {
|
||||
return vw.WriteNull()
|
||||
}
|
||||
t, data, err := m.MarshalBSONValue()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t, data := returns[0].Interface().(bsontype.Type), returns[1].Interface().([]byte)
|
||||
return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data)
|
||||
}
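
The encoding side gets the same reflection-to-assertion change: ValueMarshalerEncodeValue now asserts to ValueMarshaler and calls MarshalBSONValue directly. A hedged sketch of a type that would be picked up by that hook; yesNo is a made-up example and bson.MarshalValue is used only as a convenient way to produce the raw value:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

// yesNo marshals a Go bool as the BSON string "yes" or "no" by implementing
// the MarshalBSONValue hook that ValueMarshalerEncodeValue asserts against.
type yesNo bool

func (b yesNo) MarshalBSONValue() (bsontype.Type, []byte, error) {
	s := "no"
	if b {
		s = "yes"
	}
	return bson.MarshalValue(s)
}

func main() {
	doc, err := bson.Marshal(bson.M{"enabled": yesNo(true)})
	if err != nil {
		panic(err)
	}

	var out bson.M
	if err := bson.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	fmt.Println(out["enabled"]) // yes
}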
|
||||
|
||||
// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations.
|
||||
func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
// Either val or a pointer to val must implement Marshaler
|
||||
switch {
|
||||
case !val.IsValid():
|
||||
|
|
@ -557,16 +595,21 @@ func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw
|
|||
return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val}
|
||||
}
|
||||
|
||||
fn := val.Convert(tMarshaler).MethodByName("MarshalBSON")
|
||||
returns := fn.Call(nil)
|
||||
if !returns[1].IsNil() {
|
||||
return returns[1].Interface().(error)
|
||||
m, ok := val.Interface().(Marshaler)
|
||||
if !ok {
|
||||
return vw.WriteNull()
|
||||
}
|
||||
data, err := m.MarshalBSON()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data := returns[0].Interface().([]byte)
|
||||
return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data)
|
||||
}
|
||||
|
||||
// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
// Either val or a pointer to val must implement Proxy
|
||||
switch {
|
||||
|
|
@ -583,27 +626,38 @@ func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.Val
|
|||
return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val}
|
||||
}
|
||||
|
||||
fn := val.Convert(tProxy).MethodByName("ProxyBSON")
|
||||
returns := fn.Call(nil)
|
||||
if !returns[1].IsNil() {
|
||||
return returns[1].Interface().(error)
|
||||
}
|
||||
data := returns[0]
|
||||
var encoder ValueEncoder
|
||||
var err error
|
||||
if data.Elem().IsValid() {
|
||||
encoder, err = ec.LookupEncoder(data.Elem().Type())
|
||||
} else {
|
||||
encoder, err = ec.LookupEncoder(nil)
|
||||
m, ok := val.Interface().(Proxy)
|
||||
if !ok {
|
||||
return vw.WriteNull()
|
||||
}
|
||||
v, err := m.ProxyBSON()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return encoder.EncodeValue(ec, vw, data.Elem())
|
||||
if v == nil {
|
||||
encoder, err := ec.LookupEncoder(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil))
|
||||
}
|
||||
vv := reflect.ValueOf(v)
|
||||
switch vv.Kind() {
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
vv = vv.Elem()
|
||||
}
|
||||
encoder, err := ec.LookupEncoder(vv.Type())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return encoder.EncodeValue(ec, vw, vv)
|
||||
}
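
ProxyEncodeValue follows suit: the value is asserted to Proxy and whatever ProxyBSON returns is encoded in its place. A minimal, assumed example of a Proxy implementation; celsius is invented for illustration:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

// celsius is stored in fahrenheit: ProxyBSON satisfies the bsoncodec.Proxy
// hook, and the returned float64 is what actually ends up in the document.
type celsius float64

func (c celsius) ProxyBSON() (interface{}, error) {
	return float64(c)*9/5 + 32, nil
}

func main() {
	doc, err := bson.Marshal(bson.M{"temp": celsius(100)})
	if err != nil {
		panic(err)
	}

	var out bson.M
	if err := bson.Unmarshal(doc, &out); err != nil {
		panic(err)
	}
	fmt.Println(out["temp"]) // 212
}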
|
||||
|
||||
// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type.
|
||||
func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (DefaultValueEncoders) JavaScriptEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tJavaScript {
|
||||
return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val}
|
||||
}
|
||||
|
|
@ -612,7 +666,10 @@ func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.
|
|||
}
|
||||
|
||||
// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type.
|
||||
func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (DefaultValueEncoders) SymbolEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tSymbol {
|
||||
return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val}
|
||||
}
|
||||
|
|
@ -621,7 +678,10 @@ func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.Valu
|
|||
}
|
||||
|
||||
// BinaryEncodeValue is the ValueEncoderFunc for Binary.
|
||||
func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (DefaultValueEncoders) BinaryEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tBinary {
|
||||
return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val}
|
||||
}
|
||||
|
|
@ -631,7 +691,10 @@ func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueW
|
|||
}
|
||||
|
||||
// UndefinedEncodeValue is the ValueEncoderFunc for Undefined.
|
||||
func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (DefaultValueEncoders) UndefinedEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tUndefined {
|
||||
return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val}
|
||||
}
|
||||
|
|
@ -640,7 +703,10 @@ func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.Val
|
|||
}
|
||||
|
||||
// DateTimeEncodeValue is the ValueEncoderFunc for DateTime.
|
||||
func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (DefaultValueEncoders) DateTimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tDateTime {
|
||||
return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val}
|
||||
}
|
||||
|
|
@ -649,7 +715,10 @@ func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.Valu
|
|||
}
|
||||
|
||||
// NullEncodeValue is the ValueEncoderFunc for Null.
|
||||
func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (DefaultValueEncoders) NullEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tNull {
|
||||
return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val}
|
||||
}
|
||||
|
|
@ -658,7 +727,10 @@ func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWri
|
|||
}
|
||||
|
||||
// RegexEncodeValue is the ValueEncoderFunc for Regex.
|
||||
func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (DefaultValueEncoders) RegexEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tRegex {
|
||||
return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val}
|
||||
}
|
||||
|
|
@ -669,7 +741,10 @@ func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWr
|
|||
}
|
||||
|
||||
// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer.
|
||||
func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (DefaultValueEncoders) DBPointerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tDBPointer {
|
||||
return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val}
|
||||
}
|
||||
|
|
@ -680,7 +755,10 @@ func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.Val
|
|||
}
|
||||
|
||||
// TimestampEncodeValue is the ValueEncoderFunc for Timestamp.
|
||||
func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (DefaultValueEncoders) TimestampEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tTimestamp {
|
||||
return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val}
|
||||
}
|
||||
|
|
@ -691,7 +769,10 @@ func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.Val
|
|||
}
|
||||
|
||||
// MinKeyEncodeValue is the ValueEncoderFunc for MinKey.
|
||||
func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (DefaultValueEncoders) MinKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tMinKey {
|
||||
return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val}
|
||||
}
|
||||
|
|
@ -700,7 +781,10 @@ func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueW
|
|||
}
|
||||
|
||||
// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey.
|
||||
func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (DefaultValueEncoders) MaxKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tMaxKey {
|
||||
return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val}
|
||||
}
|
||||
|
|
@ -709,7 +793,10 @@ func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueW
|
|||
}
|
||||
|
||||
// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document.
|
||||
func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (DefaultValueEncoders) CoreDocumentEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tCoreDocument {
|
||||
return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val}
|
||||
}
|
||||
|
|
@ -720,6 +807,9 @@ func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.
|
|||
}
|
||||
|
||||
// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
|
||||
// value encoders registered.
|
||||
func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if !val.IsValid() || val.Type() != tCodeWithScope {
|
||||
return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val}
|
||||
|
|
|
|||
|
|
@ -31,35 +31,39 @@
// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext
// instance is provided and serves similar functionality to the EncodeContext.
//
// # Registry and RegistryBuilder
// # Registry
//
// A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. See the Registry type
// documentation for examples of registering various custom encoders and decoders. A Registry can be constructed using a
// RegistryBuilder, which handles three main types of codecs:
// A Registry is a store for ValueEncoders, ValueDecoders, and a type map. See the Registry type
// documentation for examples of registering various custom encoders and decoders. A Registry can
// have three main types of codecs:
//
// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and RegisterTypeDecoder methods.
// The registered codec will be invoked when encoding/decoding a value whose type matches the registered type exactly.
// If the registered type is an interface, the codec will be invoked when encoding or decoding values whose type is the
// interface, but not for values with concrete types that implement the interface.
// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and
// RegisterTypeDecoder methods. The registered codec will be invoked when encoding/decoding a value
// whose type matches the registered type exactly.
// If the registered type is an interface, the codec will be invoked when encoding or decoding
// values whose type is the interface, but not for values with concrete types that implement the
// interface.
//
// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and RegisterHookDecoder methods.
// These methods only accept interface types and the registered codecs will be invoked when encoding or decoding values
// whose types implement the interface. An example of a hook defined by the driver is bson.Marshaler. The driver will
// call the MarshalBSON method for any value whose type implements bson.Marshaler, regardless of the value's concrete
// type.
// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and
// RegisterHookDecoder methods. These methods only accept interface types and the registered codecs
// will be invoked when encoding or decoding values whose types implement the interface. An example
// of a hook defined by the driver is bson.Marshaler. The driver will call the MarshalBSON method
// for any value whose type implements bson.Marshaler, regardless of the value's concrete type.
//
// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type associations are used when
// decoding into a bson.D/bson.M or a struct field of type interface{}. For example, by default, BSON int32 and int64
// values decode as Go int32 and int64 instances, respectively, when decoding into a bson.D. The following code would
// change the behavior so these values decode as Go int instances instead:
// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type
// associations are used when decoding into a bson.D/bson.M or a struct field of type interface{}.
// For example, by default, BSON int32 and int64 values decode as Go int32 and int64 instances,
// respectively, when decoding into a bson.D. The following code would change the behavior so these
// values decode as Go int instances instead:
//
// intType := reflect.TypeOf(int(0))
// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType)
// registry.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType)
//
// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and RegisterDefaultDecoder
// methods. The registered codec will be invoked when encoding or decoding values whose reflect.Kind matches the
// registered reflect.Kind as long as the value's type doesn't match a registered type or hook encoder/decoder first.
// These methods should be used to change the behavior for all values for a specific kind.
// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and
// RegisterDefaultDecoder methods. The registered codec will be invoked when encoding or decoding
// values whose reflect.Kind matches the registered reflect.Kind as long as the value's type doesn't
// match a registered type or hook encoder/decoder first. These methods should be used to change the
// behavior for all values for a specific kind.
//
// # Registry Lookup Procedure
//
@ -67,17 +71,18 @@
//
// 1. A type encoder registered for the exact type of the value.
//
// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to the value. If the
// value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and bsoncodec.ValueMarshaler), the first
// one registered will be selected. Note that registries constructed using bson.NewRegistryBuilder have driver-defined
// hooks registered for the bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those
// will take precedence over any new hooks.
// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to
// the value. If the value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and
// bsoncodec.ValueMarshaler), the first one registered will be selected. Note that registries
// constructed using bson.NewRegistry have driver-defined hooks registered for the
// bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those will take
// precedence over any new hooks.
|
||||
//
|
||||
// 3. A kind encoder registered for the value's kind.
|
||||
//
|
||||
// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The same precedence
|
||||
// rules apply for decoders, with the exception that an error of type ErrNoDecoder will be returned if no decoder is
|
||||
// found.
|
||||
// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The
|
||||
// same precedence rules apply for decoders, with the exception that an error of type ErrNoDecoder
|
||||
// will be returned if no decoder is found.
|
||||
//
|
||||
// # DefaultValueEncoders and DefaultValueDecoders
|
||||
//
|
||||
|
|
|
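As a rough illustration of the type-map behavior described in the doc comment above, the new Registry API can be exercised like the sketch below; the "count" field and the use of bson.UnmarshalWithRegistry are illustrative only and not part of this change.

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

func main() {
	// Remap BSON int32 and int64 to the Go int type, as the doc comment describes.
	reg := bson.NewRegistry()
	intType := reflect.TypeOf(int(0))
	reg.RegisterTypeMapEntry(bsontype.Int32, intType)
	reg.RegisterTypeMapEntry(bsontype.Int64, intType)

	raw, err := bson.Marshal(bson.M{"count": int32(7)})
	if err != nil {
		panic(err)
	}

	var out bson.M
	if err := bson.UnmarshalWithRegistry(reg, raw, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", out["count"]) // int instead of the default int32
}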
16 common/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go generated vendored
@@ -16,18 +16,30 @@ import (
)

// EmptyInterfaceCodec is the Codec used for interface{} values.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// EmptyInterfaceCodec registered.
type EmptyInterfaceCodec struct {
// DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the
// "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
//
// Deprecated: Use bson.Decoder.BinaryAsSlice instead.
DecodeBinaryAsSlice bool
}

var (
defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec()

_ ValueCodec = defaultEmptyInterfaceCodec
// Assert that defaultEmptyInterfaceCodec satisfies the typeDecoder interface, which allows it
// to be used by collection type decoders (e.g. map, slice, etc) to set individual values in a
// collection.
_ typeDecoder = defaultEmptyInterfaceCodec
)

// NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// EmptyInterfaceCodec registered.
func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec {
interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...)

@@ -121,7 +133,7 @@ func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReade
return emptyValue, err
}

if eic.DecodeBinaryAsSlice && rtype == tBinary {
if (eic.DecodeBinaryAsSlice || dc.binaryAsSlice) && rtype == tBinary {
binElem := elem.Interface().(primitive.Binary)
if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld {
elem = reflect.ValueOf(binElem.Data)
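The deprecation note above points at a decoder-level switch. A minimal sketch of that replacement usage, assuming the bson.Decoder.BinaryAsSlice option referenced in the note is available in the vendored driver version; the sample payload is made up for illustration.

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	// A document with a generic-subtype binary field.
	raw, err := bson.Marshal(bson.M{"payload": []byte{0x01, 0x02}})
	if err != nil {
		panic(err)
	}

	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(raw))
	if err != nil {
		panic(err)
	}
	// Replacement for EmptyInterfaceCodec.DecodeBinaryAsSlice: surface generic and
	// old-subtype binaries as []byte instead of primitive.Binary.
	dec.BinaryAsSlice()

	var out bson.M
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", out["payload"]) // []uint8
}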
@ -20,14 +20,29 @@ import (
|
|||
var defaultMapCodec = NewMapCodec()
|
||||
|
||||
// MapCodec is the Codec used for map values.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// MapCodec registered.
|
||||
type MapCodec struct {
|
||||
// DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination
|
||||
// value passed to Decode before unmarshaling BSON documents into them.
|
||||
//
|
||||
// Deprecated: Use bson.Decoder.ZeroMaps instead.
|
||||
DecodeZerosMap bool
|
||||
|
||||
// EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of
|
||||
// BSON null.
|
||||
//
|
||||
// Deprecated: Use bson.Encoder.NilMapAsEmpty instead.
|
||||
EncodeNilAsEmpty bool
|
||||
|
||||
// EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name
|
||||
// strings using fmt.Sprintf() instead of the default string conversion logic.
|
||||
//
|
||||
// Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt instead.
|
||||
EncodeKeysWithStringer bool
|
||||
}
|
||||
|
||||
var _ ValueCodec = &MapCodec{}
|
||||
|
||||
// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key.
|
||||
// This applies to types used as map keys and is similar to encoding.TextMarshaler.
|
||||
type KeyMarshaler interface {
|
||||
|
|
@ -45,6 +60,9 @@ type KeyUnmarshaler interface {
|
|||
}
|
||||
|
||||
// NewMapCodec returns a MapCodec with options opts.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// MapCodec registered.
|
||||
func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec {
|
||||
mapOpt := bsonoptions.MergeMapCodecOptions(opts...)
|
||||
|
||||
|
|
@ -67,7 +85,7 @@ func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val ref
|
|||
return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
|
||||
}
|
||||
|
||||
if val.IsNil() && !mc.EncodeNilAsEmpty {
|
||||
if val.IsNil() && !mc.EncodeNilAsEmpty && !ec.nilMapAsEmpty {
|
||||
// If we have a nil map but we can't WriteNull, that means we're probably trying to encode
|
||||
// to a TopLevel document. We can't currently tell if this is what actually happened, but if
|
||||
// there's a deeper underlying problem, the error will also be returned from WriteDocument,
|
||||
|
|
@ -100,7 +118,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v
|
|||
|
||||
keys := val.MapKeys()
|
||||
for _, key := range keys {
|
||||
keyStr, err := mc.encodeKey(key)
|
||||
keyStr, err := mc.encodeKey(key, ec.stringifyMapKeysWithFmt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -163,7 +181,7 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref
|
|||
val.Set(reflect.MakeMap(val.Type()))
|
||||
}
|
||||
|
||||
if val.Len() > 0 && mc.DecodeZerosMap {
|
||||
if val.Len() > 0 && (mc.DecodeZerosMap || dc.zeroMaps) {
|
||||
clearMap(val)
|
||||
}
|
||||
|
||||
|
|
@ -211,8 +229,8 @@ func clearMap(m reflect.Value) {
|
|||
}
|
||||
}
|
||||
|
||||
func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) {
|
||||
if mc.EncodeKeysWithStringer {
|
||||
func (mc *MapCodec) encodeKey(val reflect.Value, encodeKeysWithStringer bool) (string, error) {
|
||||
if mc.EncodeKeysWithStringer || encodeKeysWithStringer {
|
||||
return fmt.Sprint(val), nil
|
||||
}
|
||||
|
||||
|
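The MapCodec options deprecated in the hunks above map onto encoder-level switches. A hedged sketch of what that replacement looks like, assuming the bson.Encoder.NilMapAsEmpty and bson.Encoder.StringifyMapKeysWithFmt options named in the deprecation notes are available in the vendored driver version; the buffer plumbing and field names are illustrative.

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	buf := new(bytes.Buffer)
	vw, err := bsonrw.NewBSONValueWriter(buf)
	if err != nil {
		panic(err)
	}
	enc, err := bson.NewEncoder(vw)
	if err != nil {
		panic(err)
	}
	// Replacements for MapCodec.EncodeNilAsEmpty and MapCodec.EncodeKeysWithStringer.
	enc.NilMapAsEmpty()
	enc.StringifyMapKeysWithFmt()

	type doc struct {
		Tags map[string]string `bson:"tags"`
	}
	// The nil map is written as an empty embedded document instead of BSON null.
	if err := enc.Encode(doc{}); err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(buf.Bytes()))
}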
|
|
|||
|
|
@ -8,7 +8,6 @@ package bsoncodec
|
|||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"go.mongodb.org/mongo-driver/bson/bsonrw"
|
||||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
|
|
@ -18,18 +17,20 @@ var _ ValueEncoder = &PointerCodec{}
|
|||
var _ ValueDecoder = &PointerCodec{}
|
||||
|
||||
// PointerCodec is the Codec used for pointers.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// PointerCodec registered.
|
||||
type PointerCodec struct {
|
||||
ecache map[reflect.Type]ValueEncoder
|
||||
dcache map[reflect.Type]ValueDecoder
|
||||
l sync.RWMutex
|
||||
ecache typeEncoderCache
|
||||
dcache typeDecoderCache
|
||||
}
|
||||
|
||||
// NewPointerCodec returns a PointerCodec that has been initialized.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// PointerCodec registered.
|
||||
func NewPointerCodec() *PointerCodec {
|
||||
return &PointerCodec{
|
||||
ecache: make(map[reflect.Type]ValueEncoder),
|
||||
dcache: make(map[reflect.Type]ValueDecoder),
|
||||
}
|
||||
return &PointerCodec{}
|
||||
}
|
||||
|
||||
// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil
|
||||
|
|
@ -46,24 +47,19 @@ func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val
|
|||
return vw.WriteNull()
|
||||
}
|
||||
|
||||
pc.l.RLock()
|
||||
enc, ok := pc.ecache[val.Type()]
|
||||
pc.l.RUnlock()
|
||||
if ok {
|
||||
if enc == nil {
|
||||
return ErrNoEncoder{Type: val.Type()}
|
||||
typ := val.Type()
|
||||
if v, ok := pc.ecache.Load(typ); ok {
|
||||
if v == nil {
|
||||
return ErrNoEncoder{Type: typ}
|
||||
}
|
||||
return enc.EncodeValue(ec, vw, val.Elem())
|
||||
return v.EncodeValue(ec, vw, val.Elem())
|
||||
}
|
||||
|
||||
enc, err := ec.LookupEncoder(val.Type().Elem())
|
||||
pc.l.Lock()
|
||||
pc.ecache[val.Type()] = enc
|
||||
pc.l.Unlock()
|
||||
// TODO(charlie): handle concurrent requests for the same type
|
||||
enc, err := ec.LookupEncoder(typ.Elem())
|
||||
enc = pc.ecache.LoadOrStore(typ, enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return enc.EncodeValue(ec, vw, val.Elem())
|
||||
}
|
||||
|
||||
|
|
@ -74,36 +70,31 @@ func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val
|
|||
return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
|
||||
}
|
||||
|
||||
typ := val.Type()
|
||||
if vr.Type() == bsontype.Null {
|
||||
val.Set(reflect.Zero(val.Type()))
|
||||
val.Set(reflect.Zero(typ))
|
||||
return vr.ReadNull()
|
||||
}
|
||||
if vr.Type() == bsontype.Undefined {
|
||||
val.Set(reflect.Zero(val.Type()))
|
||||
val.Set(reflect.Zero(typ))
|
||||
return vr.ReadUndefined()
|
||||
}
|
||||
|
||||
if val.IsNil() {
|
||||
val.Set(reflect.New(val.Type().Elem()))
|
||||
val.Set(reflect.New(typ.Elem()))
|
||||
}
|
||||
|
||||
pc.l.RLock()
|
||||
dec, ok := pc.dcache[val.Type()]
|
||||
pc.l.RUnlock()
|
||||
if ok {
|
||||
if dec == nil {
|
||||
return ErrNoDecoder{Type: val.Type()}
|
||||
if v, ok := pc.dcache.Load(typ); ok {
|
||||
if v == nil {
|
||||
return ErrNoDecoder{Type: typ}
|
||||
}
|
||||
return dec.DecodeValue(dc, vr, val.Elem())
|
||||
return v.DecodeValue(dc, vr, val.Elem())
|
||||
}
|
||||
|
||||
dec, err := dc.LookupDecoder(val.Type().Elem())
|
||||
pc.l.Lock()
|
||||
pc.dcache[val.Type()] = dec
|
||||
pc.l.Unlock()
|
||||
// TODO(charlie): handle concurrent requests for the same type
|
||||
dec, err := dc.LookupDecoder(typ.Elem())
|
||||
dec = pc.dcache.LoadOrStore(typ, dec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dec.DecodeValue(dc, vr, val.Elem())
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,12 +16,18 @@ import (
|
|||
)
|
||||
|
||||
// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder.
|
||||
//
|
||||
// Deprecated: ErrNilType will not be supported in Go Driver 2.0.
|
||||
var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>")
|
||||
|
||||
// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder.
|
||||
//
|
||||
// Deprecated: ErrNotPointer will not be supported in Go Driver 2.0.
|
||||
var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder")
|
||||
|
||||
// ErrNoEncoder is returned when there wasn't an encoder available for a type.
|
||||
//
|
||||
// Deprecated: ErrNoEncoder will not be supported in Go Driver 2.0.
|
||||
type ErrNoEncoder struct {
|
||||
Type reflect.Type
|
||||
}
|
||||
|
|
@ -34,6 +40,8 @@ func (ene ErrNoEncoder) Error() string {
|
|||
}
|
||||
|
||||
// ErrNoDecoder is returned when there wasn't a decoder available for a type.
|
||||
//
|
||||
// Deprecated: ErrNoDecoder will not be supported in Go Driver 2.0.
|
||||
type ErrNoDecoder struct {
|
||||
Type reflect.Type
|
||||
}
|
||||
|
|
@ -43,6 +51,8 @@ func (end ErrNoDecoder) Error() string {
|
|||
}
|
||||
|
||||
// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type.
|
||||
//
|
||||
// Deprecated: ErrNoTypeMapEntry will not be supported in Go Driver 2.0.
|
||||
type ErrNoTypeMapEntry struct {
|
||||
Type bsontype.Type
|
||||
}
|
||||
|
|
@ -52,63 +62,30 @@ func (entme ErrNoTypeMapEntry) Error() string {
|
|||
}
|
||||
|
||||
// ErrNotInterface is returned when the provided type is not an interface.
|
||||
//
|
||||
// Deprecated: ErrNotInterface will not be supported in Go Driver 2.0.
|
||||
var ErrNotInterface = errors.New("The provided type is not an interface")
|
||||
|
||||
// A RegistryBuilder is used to build a Registry. This type is not goroutine
|
||||
// safe.
|
||||
//
|
||||
// Deprecated: Use Registry instead.
|
||||
type RegistryBuilder struct {
|
||||
typeEncoders map[reflect.Type]ValueEncoder
|
||||
interfaceEncoders []interfaceValueEncoder
|
||||
kindEncoders map[reflect.Kind]ValueEncoder
|
||||
|
||||
typeDecoders map[reflect.Type]ValueDecoder
|
||||
interfaceDecoders []interfaceValueDecoder
|
||||
kindDecoders map[reflect.Kind]ValueDecoder
|
||||
|
||||
typeMap map[bsontype.Type]reflect.Type
|
||||
}
|
||||
|
||||
// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main
|
||||
// typed passed around and Encoders and Decoders are constructed from it.
|
||||
type Registry struct {
|
||||
typeEncoders map[reflect.Type]ValueEncoder
|
||||
typeDecoders map[reflect.Type]ValueDecoder
|
||||
|
||||
interfaceEncoders []interfaceValueEncoder
|
||||
interfaceDecoders []interfaceValueDecoder
|
||||
|
||||
kindEncoders map[reflect.Kind]ValueEncoder
|
||||
kindDecoders map[reflect.Kind]ValueDecoder
|
||||
|
||||
typeMap map[bsontype.Type]reflect.Type
|
||||
|
||||
mu sync.RWMutex
|
||||
registry *Registry
|
||||
}
|
||||
|
||||
// NewRegistryBuilder creates a new empty RegistryBuilder.
|
||||
//
|
||||
// Deprecated: Use NewRegistry instead.
|
||||
func NewRegistryBuilder() *RegistryBuilder {
|
||||
return &RegistryBuilder{
|
||||
typeEncoders: make(map[reflect.Type]ValueEncoder),
|
||||
typeDecoders: make(map[reflect.Type]ValueDecoder),
|
||||
|
||||
interfaceEncoders: make([]interfaceValueEncoder, 0),
|
||||
interfaceDecoders: make([]interfaceValueDecoder, 0),
|
||||
|
||||
kindEncoders: make(map[reflect.Kind]ValueEncoder),
|
||||
kindDecoders: make(map[reflect.Kind]ValueDecoder),
|
||||
|
||||
typeMap: make(map[bsontype.Type]reflect.Type),
|
||||
registry: NewRegistry(),
|
||||
}
|
||||
}
|
||||
|
||||
func buildDefaultRegistry() *Registry {
|
||||
rb := NewRegistryBuilder()
|
||||
defaultValueEncoders.RegisterDefaultEncoders(rb)
|
||||
defaultValueDecoders.RegisterDefaultDecoders(rb)
|
||||
return rb.Build()
|
||||
}
|
||||
|
||||
// RegisterCodec will register the provided ValueCodec for the provided type.
|
||||
//
|
||||
// Deprecated: Use Registry.RegisterTypeEncoder and Registry.RegisterTypeDecoder instead.
|
||||
func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder {
|
||||
rb.RegisterTypeEncoder(t, codec)
|
||||
rb.RegisterTypeDecoder(t, codec)
|
||||
|
|
@ -120,31 +97,22 @@ func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *Regi
|
|||
// The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered
|
||||
// for a pointer to that type.
|
||||
//
|
||||
// If the given type is an interface, the encoder will be called when marshalling a type that is that interface. It
|
||||
// will not be called when marshalling a non-interface type that implements the interface.
|
||||
// If the given type is an interface, the encoder will be called when marshaling a type that is that interface. It
|
||||
// will not be called when marshaling a non-interface type that implements the interface.
|
||||
//
|
||||
// Deprecated: Use Registry.RegisterTypeEncoder instead.
|
||||
func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
|
||||
rb.typeEncoders[t] = enc
|
||||
rb.registry.RegisterTypeEncoder(t, enc)
|
||||
return rb
|
||||
}
|
||||
|
||||
// RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when
|
||||
// marshalling a type if the type implements t or a pointer to the type implements t. If the provided type is not
|
||||
// marshaling a type if the type implements t or a pointer to the type implements t. If the provided type is not
|
||||
// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
|
||||
//
|
||||
// Deprecated: Use Registry.RegisterInterfaceEncoder instead.
|
||||
func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
|
||||
if t.Kind() != reflect.Interface {
|
||||
panicStr := fmt.Sprintf("RegisterHookEncoder expects a type with kind reflect.Interface, "+
|
||||
"got type %s with kind %s", t, t.Kind())
|
||||
panic(panicStr)
|
||||
}
|
||||
|
||||
for idx, encoder := range rb.interfaceEncoders {
|
||||
if encoder.i == t {
|
||||
rb.interfaceEncoders[idx].ve = enc
|
||||
return rb
|
||||
}
|
||||
}
|
||||
|
||||
rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc})
|
||||
rb.registry.RegisterInterfaceEncoder(t, enc)
|
||||
return rb
|
||||
}
|
||||
|
||||
|
|
@ -153,97 +121,78 @@ func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder)
|
|||
// The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered
|
||||
// for a pointer to that type.
|
||||
//
|
||||
// If the given type is an interface, the decoder will be called when unmarshalling into a type that is that interface.
|
||||
// It will not be called when unmarshalling into a non-interface type that implements the interface.
|
||||
// If the given type is an interface, the decoder will be called when unmarshaling into a type that is that interface.
|
||||
// It will not be called when unmarshaling into a non-interface type that implements the interface.
|
||||
//
|
||||
// Deprecated: Use Registry.RegisterTypeDecoder instead.
|
||||
func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
|
||||
rb.typeDecoders[t] = dec
|
||||
rb.registry.RegisterTypeDecoder(t, dec)
|
||||
return rb
|
||||
}
|
||||
|
||||
// RegisterHookDecoder will register an decoder for the provided interface type t. This decoder will be called when
|
||||
// unmarshalling into a type if the type implements t or a pointer to the type implements t. If the provided type is not
|
||||
// unmarshaling into a type if the type implements t or a pointer to the type implements t. If the provided type is not
|
||||
// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
|
||||
//
|
||||
// Deprecated: Use Registry.RegisterInterfaceDecoder instead.
|
||||
func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
|
||||
if t.Kind() != reflect.Interface {
|
||||
panicStr := fmt.Sprintf("RegisterHookDecoder expects a type with kind reflect.Interface, "+
|
||||
"got type %s with kind %s", t, t.Kind())
|
||||
panic(panicStr)
|
||||
}
|
||||
|
||||
for idx, decoder := range rb.interfaceDecoders {
|
||||
if decoder.i == t {
|
||||
rb.interfaceDecoders[idx].vd = dec
|
||||
return rb
|
||||
}
|
||||
}
|
||||
|
||||
rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec})
|
||||
rb.registry.RegisterInterfaceDecoder(t, dec)
|
||||
return rb
|
||||
}
|
||||
|
||||
// RegisterEncoder registers the provided type and encoder pair.
|
||||
//
|
||||
// Deprecated: Use RegisterTypeEncoder or RegisterHookEncoder instead.
|
||||
// Deprecated: Use Registry.RegisterTypeEncoder or Registry.RegisterInterfaceEncoder instead.
|
||||
func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
|
||||
if t == tEmpty {
|
||||
rb.typeEncoders[t] = enc
|
||||
rb.registry.RegisterTypeEncoder(t, enc)
|
||||
return rb
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.Interface:
|
||||
for idx, ir := range rb.interfaceEncoders {
|
||||
if ir.i == t {
|
||||
rb.interfaceEncoders[idx].ve = enc
|
||||
return rb
|
||||
}
|
||||
}
|
||||
|
||||
rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc})
|
||||
rb.registry.RegisterInterfaceEncoder(t, enc)
|
||||
default:
|
||||
rb.typeEncoders[t] = enc
|
||||
rb.registry.RegisterTypeEncoder(t, enc)
|
||||
}
|
||||
return rb
|
||||
}
|
||||
|
||||
// RegisterDecoder registers the provided type and decoder pair.
|
||||
//
|
||||
// Deprecated: Use RegisterTypeDecoder or RegisterHookDecoder instead.
|
||||
// Deprecated: Use Registry.RegisterTypeDecoder or Registry.RegisterInterfaceDecoder instead.
|
||||
func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
|
||||
if t == nil {
|
||||
rb.typeDecoders[nil] = dec
|
||||
rb.registry.RegisterTypeDecoder(t, dec)
|
||||
return rb
|
||||
}
|
||||
if t == tEmpty {
|
||||
rb.typeDecoders[t] = dec
|
||||
rb.registry.RegisterTypeDecoder(t, dec)
|
||||
return rb
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.Interface:
|
||||
for idx, ir := range rb.interfaceDecoders {
|
||||
if ir.i == t {
|
||||
rb.interfaceDecoders[idx].vd = dec
|
||||
return rb
|
||||
}
|
||||
}
|
||||
|
||||
rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec})
|
||||
rb.registry.RegisterInterfaceDecoder(t, dec)
|
||||
default:
|
||||
rb.typeDecoders[t] = dec
|
||||
rb.registry.RegisterTypeDecoder(t, dec)
|
||||
}
|
||||
return rb
|
||||
}
|
||||
|
||||
// RegisterDefaultEncoder will registr the provided ValueEncoder to the provided
|
||||
// RegisterDefaultEncoder will register the provided ValueEncoder to the provided
|
||||
// kind.
|
||||
//
|
||||
// Deprecated: Use Registry.RegisterKindEncoder instead.
|
||||
func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder {
|
||||
rb.kindEncoders[kind] = enc
|
||||
rb.registry.RegisterKindEncoder(kind, enc)
|
||||
return rb
|
||||
}
|
||||
|
||||
// RegisterDefaultDecoder will register the provided ValueDecoder to the
|
||||
// provided kind.
|
||||
//
|
||||
// Deprecated: Use Registry.RegisterKindDecoder instead.
|
||||
func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder {
|
||||
rb.kindDecoders[kind] = dec
|
||||
rb.registry.RegisterKindDecoder(kind, dec)
|
||||
return rb
|
||||
}
|
||||
|
||||
|
|
@ -256,120 +205,233 @@ func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDe
|
|||
// to decode to bson.Raw, use the following code:
|
||||
//
|
||||
// rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))
|
||||
//
|
||||
// Deprecated: Use Registry.RegisterTypeMapEntry instead.
|
||||
func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder {
|
||||
rb.typeMap[bt] = rt
|
||||
rb.registry.RegisterTypeMapEntry(bt, rt)
|
||||
return rb
|
||||
}
|
||||
|
||||
// Build creates a Registry from the current state of this RegistryBuilder.
|
||||
//
|
||||
// Deprecated: Use NewRegistry instead.
|
||||
func (rb *RegistryBuilder) Build() *Registry {
|
||||
registry := new(Registry)
|
||||
|
||||
registry.typeEncoders = make(map[reflect.Type]ValueEncoder)
|
||||
for t, enc := range rb.typeEncoders {
|
||||
registry.typeEncoders[t] = enc
|
||||
r := &Registry{
|
||||
interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...),
|
||||
interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...),
|
||||
typeEncoders: rb.registry.typeEncoders.Clone(),
|
||||
typeDecoders: rb.registry.typeDecoders.Clone(),
|
||||
kindEncoders: rb.registry.kindEncoders.Clone(),
|
||||
kindDecoders: rb.registry.kindDecoders.Clone(),
|
||||
}
|
||||
|
||||
registry.typeDecoders = make(map[reflect.Type]ValueDecoder)
|
||||
for t, dec := range rb.typeDecoders {
|
||||
registry.typeDecoders[t] = dec
|
||||
rb.registry.typeMap.Range(func(k, v interface{}) bool {
|
||||
if k != nil && v != nil {
|
||||
r.typeMap.Store(k, v)
|
||||
}
|
||||
|
||||
registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.interfaceEncoders))
|
||||
copy(registry.interfaceEncoders, rb.interfaceEncoders)
|
||||
|
||||
registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.interfaceDecoders))
|
||||
copy(registry.interfaceDecoders, rb.interfaceDecoders)
|
||||
|
||||
registry.kindEncoders = make(map[reflect.Kind]ValueEncoder)
|
||||
for kind, enc := range rb.kindEncoders {
|
||||
registry.kindEncoders[kind] = enc
|
||||
}
|
||||
|
||||
registry.kindDecoders = make(map[reflect.Kind]ValueDecoder)
|
||||
for kind, dec := range rb.kindDecoders {
|
||||
registry.kindDecoders[kind] = dec
|
||||
}
|
||||
|
||||
registry.typeMap = make(map[bsontype.Type]reflect.Type)
|
||||
for bt, rt := range rb.typeMap {
|
||||
registry.typeMap[bt] = rt
|
||||
}
|
||||
|
||||
return registry
|
||||
return true
|
||||
})
|
||||
return r
|
||||
}
|
||||
|
||||
// LookupEncoder inspects the registry for an encoder for the given type. The lookup precedence works as follows:
|
||||
// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main
|
||||
// typed passed around and Encoders and Decoders are constructed from it.
|
||||
type Registry struct {
|
||||
interfaceEncoders []interfaceValueEncoder
|
||||
interfaceDecoders []interfaceValueDecoder
|
||||
typeEncoders *typeEncoderCache
|
||||
typeDecoders *typeDecoderCache
|
||||
kindEncoders *kindEncoderCache
|
||||
kindDecoders *kindDecoderCache
|
||||
typeMap sync.Map // map[bsontype.Type]reflect.Type
|
||||
}
|
||||
|
||||
// NewRegistry creates a new empty Registry.
|
||||
func NewRegistry() *Registry {
|
||||
return &Registry{
|
||||
typeEncoders: new(typeEncoderCache),
|
||||
typeDecoders: new(typeDecoderCache),
|
||||
kindEncoders: new(kindEncoderCache),
|
||||
kindDecoders: new(kindDecoderCache),
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterTypeEncoder registers the provided ValueEncoder for the provided type.
|
||||
//
|
||||
// 1. An encoder registered for the exact type. If the given type represents an interface, an encoder registered using
|
||||
// RegisterTypeEncoder for the interface will be selected.
|
||||
// The type will be used as provided, so an encoder can be registered for a type and a different
|
||||
// encoder can be registered for a pointer to that type.
|
||||
//
|
||||
// 2. An encoder registered using RegisterHookEncoder for an interface implemented by the type or by a pointer to the
|
||||
// type.
|
||||
// If the given type is an interface, the encoder will be called when marshaling a type that is
|
||||
// that interface. It will not be called when marshaling a non-interface type that implements the
|
||||
// interface. To get the latter behavior, call RegisterHookEncoder instead.
|
||||
//
|
||||
// 3. An encoder registered for the reflect.Kind of the value.
|
||||
// RegisterTypeEncoder should not be called concurrently with any other Registry method.
|
||||
func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) {
|
||||
r.typeEncoders.Store(valueType, enc)
|
||||
}
|
||||
|
||||
// RegisterTypeDecoder registers the provided ValueDecoder for the provided type.
|
||||
//
|
||||
// If no encoder is found, an error of type ErrNoEncoder is returned.
|
||||
func (r *Registry) LookupEncoder(t reflect.Type) (ValueEncoder, error) {
|
||||
encodererr := ErrNoEncoder{Type: t}
|
||||
r.mu.RLock()
|
||||
enc, found := r.lookupTypeEncoder(t)
|
||||
r.mu.RUnlock()
|
||||
// The type will be used as provided, so a decoder can be registered for a type and a different
|
||||
// decoder can be registered for a pointer to that type.
|
||||
//
|
||||
// If the given type is an interface, the decoder will be called when unmarshaling into a type that
|
||||
// is that interface. It will not be called when unmarshaling into a non-interface type that
|
||||
// implements the interface. To get the latter behavior, call RegisterHookDecoder instead.
|
||||
//
|
||||
// RegisterTypeDecoder should not be called concurrently with any other Registry method.
|
||||
func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) {
|
||||
r.typeDecoders.Store(valueType, dec)
|
||||
}
|
||||
|
||||
// RegisterKindEncoder registers the provided ValueEncoder for the provided kind.
|
||||
//
|
||||
// Use RegisterKindEncoder to register an encoder for any type with the same underlying kind. For
|
||||
// example, consider the type MyInt defined as
|
||||
//
|
||||
// type MyInt int32
|
||||
//
|
||||
// To define an encoder for MyInt and int32, use RegisterKindEncoder like
|
||||
//
|
||||
// reg.RegisterKindEncoder(reflect.Int32, myEncoder)
|
||||
//
|
||||
// RegisterKindEncoder should not be called concurrently with any other Registry method.
|
||||
func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) {
|
||||
r.kindEncoders.Store(kind, enc)
|
||||
}
|
||||
|
||||
// RegisterKindDecoder registers the provided ValueDecoder for the provided kind.
|
||||
//
|
||||
// Use RegisterKindDecoder to register a decoder for any type with the same underlying kind. For
|
||||
// example, consider the type MyInt defined as
|
||||
//
|
||||
// type MyInt int32
|
||||
//
|
||||
// To define an decoder for MyInt and int32, use RegisterKindDecoder like
|
||||
//
|
||||
// reg.RegisterKindDecoder(reflect.Int32, myDecoder)
|
||||
//
|
||||
// RegisterKindDecoder should not be called concurrently with any other Registry method.
|
||||
func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) {
|
||||
r.kindDecoders.Store(kind, dec)
|
||||
}
|
||||
|
||||
// RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will
|
||||
// be called when marshaling a type if the type implements iface or a pointer to the type
|
||||
// implements iface. If the provided type is not an interface
|
||||
// (i.e. iface.Kind() != reflect.Interface), this method will panic.
|
||||
//
|
||||
// RegisterInterfaceEncoder should not be called concurrently with any other Registry method.
|
||||
func (r *Registry) RegisterInterfaceEncoder(iface reflect.Type, enc ValueEncoder) {
|
||||
if iface.Kind() != reflect.Interface {
|
||||
panicStr := fmt.Errorf("RegisterInterfaceEncoder expects a type with kind reflect.Interface, "+
|
||||
"got type %s with kind %s", iface, iface.Kind())
|
||||
panic(panicStr)
|
||||
}
|
||||
|
||||
for idx, encoder := range r.interfaceEncoders {
|
||||
if encoder.i == iface {
|
||||
r.interfaceEncoders[idx].ve = enc
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
r.interfaceEncoders = append(r.interfaceEncoders, interfaceValueEncoder{i: iface, ve: enc})
|
||||
}
|
||||
|
||||
// RegisterInterfaceDecoder registers an decoder for the provided interface type iface. This decoder will
|
||||
// be called when unmarshaling into a type if the type implements iface or a pointer to the type
|
||||
// implements iface. If the provided type is not an interface (i.e. iface.Kind() != reflect.Interface),
|
||||
// this method will panic.
|
||||
//
|
||||
// RegisterInterfaceDecoder should not be called concurrently with any other Registry method.
|
||||
func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder) {
|
||||
if iface.Kind() != reflect.Interface {
|
||||
panicStr := fmt.Errorf("RegisterInterfaceDecoder expects a type with kind reflect.Interface, "+
|
||||
"got type %s with kind %s", iface, iface.Kind())
|
||||
panic(panicStr)
|
||||
}
|
||||
|
||||
for idx, decoder := range r.interfaceDecoders {
|
||||
if decoder.i == iface {
|
||||
r.interfaceDecoders[idx].vd = dec
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
r.interfaceDecoders = append(r.interfaceDecoders, interfaceValueDecoder{i: iface, vd: dec})
|
||||
}
|
||||
|
||||
// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
|
||||
// mapping is decoding situations where an empty interface is used and a default type needs to be
|
||||
// created and decoded into.
|
||||
//
|
||||
// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON
|
||||
// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents
|
||||
// to decode to bson.Raw, use the following code:
|
||||
//
|
||||
// reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))
|
||||
func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) {
|
||||
r.typeMap.Store(bt, rt)
|
||||
}
|
||||
|
||||
// LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup
|
||||
// order:
|
||||
//
|
||||
// 1. An encoder registered for the exact type. If the given type is an interface, an encoder
|
||||
// registered using RegisterTypeEncoder for that interface will be selected.
|
||||
//
|
||||
// 2. An encoder registered using RegisterInterfaceEncoder for an interface implemented by the type
|
||||
// or by a pointer to the type.
|
||||
//
|
||||
// 3. An encoder registered using RegisterKindEncoder for the kind of value.
|
||||
//
|
||||
// If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for
|
||||
// concurrent use by multiple goroutines after all codecs and encoders are registered.
|
||||
func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) {
|
||||
if valueType == nil {
|
||||
return nil, ErrNoEncoder{Type: valueType}
|
||||
}
|
||||
enc, found := r.lookupTypeEncoder(valueType)
|
||||
if found {
|
||||
if enc == nil {
|
||||
return nil, ErrNoEncoder{Type: t}
|
||||
return nil, ErrNoEncoder{Type: valueType}
|
||||
}
|
||||
return enc, nil
|
||||
}
|
||||
|
||||
enc, found = r.lookupInterfaceEncoder(t, true)
|
||||
enc, found = r.lookupInterfaceEncoder(valueType, true)
|
||||
if found {
|
||||
r.mu.Lock()
|
||||
r.typeEncoders[t] = enc
|
||||
r.mu.Unlock()
|
||||
return enc, nil
|
||||
return r.typeEncoders.LoadOrStore(valueType, enc), nil
|
||||
}
|
||||
|
||||
if t == nil {
|
||||
r.mu.Lock()
|
||||
r.typeEncoders[t] = nil
|
||||
r.mu.Unlock()
|
||||
return nil, encodererr
|
||||
if v, ok := r.kindEncoders.Load(valueType.Kind()); ok {
|
||||
return r.storeTypeEncoder(valueType, v), nil
|
||||
}
|
||||
|
||||
enc, found = r.kindEncoders[t.Kind()]
|
||||
if !found {
|
||||
r.mu.Lock()
|
||||
r.typeEncoders[t] = nil
|
||||
r.mu.Unlock()
|
||||
return nil, encodererr
|
||||
}
|
||||
|
||||
r.mu.Lock()
|
||||
r.typeEncoders[t] = enc
|
||||
r.mu.Unlock()
|
||||
return enc, nil
|
||||
return nil, ErrNoEncoder{Type: valueType}
|
||||
}
|
||||
|
||||
func (r *Registry) lookupTypeEncoder(t reflect.Type) (ValueEncoder, bool) {
|
||||
enc, found := r.typeEncoders[t]
|
||||
return enc, found
|
||||
func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder {
|
||||
return r.typeEncoders.LoadOrStore(rt, enc)
|
||||
}
|
||||
|
||||
func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (ValueEncoder, bool) {
|
||||
if t == nil {
|
||||
func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) {
|
||||
return r.typeEncoders.Load(rt)
|
||||
}
|
||||
|
||||
func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) {
|
||||
if valueType == nil {
|
||||
return nil, false
|
||||
}
|
||||
for _, ienc := range r.interfaceEncoders {
|
||||
if t.Implements(ienc.i) {
|
||||
if valueType.Implements(ienc.i) {
|
||||
return ienc.ve, true
|
||||
}
|
||||
if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(ienc.i) {
|
||||
// if *t implements an interface, this will catch if t implements an interface further ahead
|
||||
// in interfaceEncoders
|
||||
defaultEnc, found := r.lookupInterfaceEncoder(t, false)
|
||||
if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(ienc.i) {
|
||||
// if *t implements an interface, this will catch if t implements an interface further
|
||||
// ahead in interfaceEncoders
|
||||
defaultEnc, found := r.lookupInterfaceEncoder(valueType, false)
|
||||
if !found {
|
||||
defaultEnc = r.kindEncoders[t.Kind()]
|
||||
defaultEnc, _ = r.kindEncoders.Load(valueType.Kind())
|
||||
}
|
||||
return newCondAddrEncoder(ienc.ve, defaultEnc), true
|
||||
}
|
||||
|
|
@ -377,70 +439,61 @@ func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (Value
|
|||
return nil, false
|
||||
}
|
||||
|
||||
// LookupDecoder inspects the registry for an decoder for the given type. The lookup precedence works as follows:
|
||||
// LookupDecoder returns the first matching decoder in the Registry. It uses the following lookup
|
||||
// order:
|
||||
//
|
||||
// 1. A decoder registered for the exact type. If the given type represents an interface, a decoder registered using
|
||||
// RegisterTypeDecoder for the interface will be selected.
|
||||
// 1. A decoder registered for the exact type. If the given type is an interface, a decoder
|
||||
// registered using RegisterTypeDecoder for that interface will be selected.
|
||||
//
|
||||
// 2. A decoder registered using RegisterHookDecoder for an interface implemented by the type or by a pointer to the
|
||||
// type.
|
||||
// 2. A decoder registered using RegisterInterfaceDecoder for an interface implemented by the type or by
|
||||
// a pointer to the type.
|
||||
//
|
||||
// 3. A decoder registered for the reflect.Kind of the value.
|
||||
// 3. A decoder registered using RegisterKindDecoder for the kind of value.
|
||||
//
|
||||
// If no decoder is found, an error of type ErrNoDecoder is returned.
|
||||
func (r *Registry) LookupDecoder(t reflect.Type) (ValueDecoder, error) {
|
||||
if t == nil {
|
||||
// If no decoder is found, an error of type ErrNoDecoder is returned. LookupDecoder is safe for
|
||||
// concurrent use by multiple goroutines after all codecs and decoders are registered.
|
||||
func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) {
|
||||
if valueType == nil {
|
||||
return nil, ErrNilType
|
||||
}
|
||||
decodererr := ErrNoDecoder{Type: t}
|
||||
r.mu.RLock()
|
||||
dec, found := r.lookupTypeDecoder(t)
|
||||
r.mu.RUnlock()
|
||||
dec, found := r.lookupTypeDecoder(valueType)
|
||||
if found {
|
||||
if dec == nil {
|
||||
return nil, ErrNoDecoder{Type: t}
|
||||
return nil, ErrNoDecoder{Type: valueType}
|
||||
}
|
||||
return dec, nil
|
||||
}
|
||||
|
||||
dec, found = r.lookupInterfaceDecoder(t, true)
|
||||
dec, found = r.lookupInterfaceDecoder(valueType, true)
|
||||
if found {
|
||||
r.mu.Lock()
|
||||
r.typeDecoders[t] = dec
|
||||
r.mu.Unlock()
|
||||
return dec, nil
|
||||
return r.storeTypeDecoder(valueType, dec), nil
|
||||
}
|
||||
|
||||
dec, found = r.kindDecoders[t.Kind()]
|
||||
if !found {
|
||||
r.mu.Lock()
|
||||
r.typeDecoders[t] = nil
|
||||
r.mu.Unlock()
|
||||
return nil, decodererr
|
||||
if v, ok := r.kindDecoders.Load(valueType.Kind()); ok {
|
||||
return r.storeTypeDecoder(valueType, v), nil
|
||||
}
|
||||
|
||||
r.mu.Lock()
|
||||
r.typeDecoders[t] = dec
|
||||
r.mu.Unlock()
|
||||
return dec, nil
|
||||
return nil, ErrNoDecoder{Type: valueType}
|
||||
}
|
||||
|
||||
func (r *Registry) lookupTypeDecoder(t reflect.Type) (ValueDecoder, bool) {
|
||||
dec, found := r.typeDecoders[t]
|
||||
return dec, found
|
||||
func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) {
|
||||
return r.typeDecoders.Load(valueType)
|
||||
}
|
||||
|
||||
func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (ValueDecoder, bool) {
|
||||
func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder {
|
||||
return r.typeDecoders.LoadOrStore(typ, dec)
|
||||
}
|
||||
|
||||
func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) {
|
||||
for _, idec := range r.interfaceDecoders {
|
||||
if t.Implements(idec.i) {
|
||||
if valueType.Implements(idec.i) {
|
||||
return idec.vd, true
|
||||
}
|
||||
if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(idec.i) {
|
||||
// if *t implements an interface, this will catch if t implements an interface further ahead
|
||||
// in interfaceDecoders
|
||||
defaultDec, found := r.lookupInterfaceDecoder(t, false)
|
||||
if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(idec.i) {
|
||||
// if *t implements an interface, this will catch if t implements an interface further
|
||||
// ahead in interfaceDecoders
|
||||
defaultDec, found := r.lookupInterfaceDecoder(valueType, false)
|
||||
if !found {
|
||||
defaultDec = r.kindDecoders[t.Kind()]
|
||||
defaultDec, _ = r.kindDecoders.Load(valueType.Kind())
|
||||
}
|
||||
return newCondAddrDecoder(idec.vd, defaultDec), true
|
||||
}
|
||||
|
|
@ -450,12 +503,14 @@ func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (Value
|
|||
|
||||
// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON
|
||||
// type. If no type is found, ErrNoTypeMapEntry is returned.
|
||||
//
|
||||
// LookupTypeMapEntry should not be called concurrently with any other Registry method.
|
||||
func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) {
|
||||
t, ok := r.typeMap[bt]
|
||||
if !ok || t == nil {
|
||||
v, ok := r.typeMap.Load(bt)
|
||||
if v == nil || !ok {
|
||||
return nil, ErrNoTypeMapEntry{Type: bt}
|
||||
}
|
||||
return t, nil
|
||||
return v.(reflect.Type), nil
|
||||
}
|
||||
|
||||
type interfaceValueEncoder struct {
|
||||
|
|
|
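As a sketch of the registration surface shown in the registry.go hunks above, a custom encoder can be attached to the new Registry roughly as follows; the Celsius type, its encoder, and the use of bson.MarshalWithRegistry are invented for illustration and are not part of this change.

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

// Celsius is a hypothetical type used only to illustrate RegisterTypeEncoder.
type Celsius float64

var tCelsius = reflect.TypeOf(Celsius(0))

// celsiusEncodeValue writes Celsius values as strings like "21.5C".
func celsiusEncodeValue(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tCelsius {
		return bsoncodec.ValueEncoderError{Name: "celsiusEncodeValue", Types: []reflect.Type{tCelsius}, Received: val}
	}
	return vw.WriteString(fmt.Sprintf("%gC", val.Float()))
}

func main() {
	reg := bson.NewRegistry()
	reg.RegisterTypeEncoder(tCelsius, bsoncodec.ValueEncoderFunc(celsiusEncodeValue))

	raw, err := bson.MarshalWithRegistry(reg, bson.M{"temp": Celsius(21.5)})
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(raw)) // {"temp": "21.5C"}
}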
|||
|
|
@ -19,13 +19,21 @@ import (
|
|||
var defaultSliceCodec = NewSliceCodec()
|
||||
|
||||
// SliceCodec is the Codec used for slice values.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// SliceCodec registered.
|
||||
type SliceCodec struct {
|
||||
// EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of
|
||||
// BSON null.
|
||||
//
|
||||
// Deprecated: Use bson.Encoder.NilSliceAsEmpty instead.
|
||||
EncodeNilAsEmpty bool
|
||||
}
|
||||
|
||||
var _ ValueCodec = &MapCodec{}
|
||||
|
||||
// NewSliceCodec returns a MapCodec with options opts.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// SliceCodec registered.
|
||||
func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec {
|
||||
sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...)
|
||||
|
||||
|
|
@ -42,21 +50,19 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re
|
|||
return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
|
||||
}
|
||||
|
||||
if val.IsNil() && !sc.EncodeNilAsEmpty {
|
||||
if val.IsNil() && !sc.EncodeNilAsEmpty && !ec.nilSliceAsEmpty {
|
||||
return vw.WriteNull()
|
||||
}
|
||||
|
||||
// If we have a []byte we want to treat it as a binary instead of as an array.
|
||||
if val.Type().Elem() == tByte {
|
||||
var byteSlice []byte
|
||||
for idx := 0; idx < val.Len(); idx++ {
|
||||
byteSlice = append(byteSlice, val.Index(idx).Interface().(byte))
|
||||
}
|
||||
byteSlice := make([]byte, val.Len())
|
||||
reflect.Copy(reflect.ValueOf(byteSlice), val)
|
||||
return vw.WriteBinary(byteSlice)
|
||||
}
|
||||
|
||||
// If we have a []primitive.E we want to treat it as a document instead of as an array.
|
||||
if val.Type().ConvertibleTo(tD) {
|
||||
if val.Type() == tD || val.Type().ConvertibleTo(tD) {
|
||||
d := val.Convert(tD).Interface().(primitive.D)
|
||||
|
||||
dw, err := vw.WriteDocument()
|
||||
|
|
@ -145,11 +151,8 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r
|
|||
if val.IsNil() {
|
||||
val.Set(reflect.MakeSlice(val.Type(), 0, len(data)))
|
||||
}
|
||||
|
||||
val.SetLen(0)
|
||||
for _, elem := range data {
|
||||
val.Set(reflect.Append(val, reflect.ValueOf(elem)))
|
||||
}
|
||||
val.Set(reflect.AppendSlice(val, reflect.ValueOf(data)))
|
||||
return nil
|
||||
case bsontype.String:
|
||||
if sliceType := val.Type().Elem(); sliceType != tByte {
|
||||
|
|
@ -164,11 +167,8 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r
|
|||
if val.IsNil() {
|
||||
val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr)))
|
||||
}
|
||||
|
||||
val.SetLen(0)
|
||||
for _, elem := range byteStr {
|
||||
val.Set(reflect.Append(val, reflect.ValueOf(elem)))
|
||||
}
|
||||
val.Set(reflect.AppendSlice(val, reflect.ValueOf(byteStr)))
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("cannot decode %v into a slice", vrType)
|
||||
|
|
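The []byte special case noted in the slice hunk above ("treat it as a binary instead of as an array") is visible from outside the codec; a small sketch, with the field names made up for illustration.

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	// A []byte value is encoded as a BSON binary...
	rawBin, err := bson.Marshal(bson.M{"data": []byte{1, 2, 3}})
	if err != nil {
		panic(err)
	}
	// ...while any other slice is encoded as a BSON array.
	rawArr, err := bson.Marshal(bson.M{"data": []int32{1, 2, 3}})
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(rawBin).Lookup("data").Type) // binary
	fmt.Println(bson.Raw(rawArr).Lookup("data").Type) // array
}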
|
|||
|
|
@ -15,26 +15,38 @@ import (
|
|||
"go.mongodb.org/mongo-driver/bson/bsontype"
|
||||
)
|
||||
|
||||
// StringCodec is the Codec used for struct values.
|
||||
// StringCodec is the Codec used for string values.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// StringCodec registered.
|
||||
type StringCodec struct {
|
||||
// DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation.
|
||||
// If false, a string made from the raw object ID bytes will be used. Defaults to true.
|
||||
//
|
||||
// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0.
|
||||
DecodeObjectIDAsHex bool
|
||||
}
|
||||
|
||||
var (
|
||||
defaultStringCodec = NewStringCodec()
|
||||
|
||||
_ ValueCodec = defaultStringCodec
|
||||
// Assert that defaultStringCodec satisfies the typeDecoder interface, which allows it to be
|
||||
// used by collection type decoders (e.g. map, slice, etc) to set individual values in a
|
||||
// collection.
|
||||
_ typeDecoder = defaultStringCodec
|
||||
)
|
||||
|
||||
// NewStringCodec returns a StringCodec with options opts.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// StringCodec registered.
|
||||
func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec {
|
||||
stringOpt := bsonoptions.MergeStringCodecOptions(opts...)
|
||||
return &StringCodec{*stringOpt.DecodeObjectIDAsHex}
|
||||
}
|
||||
|
||||
// EncodeValue is the ValueEncoder for string types.
|
||||
func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
func (sc *StringCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
|
||||
if val.Kind() != reflect.String {
|
||||
return ValueEncoderError{
|
||||
Name: "StringEncodeValue",
|
||||
|
|
@ -46,7 +58,7 @@ func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, va
|
|||
return vw.WriteString(val.String())
|
||||
}
|
||||
|
||||
func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
func (sc *StringCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
|
||||
if t.Kind() != reflect.String {
|
||||
return emptyValue, ValueDecoderError{
|
||||
Name: "StringDecodeValue",
|
||||
|
|
@ -71,6 +83,7 @@ func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t ref
|
|||
if sc.DecodeObjectIDAsHex {
|
||||
str = oid.Hex()
|
||||
} else {
|
||||
// TODO(GODRIVER-2796): Return an error here instead of decoding to a garbled string.
|
||||
byteArray := [12]byte(oid)
|
||||
str = string(byteArray[:])
|
||||
}
|
||||
|
|
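For the DecodeObjectIDAsHex behavior described in the StringCodec hunk above, the default (hex) decoding path looks roughly like this sketch; the Doc struct is invented for illustration.

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

type Doc struct {
	ID string `bson:"_id"`
}

func main() {
	oid := primitive.NewObjectID()
	raw, err := bson.Marshal(bson.M{"_id": oid})
	if err != nil {
		panic(err)
	}

	var d Doc
	if err := bson.Unmarshal(raw, &d); err != nil {
		panic(err)
	}
	// With the default DecodeObjectIDAsHex=true, the ObjectID arrives as its
	// 24-character hex form rather than the raw 12 bytes.
	fmt.Println(d.ID == oid.Hex()) // true
}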
|
|||
|
|
@ -59,14 +59,43 @@ type Zeroer interface {
|
|||
}
|
||||
|
||||
// StructCodec is the Codec used for struct values.
|
||||
//
|
||||
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
|
||||
// StructCodec registered.
|
||||
type StructCodec struct {
|
||||
cache map[reflect.Type]*structDescription
|
||||
l sync.RWMutex
|
||||
cache sync.Map // map[reflect.Type]*structDescription
|
||||
parser StructTagParser
|
||||
|
||||
// DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the
|
||||
// destination value passed to Decode before unmarshaling BSON documents into them.
|
||||
//
|
||||
// Deprecated: Use bson.Decoder.ZeroStructs instead.
|
||||
DecodeZeroStruct bool
|
||||
|
||||
// DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the
|
||||
// destination value passed to Decode before unmarshaling BSON documents into them.
|
||||
//
|
||||
// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0.
|
||||
DecodeDeepZeroInline bool
|
||||
|
||||
// EncodeOmitDefaultStruct causes the Encoder to consider the zero value for a struct (e.g.
|
||||
// MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag
|
||||
// option is set.
|
||||
//
|
||||
// Deprecated: Use bson.Encoder.OmitZeroStruct instead.
|
||||
EncodeOmitDefaultStruct bool
|
||||
|
||||
// AllowUnexportedFields allows encoding and decoding values from un-exported struct fields.
|
||||
//
|
||||
// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be
|
||||
// supported in Go Driver 2.0.
|
||||
AllowUnexportedFields bool
|
||||
|
||||
// OverwriteDuplicatedInlinedFields, if false, causes EncodeValue to return an error if there is
|
||||
// a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The
|
||||
// default value is true.
|
||||
//
|
||||
// Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates instead.
|
||||
OverwriteDuplicatedInlinedFields bool
|
||||
}
|
||||
|
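The StructCodec options deprecated above also move to encoder-level switches. A rough sketch, assuming the bson.Encoder.OmitZeroStruct and bson.Encoder.ErrorOnInlineDuplicates options named in the deprecation notes are available in the vendored driver version; the Inner and Outer types are invented for illustration.

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

type Inner struct {
	N int `bson:"n"`
}

type Outer struct {
	Inner Inner `bson:"inner,omitempty"`
}

func main() {
	buf := new(bytes.Buffer)
	vw, err := bsonrw.NewBSONValueWriter(buf)
	if err != nil {
		panic(err)
	}
	enc, err := bson.NewEncoder(vw)
	if err != nil {
		panic(err)
	}
	// Replacements for EncodeOmitDefaultStruct and OverwriteDuplicatedInlinedFields=false.
	enc.OmitZeroStruct()
	enc.ErrorOnInlineDuplicates()

	if err := enc.Encode(Outer{}); err != nil {
		panic(err)
	}
	// The zero-valued Inner struct is treated as empty and omitted under "omitempty".
	fmt.Println(bson.Raw(buf.Bytes())) // {}
}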
||||
|
|
@ -74,6 +103,9 @@ var _ ValueEncoder = &StructCodec{}
|
|||
var _ ValueDecoder = &StructCodec{}
|
||||
|
||||
// NewStructCodec returns a StructCodec that uses p for struct tag parsing.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// StructCodec registered.
func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) {
if p == nil {
return nil, errors.New("a StructTagParser must be provided to NewStructCodec")

@ -82,7 +114,6 @@ func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions)
structOpt := bsonoptions.MergeStructCodecOptions(opts...)

codec := &StructCodec{
cache: make(map[reflect.Type]*structDescription),
parser: p,
}

@ -106,12 +137,12 @@ func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions)
}

// EncodeValue handles encoding generic struct types.
func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Kind() != reflect.Struct {
return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val}
}

sd, err := sc.describeStruct(r.Registry, val.Type())
sd, err := sc.describeStruct(ec.Registry, val.Type(), ec.useJSONStructTags, ec.errorOnInlineDuplicates)
if err != nil {
return err
}

@ -131,7 +162,7 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r
}
}

desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(r, desc.encoder, rv)
desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv)

if err != nil && err != errInvalidValue {
return err

@ -158,17 +189,17 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r

encoder := desc.encoder

var isZero bool
rvInterface := rv.Interface()
var zero bool
if cz, ok := encoder.(CodecZeroer); ok {
isZero = cz.IsTypeZero(rvInterface)
zero = cz.IsTypeZero(rv.Interface())
} else if rv.Kind() == reflect.Interface {
// sc.isZero will not treat an interface rv as an interface, so we need to check for the zero interface separately.
isZero = rv.IsNil()
// isZero will not treat an interface rv as an interface, so we need to check for the
// zero interface separately.
zero = rv.IsNil()
} else {
isZero = sc.isZero(rvInterface)
zero = isZero(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct)
}
if desc.omitEmpty && isZero {
if desc.omitEmpty && zero {
continue
}

@ -177,7 +208,17 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r
return err
}

ectx := EncodeContext{Registry: r.Registry, MinSize: desc.minSize}
ectx := EncodeContext{
Registry: ec.Registry,
MinSize: desc.minSize || ec.MinSize,
errorOnInlineDuplicates: ec.errorOnInlineDuplicates,
stringifyMapKeysWithFmt: ec.stringifyMapKeysWithFmt,
nilMapAsEmpty: ec.nilMapAsEmpty,
nilSliceAsEmpty: ec.nilSliceAsEmpty,
nilByteSliceAsEmpty: ec.nilByteSliceAsEmpty,
omitZeroStruct: ec.omitZeroStruct,
useJSONStructTags: ec.useJSONStructTags,
}
err = encoder.EncodeValue(ectx, vw2, rv)
if err != nil {
return err

@ -191,7 +232,7 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r
return exists
}

return defaultMapCodec.mapEncodeValue(r, dw, rv, collisionFn)
return defaultMapCodec.mapEncodeValue(ec, dw, rv, collisionFn)
}

return dw.WriteDocumentEnd()

@ -213,7 +254,7 @@ func newDecodeError(key string, original error) error {
// DecodeValue implements the Codec interface.
// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr.
// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared.
func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Kind() != reflect.Struct {
return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val}
}

@ -238,12 +279,12 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r
return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type())
}

sd, err := sc.describeStruct(r.Registry, val.Type())
sd, err := sc.describeStruct(dc.Registry, val.Type(), dc.useJSONStructTags, false)
if err != nil {
return err
}

if sc.DecodeZeroStruct {
if sc.DecodeZeroStruct || dc.zeroStructs {
val.Set(reflect.Zero(val.Type()))
}
if sc.DecodeDeepZeroInline && sd.inline {

@ -254,7 +295,7 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r
var inlineMap reflect.Value
if sd.inlineMap >= 0 {
inlineMap = val.Field(sd.inlineMap)
decoder, err = r.LookupDecoder(inlineMap.Type().Elem())
decoder, err = dc.LookupDecoder(inlineMap.Type().Elem())
if err != nil {
return err
}

@ -298,8 +339,8 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r
}

elem := reflect.New(inlineMap.Type().Elem()).Elem()
r.Ancestor = inlineMap.Type()
err = decoder.DecodeValue(r, vr, elem)
dc.Ancestor = inlineMap.Type()
err = decoder.DecodeValue(dc, vr, elem)
if err != nil {
return err
}

@ -326,7 +367,17 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r
}
field = field.Addr()

dctx := DecodeContext{Registry: r.Registry, Truncate: fd.truncate || r.Truncate}
dctx := DecodeContext{
Registry: dc.Registry,
Truncate: fd.truncate || dc.Truncate,
defaultDocumentType: dc.defaultDocumentType,
binaryAsSlice: dc.binaryAsSlice,
useJSONStructTags: dc.useJSONStructTags,
useLocalTimeZone: dc.useLocalTimeZone,
zeroMaps: dc.zeroMaps,
zeroStructs: dc.zeroStructs,
}

if fd.decoder == nil {
return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()})
}

@ -340,51 +391,32 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r
return nil
}

func (sc *StructCodec) isZero(i interface{}) bool {
v := reflect.ValueOf(i)

// check the value validity
if !v.IsValid() {
return true
func isZero(v reflect.Value, omitZeroStruct bool) bool {
kind := v.Kind()
if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) {
return v.Interface().(Zeroer).IsZero()
}

if z, ok := v.Interface().(Zeroer); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
return z.IsZero()
if kind == reflect.Struct {
if !omitZeroStruct {
return false
}

switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflect.Struct:
if sc.EncodeOmitDefaultStruct {
vt := v.Type()
if vt == tTime {
return v.Interface().(time.Time).IsZero()
}
for i := 0; i < v.NumField(); i++ {
if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
numField := vt.NumField()
for i := 0; i < numField; i++ {
ff := vt.Field(i)
if ff.PkgPath != "" && !ff.Anonymous {
continue // Private field
}
fld := v.Field(i)
if !sc.isZero(fld.Interface()) {
if !isZero(v.Field(i), omitZeroStruct) {
return false
}
}
return true
}
}

return false
return !v.IsValid() || v.IsZero()
}
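The rewritten isZero above drops the per-kind switch: anything that is not a struct now defers to reflect.Value.IsZero, and a struct only counts as zero when omitZeroStruct is set and every exported field is recursively zero. A minimal, self-contained sketch of that rule (it deliberately omits the Zeroer interface check and the tZeroer assertion from the vendored code):

package main

import (
	"fmt"
	"reflect"
)

// simplifiedIsZero mirrors the struct handling introduced above: non-structs
// defer to reflect's IsZero, while structs are zero only when omitZeroStruct
// is enabled and all exported fields are recursively zero.
func simplifiedIsZero(v reflect.Value, omitZeroStruct bool) bool {
	if v.Kind() == reflect.Struct {
		if !omitZeroStruct {
			return false
		}
		vt := v.Type()
		for i := 0; i < vt.NumField(); i++ {
			if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
				continue // unexported field, skip
			}
			if !simplifiedIsZero(v.Field(i), omitZeroStruct) {
				return false
			}
		}
		return true
	}
	return !v.IsValid() || v.IsZero()
}

func main() {
	type point struct{ X, Y int }
	fmt.Println(simplifiedIsZero(reflect.ValueOf(point{}), false))    // false: zero structs are kept by default
	fmt.Println(simplifiedIsZero(reflect.ValueOf(point{}), true))     // true: all exported fields are zero
	fmt.Println(simplifiedIsZero(reflect.ValueOf(point{X: 1}), true)) // false: X is non-zero
}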
type structDescription struct {

@ -435,16 +467,35 @@ func (bi byIndex) Less(i, j int) bool {
return len(bi[i].inline) < len(bi[j].inline)
}

func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) {
func (sc *StructCodec) describeStruct(
r *Registry,
t reflect.Type,
useJSONStructTags bool,
errorOnDuplicates bool,
) (*structDescription, error) {
// We need to analyze the struct, including getting the tags, collecting
// information about inlining, and create a map of the field name to the field.
sc.l.RLock()
ds, exists := sc.cache[t]
sc.l.RUnlock()
if exists {
return ds, nil
if v, ok := sc.cache.Load(t); ok {
return v.(*structDescription), nil
}
// TODO(charlie): Only describe the struct once when called
// concurrently with the same type.
ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates)
if err != nil {
return nil, err
}
if v, loaded := sc.cache.LoadOrStore(t, ds); loaded {
ds = v.(*structDescription)
}
return ds, nil
}

func (sc *StructCodec) describeStructSlow(
r *Registry,
t reflect.Type,
useJSONStructTags bool,
errorOnDuplicates bool,
) (*structDescription, error) {
numFields := t.NumField()
sd := &structDescription{
fm: make(map[string]fieldDescription, numFields),
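The struct-description cache above moves from an RWMutex-guarded map to a sync.Map used in the Load / LoadOrStore style: racing callers may compute the description more than once, but only the first stored value is kept and returned to everyone. A small, generic sketch of the same pattern (descCache and describe are illustrative names, not driver API):

package main

import (
	"fmt"
	"reflect"
	"sync"
)

// descCache memoizes an expensive per-type computation with sync.Map.
type descCache struct {
	cache sync.Map // reflect.Type -> string
}

func (c *descCache) describe(t reflect.Type) string {
	// Fast path: return the cached value if it was already stored.
	if v, ok := c.cache.Load(t); ok {
		return v.(string)
	}
	// Slow path: compute, then publish; if another goroutine won the race,
	// use its value instead.
	d := "description of " + t.String()
	if v, loaded := c.cache.LoadOrStore(t, d); loaded {
		d = v.(string)
	}
	return d
}

func main() {
	c := &descCache{}
	fmt.Println(c.describe(reflect.TypeOf(struct{ A int }{})))
}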
@ -477,7 +528,14 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr
decoder: decoder,
}

stags, err := sc.parser.ParseStructTags(sf)
var stags StructTags
// If the caller requested that we use JSON struct tags, use the JSONFallbackStructTagParser
// instead of the parser defined on the codec.
if useJSONStructTags {
stags, err = JSONFallbackStructTagParser.ParseStructTags(sf)
} else {
stags, err = sc.parser.ParseStructTags(sf)
}
if err != nil {
return nil, err
}

@ -507,7 +565,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr
}
fallthrough
case reflect.Struct:
inlinesf, err := sc.describeStruct(r, sfType)
inlinesf, err := sc.describeStruct(r, sfType, useJSONStructTags, errorOnDuplicates)
if err != nil {
return nil, err
}

@ -559,7 +617,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if !ok || !sc.OverwriteDuplicatedInlinedFields {
if !ok || !sc.OverwriteDuplicatedInlinedFields || errorOnDuplicates {
return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name)
}
sd.fl = append(sd.fl, dominant)

@ -568,10 +626,6 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr

sort.Sort(byIndex(sd.fl))

sc.l.Lock()
sc.cache[t] = sd
sc.l.Unlock()

return sd, nil
}

@ -629,21 +683,21 @@ func getInlineField(val reflect.Value, index []int) (reflect.Value, error) {

// DeepZero returns recursive zero object
func deepZero(st reflect.Type) (result reflect.Value) {
if st.Kind() == reflect.Struct {
numField := st.NumField()
for i := 0; i < numField; i++ {
if result == emptyValue {
result = reflect.Indirect(reflect.New(st))

if result.Kind() == reflect.Struct {
for i := 0; i < result.NumField(); i++ {
if f := result.Field(i); f.Kind() == reflect.Ptr {
}
f := result.Field(i)
if f.CanInterface() {
if ft := reflect.TypeOf(f.Interface()); ft.Elem().Kind() == reflect.Struct {
result.Field(i).Set(recursivePointerTo(deepZero(ft.Elem())))
if f.Type().Kind() == reflect.Struct {
result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem())))
}
}
}
}
}

return
return result
}

// recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside

@ -12,12 +12,16 @@ import (
)

// StructTagParser returns the struct tags for a given struct field.
//
// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
type StructTagParser interface {
ParseStructTags(reflect.StructField) (StructTags, error)
}

// StructTagParserFunc is an adapter that allows a generic function to be used
// as a StructTagParser.
//
// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
type StructTagParserFunc func(reflect.StructField) (StructTags, error)

// ParseStructTags implements the StructTagParser interface.

@ -50,7 +54,7 @@ func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructT
// Skip This struct field should be skipped. This is usually denoted by parsing a "-"
// for the name.
//
// TODO(skriptble): Add tags for undefined as nil and for null as nil.
// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
type StructTags struct {
Name string
OmitEmpty bool

@ -85,6 +89,8 @@ type StructTags struct {
// A struct tag either consisting entirely of '-' or with a bson key with a
// value consisting entirely of '-' will return a StructTags with Skip true and
// the remaining fields will be their default values.
//
// Deprecated: DefaultStructTagParser will be removed in Go Driver 2.0.
var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) {
key := strings.ToLower(sf.Name)
tag, ok := sf.Tag.Lookup("bson")

@ -125,6 +131,9 @@ func parseTags(key string, tag string) (StructTags, error) {
// JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser
// but will also fallback to parsing the json tag instead on a field where the
// bson tag isn't available.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] and
// [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead.
var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) {
key := strings.ToLower(sf.Name)
tag, ok := sf.Tag.Lookup("bson")
@ -22,18 +22,28 @@ const (
)

// TimeCodec is the Codec used for time.Time values.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// TimeCodec registered.
type TimeCodec struct {
// UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false.
//
// Deprecated: Use bson.Decoder.UseLocalTimeZone instead.
UseLocalTimeZone bool
}

var (
defaultTimeCodec = NewTimeCodec()

_ ValueCodec = defaultTimeCodec
// Assert that defaultTimeCodec satisfies the typeDecoder interface, which allows it to be used
// by collection type decoders (e.g. map, slice, etc) to set individual values in a collection.
_ typeDecoder = defaultTimeCodec
)

// NewTimeCodec returns a TimeCodec with options opts.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// TimeCodec registered.
func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec {
timeOpt := bsonoptions.MergeTimeCodecOptions(opts...)

@ -95,7 +105,7 @@ func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t refle
return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType)
}

if !tc.UseLocalTimeZone {
if !tc.UseLocalTimeZone && !dc.useLocalTimeZone {
timeVal = timeVal.UTC()
}
return reflect.ValueOf(timeVal), nil

@ -117,7 +127,7 @@ func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val re
}

// EncodeValue is the ValueEncoderFunc for time.TIme.
func (tc *TimeCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
func (tc *TimeCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tTime {
return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
}
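As the deprecation notes point out, the per-codec UseLocalTimeZone flag is superseded by a method on bson.Decoder. A hedged sketch of the replacement (the no-argument setter is assumed from the mongo-driver v1.13-era Decoder API that these notes reference):

package example

import "go.mongodb.org/mongo-driver/bson"

// useLocalTimes configures an existing *bson.Decoder so that decoded
// time.Time values keep the local time zone instead of being converted
// to UTC, replacing bsonoptions.TimeCodec().SetUseLocalTimeZone(true).
func useLocalTimes(dec *bson.Decoder) {
	dec.UseLocalTimeZone()
}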
@ -34,6 +34,7 @@ var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem()
var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem()
var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem()
var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem()

var tBinary = reflect.TypeOf(primitive.Binary{})
var tUndefined = reflect.TypeOf(primitive.Undefined{})

@ -17,18 +17,29 @@ import (
)

// UIntCodec is the Codec used for uint values.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// UIntCodec registered.
type UIntCodec struct {
// EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the
// minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value.
//
// Deprecated: Use bson.Encoder.IntMinSize instead.
EncodeToMinSize bool
}

var (
defaultUIntCodec = NewUIntCodec()

_ ValueCodec = defaultUIntCodec
// Assert that defaultUIntCodec satisfies the typeDecoder interface, which allows it to be used
// by collection type decoders (e.g. map, slice, etc) to set individual values in a collection.
_ typeDecoder = defaultUIntCodec
)

// NewUIntCodec returns a UIntCodec with options opts.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// UIntCodec registered.
func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec {
uintOpt := bsonoptions.MergeUIntCodecOptions(opts...)

11
common/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go
generated
vendored

@ -7,22 +7,33 @@
package bsonoptions

// ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type ByteSliceCodecOptions struct {
EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false.
}

// ByteSliceCodec creates a new *ByteSliceCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func ByteSliceCodec() *ByteSliceCodecOptions {
return &ByteSliceCodecOptions{}
}

// SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead.
func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions {
bs.EncodeNilAsEmpty = &b
return bs
}

// MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions {
bs := ByteSliceCodec()
for _, opt := range opts {
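The byte-slice option above, like the map and slice options that follow, moves to a method on bson.Encoder. A hedged sketch of the nil-collection settings named in these deprecation notices (method names are taken from the notes themselves; exact availability depends on the driver version):

package example

import "go.mongodb.org/mongo-driver/bson"

// encodeNilAsEmpty makes nil []byte, map, and slice values marshal as empty
// BSON binary/document/array values instead of null, replacing the
// corresponding bsonoptions Set*NilAsEmpty(true) setters.
func encodeNilAsEmpty(enc *bson.Encoder) {
	enc.NilByteSliceAsEmpty()
	enc.NilMapAsEmpty()
	enc.NilSliceAsEmpty()
}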
@ -7,22 +7,33 @@
package bsonoptions

// EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type EmptyInterfaceCodecOptions struct {
DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false.
}

// EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions {
return &EmptyInterfaceCodecOptions{}
}

// SetDecodeBinaryAsSlice specifies if Old and Generic type binarys should default to []slice instead of primitive.Binary. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead.
func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions {
e.DecodeBinaryAsSlice = &b
return e
}

// MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions {
e := EmptyInterfaceCodec()
for _, opt := range opts {

15
common/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go
generated
vendored

@ -7,6 +7,9 @@
package bsonoptions

// MapCodecOptions represents all possible options for map encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type MapCodecOptions struct {
DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false.
EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false.

@ -19,17 +22,24 @@ type MapCodecOptions struct {
}

// MapCodec creates a new *MapCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func MapCodec() *MapCodecOptions {
return &MapCodecOptions{}
}

// SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead.
func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions {
t.DecodeZerosMap = &b
return t
}

// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead.
func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions {
t.EncodeNilAsEmpty = &b
return t

@ -40,12 +50,17 @@ func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions {
// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with
// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer
// will override TextMarshaler/TextUnmarshaler. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead.
func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions {
t.EncodeKeysWithStringer = &b
return t
}

// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions {
s := MapCodec()
for _, opt := range opts {
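SetEncodeKeysWithStringer (and its Encoder-level replacement, StringifyMapKeysWithFmt) changes how non-string map keys become BSON document keys: instead of requiring encoding.TextMarshaler, keys are rendered with fmt.Sprint. A tiny illustration of the key conversion this implies, independent of the driver:

package main

import "fmt"

func main() {
	scores := map[int]string{1: "first", 2: "second"}
	for k, v := range scores {
		// With fmt.Sprint-based key stringification, the int key 1 becomes
		// the document key "1".
		key := fmt.Sprint(k)
		fmt.Printf("%q -> %q\n", key, v)
	}
}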
11
common/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go
generated
vendored

@ -7,22 +7,33 @@
package bsonoptions

// SliceCodecOptions represents all possible options for slice encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type SliceCodecOptions struct {
EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false.
}

// SliceCodec creates a new *SliceCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func SliceCodec() *SliceCodecOptions {
return &SliceCodecOptions{}
}

// SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead.
func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions {
s.EncodeNilAsEmpty = &b
return s
}

// MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions {
s := SliceCodec()
for _, opt := range opts {

11
common/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go
generated
vendored

@ -9,23 +9,34 @@ package bsonoptions
var defaultDecodeOIDAsHex = true

// StringCodecOptions represents all possible options for string encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type StringCodecOptions struct {
DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true.
}

// StringCodec creates a new *StringCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func StringCodec() *StringCodecOptions {
return &StringCodecOptions{}
}

// SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made
// from the raw object ID bytes will be used. Defaults to true.
//
// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0.
func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions {
t.DecodeObjectIDAsHex = &b
return t
}

// MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions {
s := &StringCodecOptions{&defaultDecodeOIDAsHex}
for _, opt := range opts {

20
common/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go
generated
vendored

@ -9,6 +9,9 @@ package bsonoptions
var defaultOverwriteDuplicatedInlinedFields = true

// StructCodecOptions represents all possible options for struct encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type StructCodecOptions struct {
DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false.
DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when a inline value is decoded. Defaults to false.

@ -18,17 +21,24 @@ type StructCodecOptions struct {
}

// StructCodec creates a new *StructCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func StructCodec() *StructCodecOptions {
return &StructCodecOptions{}
}

// SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead.
func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions {
t.DecodeZeroStruct = &b
return t
}

// SetDecodeDeepZeroInline specifies if structs should be zeroed before decoding into them. Defaults to false.
//
// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0.
func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions {
t.DecodeDeepZeroInline = &b
return t

@ -36,6 +46,8 @@ func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions

// SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all
// its values set to their default value. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead.
func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions {
t.EncodeOmitDefaultStruct = &b
return t

@ -45,18 +57,26 @@ func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOpti
// same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when
// encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if
// there are duplicate keys after the struct is inlined. Defaults to true.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead.
func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions {
t.OverwriteDuplicatedInlinedFields = &b
return t
}

// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
//
// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be
// supported in Go Driver 2.0.
func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions {
t.AllowUnexportedFields = &b
return t
}

// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions {
s := &StructCodecOptions{
OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields,
11
common/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go
generated
vendored

@ -7,22 +7,33 @@
package bsonoptions

// TimeCodecOptions represents all possible options for time.Time encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type TimeCodecOptions struct {
UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false.
}

// TimeCodec creates a new *TimeCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func TimeCodec() *TimeCodecOptions {
return &TimeCodecOptions{}
}

// SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead.
func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions {
t.UseLocalTimeZone = &b
return t
}

// MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions {
t := TimeCodec()
for _, opt := range opts {

11
common/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go
generated
vendored

@ -7,22 +7,33 @@
package bsonoptions

// UIntCodecOptions represents all possible options for uint encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type UIntCodecOptions struct {
EncodeToMinSize *bool // Specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false.
}

// UIntCodec creates a new *UIntCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func UIntCodec() *UIntCodecOptions {
return &UIntCodecOptions{}
}

// SetEncodeToMinSize specifies if all uints except uint64 should be decoded to minimum size bsontype. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.IntMinSize] instead.
func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions {
u.EncodeToMinSize = &b
return u
}

// MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions {
u := UIntCodec()
for _, opt := range opts {
Some files were not shown because too many files have changed in this diff.