mirror of https://github.com/containers/podman.git
vendor: update containers/image
Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
This commit is contained in: parent c81c77109b, commit 7f29233a3f

go.mod (26 changes)
@@ -2,7 +2,7 @@ module github.com/containers/podman/v5
 
 // Warning: Ensure the "go" and "toolchain" versions match exactly to prevent unwanted auto-updates
 
-go 1.22.0
+go 1.22.6
 
 require (
 	github.com/BurntSushi/toml v1.4.0
@@ -16,7 +16,7 @@ require (
 	github.com/containers/common v0.60.1-0.20240920125326-ff6611ae40ad
 	github.com/containers/conmon v2.0.20+incompatible
 	github.com/containers/gvisor-tap-vsock v0.7.5
-	github.com/containers/image/v5 v5.32.1-0.20240806084436-e3e9287ca8e6
+	github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46
 	github.com/containers/libhvee v0.7.1
 	github.com/containers/ocicrypt v1.2.0
 	github.com/containers/psgo v1.9.0
@@ -86,7 +86,7 @@ require (
 )
 
 require (
-	dario.cat/mergo v1.0.0 // indirect
+	dario.cat/mergo v1.0.1 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
 	github.com/Microsoft/hcsshim v0.12.6 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
@@ -107,7 +107,7 @@ require (
 	github.com/containernetworking/cni v1.2.3 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
 	github.com/containers/luksy v0.0.0-20240618143119-a8846e21c08c // indirect
-	github.com/coreos/go-oidc/v3 v3.10.0 // indirect
+	github.com/coreos/go-oidc/v3 v3.11.0 // indirect
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
 	github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -122,7 +122,7 @@ require (
 	github.com/gin-contrib/sse v0.1.0 // indirect
 	github.com/gin-gonic/gin v1.9.1 // indirect
 	github.com/go-jose/go-jose/v3 v3.0.3 // indirect
-	github.com/go-jose/go-jose/v4 v4.0.2 // indirect
+	github.com/go-jose/go-jose/v4 v4.0.4 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-ole/go-ole v1.3.0 // indirect
@@ -145,7 +145,7 @@ require (
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
-	github.com/google/go-containerregistry v0.20.1 // indirect
+	github.com/google/go-containerregistry v0.20.2 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
@@ -158,7 +158,7 @@ require (
 	github.com/klauspost/cpuid/v2 v2.2.8 // indirect
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/leodido/go-urn v1.2.4 // indirect
-	github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 // indirect
+	github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect
 	github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/manifoldco/promptui v0.9.0 // indirect
@@ -192,12 +192,12 @@ require (
 	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
 	github.com/segmentio/ksuid v1.0.4 // indirect
 	github.com/shoenig/go-m1cpu v0.1.6 // indirect
-	github.com/sigstore/fulcio v1.4.5 // indirect
+	github.com/sigstore/fulcio v1.6.4 // indirect
 	github.com/sigstore/rekor v1.3.6 // indirect
-	github.com/sigstore/sigstore v1.8.4 // indirect
+	github.com/sigstore/sigstore v1.8.9 // indirect
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
-	github.com/sylabs/sif/v2 v2.18.0 // indirect
+	github.com/sylabs/sif/v2 v2.19.1 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/tklauser/go-sysconf v0.3.12 // indirect
@@ -218,10 +218,10 @@ require (
 	go.opentelemetry.io/otel/trace v1.28.0 // indirect
 	golang.org/x/arch v0.7.0 // indirect
 	golang.org/x/mod v0.20.0 // indirect
-	golang.org/x/oauth2 v0.22.0 // indirect
-	golang.org/x/time v0.5.0 // indirect
+	golang.org/x/oauth2 v0.23.0 // indirect
+	golang.org/x/time v0.6.0 // indirect
 	golang.org/x/tools v0.24.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c // indirect
 	google.golang.org/grpc v1.65.0 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
go.sum (104 changes)
@@ -1,6 +1,6 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
-dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
 github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
@@ -87,8 +87,8 @@ github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6J
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/gvisor-tap-vsock v0.7.5 h1:bTy4u3DOmmUPwurL6me2rsgfypAFDhyeJleUcQmBR/E=
 github.com/containers/gvisor-tap-vsock v0.7.5/go.mod h1:GW9jOqAEEGdaS20XwTYdm6KCYDHIulOE/yEEOabkoE4=
-github.com/containers/image/v5 v5.32.1-0.20240806084436-e3e9287ca8e6 h1:nXEEUAo8l2HLlMBy+LsHju2AikpA30jvlTSHbnjJXVw=
-github.com/containers/image/v5 v5.32.1-0.20240806084436-e3e9287ca8e6/go.mod h1:r//zsX8SjmVH0F87d+gakcgR4W5HTFGSgSLB4sufW6A=
+github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46 h1:eIwxm8+oAoTk+PDuOTbZRFG1DBF5tAlFO+niIamyzaM=
+github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46/go.mod h1:GgaW+YZJaJmcGtyPZNtsggfM4BBYIMfu/fFK62ZKU0o=
 github.com/containers/libhvee v0.7.1 h1:dWGF5GLq9DZvXo3P8aDp3cNieL5eCaSell4UmeA/jY4=
 github.com/containers/libhvee v0.7.1/go.mod h1:fRKB3AyIqHMvq6xaeYhTpckM2cdoq0oecolyoiuLP7M=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
@@ -103,8 +103,8 @@ github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0 h1:0NNBYNpPF
 github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0/go.mod h1:Gx8WE9kURdCyEuB9cq8Kq5sRDRbpZi34lnOQ3zAGK2s=
 github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
 github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
-github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=
-github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac=
+github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
+github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 h1:OoRAFlvDGCUqDLampLQjk0yeeSGdF9zzst/3G9IkBbc=
@@ -134,8 +134,8 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
 github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v27.1.1+incompatible h1:goaZxOqs4QKxznZjjBWKONQci/MywhtRv2oNn0GkeZE=
-github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ=
+github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
@@ -173,8 +173,8 @@ github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
 github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
 github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
 github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
-github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
-github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
+github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E=
+github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
 github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -211,12 +211,12 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn
 github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
 github.com/go-playground/validator/v10 v10.17.0 h1:SmVVlfAOtlZncTxRuinDPomC2DkXJ4E5T9gDA0AIH74=
 github.com/go-playground/validator/v10 v10.17.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
-github.com/go-rod/rod v0.116.0 h1:ypRryjTys3EnqHskJ/TdgodFMvXV0EHvmy4bSkKZgHM=
-github.com/go-rod/rod v0.116.0/go.mod h1:aiedSEFg5DwG/fnNbUOTPMTTWX3MRj6vIs/a684Mthw=
+github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA=
+github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
-github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
+github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
+github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
 github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
 github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
 github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -252,8 +252,8 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.20.1 h1:eTgx9QNYugV4DN5mz4U8hiAGTi1ybXn0TPi4Smd8du0=
-github.com/google/go-containerregistry v0.20.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
+github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo=
+github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8=
 github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
 github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -275,8 +275,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E=
 github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -326,8 +326,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
 github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
-github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 h1:aiPrFdHDCCvigNBCkOWj2lv9Bx5xDp210OANZEoiP0I=
-github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0/go.mod h1:srVwm2N3DC/tWqQ+igZXDrmKlNRN8X/dmJ1wEZrv760=
+github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec h1:2tTW6cDth2TSgRbAhD7yjZzTQmcN25sDRPEeinR51yQ=
+github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec/go.mod h1:TmwEoGCwIti7BCeJ9hescZgRtatxRE+A72pCoPfmcfk=
 github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2 h1:DZMFueDbfz6PNc1GwDRA8+6lBx1TB9UnxDQliCqR73Y=
 github.com/linuxkit/virtsock v0.0.0-20220523201153-1a23e78aa7a2/go.mod h1:SWzULI85WerrFt3u+nIm5F9l7EvxZTKQvd0InF3nmgM=
 github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
@@ -384,6 +384,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
 github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
 github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
 github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
@@ -425,25 +427,25 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF
 github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
 github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
 github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
+github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw=
-github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q=
+github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY=
+github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
 github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
-github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/rootless-containers/rootlesskit/v2 v2.3.1 h1:wdYtdKxWFvVLby9ThMP6O6/v2q/GmOXbkRi+4m9nPW0=
 github.com/rootless-containers/rootlesskit/v2 v2.3.1/go.mod h1:tdtfS9ak4bGmwJRmcjsAzcHN5rJ3c5dB7yhSV10KTbk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
-github.com/sebdah/goldie/v2 v2.5.3/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
+github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY=
+github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
 github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
 github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
 github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
@@ -458,12 +460,12 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt
 github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
 github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
 github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
-github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc=
-github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8=
+github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY=
+github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs=
 github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8=
 github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc=
-github.com/sigstore/sigstore v1.8.4 h1:g4ICNpiENFnWxjmBzBDWUn62rNFeny/P77HUC8da32w=
-github.com/sigstore/sigstore v1.8.4/go.mod h1:1jIKtkTFEeISen7en+ZPWdDHazqhxco/+v9CNjc7oNg=
+github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk=
+github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
@@ -488,8 +490,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/sylabs/sif/v2 v2.18.0 h1:eXugsS1qx7St2Wu/AJ21KnsQiVCpouPlTigABh+6KYI=
-github.com/sylabs/sif/v2 v2.18.0/go.mod h1:GOQj7LIBqp15fjqH5i8ZEbLp8SXJi9S+xbRO+QQAdRo=
+github.com/sylabs/sif/v2 v2.19.1 h1:1eeMmFc8elqJe60ZiWwXgL3gMheb0IP4GmNZ4q0IEA0=
+github.com/sylabs/sif/v2 v2.19.1/go.mod h1:U1SUhvl8X1JIxAylC0DYz1fa/Xba6EMZD1dGPGBH83E=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
@@ -530,12 +532,12 @@ github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=
 github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns=
 github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
 github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
-github.com/ysmood/got v0.34.1 h1:IrV2uWLs45VXNvZqhJ6g2nIhY+pgIG1CUoOcqfXFl1s=
-github.com/ysmood/got v0.34.1/go.mod h1:yddyjq/PmAf08RMLSwDjPyCvHvYed+WjHnQxpH851LM=
+github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q=
+github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg=
 github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
 github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
-github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak=
-github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
+github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU=
+github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
@@ -554,8 +556,8 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIX
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
 go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
 go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
 go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
@@ -564,8 +566,8 @@ go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBq
 go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
 go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
 go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
-go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
-go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
+go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
+go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
@@ -610,8 +612,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
 golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
-golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
+golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -669,8 +671,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
 golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
+golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -693,11 +695,11 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s=
-google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
-google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c h1:Kqjm4WpoWvwhMPcrAczoTyMySQmYa9Wy2iL6Con4zn8=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=

@@ -13,6 +13,9 @@
 # Output of the go coverage tool, specifically when used with LiteIDE
 *.out
 
+# Golang/Intellij
+.idea
+
 # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
 .glide/

@@ -44,13 +44,21 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
 
 ## Status
 
-It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+Mergo is stable and frozen, ready for production. Check a short list of the projects using it at large scale [here](https://github.com/imdario/mergo#mergo-in-the-wild).
+
+No new features are accepted. They will be considered for a future v2 that improves the implementation and fixes bugs for corner cases.
 
 ### Important notes
 
 #### 1.0.0
 
-In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`.
+In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. No more v1 versions will be released.
+
+If the vanity URL is causing issues in your project because a dependency pulls in Mergo (it isn't a direct dependency of your project), it is recommended to use [replace](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive) to pin the version to the last one with the old import URL:
+
+```
+replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16
+```
 
 #### 0.3.9
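For context, that `replace` line goes in the consuming module's own go.mod. A hedged sketch, with `example.com/myapp` as a placeholder module path:

```
module example.com/myapp

go 1.22

// mergo arrives only as an indirect dependency; pin it to the last release
// published under the old import path.
replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16
```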
@@ -64,55 +72,24 @@ If you were using Mergo before April 6th, 2015, please check your project works
 
 If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
 
 <a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
 <a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
 <a href='https://github.com/sponsors/imdario' target='_blank'><img alt="Become my sponsor" src="https://img.shields.io/github/sponsors/imdario?style=for-the-badge" /></a>
 
 ### Mergo in the wild
 
-- [moby/moby](https://github.com/moby/moby)
-- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
-- [vmware/dispatch](https://github.com/vmware/dispatch)
-- [Shopify/themekit](https://github.com/Shopify/themekit)
-- [imdario/zas](https://github.com/imdario/zas)
-- [matcornic/hermes](https://github.com/matcornic/hermes)
-- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
-- [kataras/iris](https://github.com/kataras/iris)
-- [michaelsauter/crane](https://github.com/michaelsauter/crane)
-- [go-task/task](https://github.com/go-task/task)
-- [sensu/uchiwa](https://github.com/sensu/uchiwa)
-- [ory/hydra](https://github.com/ory/hydra)
-- [sisatech/vcli](https://github.com/sisatech/vcli)
-- [dairycart/dairycart](https://github.com/dairycart/dairycart)
-- [projectcalico/felix](https://github.com/projectcalico/felix)
-- [resin-os/balena](https://github.com/resin-os/balena)
-- [go-kivik/kivik](https://github.com/go-kivik/kivik)
-- [Telefonica/govice](https://github.com/Telefonica/govice)
-- [supergiant/supergiant](supergiant/supergiant)
-- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
-- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
-- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
-- [EagerIO/Stout](https://github.com/EagerIO/Stout)
-- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
-- [russross/canvasassignments](https://github.com/russross/canvasassignments)
-- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
-- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
-- [divshot/gitling](https://github.com/divshot/gitling)
-- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
-- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
-- [elwinar/rambler](https://github.com/elwinar/rambler)
-- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
-- [jfbus/impressionist](https://github.com/jfbus/impressionist)
-- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
-- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
-- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
-- [thoas/picfit](https://github.com/thoas/picfit)
-- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
-- [jnuthong/item_search](https://github.com/jnuthong/item_search)
-- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
-- [containerssh/containerssh](https://github.com/containerssh/containerssh)
-- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
-- [tjpnz/structbot](https://github.com/tjpnz/structbot)
+Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/dependents) [of](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.16/dependents) [projects](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.12), including:
+
+* [containerd/containerd](https://github.com/containerd/containerd)
+* [datadog/datadog-agent](https://github.com/datadog/datadog-agent)
+* [docker/cli/](https://github.com/docker/cli/)
+* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+* [go-micro/go-micro](https://github.com/go-micro/go-micro)
+* [grafana/loki](https://github.com/grafana/loki)
+* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+* [masterminds/sprig](https://github.com/Masterminds/sprig)
+* [moby/moby](https://github.com/moby/moby)
+* [slackhq/nebula](https://github.com/slackhq/nebula)
+* [volcano-sh/volcano](https://github.com/volcano-sh/volcano)
 
 ## Install
@@ -141,6 +118,39 @@ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
 }
 ```
 
+If you need to override pointers, so the source pointer's value is assigned to the destination's pointer, you must use `WithoutDereference`:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"dario.cat/mergo"
+)
+
+type Foo struct {
+	A *string
+	B int64
+}
+
+func main() {
+	first := "first"
+	second := "second"
+	src := Foo{
+		A: &first,
+		B: 2,
+	}
+
+	dest := Foo{
+		A: &second,
+		B: 1,
+	}
+
+	mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference)
+	fmt.Println(*dest.A, dest.B) // "first 2": dest now shares src's pointer
+}
+```
 
 Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
 
 ```go
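// (Editor's sketch — the diff context is cut off at this hunk boundary.
// Upstream's README presumably continues with mergo.Map; assuming dst is a
// struct value and srcMap is a map[string]interface{}, the usage is:)
if err := mergo.Map(&dst, srcMap); err != nil {
	// ...
}
```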

@@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
 		}
 		fieldName := field.Name
 		fieldName = changeInitialCase(fieldName, unicode.ToLower)
-		if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
+		if _, ok := dstMap[fieldName]; !ok || (!isEmptyValue(reflect.ValueOf(src.Field(i).Interface()), !config.ShouldNotDereference) && overwrite) || config.overwriteWithEmptyValue {
 			dstMap[fieldName] = src.Field(i).Interface()
 		}
 	}

@@ -269,7 +269,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
 			if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
 				return
 			}
-		} else {
+		} else if src.Elem().Kind() != reflect.Struct {
 			if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
 				dst.Set(src)
 			}

@@ -52,6 +52,16 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
 	}
 	stream.reader = reader
 
+	if decompressor != nil && format.Name() == compressiontypes.ZstdAlgorithmName {
+		tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
+		if err != nil {
+			return bpDetectCompressionStepData{}, err
+		}
+		if tocDigest != nil {
+			format = compression.ZstdChunked
+		}
+
+	}
 	res := bpDetectCompressionStepData{
 		isCompressed: decompressor != nil,
 		format:       format,
@@ -71,13 +81,14 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
 
 // bpCompressionStepData contains data that the copy pipeline needs about the compression step.
 type bpCompressionStepData struct {
-	operation                    bpcOperation                // What we are actually doing
-	uploadedOperation            types.LayerCompression      // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
-	uploadedAlgorithm            *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
-	uploadedAnnotations          map[string]string           // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
-	srcCompressorBaseVariantName string                      // Compressor base variant name to record in the blob info cache for the source blob.
-	uploadedCompressorName       string                      // Compressor name to record in the blob info cache for the uploaded blob.
-	closers                      []io.Closer                 // Objects to close after the upload is done, if any.
+	operation                             bpcOperation                // What we are actually doing
+	uploadedOperation                     types.LayerCompression      // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
+	uploadedAlgorithm                     *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
+	uploadedAnnotations                   map[string]string           // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
+	srcCompressorBaseVariantName          string                      // Compressor base variant name to record in the blob info cache for the source blob.
+	uploadedCompressorBaseVariantName     string                      // Compressor base variant name to record in the blob info cache for the uploaded blob.
+	uploadedCompressorSpecificVariantName string                      // Compressor specific variant name to record in the blob info cache for the uploaded blob.
+	closers                               []io.Closer                 // Objects to close after the upload is done, if any.
 }
 
 type bpcOperation int
|
|||
// We can’t do anything with an encrypted blob unless decrypted.
|
||||
logrus.Debugf("Using original blob without modification for encrypted blob")
|
||||
return &bpCompressionStepData{
|
||||
operation: bpcOpPreserveOpaque,
|
||||
uploadedOperation: types.PreserveOriginal,
|
||||
uploadedAlgorithm: nil,
|
||||
srcCompressorBaseVariantName: internalblobinfocache.UnknownCompression,
|
||||
uploadedCompressorName: internalblobinfocache.UnknownCompression,
|
||||
operation: bpcOpPreserveOpaque,
|
||||
uploadedOperation: types.PreserveOriginal,
|
||||
uploadedAlgorithm: nil,
|
||||
srcCompressorBaseVariantName: internalblobinfocache.UnknownCompression,
|
||||
uploadedCompressorBaseVariantName: internalblobinfocache.UnknownCompression,
|
||||
uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
|
||||
}, nil
|
||||
}
|
||||
return nil, nil
|
||||
|
@@ -157,14 +169,19 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
 			Digest: "",
 			Size:   -1,
 		}
+		specificVariantName := uploadedAlgorithm.Name()
+		if specificVariantName == uploadedAlgorithm.BaseVariantName() {
+			specificVariantName = internalblobinfocache.UnknownCompression
+		}
 		return &bpCompressionStepData{
-			operation:                    bpcOpCompressUncompressed,
-			uploadedOperation:            types.Compress,
-			uploadedAlgorithm:            uploadedAlgorithm,
-			uploadedAnnotations:          annotations,
-			srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
-			uploadedCompressorName:       uploadedAlgorithm.Name(),
-			closers:                      []io.Closer{reader},
+			operation:                             bpcOpCompressUncompressed,
+			uploadedOperation:                     types.Compress,
+			uploadedAlgorithm:                     uploadedAlgorithm,
+			uploadedAnnotations:                   annotations,
+			srcCompressorBaseVariantName:          detected.srcCompressorBaseVariantName,
+			uploadedCompressorBaseVariantName:     uploadedAlgorithm.BaseVariantName(),
+			uploadedCompressorSpecificVariantName: specificVariantName,
+			closers:                               []io.Closer{reader},
 		}, nil
 	}
 	return nil, nil
@@ -197,15 +214,20 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp
 			Digest: "",
 			Size:   -1,
 		}
+		specificVariantName := ic.compressionFormat.Name()
+		if specificVariantName == ic.compressionFormat.BaseVariantName() {
+			specificVariantName = internalblobinfocache.UnknownCompression
+		}
 		succeeded = true
 		return &bpCompressionStepData{
-			operation:                    bpcOpRecompressCompressed,
-			uploadedOperation:            types.PreserveOriginal,
-			uploadedAlgorithm:            ic.compressionFormat,
-			uploadedAnnotations:          annotations,
-			srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
-			uploadedCompressorName:       ic.compressionFormat.Name(),
-			closers:                      []io.Closer{decompressed, recompressed},
+			operation:                             bpcOpRecompressCompressed,
+			uploadedOperation:                     types.PreserveOriginal,
+			uploadedAlgorithm:                     ic.compressionFormat,
+			uploadedAnnotations:                   annotations,
+			srcCompressorBaseVariantName:          detected.srcCompressorBaseVariantName,
+			uploadedCompressorBaseVariantName:     ic.compressionFormat.BaseVariantName(),
+			uploadedCompressorSpecificVariantName: specificVariantName,
+			closers:                               []io.Closer{decompressed, recompressed},
 		}, nil
 	}
 	return nil, nil
@@ -226,12 +248,13 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp
 			Size:   -1,
 		}
 		return &bpCompressionStepData{
-			operation:                    bpcOpDecompressCompressed,
-			uploadedOperation:            types.Decompress,
-			uploadedAlgorithm:            nil,
-			srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
-			uploadedCompressorName:       internalblobinfocache.Uncompressed,
-			closers:                      []io.Closer{s},
+			operation:                             bpcOpDecompressCompressed,
+			uploadedOperation:                     types.Decompress,
+			uploadedAlgorithm:                     nil,
+			srcCompressorBaseVariantName:          detected.srcCompressorBaseVariantName,
+			uploadedCompressorBaseVariantName:     internalblobinfocache.Uncompressed,
+			uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
+			closers:                               []io.Closer{s},
 		}, nil
 	}
 	return nil, nil
@@ -276,7 +299,8 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom
 		// We only record the base variant of the format on upload; we didn’t do anything with
 		// the TOC, we don’t know whether it matches the blob digest, so we don’t want to trigger
 		// reuse of any kind between the blob digest and the TOC digest.
-		uploadedCompressorName: detected.srcCompressorBaseVariantName,
+		uploadedCompressorBaseVariantName:     detected.srcCompressorBaseVariantName,
+		uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
 	}
 }
 
@@ -336,24 +360,16 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
 			return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
 		}
 	}
-	if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorName == "" {
-		return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded: %q)",
-			d.srcCompressorBaseVariantName, d.uploadedCompressorName)
+	if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorBaseVariantName == "" || d.uploadedCompressorSpecificVariantName == "" {
+		return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded base: %q, uploaded specific: %q)",
+			d.srcCompressorBaseVariantName, d.uploadedCompressorBaseVariantName, d.uploadedCompressorSpecificVariantName)
 	}
-	if d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
-		if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
-			// HACK: Don’t record zstd:chunked algorithms.
-			// There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions,
-			// and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless.
-			//
-			// We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate
-			// between zstd and zstd:chunked; so we could, in varying situations over time, call RecordDigestCompressorName
-			// with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName , which causes warnings about
-			// inconsistent data to be logged.
-			c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{
-				BaseVariantCompressor: d.uploadedCompressorName,
-			})
-		}
+	if d.uploadedCompressorBaseVariantName != internalblobinfocache.UnknownCompression {
+		c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{
+			BaseVariantCompressor:      d.uploadedCompressorBaseVariantName,
+			SpecificVariantCompressor:  d.uploadedCompressorSpecificVariantName,
+			SpecificVariantAnnotations: d.uploadedAnnotations,
+		})
 	}
 	if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
 		d.srcCompressorBaseVariantName != internalblobinfocache.UnknownCompression {
@@ -361,7 +377,9 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
 		// blob as is, or perhaps decompressed it; either way we don’t trust the TOC digest,
 		// so record neither the variant name, nor the TOC digest.
 		c.blobInfoCache.RecordDigestCompressorData(srcInfo.Digest, internalblobinfocache.DigestCompressorData{
-			BaseVariantCompressor: d.srcCompressorBaseVariantName,
+			BaseVariantCompressor:      d.srcCompressorBaseVariantName,
+			SpecificVariantCompressor:  internalblobinfocache.UnknownCompression,
+			SpecificVariantAnnotations: nil,
 		})
 	}
 	return nil

@@ -193,35 +193,33 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
 		reportWriter = options.ReportWriter
 	}
 
+	// safeClose amends retErr with an error from c.Close(), if any.
+	safeClose := func(name string, c io.Closer) {
+		err := c.Close()
+		if err == nil {
+			return
+		}
+		// Do not use %w for err as we don't want it to be unwrapped by callers.
+		if retErr != nil {
+			retErr = fmt.Errorf(" (%s: %s): %w", name, err.Error(), retErr)
+		} else {
+			retErr = fmt.Errorf(" (%s: %s)", name, err.Error())
+		}
+	}
+
 	publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
 	if err != nil {
 		return nil, fmt.Errorf("initializing destination %s: %w", transports.ImageName(destRef), err)
 	}
 	dest := imagedestination.FromPublic(publicDest)
-	defer func() {
-		if err := dest.Close(); err != nil {
-			if retErr != nil {
-				retErr = fmt.Errorf(" (dest: %v): %w", err, retErr)
-			} else {
-				retErr = fmt.Errorf(" (dest: %v)", err)
-			}
-		}
-	}()
+	defer safeClose("dest", dest)
 
 	publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
 	if err != nil {
 		return nil, fmt.Errorf("initializing source %s: %w", transports.ImageName(srcRef), err)
 	}
 	rawSource := imagesource.FromPublic(publicRawSource)
-	defer func() {
-		if err := rawSource.Close(); err != nil {
-			if retErr != nil {
-				retErr = fmt.Errorf(" (src: %v): %w", err, retErr)
-			} else {
-				retErr = fmt.Errorf(" (src: %v)", err)
-			}
-		}
-	}()
+	defer safeClose("src", rawSource)
 
 	// If reportWriter is not a TTY (e.g., when piping to a file), do not
 	// print the progress bars to avoid long and hard to parse output.
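The new `safeClose` helper works because `Image` uses a named return value: a deferred closure may still modify `retErr` after the `return` statement has assigned it. A minimal, runnable sketch of the pattern (all names here are illustrative, not from the diff):

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

type closer struct{ err error }

func (c closer) Close() error { return c.err }

// work folds Close errors into the named return value retErr,
// exactly as the deferred safeClose in the diff does.
func work() (retErr error) {
	safeClose := func(name string, c io.Closer) {
		err := c.Close()
		if err == nil {
			return
		}
		if retErr != nil {
			retErr = fmt.Errorf(" (%s: %s): %w", name, err.Error(), retErr)
		} else {
			retErr = fmt.Errorf(" (%s: %s)", name, err.Error())
		}
	}
	defer safeClose("dest", closer{errors.New("close failed")})
	return errors.New("primary error")
}

func main() {
	fmt.Println(work()) // " (dest: close failed): primary error"
}
```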

@@ -24,13 +24,18 @@ func (c *copier) newProgressPool() *mpb.Progress {
 
 // customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
 func customPartialBlobDecorFunc(s decor.Statistics) string {
+	current := decor.SizeB1024(s.Current)
+	total := decor.SizeB1024(s.Total)
+	refill := decor.SizeB1024(s.Refill)
 	if s.Total == 0 {
-		pairFmt := "%.1f / %.1f (skipped: %.1f)"
-		return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
+		return fmt.Sprintf("%.1f / %.1f (skipped: %.1f)", current, total, refill)
 	}
+	// If we didn't do a partial fetch then let's not output a distracting ("skipped: 0.0b = 0.00%")
+	if s.Refill == 0 {
+		return fmt.Sprintf("%.1f / %.1f", current, total)
+	}
-	pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
 	percentage := 100.0 * float64(s.Refill) / float64(s.Total)
-	return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
+	return fmt.Sprintf("%.1f / %.1f (skipped: %.1f = %.2f%%)", current, total, refill, percentage)
 }
 
 // progressBar wraps a *mpb.Bar, allowing us to add extra state and methods.
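The rewrite above only hoists the repeated `decor.SizeB1024` conversions into locals; the plain `%.1f` verbs work because `decor.SizeB1024` implements `fmt.Formatter` and renders human-readable binary sizes. A standalone sketch (assumes the mpb/v8 module that podman vendors):

```go
package main

import (
	"fmt"

	"github.com/vbauerster/mpb/v8/decor"
)

func main() {
	// SizeB1024 formats a byte count in binary (1024-based) units, so %.1f
	// prints something like "1.5MiB" rather than a bare float.
	fmt.Printf("%.1f\n", decor.SizeB1024(1572864))
}
```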

@@ -106,7 +106,7 @@ func (c *copier) createSignatures(ctx context.Context, manifest []byte, identity
 			if len(c.signers) == 1 {
 				return nil, fmt.Errorf("creating signature: %w", err)
 			} else {
-				return nil, fmt.Errorf("creating signature %d: %w", signerIndex, err)
+				return nil, fmt.Errorf("creating signature %d: %w", signerIndex+1, err)
 			}
 		}
 		res = append(res, newSig)

@@ -6,6 +6,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"reflect"
 	"slices"
 	"strings"
@@ -162,7 +163,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
 		if format == nil {
 			format = defaultCompressionFormat
 		}
-		if format.Name() == compression.ZstdChunked.Name() {
+		if format.Name() == compressiontypes.ZstdChunkedAlgorithmName {
 			if ic.requireCompressionFormatMatch {
 				return copySingleImageResult{}, errors.New("explicitly requested to combine zstd:chunked with encryption, which is not beneficial; use plain zstd instead")
 			}
@@ -322,10 +323,7 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
 	if err != nil {
 		return fmt.Errorf("parsing image configuration: %w", err)
 	}
-	wantedPlatforms, err := platform.WantedPlatforms(sys)
-	if err != nil {
-		return fmt.Errorf("getting current platform information %#v: %w", sys, err)
-	}
+	wantedPlatforms := platform.WantedPlatforms(sys)
 
 	options := newOrderedSet()
 	match := false
@@ -888,21 +886,33 @@ func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.Reuse
 	// Handling of compression, encryption, and the related MIME types and the like are all the responsibility
 	// of the generic code in this package.
 	res := types.BlobInfo{
-		Digest:               reusedBlob.Digest,
-		Size:                 reusedBlob.Size,
-		URLs:                 nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
-		Annotations:          inputInfo.Annotations, // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls)
-		MediaType:            inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
+		Digest: reusedBlob.Digest,
+		Size:   reusedBlob.Size,
+		URLs:   nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
+		// FIXME: This should remove zstd:chunked annotations IF the original was chunked and the new one isn’t
+		// (but those annotations being left with incorrect values should not break pulls).
+		Annotations: maps.Clone(inputInfo.Annotations),
+		MediaType:   inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
 		CompressionOperation: reusedBlob.CompressionOperation,
 		CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
 		CryptoOperation:      inputInfo.CryptoOperation, // Expected to be unset anyway.
 	}
 	// The transport is only expected to fill CompressionOperation and CompressionAlgorithm
-	// if the blob was substituted; otherwise, fill it in based
+	// if the blob was substituted; otherwise, it is optional, and if not set, fill it in based
 	// on what we know from the srcInfos we were given.
 	if reusedBlob.Digest == inputInfo.Digest {
-		res.CompressionOperation = inputInfo.CompressionOperation
-		res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
+		if res.CompressionOperation == types.PreserveOriginal {
+			res.CompressionOperation = inputInfo.CompressionOperation
+		}
+		if res.CompressionAlgorithm == nil {
+			res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
+		}
 	}
+	if len(reusedBlob.CompressionAnnotations) != 0 {
+		if res.Annotations == nil {
+			res.Annotations = map[string]string{}
+		}
+		maps.Copy(res.Annotations, reusedBlob.CompressionAnnotations)
+	}
 	return res
 }
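[Reviewer sketch, not part of the vendored diff.] The switch from aliasing inputInfo.Annotations to maps.Clone, plus the later maps.Copy merge, matters because the result may be mutated without affecting the caller's map. A minimal standalone illustration (standard library only; the variable names are placeholders):

package main

import (
	"fmt"
	"maps"
)

func main() {
	src := map[string]string{"a": "1"}

	alias := src             // shares storage with src
	clone := maps.Clone(src) // independent copy; a nil input stays nil

	clone["b"] = "2" // src is unaffected
	alias["c"] = "3" // src now contains "c" too

	// maps.Copy merges key/value pairs into an existing map,
	// overwriting entries that have equal keys.
	maps.Copy(clone, map[string]string{"a": "overwritten"})

	fmt.Println(src, clone)
}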
@@ -6,7 +6,7 @@ import (
 	"fmt"
 	"io"
 	"math"
-	"math/rand"
+	"math/rand/v2"
 	"net/http"
 	"net/url"
 	"strconv"
@@ -158,7 +158,7 @@ func (br *bodyReader) Read(p []byte) (int, error) {
 			logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise
 		}
 		br.body = nil
-		time.Sleep(1*time.Second + time.Duration(rand.Intn(100_000))*time.Microsecond) // Some jitter so that a failure blip doesn’t cause a deterministic stampede
+		time.Sleep(1*time.Second + rand.N(100_000*time.Microsecond)) // Some jitter so that a failure blip doesn’t cause a deterministic stampede

 		headers := map[string][]string{
 			"Range": {fmt.Sprintf("bytes=%d-", br.offset)},
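[Reviewer sketch, not part of the vendored diff.] math/rand/v2 lets the jitter be computed directly in time.Duration units: rand.N is generic over integer types, so rand.N(100_000*time.Microsecond) picks a duration uniformly in [0, 100ms), replacing the old Intn-plus-conversion dance:

package main

import (
	"fmt"
	"math/rand/v2"
	"time"
)

func main() {
	// time.Duration is an int64 under the hood, so rand.N accepts it
	// and returns a uniformly random duration in [0, 100ms).
	jitter := rand.N(100_000 * time.Microsecond)
	fmt.Println(1*time.Second + jitter)
}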
@@ -80,6 +80,7 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) {

 	return &http.Client{
 		Transport: &http.Transport{
 			Proxy:           http.ProxyFromEnvironment,
 			TLSClientConfig: tlsc,
 		},
+		CheckRedirect: dockerclient.CheckRedirect,
@@ -89,6 +90,7 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
 func httpConfig() *http.Client {
 	return &http.Client{
 		Transport: &http.Transport{
 			Proxy:           http.ProxyFromEnvironment,
 			TLSClientConfig: nil,
 		},
+		CheckRedirect: dockerclient.CheckRedirect,
@@ -91,6 +91,12 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
 	}
 	for _, tag := range tagsHolder.Tags {
 		if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values
+			// Per https://github.com/containers/skopeo/issues/2409 , Sonatype Nexus 3.58, contrary
+			// to the spec, may include JSON null values in the list; and Go silently parses them as "".
+			if tag == "" {
+				logrus.Debugf("Ignoring invalid empty tag")
+				continue
+			}
 			// Per https://github.com/containers/skopeo/issues/2346 , unknown versions of JFrog Artifactory,
 			// contrary to the tag format specified in
 			// https://github.com/opencontainers/distribution-spec/blob/8a871c8234977df058f1a14e299fe0a673853da2/spec.md?plain=1#L160 ,
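[Reviewer sketch, not part of the vendored diff.] The added check drops empty tags (a JSON null decoded into a Go string becomes "") before reporting other malformed tags. The same filtering idea as a standalone program; the regexp is a simplified stand-in for the OCI distribution-spec tag grammar, where the vendored code instead delegates to reference.WithTag:

package main

import (
	"fmt"
	"regexp"
)

// tagRe approximates the distribution-spec tag grammar; illustrative only.
var tagRe = regexp.MustCompile(`^[a-zA-Z0-9_][a-zA-Z0-9._-]{0,127}$`)

func filterTags(raw []string) []string {
	var out []string
	for _, tag := range raw {
		if tag == "" { // JSON null parsed into a string
			continue // skip silently, matching the vendored behavior
		}
		if !tagRe.MatchString(tag) {
			continue // invalid tag, would be logged and skipped upstream
		}
		out = append(out, tag)
	}
	return out
}

func main() {
	fmt.Println(filterTags([]string{"v1.0", "", "latest", "!bad"}))
}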
@@ -332,6 +332,7 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
 	}

+	originalCandidateKnownToBeMissing := false
 	if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
 		// First, check whether the blob happens to already exist at the destination.
 		haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
@@ -341,9 +342,17 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		if haveBlob {
 			return true, reusedInfo, nil
 		}
+		originalCandidateKnownToBeMissing = true
 	} else {
 		logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v",
 			optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
+		// We can get here with a blob detected to be zstd when the user wants a zstd:chunked.
+		// In that case we keep originalCandidateKnownToBeMissing = false, so that if we find
+		// a BIC entry for this blob, we do use that entry and return a zstd:chunked entry
+		// with the BIC’s annotations.
+		// This is not quite correct, it only works if the BIC also contains an acceptable _location_.
+		// Ideally, we could look up just the compression algorithm/annotations for info.digest,
+		// and use it even if no location candidate exists and the original candidate is present.
 	}

 	// Then try reusing blobs from other locations.
@@ -387,7 +396,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 			// for it in the current repo.
 			candidateRepo = reference.TrimNamed(d.ref.ref)
 		}
-		if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+		if originalCandidateKnownToBeMissing &&
+			candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
 			logrus.Debug("... Already tried the primary destination")
 			continue
 		}
@@ -427,10 +437,12 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))

 		return true, private.ReusedBlob{
-			Digest:               candidate.Digest,
-			Size:                 size,
-			CompressionOperation: candidate.CompressionOperation,
-			CompressionAlgorithm: candidate.CompressionAlgorithm}, nil
+			Digest:                 candidate.Digest,
+			Size:                   size,
+			CompressionOperation:   candidate.CompressionOperation,
+			CompressionAlgorithm:   candidate.CompressionAlgorithm,
+			CompressionAnnotations: candidate.CompressionAnnotations,
+		}, nil
 	}

 	return false, private.ReusedBlob{}, nil
@@ -116,10 +116,10 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
 		// Don’t just build a string, try to preserve the typed error.
 		primary := &attempts[len(attempts)-1]
 		extras := []string{}
-		for i := 0; i < len(attempts)-1; i++ {
+		for _, attempt := range attempts[:len(attempts)-1] {
 			// This is difficult to fit into a single-line string, when the error can contain arbitrary strings including any metacharacters we decide to use.
 			// The paired [] at least have some chance of being unambiguous.
-			extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err))
+			extras = append(extras, fmt.Sprintf("[%s: %v]", attempt.ref.String(), attempt.err))
 		}
 		return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err)
 	}
@@ -464,26 +464,20 @@ func (s *dockerImageSource) GetSignaturesWithFormat(ctx context.Context, instanc
 	var res []signature.Signature
 	switch {
 	case s.c.supportsSignatures:
-		sigs, err := s.getSignaturesFromAPIExtension(ctx, instanceDigest)
-		if err != nil {
+		if err := s.appendSignaturesFromAPIExtension(ctx, &res, instanceDigest); err != nil {
 			return nil, err
 		}
-		res = append(res, sigs...)
 	case s.c.signatureBase != nil:
-		sigs, err := s.getSignaturesFromLookaside(ctx, instanceDigest)
-		if err != nil {
+		if err := s.appendSignaturesFromLookaside(ctx, &res, instanceDigest); err != nil {
 			return nil, err
 		}
-		res = append(res, sigs...)
 	default:
 		return nil, errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration")
 	}

-	sigstoreSigs, err := s.getSignaturesFromSigstoreAttachments(ctx, instanceDigest)
-	if err != nil {
+	if err := s.appendSignaturesFromSigstoreAttachments(ctx, &res, instanceDigest); err != nil {
 		return nil, err
 	}
-	res = append(res, sigstoreSigs...)
 	return res, nil
 }
@@ -505,35 +499,35 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *
 	return manifest.Digest(s.cachedManifest)
 }

-// getSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
-// which is not nil.
-func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+// appendSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase,
+// which is not nil, storing the signatures to *dest.
+// On error, the contents of *dest are undefined.
+func (s *dockerImageSource) appendSignaturesFromLookaside(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error {
 	manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
 	if err != nil {
-		return nil, err
+		return err
 	}

 	// NOTE: Keep this in sync with docs/signature-protocols.md!
-	signatures := []signature.Signature{}
 	for i := 0; ; i++ {
 		if i >= maxLookasideSignatures {
-			return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
+			return fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
 		}

 		sigURL, err := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
 		if err != nil {
-			return nil, err
+			return err
 		}
 		signature, missing, err := s.getOneSignature(ctx, sigURL)
 		if err != nil {
-			return nil, err
+			return err
 		}
 		if missing {
 			break
 		}
-		signatures = append(signatures, signature)
+		*dest = append(*dest, signature)
 	}
-	return signatures, nil
+	return nil
 }

 // getOneSignature downloads one signature from sigURL, and returns (signature, false, nil)
@@ -596,48 +590,51 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL
 		}
 	}

-// getSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension.
-func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+// appendSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension,
+// storing the signatures to *dest.
+// On error, the contents of *dest are undefined.
+func (s *dockerImageSource) appendSignaturesFromAPIExtension(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error {
 	manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
 	if err != nil {
-		return nil, err
+		return err
 	}

 	parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest)
 	if err != nil {
-		return nil, err
+		return err
 	}

-	var sigs []signature.Signature
 	for _, sig := range parsedBody.Signatures {
 		if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic {
-			sigs = append(sigs, signature.SimpleSigningFromBlob(sig.Content))
+			*dest = append(*dest, signature.SimpleSigningFromBlob(sig.Content))
 		}
 	}
-	return sigs, nil
+	return nil
 }

-func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
+// appendSignaturesFromSigstoreAttachments implements GetSignaturesWithFormat() using the sigstore tag convention,
+// storing the signatures to *dest.
+// On error, the contents of *dest are undefined.
+func (s *dockerImageSource) appendSignaturesFromSigstoreAttachments(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error {
 	if !s.c.useSigstoreAttachments {
 		logrus.Debugf("Not looking for sigstore attachments: disabled by configuration")
-		return nil, nil
+		return nil
 	}

 	manifestDigest, err := s.manifestDigest(ctx, instanceDigest)
 	if err != nil {
-		return nil, err
+		return err
 	}

 	ociManifest, err := s.c.getSigstoreAttachmentManifest(ctx, s.physicalRef, manifestDigest)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	if ociManifest == nil {
-		return nil, nil
+		return nil
 	}

 	logrus.Debugf("Found a sigstore attachment manifest with %d layers", len(ociManifest.Layers))
-	res := []signature.Signature{}
 	for layerIndex, layer := range ociManifest.Layers {
 		// Note that this copies all kinds of attachments: attestations, and whatever else is there,
 		// not just signatures. We leave the signature consumers to decide based on the MIME type.
@@ -648,11 +645,11 @@
 		payload, err := s.c.getOCIDescriptorContents(ctx, s.physicalRef, layer, iolimits.MaxSignatureBodySize,
 			none.NoCache)
 		if err != nil {
-			return nil, err
+			return err
 		}
-		res = append(res, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations))
+		*dest = append(*dest, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations))
 	}
-	return res, nil
+	return nil
 }

 // deleteImage deletes the named image from the registry, if supported.
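[Reviewer sketch, not part of the vendored diff.] The get→append refactor above replaces "build a slice, return it, append at the call site" helpers with helpers that append into a shared destination slice, so each call site shrinks to a single error check. The shape of the pattern, reduced to a toy standalone program (appendEvens is a placeholder, not a vendored function):

package main

import "fmt"

// appendEvens appends into *dest instead of returning a new slice;
// on error the contents of *dest are undefined, as in the vendored helpers.
func appendEvens(dest *[]int, upTo int) error {
	if upTo < 0 {
		return fmt.Errorf("invalid bound %d", upTo)
	}
	for i := 0; i <= upTo; i += 2 {
		*dest = append(*dest, i)
	}
	return nil
}

func main() {
	var res []int
	if err := appendEvens(&res, 6); err != nil { // one error check per source
		panic(err)
	}
	fmt.Println(res)
}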
@@ -830,7 +827,7 @@ func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint)
 		handleBufferedNetworkReader(&br)
 	}()

-	for i := uint(0); i < nBuffers; i++ {
+	for range nBuffers {
 		b := bufferedNetworkReaderBuffer{
 			data: make([]byte, bufferSize),
 		}
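[Reviewer sketch, not part of the vendored diff.] "for range nBuffers" uses Go 1.22's range-over-integer form, equivalent to the old counted loop when the index is unused:

package main

import "fmt"

func main() {
	nBuffers := uint(3)
	for range nBuffers { // Go 1.22+: runs nBuffers times, no index variable needed
		fmt.Println("allocate one buffer")
	}
}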
@@ -37,8 +37,11 @@ type BlobInfoCache2 interface {

 	// RecordDigestCompressorData records data for the blob with the specified digest.
 	// WARNING: Only call this with LOCALLY VERIFIED data:
-	// - don’t record a compressor for a digest just because some remote author claims so
-	//   (e.g. because a manifest says so);
+	//   - don’t record a compressor for a digest just because some remote author claims so
+	//     (e.g. because a manifest says so);
+	//   - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+	//     and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+	//     in a manifest)
 	// otherwise the cache could be poisoned and cause us to make incorrect edits to type
 	// information in a manifest.
 	RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData)
@@ -52,6 +55,9 @@ type BlobInfoCache2 interface {
 // (This is worded generically, but basically targeted at the zstd / zstd:chunked situation.)
 type DigestCompressorData struct {
 	BaseVariantCompressor string // A compressor’s base variant name, or Uncompressed or UnknownCompression.
+	// The following fields are only valid if the base variant is neither Uncompressed nor UnknownCompression:
+	SpecificVariantCompressor  string            // A non-base variant compressor (or UnknownCompression if the true format is just the base variant)
+	SpecificVariantAnnotations map[string]string // Annotations required to benefit from the base variant.
 }

 // CandidateLocations2Options are used in CandidateLocations2.
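[Reviewer sketch, not part of the vendored diff.] The base/specific split encodes that zstd:chunked is a specific variant of zstd, and that the annotations are what a consumer needs to actually use the chunked format. A standalone mirror of the semantics (the types here are redeclared for illustration only; they are not the vendored definitions, and the annotation key is just an example):

package main

import "fmt"

const unknownCompression = "" // stand-in for blobinfocache.UnknownCompression

// compressorData mirrors the shape of DigestCompressorData for illustration.
type compressorData struct {
	baseVariant         string            // e.g. "zstd"
	specificVariant     string            // e.g. "zstd:chunked", or unknown
	specificAnnotations map[string]string // e.g. TOC-related annotations for zstd:chunked
}

func describe(d compressorData) string {
	if d.specificVariant != unknownCompression {
		return fmt.Sprintf("%s (specific variant of %s, %d annotations)",
			d.specificVariant, d.baseVariant, len(d.specificAnnotations))
	}
	return d.baseVariant
}

func main() {
	plain := compressorData{baseVariant: "zstd", specificVariant: unknownCompression}
	chunked := compressorData{
		baseVariant:         "zstd",
		specificVariant:     "zstd:chunked",
		specificAnnotations: map[string]string{"example.zstd-chunked.manifest-checksum": "sha256:…"},
	}
	fmt.Println(describe(plain))
	fmt.Println(describe(chunked))
}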
@@ -66,9 +72,10 @@ type CandidateLocations2Options struct {

 // BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
 type BICReplacementCandidate2 struct {
-	Digest               digest.Digest
-	CompressionOperation types.LayerCompression      // Either types.Decompress for uncompressed, or types.Compress for compressed
-	CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
-	UnknownLocation      bool                        // is true when `Location` for this blob is not set
-	Location             types.BICLocationReference  // not set if UnknownLocation is set to `true`
+	Digest                 digest.Digest
+	CompressionOperation   types.LayerCompression      // Either types.Decompress for uncompressed, or types.Compress for compressed
+	CompressionAlgorithm   *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
+	CompressionAnnotations map[string]string           // If necessary, annotations necessary to use CompressionAlgorithm
+	UnknownLocation        bool                        // is true when `Location` for this blob is not set
+	Location               types.BICLocationReference  // not set if UnknownLocation is set to `true`
 }
@@ -76,6 +76,9 @@ func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.Blob
 			Size:                 blob.Size,
 			CompressionOperation: blob.CompressionOperation,
 			CompressionAlgorithm: blob.CompressionAlgorithm,
+			// CompressionAnnotations could be set to blob.Annotations, but that may contain unrelated
+			// annotations, and we didn’t use the blob.Annotations field previously, so we’ll
+			// continue not using it.
 		}, nil
 	}
@@ -152,10 +152,7 @@ func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemCont
 // ChooseInstance parses blob as a schema2 manifest list, and returns the digest
 // of the image which is appropriate for the current environment.
 func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
-	wantedPlatforms, err := platform.WantedPlatforms(ctx)
-	if err != nil {
-		return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
-	}
+	wantedPlatforms := platform.WantedPlatforms(ctx)
 	for _, wantedPlatform := range wantedPlatforms {
 		for _, d := range list.Manifests {
 			imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform)
@@ -205,11 +205,6 @@ type ReuseConditions struct {
 // (which can be nil to represent uncompressed or unknown) matches reuseConditions.
 func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool {
 	if c.RequiredCompression != nil {
-		if c.RequiredCompression.Name() == compressiontypes.ZstdChunkedAlgorithmName {
-			// HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
-			// The caller must re-compress to build those annotations.
-			return false
-		}
 		if candidateCompression == nil ||
 			(c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
 			return false
@@ -236,10 +236,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi
 	if preferGzip == types.OptionalBoolTrue {
 		didPreferGzip = true
 	}
-	wantedPlatforms, err := platform.WantedPlatforms(ctx)
-	if err != nil {
-		return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
-	}
+	wantedPlatforms := platform.WantedPlatforms(ctx)
 	var bestMatch *instanceCandidate
 	bestMatch = nil
 	for manifestIndex, d := range index.Manifests {
vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go (generated, vendored)
@@ -153,7 +153,7 @@ var compatibility = map[string][]string{
 // WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user,
 // the most compatible platform is first.
 // If some option (arch, os, variant) is not present, a value from current platform is detected.
-func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
+func WantedPlatforms(ctx *types.SystemContext) []imgspecv1.Platform {
 	// Note that this does not use Platform.OSFeatures and Platform.OSVersion at all.
 	// The fields are not specified by the OCI specification, as of version 1.1, usefully enough
 	// to be interoperable, anyway.
@@ -211,7 +211,7 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
 			Variant:      v,
 		})
 	}
-	return res, nil
+	return res
 }

 // MatchesPlatform returns true if a platform descriptor from a multi-arch image matches
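[Reviewer sketch, not part of the vendored diff.] Dropping the error return from WantedPlatforms ripples through the ChooseInstance call sites above. The shape of the selection loop, as a standalone toy (the matcher here is a placeholder, not the vendored one):

package main

import "fmt"

type platform struct{ os, arch, variant string }

// wantedPlatforms returns compatible platforms, most preferred first,
// mirroring the now error-free WantedPlatforms signature.
func wantedPlatforms() []platform {
	return []platform{
		{"linux", "arm64", "v8"},
		{"linux", "arm64", ""}, // fallback without a variant
	}
}

func main() {
	manifests := []platform{{"linux", "amd64", ""}, {"linux", "arm64", ""}}
	for _, want := range wantedPlatforms() { // ordered by preference
		for i, have := range manifests {
			if have == want {
				fmt.Println("chose manifest", i)
				return
			}
		}
	}
	fmt.Println("no match")
}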
@@ -134,9 +134,14 @@ type ReusedBlob struct {
 	Size int64 // Must be provided
 	// The following compression fields should be set when the reuse substitutes
 	// a differently-compressed blob.
+	// They may be set also to change from a base variant to a specific variant of an algorithm.
 	CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
 	CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A

+	// Annotations that should be added, for CompressionAlgorithm. Note that they might need to be
+	// added even if the digest doesn’t change (if we found the annotations in a cache).
+	CompressionAnnotations map[string]string
+
 	MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes.
 }
@@ -318,20 +318,20 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
 	// Add the history and rootfs information.
 	rootfs, err := json.Marshal(rootFS)
 	if err != nil {
-		return nil, fmt.Errorf("error encoding rootfs information %#v: %v", rootFS, err)
+		return nil, fmt.Errorf("error encoding rootfs information %#v: %w", rootFS, err)
 	}
 	rawRootfs := json.RawMessage(rootfs)
 	raw["rootfs"] = &rawRootfs
 	history, err := json.Marshal(convertedHistory)
 	if err != nil {
-		return nil, fmt.Errorf("error encoding history information %#v: %v", convertedHistory, err)
+		return nil, fmt.Errorf("error encoding history information %#v: %w", convertedHistory, err)
 	}
 	rawHistory := json.RawMessage(history)
 	raw["history"] = &rawHistory
 	// Encode the result.
 	config, err = json.Marshal(raw)
 	if err != nil {
-		return nil, fmt.Errorf("error re-encoding compat image config %#v: %v", s1, err)
+		return nil, fmt.Errorf("error re-encoding compat image config %#v: %w", s1, err)
 	}
 	return config, nil
 }
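[Reviewer sketch, not part of the vendored diff.] The %v→%w conversions in this and the later clientcmd hunks make the wrapped error visible to errors.Is/errors.As instead of flattening it to text. Quick standalone demonstration:

package main

import (
	"errors"
	"fmt"
	"io/fs"
)

func main() {
	wrapped := fmt.Errorf("encoding rootfs information: %w", fs.ErrNotExist)
	stringified := fmt.Errorf("encoding rootfs information: %v", fs.ErrNotExist)

	fmt.Println(errors.Is(wrapped, fs.ErrNotExist))     // true: %w preserves the chain
	fmt.Println(errors.Is(stringified, fs.ErrNotExist)) // false: %v flattens to text
}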
@@ -60,7 +60,7 @@ func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) {
 	if err := json.Unmarshal(manifestBlob, &oci1); err != nil {
 		return nil, err
 	}
-	if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageIndex,
+	if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageManifest,
 		manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
 		return nil, err
 	}
@@ -27,17 +27,8 @@ func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContex
 		return err
 	}

-	var blobsUsedByImage map[digest.Digest]int
-
-	switch descriptor.MediaType {
-	case imgspecv1.MediaTypeImageManifest:
-		blobsUsedByImage, err = ref.getBlobsUsedInSingleImage(&descriptor, sharedBlobsDir)
-	case imgspecv1.MediaTypeImageIndex:
-		blobsUsedByImage, err = ref.getBlobsUsedInImageIndex(&descriptor, sharedBlobsDir)
-	default:
-		return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
-	}
-	if err != nil {
+	blobsUsedByImage := make(map[digest.Digest]int)
+	if err := ref.countBlobsForDescriptor(blobsUsedByImage, &descriptor, sharedBlobsDir); err != nil {
 		return err
 	}
@@ -54,80 +45,46 @@ func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContex
 	return ref.deleteReferenceFromIndex(descriptorIndex)
 }

-func (ref ociReference) getBlobsUsedInSingleImage(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) {
-	manifest, err := ref.getManifest(descriptor, sharedBlobsDir)
-	if err != nil {
-		return nil, err
-	}
-	blobsUsedInManifest := ref.getBlobsUsedInManifest(manifest)
-	blobsUsedInManifest[descriptor.Digest]++ // Add the current manifest to the list of blobs used by this reference
-
-	return blobsUsedInManifest, nil
-}
-
-func (ref ociReference) getBlobsUsedInImageIndex(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) {
+// countBlobsForDescriptor updates dest with usage counts of blobs required for descriptor, INCLUDING descriptor itself.
+func (ref ociReference) countBlobsForDescriptor(dest map[digest.Digest]int, descriptor *imgspecv1.Descriptor, sharedBlobsDir string) error {
 	blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
 	if err != nil {
-		return nil, err
-	}
-	index, err := parseIndex(blobPath)
-	if err != nil {
-		return nil, err
+		return err
 	}

-	blobsUsedInImageRefIndex := make(map[digest.Digest]int)
-	err = ref.addBlobsUsedInIndex(blobsUsedInImageRefIndex, index, sharedBlobsDir)
-	if err != nil {
-		return nil, err
-	}
-	blobsUsedInImageRefIndex[descriptor.Digest]++ // Add the nested index in the list of blobs used by this reference
-
-	return blobsUsedInImageRefIndex, nil
-}
-
-// Updates a map of digest with the usage count, so a blob that is referenced three times will have 3 in the map
-func (ref ociReference) addBlobsUsedInIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error {
-	for _, descriptor := range index.Manifests {
-		destination[descriptor.Digest]++
-		switch descriptor.MediaType {
-		case imgspecv1.MediaTypeImageManifest:
-			manifest, err := ref.getManifest(&descriptor, sharedBlobsDir)
-			if err != nil {
-				return err
-			}
-			for digest, count := range ref.getBlobsUsedInManifest(manifest) {
-				destination[digest] += count
-			}
-		case imgspecv1.MediaTypeImageIndex:
-			blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
-			if err != nil {
-				return err
-			}
-			index, err := parseIndex(blobPath)
-			if err != nil {
-				return err
-			}
-			err = ref.addBlobsUsedInIndex(destination, index, sharedBlobsDir)
-			if err != nil {
-				return err
-			}
-		default:
-			return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
+	dest[descriptor.Digest]++
+	switch descriptor.MediaType {
+	case imgspecv1.MediaTypeImageManifest:
+		manifest, err := parseJSON[imgspecv1.Manifest](blobPath)
+		if err != nil {
+			return err
+		}
+		dest[manifest.Config.Digest]++
+		for _, layer := range manifest.Layers {
+			dest[layer.Digest]++
+		}
+	case imgspecv1.MediaTypeImageIndex:
+		index, err := parseIndex(blobPath)
+		if err != nil {
+			return err
+		}
+		if err := ref.countBlobsReferencedByIndex(dest, index, sharedBlobsDir); err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
 	}
-
 	return nil
 }

-func (ref ociReference) getBlobsUsedInManifest(manifest *imgspecv1.Manifest) map[digest.Digest]int {
-	blobsUsedInManifest := make(map[digest.Digest]int, 0)
-
-	blobsUsedInManifest[manifest.Config.Digest]++
-	for _, layer := range manifest.Layers {
-		blobsUsedInManifest[layer.Digest]++
-	}
-
-	return blobsUsedInManifest
+// countBlobsReferencedByIndex updates dest with usage counts of blobs required for index, EXCLUDING the index itself.
+func (ref ociReference) countBlobsReferencedByIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error {
+	for _, descriptor := range index.Manifests {
+		if err := ref.countBlobsForDescriptor(destination, &descriptor, sharedBlobsDir); err != nil {
+			return err
+		}
+	}
+	return nil
 }

 // This takes in a map of the digest and their usage count in the manifest to be deleted
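[Reviewer sketch, not part of the vendored diff.] The refactor folds the per-media-type helpers into one mutually recursive pair: countBlobsForDescriptor counts the descriptor itself plus what it references, recursing through nested indexes via countBlobsReferencedByIndex. A toy model of the same recursion over an in-memory tree instead of on-disk blobs:

package main

import "fmt"

// node is a toy stand-in for an OCI descriptor: either a manifest
// (leaf with config/layer digests) or an index (list of children).
type node struct {
	digest   string
	children []*node  // non-nil means "index"
	blobs    []string // config/layer digests of a manifest
}

// countForDescriptor counts everything the node needs, INCLUDING its own digest.
func countForDescriptor(dest map[string]int, n *node) {
	dest[n.digest]++
	if n.children != nil {
		countReferencedByIndex(dest, n) // EXCLUDES the index itself
		return
	}
	for _, b := range n.blobs {
		dest[b]++
	}
}

// countReferencedByIndex counts what an index references, not the index.
func countReferencedByIndex(dest map[string]int, idx *node) {
	for _, child := range idx.children {
		countForDescriptor(dest, child)
	}
}

func main() {
	shared := &node{digest: "sha256:m1", blobs: []string{"sha256:cfg", "sha256:layer"}}
	root := &node{digest: "sha256:idx", children: []*node{shared, shared}}
	counts := map[string]int{}
	countForDescriptor(counts, root)
	fmt.Println(counts) // shared blobs are counted once per reference
}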
@@ -138,7 +95,7 @@ func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[diges
 		return nil, err
 	}
 	blobsUsedInRootIndex := make(map[digest.Digest]int)
-	err = ref.addBlobsUsedInIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir)
+	err = ref.countBlobsReferencedByIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir)
 	if err != nil {
 		return nil, err
 	}
@@ -224,17 +181,3 @@ func saveJSON(path string, content any) error {

 	return json.NewEncoder(file).Encode(content)
 }
-
-func (ref ociReference) getManifest(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (*imgspecv1.Manifest, error) {
-	manifestPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
-	if err != nil {
-		return nil, err
-	}
-
-	manifest, err := parseJSON[imgspecv1.Manifest](manifestPath)
-	if err != nil {
-		return nil, err
-	}
-
-	return manifest, nil
-}
@@ -365,7 +365,7 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []err
 	if len(clusterInfo.CertificateAuthority) != 0 {
 		err := validateFileIsReadable(clusterInfo.CertificateAuthority)
 		if err != nil {
-			validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
+			validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %w", clusterInfo.CertificateAuthority, clusterName, err))
 		}
 	}
@@ -403,13 +403,13 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error {
 	if len(authInfo.ClientCertificate) != 0 {
 		err := validateFileIsReadable(authInfo.ClientCertificate)
 		if err != nil {
-			validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
+			validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %w", authInfo.ClientCertificate, authInfoName, err))
 		}
 	}
 	if len(authInfo.ClientKey) != 0 {
 		err := validateFileIsReadable(authInfo.ClientKey)
 		if err != nil {
-			validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
+			validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %w", authInfo.ClientKey, authInfoName, err))
 		}
 	}
 }
@@ -28,9 +28,10 @@ const replacementUnknownLocationAttempts = 2
 // CandidateTemplate is a subset of BICReplacementCandidate2 with data related to a specific digest,
 // which can be later combined with information about a location.
 type CandidateTemplate struct {
-	digest               digest.Digest
-	compressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
-	compressionAlgorithm *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
+	digest                 digest.Digest
+	compressionOperation   types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
+	compressionAlgorithm   *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
+	compressionAnnotations map[string]string      // If necessary, annotations necessary to use compressionAlgorithm
 }

 // CandidateTemplateWithCompression returns a CandidateTemplate if a blob with data is acceptable
@@ -40,7 +41,7 @@ type CandidateTemplate struct {
 // if not nil, the call is assumed to be CandidateLocations2.
 func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, data blobinfocache.DigestCompressorData) *CandidateTemplate {
 	if v2Options == nil {
-		return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm values are not used.
+		return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm and compressionAnnotations values are not used.
 			digest: digest,
 		}
 	}
@@ -60,14 +61,40 @@ func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocation
 			return nil
 		}
 		return &CandidateTemplate{
-			digest:               digest,
-			compressionOperation: types.Decompress,
-			compressionAlgorithm: nil,
+			digest:                 digest,
+			compressionOperation:   types.Decompress,
+			compressionAlgorithm:   nil,
+			compressionAnnotations: nil,
 		}
 	case blobinfocache.UnknownCompression:
 		logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String())
 		return nil // Not allowed with CandidateLocations2
 	default:
+		// See if we can use the specific variant, first.
+		if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+			algo, err := compression.AlgorithmByName(data.SpecificVariantCompressor)
+			if err != nil {
+				logrus.Debugf("Not considering unrecognized specific compression variant %q for BlobInfoCache record of digest %q: %v",
+					data.SpecificVariantCompressor, digest.String(), err)
+			} else {
+				if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
+					PossibleManifestFormats: v2Options.PossibleManifestFormats,
+					RequiredCompression:     v2Options.RequiredCompression,
+				}, &algo) {
+					logrus.Debugf("Ignoring specific compression variant %q for BlobInfoCache record of digest %q, it does not match required %s or MIME types %#v",
+						data.SpecificVariantCompressor, digest.String(), requiredCompression, v2Options.PossibleManifestFormats)
+				} else {
+					return &CandidateTemplate{
+						digest:                 digest,
+						compressionOperation:   types.Compress,
+						compressionAlgorithm:   &algo,
+						compressionAnnotations: data.SpecificVariantAnnotations,
+					}
+				}
+			}
+		}
+
+		// Try the base variant.
 		algo, err := compression.AlgorithmByName(data.BaseVariantCompressor)
 		if err != nil {
 			logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v",
@@ -83,9 +110,10 @@ func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocation
 			return nil
 		}
 		return &CandidateTemplate{
-			digest:               digest,
-			compressionOperation: types.Compress,
-			compressionAlgorithm: &algo,
+			digest:                 digest,
+			compressionOperation:   types.Compress,
+			compressionAlgorithm:   &algo,
+			compressionAnnotations: nil,
 		}
 	}
 }
@@ -100,11 +128,12 @@ type CandidateWithTime struct {
 func (template CandidateTemplate) CandidateWithLocation(location types.BICLocationReference, lastSeen time.Time) CandidateWithTime {
 	return CandidateWithTime{
 		candidate: blobinfocache.BICReplacementCandidate2{
-			Digest:               template.digest,
-			CompressionOperation: template.compressionOperation,
-			CompressionAlgorithm: template.compressionAlgorithm,
-			UnknownLocation:      false,
-			Location:             location,
+			Digest:                 template.digest,
+			CompressionOperation:   template.compressionOperation,
+			CompressionAlgorithm:   template.compressionAlgorithm,
+			CompressionAnnotations: template.compressionAnnotations,
+			UnknownLocation:        false,
+			Location:               location,
 		},
 		lastSeen: lastSeen,
 	}
@@ -114,11 +143,12 @@ func (template CandidateTemplate) CandidateWithLocation(location types.BICLocati
 func (template CandidateTemplate) CandidateWithUnknownLocation() CandidateWithTime {
 	return CandidateWithTime{
 		candidate: blobinfocache.BICReplacementCandidate2{
-			Digest:               template.digest,
-			CompressionOperation: template.compressionOperation,
-			CompressionAlgorithm: template.compressionAlgorithm,
-			UnknownLocation:      true,
-			Location:             types.BICLocationReference{Opaque: ""},
+			Digest:                 template.digest,
+			CompressionOperation:   template.compressionOperation,
+			CompressionAlgorithm:   template.compressionAlgorithm,
+			CompressionAnnotations: template.compressionAnnotations,
+			UnknownLocation:        true,
+			Location:               types.BICLocationReference{Opaque: ""},
 		},
 		lastSeen: time.Time{},
 	}
@@ -170,8 +200,6 @@ func (css *candidateSortState) compare(xi, xj CandidateWithTime) int {

 // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
 // number of entries to limit for known and unknown location separately, only to make testing simpler.
-// TODO: following function is not destructive any more in the nature instead prioritized result is actually copies of the original
-// candidate set, so In future we might wanna re-name this public API and remove the destructive prefix.
 func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 {
 	// split unknown candidates and known candidates
 	// and limit them separately.
@@ -28,7 +28,7 @@ type cache struct {
 	uncompressedDigestsByTOC map[digest.Digest]digest.Digest
 	digestsByUncompressed    map[digest.Digest]*set.Set[digest.Digest]                // stores a set of digests for each uncompressed digest
 	knownLocations           map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
-	compressors              map[digest.Digest]string                                 // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest
+	compressors              map[digest.Digest]blobinfocache.DigestCompressorData     // stores compression data for each digest; BaseVariantCompressor != UnknownCompression
 }

 // New returns a BlobInfoCache implementation which is in-memory only.
@@ -49,7 +49,7 @@ func new2() *cache {
 		uncompressedDigestsByTOC: map[digest.Digest]digest.Digest{},
 		digestsByUncompressed:    map[digest.Digest]*set.Set[digest.Digest]{},
 		knownLocations:           map[locationKey]map[types.BICLocationReference]time.Time{},
-		compressors:              map[digest.Digest]string{},
+		compressors:              map[digest.Digest]blobinfocache.DigestCompressorData{},
 	}
 }
@@ -148,20 +148,36 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type
 // WARNING: Only call this with LOCALLY VERIFIED data:
 //   - don’t record a compressor for a digest just because some remote author claims so
 //     (e.g. because a manifest says so);
+//   - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+//     and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+//     in a manifest)
+//
 // otherwise the cache could be poisoned and cause us to make incorrect edits to type
 // information in a manifest.
 func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) {
 	mem.mutex.Lock()
 	defer mem.mutex.Unlock()
-	if previous, ok := mem.compressors[anyDigest]; ok && previous != data.BaseVariantCompressor {
-		logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor)
+	if previous, ok := mem.compressors[anyDigest]; ok {
+		if previous.BaseVariantCompressor != data.BaseVariantCompressor {
+			logrus.Warnf("Base compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.BaseVariantCompressor, data.BaseVariantCompressor)
+		} else if previous.SpecificVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor != blobinfocache.UnknownCompression &&
+			previous.SpecificVariantCompressor != data.SpecificVariantCompressor {
+			logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.SpecificVariantCompressor, data.SpecificVariantCompressor)
+		}
+		// We don’t check SpecificVariantAnnotations for equality, it’s possible that their generation is not deterministic.
+
+		// Preserve specific variant information if the incoming data does not have it.
+		if data.BaseVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor == blobinfocache.UnknownCompression &&
+			previous.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+			data.SpecificVariantCompressor = previous.SpecificVariantCompressor
+			data.SpecificVariantAnnotations = previous.SpecificVariantAnnotations
+		}
 	}
 	if data.BaseVariantCompressor == blobinfocache.UnknownCompression {
 		delete(mem.compressors, anyDigest)
 		return
 	}
-	mem.compressors[anyDigest] = data.BaseVariantCompressor
+	mem.compressors[anyDigest] = data
 }

 // appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
@@ -171,13 +187,15 @@ func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobi
 // with unknown compression.
 func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
 	v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime {
-	compressorName := blobinfocache.UnknownCompression
+	compressionData := blobinfocache.DigestCompressorData{
+		BaseVariantCompressor:      blobinfocache.UnknownCompression,
+		SpecificVariantCompressor:  blobinfocache.UnknownCompression,
+		SpecificVariantAnnotations: nil,
+	}
 	if v, ok := mem.compressors[digest]; ok {
-		compressorName = v
+		compressionData = v
 	}
-	template := prioritize.CandidateTemplateWithCompression(v2Options, digest, blobinfocache.DigestCompressorData{
-		BaseVariantCompressor: compressorName,
-	})
+	template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData)
 	if template == nil {
 		return candidates
 	}
@@ -3,6 +3,7 @@ package sqlite

 import (
 	"database/sql"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"sync"
@@ -303,6 +304,16 @@ func ensureDBHasCurrentSchema(db *sql.DB) error {
 			`uncompressedDigest TEXT NOT NULL
 			)`,
 		},
+		{
+			"DigestSpecificVariantCompressors", // If changing the schema incompatibly, merge this with DigestCompressors.
+			`CREATE TABLE IF NOT EXISTS DigestSpecificVariantCompressors(` +
+				// index implied by PRIMARY KEY
+				`digest TEXT PRIMARY KEY NOT NULL,` +
+				// The compressor is not `UnknownCompression`.
+				`specificVariantCompressor TEXT NOT NULL,
+			specificVariantAnnotations BLOB NOT NULL
+			)`,
+		},
 	}

 	_, err := dbTransaction(db, func(tx *sql.Tx) (void, error) {
@@ -461,6 +472,9 @@ func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope type
 // WARNING: Only call this with LOCALLY VERIFIED data:
 //   - don’t record a compressor for a digest just because some remote author claims so
 //     (e.g. because a manifest says so);
+//   - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+//     and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+//     in a manifest)
+//
 // otherwise the cache could be poisoned and cause us to make incorrect edits to type
 // information in a manifest.
@@ -468,21 +482,46 @@ func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobi
 	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
 		previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String())
 		if err != nil {
-			return void{}, fmt.Errorf("looking for compressor of for %q", anyDigest)
+			return void{}, fmt.Errorf("looking for compressor of %q", anyDigest)
 		}
+		warned := false
 		if gotPrevious && previous != data.BaseVariantCompressor {
 			logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor)
+			warned = true
 		}
 		if data.BaseVariantCompressor == blobinfocache.UnknownCompression {
 			if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil {
 				return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err)
 			}
+			if _, err := tx.Exec("DELETE FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()); err != nil {
+				return void{}, fmt.Errorf("deleting specific variant compressor for digest %q: %w", anyDigest, err)
+			}
 		} else {
 			if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)",
 				anyDigest.String(), data.BaseVariantCompressor); err != nil {
 				return void{}, fmt.Errorf("recording compressor %q for %q: %w", data.BaseVariantCompressor, anyDigest, err)
 			}
 		}
+
+		if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
+			if !warned { // Don’t warn twice about the same digest
+				prevSVC, found, err := querySingleValue[string](tx, "SELECT specificVariantCompressor FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String())
+				if err != nil {
+					return void{}, fmt.Errorf("looking for specific variant compressor of %q", anyDigest)
+				}
+				if found && data.SpecificVariantCompressor != prevSVC {
+					logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, prevSVC, data.SpecificVariantCompressor)
+				}
+			}
+			annotations, err := json.Marshal(data.SpecificVariantAnnotations)
+			if err != nil {
+				return void{}, err
+			}
+			if _, err := tx.Exec("INSERT OR REPLACE INTO DigestSpecificVariantCompressors(digest, specificVariantCompressor, specificVariantAnnotations) VALUES (?, ?, ?)",
+				anyDigest.String(), data.SpecificVariantCompressor, annotations); err != nil {
+				return void{}, fmt.Errorf("recording specific variant compressor %q/%q for %q: %w", data.SpecificVariantCompressor, annotations, anyDigest, err)
+			}
+		}
 		return void{}, nil
 	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
 }
|
@ -493,19 +532,32 @@ func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobi
|
|||
// with unknown compression.
|
||||
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
|
||||
v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) {
|
||||
compressorName := blobinfocache.UnknownCompression
|
||||
compressionData := blobinfocache.DigestCompressorData{
|
||||
BaseVariantCompressor: blobinfocache.UnknownCompression,
|
||||
SpecificVariantCompressor: blobinfocache.UnknownCompression,
|
||||
SpecificVariantAnnotations: nil,
|
||||
}
|
||||
if v2Options != nil {
|
||||
compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scanning compressorName: %w", err)
|
||||
}
|
||||
if found {
|
||||
compressorName = compressor
|
||||
var baseVariantCompressor string
|
||||
var specificVariantCompressor sql.NullString
|
||||
var annotationBytes []byte
|
||||
switch err := tx.QueryRow("SELECT compressor, specificVariantCompressor, specificVariantAnnotations "+
|
||||
"FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) WHERE digest = ?", digest.String()).
|
||||
Scan(&baseVariantCompressor, &specificVariantCompressor, &annotationBytes); {
|
||||
case errors.Is(err, sql.ErrNoRows): // Do nothing
|
||||
case err != nil:
|
||||
return nil, fmt.Errorf("scanning compressor data: %w", err)
|
||||
default:
|
||||
compressionData.BaseVariantCompressor = baseVariantCompressor
|
||||
if specificVariantCompressor.Valid && annotationBytes != nil {
|
||||
compressionData.SpecificVariantCompressor = specificVariantCompressor.String
|
||||
if err := json.Unmarshal(annotationBytes, &compressionData.SpecificVariantAnnotations); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
template := prioritize.CandidateTemplateWithCompression(v2Options, digest, blobinfocache.DigestCompressorData{
|
||||
BaseVariantCompressor: compressorName,
|
||||
})
|
||||
template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData)
|
||||
if template == nil {
|
||||
return candidates, nil
|
||||
}
|
||||
|
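[Reviewer sketch, not part of the vendored diff.] The single LEFT JOIN returns base-variant data together with optional specific-variant columns; sql.NullString and a nil-able []byte absorb the NULLs when no DigestSpecificVariantCompressors row exists. The scan shape, reduced to a standalone program (the SQLite driver import is an assumption; any database/sql driver works):

package main

import (
	"database/sql"
	"errors"
	"fmt"

	_ "github.com/mattn/go-sqlite3" // assumed driver, illustrative only
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	must := func(_ sql.Result, err error) {
		if err != nil {
			panic(err)
		}
	}
	must(db.Exec(`CREATE TABLE DigestCompressors(digest TEXT PRIMARY KEY, compressor TEXT NOT NULL)`))
	must(db.Exec(`CREATE TABLE DigestSpecificVariantCompressors(digest TEXT PRIMARY KEY,
		specificVariantCompressor TEXT NOT NULL, specificVariantAnnotations BLOB NOT NULL)`))
	must(db.Exec(`INSERT INTO DigestCompressors VALUES ('sha256:x', 'zstd')`))

	var base string
	var specific sql.NullString // NULL when the joined row is missing
	var annotations []byte      // nil when the joined row is missing
	row := db.QueryRow(`SELECT compressor, specificVariantCompressor, specificVariantAnnotations
		FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest)
		WHERE digest = ?`, "sha256:x")
	switch err := row.Scan(&base, &specific, &annotations); {
	case errors.Is(err, sql.ErrNoRows):
		fmt.Println("unknown digest")
	case err != nil:
		panic(err)
	default:
		fmt.Println(base, specific.Valid, annotations == nil)
	}
}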
@@ -561,40 +613,41 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
 		if err != nil {
 			return nil, err
 		}

-		// FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
-		// (In the extreme, we could turn _everything_ this function does into a single query.
-		// And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
-		// For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
-		rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
-		if err != nil {
-			return nil, fmt.Errorf("querying for other digests: %w", err)
-		}
-		defer rows.Close()
-		for rows.Next() {
-			var otherDigestString string
-			if err := rows.Scan(&otherDigestString); err != nil {
-				return nil, fmt.Errorf("scanning other digest: %w", err)
-			}
-			otherDigest, err := digest.Parse(otherDigestString)
-			if err != nil {
-				return nil, err
-			}
-			if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
-				res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
-				if err != nil {
-					return nil, err
-				}
-			}
-		}
-		if err := rows.Err(); err != nil {
-			return nil, fmt.Errorf("iterating through other digests: %w", err)
-		}
-
-		if uncompressedDigest != primaryDigest {
-			res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
-			if err != nil {
-				return nil, err
+		if uncompressedDigest != "" {
+			// FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
+			// (In the extreme, we could turn _everything_ this function does into a single query.
+			// And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
+			// For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
+			rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
+			if err != nil {
+				return nil, fmt.Errorf("querying for other digests: %w", err)
+			}
+			defer rows.Close()
+			for rows.Next() {
+				var otherDigestString string
+				if err := rows.Scan(&otherDigestString); err != nil {
+					return nil, fmt.Errorf("scanning other digest: %w", err)
+				}
+				otherDigest, err := digest.Parse(otherDigestString)
+				if err != nil {
+					return nil, err
+				}
+				if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
+					res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
+					if err != nil {
+						return nil, err
+					}
+				}
+			}
+			if err := rows.Err(); err != nil {
+				return nil, fmt.Errorf("iterating through other digests: %w", err)
+			}
+
+			if uncompressedDigest != primaryDigest {
+				res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
+				if err != nil {
+					return nil, err
+				}
 			}
 		}
@@ -195,10 +195,10 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,
 	return untrustedCertificate.PublicKey, nil
 }

-func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
+func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
 	untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
 	untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
-	rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKey, untrustedRekorSET, untrustedCertificateBytes,
+	rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKeys, untrustedRekorSET, untrustedCertificateBytes,
 		untrustedBase64Signature, untrustedPayloadBytes)
 	if err != nil {
 		return nil, err
@@ -40,17 +40,20 @@ type UntrustedRekorPayload struct {
 // A compile-time check that UntrustedRekorSET implements json.Unmarshaler
 var _ json.Unmarshaler = (*UntrustedRekorSET)(nil)

-// UnmarshalJSON implements the json.Unmarshaler interface
-func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
-	err := s.strictUnmarshalJSON(data)
-	if err != nil {
-		if formatErr, ok := err.(JSONFormatError); ok {
-			err = NewInvalidSignatureError(formatErr.Error())
-		}
+// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError.
+// All other errors are returned as is.
+func JSONFormatToInvalidSignatureError(err error) error {
+	if formatErr, ok := err.(JSONFormatError); ok {
+		err = NewInvalidSignatureError(formatErr.Error())
 	}
 	return err
 }

+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
+	return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
+}
+
 // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
 // Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
 func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error {
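This refactor replaces three copies of the same type-switch with one shared helper. A minimal self-contained sketch of the pattern (the error types here are illustrative stand-ins, not the library's unexported details, and it uses errors.As where the original uses a plain type assertion):

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the library's JSONFormatError / InvalidSignatureError types.
type jsonFormatError struct{ msg string }

func (e jsonFormatError) Error() string { return e.msg }

type invalidSignatureError struct{ msg string }

func (e invalidSignatureError) Error() string { return e.msg }

// toInvalidSignature mirrors JSONFormatToInvalidSignatureError: it converts
// only the format-error type and passes every other error through unchanged.
func toInvalidSignature(err error) error {
	var formatErr jsonFormatError
	if errors.As(err, &formatErr) {
		return invalidSignatureError{msg: formatErr.Error()}
	}
	return err
}

func main() {
	fmt.Println(toInvalidSignature(jsonFormatError{msg: "truncated JSON"})) // converted
	fmt.Println(toInvalidSignature(errors.New("I/O failure")))              // passed through
	fmt.Println(toInvalidSignature(nil))                                    // nil stays nil
}
```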
@@ -77,13 +80,7 @@ var _ json.Unmarshaler = (*UntrustedRekorPayload)(nil)

 // UnmarshalJSON implements the json.Unmarshaler interface
 func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error {
-	err := p.strictUnmarshalJSON(data)
-	if err != nil {
-		if formatErr, ok := err.(JSONFormatError); ok {
-			err = NewInvalidSignatureError(formatErr.Error())
-		}
-	}
-	return err
+	return JSONFormatToInvalidSignatureError(p.strictUnmarshalJSON(data))
 }

 // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
@@ -113,7 +110,7 @@ func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) {

 // VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
 // Returns bundle upload time on success.
-func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
+func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
 	// FIXME: Should the publicKey parameter hard-code ecdsa?

 	// == Parse SET bytes

@@ -130,7 +127,14 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver
 		return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("canonicalizing Rekor SET JSON: %v", err))
 	}
 	untrustedSETPayloadHash := sha256.Sum256(untrustedSETPayloadCanonicalBytes)
-	if !ecdsa.VerifyASN1(publicKey, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) {
+	publicKeyMatched := false
+	for _, pk := range publicKeys {
+		if ecdsa.VerifyASN1(pk, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) {
+			publicKeyMatched = true
+			break
+		}
+	}
+	if !publicKeyMatched {
 		return time.Time{}, NewInvalidSignatureError("cryptographic signature verification of Rekor SET failed")
 	}
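The new loop accepts the SET if any of the configured keys verifies it. A runnable sketch of the same any-of-N ECDSA check, with throwaway keys generated for illustration:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	// Two trusted keys; the message is signed by the second one.
	k1, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	k2, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	trusted := []*ecdsa.PublicKey{&k1.PublicKey, &k2.PublicKey}

	digest := sha256.Sum256([]byte("signed entry timestamp payload"))
	sig, err := ecdsa.SignASN1(rand.Reader, k2, digest[:])
	if err != nil {
		panic(err)
	}

	// Same shape as the VerifyRekorSET loop: stop at the first key that matches.
	matched := false
	for _, pk := range trusted {
		if ecdsa.VerifyASN1(pk, digest[:], sig) {
			matched = true
			break
		}
	}
	fmt.Println("matched:", matched) // matched: true
}
```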
@@ -7,6 +7,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"strings"
 	"time"

 	"github.com/containers/image/v5/version"
@@ -79,13 +80,7 @@ var _ json.Unmarshaler = (*UntrustedSigstorePayload)(nil)

 // UnmarshalJSON implements the json.Unmarshaler interface
 func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error {
-	err := s.strictUnmarshalJSON(data)
-	if err != nil {
-		if formatErr, ok := err.(JSONFormatError); ok {
-			err = NewInvalidSignatureError(formatErr.Error())
-		}
-	}
-	return err
+	return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
 }

 // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
@@ -126,7 +121,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
 	if gotTimestamp {
 		intTimestamp := int64(timestamp)
 		if float64(intTimestamp) != timestamp {
-			return NewInvalidSignatureError("Field optional.timestamp is not is not an integer")
+			return NewInvalidSignatureError("Field optional.timestamp is not an integer")
 		}
 		s.untrustedTimestamp = &intTimestamp
 	}
@@ -171,24 +166,62 @@ type SigstorePayloadAcceptanceRules struct {
 	ValidateSignedDockerManifestDigest func(digest.Digest) error
 }

-// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by publicKey, and that its principal components
+// verifySigstorePayloadBlobSignature verifies unverifiedSignature of unverifiedPayload was correctly created
+// by any of the public keys in publicKeys.
+//
+// This is an internal implementation detail of VerifySigstorePayload and should have no other callers.
+// It is INSUFFICIENT alone to consider the signature acceptable.
+func verifySigstorePayloadBlobSignature(publicKeys []crypto.PublicKey, unverifiedPayload, unverifiedSignature []byte) error {
+	if len(publicKeys) == 0 {
+		return errors.New("Need at least one public key to verify the sigstore payload, but got 0")
+	}
+
+	verifiers := make([]sigstoreSignature.Verifier, 0, len(publicKeys))
+	for _, key := range publicKeys {
+		// Failing to load a verifier indicates that something is really, really
+		// invalid about the public key; prefer to fail even if the signature might be
+		// valid with other keys, so that users fix their fallback keys before they need them.
+		// For that reason, we even initialize all verifiers before trying to validate the signature
+		// with any key.
+		verifier, err := sigstoreSignature.LoadVerifier(key, sigstoreHarcodedHashAlgorithm)
+		if err != nil {
+			return err
+		}
+		verifiers = append(verifiers, verifier)
+	}
+
+	var failures []string
+	for _, verifier := range verifiers {
+		// github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(),
+		// which seems to be not used by anything. So we don’t bother.
+		err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload))
+		if err == nil {
+			return nil
+		}
+
+		failures = append(failures, err.Error())
+	}
+
+	if len(failures) == 0 {
+		// Coverage: We have checked there is at least one public key, any success causes an early return,
+		// and any failure adds an entry to failures => there must be at least one error.
+		return fmt.Errorf("Internal error: signature verification failed but no errors have been recorded")
+	}
+	return NewInvalidSignatureError("cryptographic signature verification failed: " + strings.Join(failures, ", "))
+}
+
+// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by any of the public keys in publicKeys, and that its principal components
 // match expected values, both as specified by rules, and returns it.
 // We return an *UntrustedSigstorePayload, although nothing actually uses it,
 // just to double-check against stupid typos.
-func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) {
-	verifier, err := sigstoreSignature.LoadVerifier(publicKey, sigstoreHarcodedHashAlgorithm)
-	if err != nil {
-		return nil, fmt.Errorf("creating verifier: %w", err)
-	}
-
+func VerifySigstorePayload(publicKeys []crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) {
 	unverifiedSignature, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature)
 	if err != nil {
 		return nil, NewInvalidSignatureError(fmt.Sprintf("base64 decoding: %v", err))
 	}
-	// github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(),
-	// which seems to be not used by anything. So we don’t bother.
-	if err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload)); err != nil {
-		return nil, NewInvalidSignatureError(fmt.Sprintf("cryptographic signature verification failed: %v", err))
+
+	if err := verifySigstorePayloadBlobSignature(publicKeys, unverifiedPayload, unverifiedSignature); err != nil {
+		return nil, err
 	}

 	var unmatchedPayload UntrustedSigstorePayload
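The comment in the new helper spells out a deliberate design choice: all verifiers are constructed before any signature check runs, so a broken fallback key is reported even when another key would have verified the signature. A self-contained sketch of that build-then-try structure (the verifier interface and loader functions are stand-ins, not the sigstore API):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

type verifier interface{ verify(msg, sig []byte) error }

type okVerifier struct{}

func (okVerifier) verify(_, _ []byte) error { return nil }

// buildThenTry mirrors verifySigstorePayloadBlobSignature's structure: every
// verifier is constructed up front, so a broken fallback key fails loudly even
// when another key alone would have verified the signature.
func buildThenTry(load []func() (verifier, error), msg, sig []byte) error {
	verifiers := make([]verifier, 0, len(load))
	for _, l := range load {
		v, err := l() // a bad key is a configuration error, not a soft failure
		if err != nil {
			return err
		}
		verifiers = append(verifiers, v)
	}
	var failures []string
	for _, v := range verifiers {
		err := v.verify(msg, sig)
		if err == nil {
			return nil // first success wins
		}
		failures = append(failures, err.Error())
	}
	return errors.New("verification failed: " + strings.Join(failures, ", "))
}

func main() {
	err := buildThenTry([]func() (verifier, error){
		func() (verifier, error) { return okVerifier{}, nil },
		func() (verifier, error) { return nil, errors.New("unparsable fallback key") },
	}, []byte("payload"), []byte("sig"))
	fmt.Println(err) // fails despite a working first key, by design
}
```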
@@ -2,7 +2,6 @@ package signature

 import (
 	"encoding/json"
-	"errors"
 	"fmt"

 	"github.com/containers/image/v5/signature/internal"
@@ -15,29 +14,57 @@ type PRSigstoreSignedOption func(*prSigstoreSigned) error
 func PRSigstoreSignedWithKeyPath(keyPath string) PRSigstoreSignedOption {
 	return func(pr *prSigstoreSigned) error {
 		if pr.KeyPath != "" {
-			return errors.New(`"keyPath" already specified`)
+			return InvalidPolicyFormatError(`"keyPath" already specified`)
 		}
 		pr.KeyPath = keyPath
 		return nil
 	}
 }

+// PRSigstoreSignedWithKeyPaths specifies a value for the "keyPaths" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyPaths(keyPaths []string) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.KeyPaths != nil {
+			return InvalidPolicyFormatError(`"keyPaths" already specified`)
+		}
+		if len(keyPaths) == 0 {
+			return InvalidPolicyFormatError(`"keyPaths" contains no entries`)
+		}
+		pr.KeyPaths = keyPaths
+		return nil
+	}
+}
+
 // PRSigstoreSignedWithKeyData specifies a value for the "keyData" field when calling NewPRSigstoreSigned.
 func PRSigstoreSignedWithKeyData(keyData []byte) PRSigstoreSignedOption {
 	return func(pr *prSigstoreSigned) error {
 		if pr.KeyData != nil {
-			return errors.New(`"keyData" already specified`)
+			return InvalidPolicyFormatError(`"keyData" already specified`)
 		}
 		pr.KeyData = keyData
 		return nil
 	}
 }

+// PRSigstoreSignedWithKeyDatas specifies a value for the "keyDatas" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithKeyDatas(keyDatas [][]byte) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.KeyDatas != nil {
+			return InvalidPolicyFormatError(`"keyDatas" already specified`)
+		}
+		if len(keyDatas) == 0 {
+			return InvalidPolicyFormatError(`"keyDatas" contains no entries`)
+		}
+		pr.KeyDatas = keyDatas
+		return nil
+	}
+}
+
 // PRSigstoreSignedWithFulcio specifies a value for the "fulcio" field when calling NewPRSigstoreSigned.
 func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedOption {
 	return func(pr *prSigstoreSigned) error {
 		if pr.Fulcio != nil {
-			return errors.New(`"fulcio" already specified`)
+			return InvalidPolicyFormatError(`"fulcio" already specified`)
 		}
 		pr.Fulcio = fulcio
 		return nil
@@ -48,29 +75,57 @@ func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedO
 func PRSigstoreSignedWithRekorPublicKeyPath(rekorPublicKeyPath string) PRSigstoreSignedOption {
 	return func(pr *prSigstoreSigned) error {
 		if pr.RekorPublicKeyPath != "" {
-			return errors.New(`"rekorPublicKeyPath" already specified`)
+			return InvalidPolicyFormatError(`"rekorPublicKeyPath" already specified`)
 		}
 		pr.RekorPublicKeyPath = rekorPublicKeyPath
 		return nil
 	}
 }

+// PRSigstoreSignedWithRekorPublicKeyPaths specifies a value for the "rekorPublicKeyPaths" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyPaths(rekorPublicKeyPaths []string) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.RekorPublicKeyPaths != nil {
+			return InvalidPolicyFormatError(`"rekorPublicKeyPaths" already specified`)
+		}
+		if len(rekorPublicKeyPaths) == 0 {
+			return InvalidPolicyFormatError(`"rekorPublicKeyPaths" contains no entries`)
+		}
+		pr.RekorPublicKeyPaths = rekorPublicKeyPaths
+		return nil
+	}
+}
+
 // PRSigstoreSignedWithRekorPublicKeyData specifies a value for the "rekorPublicKeyData" field when calling NewPRSigstoreSigned.
 func PRSigstoreSignedWithRekorPublicKeyData(rekorPublicKeyData []byte) PRSigstoreSignedOption {
 	return func(pr *prSigstoreSigned) error {
 		if pr.RekorPublicKeyData != nil {
-			return errors.New(`"rekorPublicKeyData" already specified`)
+			return InvalidPolicyFormatError(`"rekorPublicKeyData" already specified`)
 		}
 		pr.RekorPublicKeyData = rekorPublicKeyData
 		return nil
 	}
 }

+// PRSigstoreSignedWithRekorPublicKeyDatas specifies a value for the "rekorPublicKeyDatas" field when calling NewPRSigstoreSigned.
+func PRSigstoreSignedWithRekorPublicKeyDatas(rekorPublicKeyDatas [][]byte) PRSigstoreSignedOption {
+	return func(pr *prSigstoreSigned) error {
+		if pr.RekorPublicKeyDatas != nil {
+			return InvalidPolicyFormatError(`"rekorPublicKeyDatas" already specified`)
+		}
+		if len(rekorPublicKeyDatas) == 0 {
+			return InvalidPolicyFormatError(`"rekorPublicKeyDatas" contains no entries`)
+		}
+		pr.RekorPublicKeyDatas = rekorPublicKeyDatas
+		return nil
+	}
+}
+
 // PRSigstoreSignedWithSignedIdentity specifies a value for the "signedIdentity" field when calling NewPRSigstoreSigned.
 func PRSigstoreSignedWithSignedIdentity(signedIdentity PolicyReferenceMatch) PRSigstoreSignedOption {
 	return func(pr *prSigstoreSigned) error {
 		if pr.SignedIdentity != nil {
-			return errors.New(`"signedIdentity" already specified`)
+			return InvalidPolicyFormatError(`"signedIdentity" already specified`)
 		}
 		pr.SignedIdentity = signedIdentity
 		return nil
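A sketch of how these functional options compose when building a policy requirement programmatically; NewPRSigstoreSigned and NewPRMatchRepoDigestOrExact are the package's existing exported constructors, and the key-file paths are placeholders:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/signature"
)

func main() {
	// One signing key, two acceptable Rekor log keys (e.g. during log key rotation).
	pr, err := signature.NewPRSigstoreSigned(
		signature.PRSigstoreSignedWithKeyPath("/etc/containers/keys/release.pub"),
		signature.PRSigstoreSignedWithRekorPublicKeyPaths([]string{
			"/etc/containers/rekor/prod.pub",
			"/etc/containers/rekor/staging.pub",
		}),
		signature.PRSigstoreSignedWithSignedIdentity(signature.NewPRMatchRepoDigestOrExact()),
	)
	if err != nil {
		fmt.Println("invalid policy requirement:", err)
		return
	}
	_ = pr // embed into a signature.Policy as usual
}
```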
@@ -92,21 +147,40 @@ func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned,
 	if res.KeyPath != "" {
 		keySources++
 	}
+	if res.KeyPaths != nil {
+		keySources++
+	}
 	if res.KeyData != nil {
 		keySources++
 	}
+	if res.KeyDatas != nil {
+		keySources++
+	}
 	if res.Fulcio != nil {
 		keySources++
 	}
 	if keySources != 1 {
-		return nil, InvalidPolicyFormatError("exactly one of keyPath, keyData and fulcio must be specified")
+		return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths, keyData, keyDatas and fulcio must be specified")
 	}

-	if res.RekorPublicKeyPath != "" && res.RekorPublicKeyData != nil {
-		return nil, InvalidPolicyFormatError("rekorPublickeyType and rekorPublickeyData cannot be used simultaneously")
-	}
-	if res.Fulcio != nil && res.RekorPublicKeyPath == "" && res.RekorPublicKeyData == nil {
-		return nil, InvalidPolicyFormatError("At least one of RekorPublickeyPath and RekorPublickeyData must be specified if fulcio is used")
+	rekorSources := 0
+	if res.RekorPublicKeyPath != "" {
+		rekorSources++
+	}
+	if res.RekorPublicKeyPaths != nil {
+		rekorSources++
+	}
+	if res.RekorPublicKeyData != nil {
+		rekorSources++
+	}
+	if res.RekorPublicKeyDatas != nil {
+		rekorSources++
+	}
+	if rekorSources > 1 {
+		return nil, InvalidPolicyFormatError("at most one of rekorPublicKeyPath, rekorPublicKeyPaths, rekorPublicKeyData and rekorPublicKeyDatas can be used simultaneously")
+	}
+	if res.Fulcio != nil && rekorSources == 0 {
+		return nil, InvalidPolicyFormatError("at least one of rekorPublicKeyPath, rekorPublicKeyPaths, rekorPublicKeyData and rekorPublicKeyDatas must be specified if fulcio is used")
 	}

 	if res.SignedIdentity == nil {
@@ -144,7 +218,8 @@ var _ json.Unmarshaler = (*prSigstoreSigned)(nil)
 func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
 	*pr = prSigstoreSigned{}
 	var tmp prSigstoreSigned
-	var gotKeyPath, gotKeyData, gotFulcio, gotRekorPublicKeyPath, gotRekorPublicKeyData bool
+	var gotKeyPath, gotKeyPaths, gotKeyData, gotKeyDatas, gotFulcio bool
+	var gotRekorPublicKeyPath, gotRekorPublicKeyPaths, gotRekorPublicKeyData, gotRekorPublicKeyDatas bool
 	var fulcio prSigstoreSignedFulcio
 	var signedIdentity json.RawMessage
 	if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
@@ -154,18 +229,30 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
 		case "keyPath":
 			gotKeyPath = true
 			return &tmp.KeyPath
+		case "keyPaths":
+			gotKeyPaths = true
+			return &tmp.KeyPaths
 		case "keyData":
 			gotKeyData = true
 			return &tmp.KeyData
+		case "keyDatas":
+			gotKeyDatas = true
+			return &tmp.KeyDatas
 		case "fulcio":
 			gotFulcio = true
 			return &fulcio
 		case "rekorPublicKeyPath":
 			gotRekorPublicKeyPath = true
 			return &tmp.RekorPublicKeyPath
+		case "rekorPublicKeyPaths":
+			gotRekorPublicKeyPaths = true
+			return &tmp.RekorPublicKeyPaths
 		case "rekorPublicKeyData":
 			gotRekorPublicKeyData = true
 			return &tmp.RekorPublicKeyData
+		case "rekorPublicKeyDatas":
+			gotRekorPublicKeyDatas = true
+			return &tmp.RekorPublicKeyDatas
 		case "signedIdentity":
 			return &signedIdentity
 		default:
@@ -192,18 +279,30 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
 	if gotKeyPath {
 		opts = append(opts, PRSigstoreSignedWithKeyPath(tmp.KeyPath))
 	}
+	if gotKeyPaths {
+		opts = append(opts, PRSigstoreSignedWithKeyPaths(tmp.KeyPaths))
+	}
 	if gotKeyData {
 		opts = append(opts, PRSigstoreSignedWithKeyData(tmp.KeyData))
 	}
+	if gotKeyDatas {
+		opts = append(opts, PRSigstoreSignedWithKeyDatas(tmp.KeyDatas))
+	}
 	if gotFulcio {
 		opts = append(opts, PRSigstoreSignedWithFulcio(&fulcio))
 	}
 	if gotRekorPublicKeyPath {
 		opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPath(tmp.RekorPublicKeyPath))
 	}
+	if gotRekorPublicKeyPaths {
+		opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPaths(tmp.RekorPublicKeyPaths))
+	}
 	if gotRekorPublicKeyData {
 		opts = append(opts, PRSigstoreSignedWithRekorPublicKeyData(tmp.RekorPublicKeyData))
 	}
+	if gotRekorPublicKeyDatas {
+		opts = append(opts, PRSigstoreSignedWithRekorPublicKeyDatas(tmp.RekorPublicKeyDatas))
+	}
 	opts = append(opts, PRSigstoreSignedWithSignedIdentity(tmp.SignedIdentity))

 	res, err := newPRSigstoreSigned(opts...)
@@ -221,7 +320,7 @@ type PRSigstoreSignedFulcioOption func(*prSigstoreSignedFulcio) error
 func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOption {
 	return func(f *prSigstoreSignedFulcio) error {
 		if f.CAPath != "" {
-			return errors.New(`"caPath" already specified`)
+			return InvalidPolicyFormatError(`"caPath" already specified`)
 		}
 		f.CAPath = caPath
 		return nil

@@ -232,7 +331,7 @@ func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOptio
 func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOption {
 	return func(f *prSigstoreSignedFulcio) error {
 		if f.CAData != nil {
-			return errors.New(`"caData" already specified`)
+			return InvalidPolicyFormatError(`"caData" already specified`)
 		}
 		f.CAData = caData
 		return nil

@@ -243,7 +342,7 @@ func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOptio
 func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFulcioOption {
 	return func(f *prSigstoreSignedFulcio) error {
 		if f.OIDCIssuer != "" {
-			return errors.New(`"oidcIssuer" already specified`)
+			return InvalidPolicyFormatError(`"oidcIssuer" already specified`)
 		}
 		f.OIDCIssuer = oidcIssuer
 		return nil

@@ -254,7 +353,7 @@ func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFul
 func PRSigstoreSignedFulcioWithSubjectEmail(subjectEmail string) PRSigstoreSignedFulcioOption {
 	return func(f *prSigstoreSignedFulcio) error {
 		if f.SubjectEmail != "" {
-			return errors.New(`"subjectEmail" already specified`)
+			return InvalidPolicyFormatError(`"subjectEmail" already specified`)
 		}
 		f.SubjectEmail = subjectEmail
 		return nil
@@ -6,7 +6,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"os"
 	"slices"

 	"github.com/containers/image/v5/internal/multierr"
@@ -27,33 +26,18 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva
 	}

-	// FIXME: move this to per-context initialization
-	var data [][]byte
-	keySources := 0
-	if pr.KeyPath != "" {
-		keySources++
-		d, err := os.ReadFile(pr.KeyPath)
-		if err != nil {
-			return sarRejected, nil, err
-		}
-		data = [][]byte{d}
-	}
-	if pr.KeyPaths != nil {
-		keySources++
-		data = [][]byte{}
-		for _, path := range pr.KeyPaths {
-			d, err := os.ReadFile(path)
-			if err != nil {
-				return sarRejected, nil, err
-			}
-			data = append(data, d)
-		}
-	}
-	if pr.KeyData != nil {
-		keySources++
-		data = [][]byte{pr.KeyData}
-	}
-	if keySources != 1 {
-		return sarRejected, nil, errors.New(`Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified`)
+	const notOneSourceErrorText = `Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified`
+	data, err := loadBytesFromConfigSources(configBytesSources{
+		inconsistencyErrorMessage: notOneSourceErrorText,
+		path:                      pr.KeyPath,
+		paths:                     pr.KeyPaths,
+		data:                      pr.KeyData,
+	})
+	if err != nil {
+		return sarRejected, nil, err
+	}
+	if data == nil {
+		return sarRejected, nil, errors.New(notOneSourceErrorText)
 	}

 	// FIXME: move this to per-context initialization
@@ -10,6 +10,7 @@ import (
 	"errors"
 	"fmt"
 	"os"
+	"strings"

 	"github.com/containers/image/v5/internal/multierr"
 	"github.com/containers/image/v5/internal/private"
@@ -20,37 +21,69 @@ import (
 	"github.com/sigstore/sigstore/pkg/cryptoutils"
 )

-// loadBytesFromDataOrPath ensures there is at most one of ${prefix}Data and ${prefix}Path set,
+// configBytesSources contains configuration fields which may result in one or more []byte values
+type configBytesSources struct {
+	inconsistencyErrorMessage string   // Error to return if more than one source is set
+	path                      string   // …Path: a path to a file containing the data, or ""
+	paths                     []string // …Paths: paths to files containing the data, or nil
+	data                      []byte   // …Data: a single instance of the raw data, or nil
+	datas                     [][]byte // …Datas: the raw data, or nil // codespell:ignore datas
+}
+
+// loadBytesFromConfigSources ensures at most one of the sources in src is set,
 // and returns the referenced data, or nil if none is set.
-func loadBytesFromDataOrPath(prefix string, data []byte, path string) ([]byte, error) {
-	switch {
-	case data != nil && path != "":
-		return nil, fmt.Errorf(`Internal inconsistency: both "%sPath" and "%sData" specified`, prefix, prefix)
-	case path != "":
-		d, err := os.ReadFile(path)
+func loadBytesFromConfigSources(src configBytesSources) ([][]byte, error) {
+	sources := 0
+	var data [][]byte // = nil
+	if src.path != "" {
+		sources++
+		d, err := os.ReadFile(src.path)
 		if err != nil {
 			return nil, err
 		}
-		return d, nil
-	case data != nil:
-		return data, nil
-	default: // Nothing
-		return nil, nil
+		data = [][]byte{d}
+	}
+	if src.paths != nil {
+		sources++
+		data = [][]byte{}
+		for _, path := range src.paths {
+			d, err := os.ReadFile(path)
+			if err != nil {
+				return nil, err
+			}
+			data = append(data, d)
+		}
+	}
+	if src.data != nil {
+		sources++
+		data = [][]byte{src.data}
+	}
+	if src.datas != nil { // codespell:ignore datas
+		sources++
+		data = src.datas // codespell:ignore datas
+	}
+	if sources > 1 {
+		return nil, errors.New(src.inconsistencyErrorMessage)
 	}
+	return data, nil
 }

 // prepareTrustRoot creates a fulcioTrustRoot from the input data.
 // (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
 func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) {
-	caCertBytes, err := loadBytesFromDataOrPath("fulcioCA", f.CAData, f.CAPath)
+	caCertPEMs, err := loadBytesFromConfigSources(configBytesSources{
+		inconsistencyErrorMessage: `Internal inconsistency: both "caPath" and "caData" specified`,
+		path:                      f.CAPath,
+		data:                      f.CAData,
+	})
 	if err != nil {
 		return nil, err
 	}
-	if caCertBytes == nil {
-		return nil, errors.New(`Internal inconsistency: Fulcio specified with neither "caPath" nor "caData"`)
+	if len(caCertPEMs) != 1 {
+		return nil, errors.New(`Internal inconsistency: Fulcio specified with not exactly one of "caPath" and "caData"`)
 	}
 	certs := x509.NewCertPool()
-	if ok := certs.AppendCertsFromPEM(caCertBytes); !ok {
+	if ok := certs.AppendCertsFromPEM(caCertPEMs[0]); !ok {
 		return nil, errors.New("error loading Fulcio CA certificates")
 	}
 	fulcio := fulcioTrustRoot{
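The helper's contract — nil result when no source is set, the configured error when more than one is set — can be pinned down with a quick test-style sketch (the type and function are unexported, so this would live in the same package; not part of this commit):

```go
package signature

import "testing"

func TestConfigBytesSourcesContract(t *testing.T) {
	// No source set: nil data, no error.
	data, err := loadBytesFromConfigSources(configBytesSources{
		inconsistencyErrorMessage: "conflict",
	})
	if err != nil || data != nil {
		t.Fatalf("expected nil, nil; got %v, %v", data, err)
	}

	// Two sources set: the configured inconsistency error is returned.
	_, err = loadBytesFromConfigSources(configBytesSources{
		inconsistencyErrorMessage: "conflict",
		data:                      []byte("a"),
		datas:                     [][]byte{[]byte("b")}, // codespell:ignore datas
	})
	if err == nil || err.Error() != "conflict" {
		t.Fatalf("expected the inconsistency error, got %v", err)
	}
}
```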
@@ -66,24 +99,35 @@ func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) {

 // sigstoreSignedTrustRoot contains an already parsed version of the prSigstoreSigned policy
 type sigstoreSignedTrustRoot struct {
-	publicKey      crypto.PublicKey
-	fulcio         *fulcioTrustRoot
-	rekorPublicKey *ecdsa.PublicKey
+	publicKeys      []crypto.PublicKey
+	fulcio          *fulcioTrustRoot
+	rekorPublicKeys []*ecdsa.PublicKey
 }

 func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) {
 	res := sigstoreSignedTrustRoot{}

-	publicKeyPEM, err := loadBytesFromDataOrPath("key", pr.KeyData, pr.KeyPath)
+	publicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{
+		inconsistencyErrorMessage: `Internal inconsistency: more than one of "keyPath", "keyPaths", "keyData", "keyDatas" specified`,
+		path:                      pr.KeyPath,
+		paths:                     pr.KeyPaths,
+		data:                      pr.KeyData,
+		datas:                     pr.KeyDatas, // codespell:ignore datas
+	})
 	if err != nil {
 		return nil, err
 	}
-	if publicKeyPEM != nil {
-		pk, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM)
-		if err != nil {
-			return nil, fmt.Errorf("parsing public key: %w", err)
+	if publicKeyPEMs != nil {
+		for index, keyData := range publicKeyPEMs {
+			pk, err := cryptoutils.UnmarshalPEMToPublicKey(keyData)
+			if err != nil {
+				return nil, fmt.Errorf("parsing public key %d: %w", index+1, err)
+			}
+			res.publicKeys = append(res.publicKeys, pk)
+		}
+		if len(res.publicKeys) == 0 {
+			return nil, errors.New(`Internal inconsistency: "keyPath", "keyPaths", "keyData" and "keyDatas" produced no public keys`)
 		}
-		res.publicKey = pk
 	}

 	if pr.Fulcio != nil {
@@ -94,21 +138,32 @@ func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error)
 		res.fulcio = f
 	}

-	rekorPublicKeyPEM, err := loadBytesFromDataOrPath("rekorPublicKey", pr.RekorPublicKeyData, pr.RekorPublicKeyPath)
+	rekorPublicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{
+		inconsistencyErrorMessage: `Internal inconsistency: both "rekorPublicKeyPath" and "rekorPublicKeyData" specified`,
+		path:                      pr.RekorPublicKeyPath,
+		paths:                     pr.RekorPublicKeyPaths,
+		data:                      pr.RekorPublicKeyData,
+		datas:                     pr.RekorPublicKeyDatas, // codespell:ignore datas
+	})
 	if err != nil {
 		return nil, err
 	}
-	if rekorPublicKeyPEM != nil {
-		pk, err := cryptoutils.UnmarshalPEMToPublicKey(rekorPublicKeyPEM)
-		if err != nil {
-			return nil, fmt.Errorf("parsing Rekor public key: %w", err)
-		}
-		pkECDSA, ok := pk.(*ecdsa.PublicKey)
-		if !ok {
-			return nil, fmt.Errorf("Rekor public key is not using ECDSA")
+	if rekorPublicKeyPEMs != nil {
+		for index, pem := range rekorPublicKeyPEMs {
+			pk, err := cryptoutils.UnmarshalPEMToPublicKey(pem)
+			if err != nil {
+				return nil, fmt.Errorf("parsing Rekor public key %d: %w", index+1, err)
+			}
+			pkECDSA, ok := pk.(*ecdsa.PublicKey)
+			if !ok {
+				return nil, fmt.Errorf("Rekor public key %d is not using ECDSA", index+1)
+			}
+			res.rekorPublicKeys = append(res.rekorPublicKeys, pkECDSA)
+		}
+		if len(res.rekorPublicKeys) == 0 {
+			return nil, errors.New(`Internal inconsistency: "rekorPublicKeyPath", "rekorPublicKeyPaths", "rekorPublicKeyData" and "rekorPublicKeyDatas" produced no public keys`)
 		}
-		res.rekorPublicKey = pkECDSA
 	}

 	return &res, nil
@@ -134,37 +189,51 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva
 	}
 	untrustedPayload := sig.UntrustedPayload()

-	var publicKey crypto.PublicKey
+	var publicKeys []crypto.PublicKey
 	switch {
-	case trustRoot.publicKey != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations.
+	case trustRoot.publicKeys != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations.
 		return sarRejected, errors.New("Internal inconsistency: Both a public key and Fulcio CA specified")
-	case trustRoot.publicKey == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations.
+	case trustRoot.publicKeys == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations.
 		return sarRejected, errors.New("Internal inconsistency: Neither a public key nor a Fulcio CA specified")

-	case trustRoot.publicKey != nil:
-		if trustRoot.rekorPublicKey != nil {
+	case trustRoot.publicKeys != nil:
+		if trustRoot.rekorPublicKeys != nil {
 			untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey]
 			if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should work.
 				return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey)
 			}
-			// We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies.
-			// FIXME: We could just generate DER instead of the full PEM text
-			recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(trustRoot.publicKey)
-			if err != nil {
-				// Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail.
-				// (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.)
-				return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err)
+
+			var rekorFailures []string
+			for _, candidatePublicKey := range trustRoot.publicKeys {
+				// We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies.
+				// FIXME: We could just generate DER instead of the full PEM text
+				recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(candidatePublicKey)
+				if err != nil {
+					// Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail.
+					// (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.)
+					return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err)
+				}
+				// We don’t care about the Rekor timestamp, just about log presence.
+				_, err = internal.VerifyRekorSET(trustRoot.rekorPublicKeys, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload)
+				if err == nil {
+					publicKeys = append(publicKeys, candidatePublicKey)
+					break // The SET can only accept one public key entry, so if we found one, the rest either doesn’t match or is a duplicate
+				}
+				rekorFailures = append(rekorFailures, err.Error())
 			}
-			// We don’t care about the Rekor timestamp, just about log presence.
-			if _, err := internal.VerifyRekorSET(trustRoot.rekorPublicKey, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload); err != nil {
-				return sarRejected, err
+			if len(publicKeys) == 0 {
+				if len(rekorFailures) == 0 {
+					// Coverage: We have ensured that len(trustRoot.publicKeys) != 0, when nothing succeeds, there must be at least one failure.
+					return sarRejected, errors.New(`Internal inconsistency: Rekor SET did not match any key but we have no failures.`)
+				}
+				return sarRejected, internal.NewInvalidSignatureError(fmt.Sprintf("No public key verified against the RekorSET: %s", strings.Join(rekorFailures, ", ")))
 			}
+		} else {
+			publicKeys = trustRoot.publicKeys
 		}
-		publicKey = trustRoot.publicKey

 	case trustRoot.fulcio != nil:
-		if trustRoot.rekorPublicKey == nil { // newPRSigstoreSigned rejects such combinations.
+		if trustRoot.rekorPublicKeys == nil { // newPRSigstoreSigned rejects such combinations.
 			return sarRejected, errors.New("Internal inconsistency: Fulcio CA specified without a Rekor public key")
 		}
 		untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey]

@@ -179,19 +248,20 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva
 		if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok {
 			untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain)
 		}
-		pk, err := verifyRekorFulcio(trustRoot.rekorPublicKey, trustRoot.fulcio,
+		pk, err := verifyRekorFulcio(trustRoot.rekorPublicKeys, trustRoot.fulcio,
 			[]byte(untrustedSET), []byte(untrustedCert), untrustedIntermediateChainBytes, untrustedBase64Signature, untrustedPayload)
 		if err != nil {
 			return sarRejected, err
 		}
-		publicKey = pk
+		publicKeys = []crypto.PublicKey{pk}
 	}

-	if publicKey == nil {
-		// Coverage: This should never happen, we have already excluded the possibility in the switch above.
+	if len(publicKeys) == 0 {
+		// Coverage: This should never happen, we ensured that trustRoot.publicKeys is non-empty if set,
+		// and we have already excluded the possibility in the switch above.
 		return sarRejected, fmt.Errorf("Internal inconsistency: publicKey not set before verifying sigstore payload")
 	}
-	signature, err := internal.VerifySigstorePayload(publicKey, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
+	signature, err := internal.VerifySigstorePayload(publicKeys, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{
 		ValidateSignedDockerReference: func(ref string) error {
 			if !pr.SignedIdentity.matchesDockerReference(image, ref) {
 				return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref))
@@ -136,7 +136,7 @@ func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (referenc
 	newNamedRef := strings.Replace(refString, prm.Prefix, prm.SignedPrefix, 1)
 	newParsedRef, err := reference.ParseNamed(newNamedRef)
 	if err != nil {
-		return nil, fmt.Errorf(`error rewriting reference from %q to %q: %v`, refString, newNamedRef, err)
+		return nil, fmt.Errorf(`error rewriting reference from %q to %q: %w`, refString, newNamedRef, err)
 	}
 	return newParsedRef, nil
 }
@@ -74,7 +74,7 @@ type prSignedBy struct {

 	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
 	KeyPath string `json:"keyPath,omitempty"`
-	// KeyPaths if a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
+	// KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified.
 	KeyPaths []string `json:"keyPaths,omitempty"`
 	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath, KeyPaths and KeyData must be specified.
 	KeyData []byte `json:"keyData,omitempty"`
@@ -111,24 +111,35 @@ type prSignedBaseLayer struct {
 type prSigstoreSigned struct {
 	prCommon

-	// KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyData, Fulcio must be specified.
+	// KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
 	KeyPath string `json:"keyPath,omitempty"`
-	// KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyData, Fulcio must be specified.
+	// KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
+	KeyPaths []string `json:"keyPaths,omitempty"`
+	// KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
 	KeyData []byte `json:"keyData,omitempty"`
-	// FIXME: Multiple public keys?
+	// KeyDatas is a set of trusted keys, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
+	KeyDatas [][]byte `json:"keyDatas,omitempty"`

-	// Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyData, Fulcio must be specified.
+	// Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified.
 	// If Fulcio is specified, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well.
 	Fulcio PRSigstoreSignedFulcio `json:"fulcio,omitempty"`

 	// RekorPublicKeyPath is a pathname to local file containing a public key of a Rekor server which must record acceptable signatures.
-	// If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional
-	// (and Rekor inclusion is not required if a Rekor public key is not specified).
+	// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+	// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
 	RekorPublicKeyPath string `json:"rekorPublicKeyPath,omitempty"`
+	// RekorPublicKeyPaths is a set of pathnames to local files, each containing a public key of a Rekor server. One of the keys must record acceptable signatures.
+	// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+	// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+	RekorPublicKeyPaths []string `json:"rekorPublicKeyPaths,omitempty"`
-	// RekorPublicKeyData contains a base64-encoded public key of a Rekor server which must record acceptable signatures.
-	// If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional
-	// (and Rekor inclusion is not required if a Rekor public key is not specified).
+	// RekorPublicKeyData contains a base64-encoded public key of a Rekor server which must record acceptable signatures.
+	// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+	// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
 	RekorPublicKeyData []byte `json:"rekorPublicKeyData,omitempty"`
+	// RekorPublicKeyDatas each contain a base64-encoded public key of a Rekor server. One of the keys must record acceptable signatures.
+	// If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well;
+	// otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified).
+	RekorPublicKeyDatas [][]byte `json:"rekorPublicKeyDatas,omitempty"`

 	// SignedIdentity specifies what image identity the signature must be claiming about the image.
 	// Defaults to "matchRepoDigestOrExact" if not specified.
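A sketch of what the new fields look like in a policy.json; the registry scope and key paths are placeholders, and NewPolicyFromBytes is assumed to be the package's usual entry point for parsing a policy (parsing validates the field combinations shown above without reading the key files):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/signature"
)

const policyJSON = `{
  "default": [{"type": "reject"}],
  "transports": {
    "docker": {
      "registry.example.com/myorg": [
        {
          "type": "sigstoreSigned",
          "keyPaths": ["/etc/containers/keys/current.pub", "/etc/containers/keys/next.pub"],
          "signedIdentity": {"type": "matchRepoDigestOrExact"}
        }
      ]
    }
  }
}`

func main() {
	policy, err := signature.NewPolicyFromBytes([]byte(policyJSON))
	if err != nil {
		fmt.Println("policy rejected:", err)
		return
	}
	fmt.Println("policy parsed:", policy != nil)
}
```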
@@ -105,13 +105,7 @@ var _ json.Unmarshaler = (*untrustedSignature)(nil)

 // UnmarshalJSON implements the json.Unmarshaler interface
 func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
-	err := s.strictUnmarshalJSON(data)
-	if err != nil {
-		if formatErr, ok := err.(internal.JSONFormatError); ok {
-			err = internal.NewInvalidSignatureError(formatErr.Error())
-		}
-	}
-	return err
+	return internal.JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
 }

 // strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal.JSONFormatError error type.
@@ -149,7 +143,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
 	if gotTimestamp {
 		intTimestamp := int64(timestamp)
 		if float64(intTimestamp) != timestamp {
-			return internal.NewInvalidSignatureError("Field optional.timestamp is not is not an integer")
+			return internal.NewInvalidSignatureError("Field optional.timestamp is not an integer")
 		}
 		s.untrustedTimestamp = &intTimestamp
 	}
@@ -325,7 +325,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
 		return private.UploadedBlob{}, err
 	}

-	out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
+	out, err := s.imageRef.transport.store.PrepareStagedLayer(nil, differ)
 	if err != nil {
 		return private.UploadedBlob{}, err
 	}

@@ -337,7 +337,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
 	}()

 	if out.TOCDigest == "" && out.UncompressedDigest == "" {
-		return private.UploadedBlob{}, errors.New("internal error: ApplyDiffWithDiffer succeeded with neither TOCDigest nor UncompressedDigest set")
+		return private.UploadedBlob{}, errors.New("internal error: PrepareStagedLayer succeeded with neither TOCDigest nor UncompressedDigest set")
 	}

 	blobDigest := srcInfo.Digest

@@ -356,11 +356,11 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
 		// The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
 		// responsible for ensuring blobDigest has been validated.
 		if out.CompressedDigest != blobDigest {
-			return private.UploadedBlob{}, fmt.Errorf("internal error: ApplyDiffWithDiffer returned CompressedDigest %q not matching expected %q",
+			return private.UploadedBlob{}, fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
 				out.CompressedDigest, blobDigest)
 		}
 		// So, record also information about blobDigest, that might benefit reuse.
-		// We trust ApplyDiffWithDiffer to validate or create both values correctly.
+		// We trust PrepareStagedLayer to validate or create both values correctly.
 		s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
 		options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
 	} else {
@@ -37,7 +37,7 @@ func newReference(transport storageTransport, named reference.Named, id string)
 	}
 	if id != "" {
 		if err := validateImageID(id); err != nil {
-			return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err, ErrInvalidReference)
+			return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err.Error(), ErrInvalidReference)
 		}
 	}
 	// We take a copy of the transport, which contains a pointer to the
@@ -11,6 +11,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"slices"
 	"sync"

 	"github.com/containers/image/v5/docker/reference"

@@ -300,7 +301,7 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
 		uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed
 	}

-	physicalBlobInfos := []types.BlobInfo{}
+	physicalBlobInfos := []types.BlobInfo{} // Built reversed
 	layerID := s.image.TopLayer
 	for layerID != "" {
 		layer, err := s.imageRef.transport.store.Layer(layerID)

@@ -340,9 +341,10 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
 			Size:      size,
 			MediaType: uncompressedLayerType,
 		}
-		physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
+		physicalBlobInfos = append(physicalBlobInfos, blobInfo)
 		layerID = layer.Parent
 	}
+	slices.Reverse(physicalBlobInfos)

 	res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
 	if err != nil {
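The change above swaps a prepend-per-iteration for a single append-then-reverse pass: prepending copies the slice on every iteration (O(n²) overall), while appending and reversing once is O(n). A minimal illustration of the same pattern:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	// Walking a layer chain child→parent yields items in reverse of the
	// order callers want, just like LayerInfosForCopy above.
	chain := []string{"top", "middle", "base"} // child → parent

	infos := []string{} // built reversed
	for _, layer := range chain {
		infos = append(infos, layer) // cheap amortized append
	}
	slices.Reverse(infos) // one O(n) pass at the end (Go 1.21+)

	fmt.Println(infos) // [base middle top]
}
```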
@@ -103,7 +103,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
 	}
 	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
 	if _, err := io.Copy(io.Discard, reader); err != nil {
-		return nil, fmt.Errorf("error reading %q: %v", filename, err)
+		return nil, fmt.Errorf("error reading %q: %w", filename, err)
 	}
 	if uncompressed != nil {
 		uncompressed.Close()

@@ -152,7 +152,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
 	// Encode and digest the image configuration blob.
 	configBytes, err := json.Marshal(&config)
 	if err != nil {
-		return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err)
+		return nil, fmt.Errorf("error generating configuration blob for %q: %w", strings.Join(r.filenames, separator), err)
 	}
 	configID := digest.Canonical.FromBytes(configBytes)
 	blobs[configID] = tarballBlob{

@@ -177,7 +177,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
 	// Encode the manifest.
 	manifestBytes, err := json.Marshal(&manifest)
 	if err != nil {
-		return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err)
+		return nil, fmt.Errorf("error generating manifest for %q: %w", strings.Join(r.filenames, separator), err)
 	}

 	// Return the image.
@@ -38,13 +38,13 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc
 		if filename == "-" {
 			stdin, err = io.ReadAll(os.Stdin)
 			if err != nil {
-				return nil, fmt.Errorf("error buffering stdin: %v", err)
+				return nil, fmt.Errorf("error buffering stdin: %w", err)
 			}
 			continue
 		}
 		f, err := os.Open(filename)
 		if err != nil {
-			return nil, fmt.Errorf("error opening %q: %v", filename, err)
+			return nil, fmt.Errorf("error opening %q: %w", filename, err)
 		}
 		f.Close()
 	}
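These repeated %v→%w conversions all have the same payoff: %w keeps the underlying error on the chain for errors.Is/errors.As, while %v flattens it to text. A quick demonstration:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("/nonexistent")

	wrapped := fmt.Errorf("error opening %q: %w", "/nonexistent", err)
	stringified := fmt.Errorf("error opening %q: %v", "/nonexistent", err)

	// %w keeps the cause on the error chain; %v does not.
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist))     // true
	fmt.Println(errors.Is(stringified, fs.ErrNotExist)) // false
}
```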
@@ -64,16 +64,28 @@ func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time)
 	if now == nil {
 		now = time.Now
 	}
-	return &RemoteKeySet{jwksURL: jwksURL, ctx: ctx, now: now}
+	return &RemoteKeySet{
+		jwksURL: jwksURL,
+		now:     now,
+		// For historical reasons, this package uses contexts for configuration, not just
+		// cancellation. In hindsight, this was a bad idea.
+		//
+		// Attempts to reason about how cancels should work with background requests have
+		// largely led to confusion. Use the context here as a config bag-of-values and
+		// ignore the cancel function.
+		ctx: context.WithoutCancel(ctx),
+	}
 }

 // RemoteKeySet is a KeySet implementation that validates JSON web tokens against
 // a jwks_uri endpoint.
 type RemoteKeySet struct {
 	jwksURL string
-	ctx     context.Context
 	now     func() time.Time

+	// Used for configuration. Cancellation is ignored.
+	ctx context.Context
+
 	// guard all other fields
 	mu sync.RWMutex
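context.WithoutCancel (Go 1.21+) is exactly the tool for this "context as a config bag" situation: the derived context keeps the parent's values but ignores its cancellation. A small demonstration:

```go
package main

import (
	"context"
	"fmt"
)

type cfgKey struct{}

func main() {
	parent, cancel := context.WithCancel(context.Background())
	parent = context.WithValue(parent, cfgKey{}, "config")

	detached := context.WithoutCancel(parent)
	cancel()

	// Values survive; the parent's cancellation does not propagate.
	fmt.Println(detached.Value(cfgKey{})) // config
	fmt.Println(detached.Err())           // <nil>
	fmt.Println(parent.Err())             // context canceled
}
```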
@@ -120,8 +120,8 @@ type Config struct {
 }

 // VerifierContext returns an IDTokenVerifier that uses the provider's key set to
-// verify JWTs. As opposed to Verifier, the context is used for all requests to
-// the upstream JWKs endpoint.
+// verify JWTs. As opposed to Verifier, the context is used to configure requests
+// to the upstream JWKs endpoint. The provided context's cancellation is ignored.
 func (p *Provider) VerifierContext(ctx context.Context, config *Config) *IDTokenVerifier {
 	return p.newVerifier(NewRemoteKeySet(ctx, p.jwksURL), config)
 }
@@ -1,3 +1,27 @@
+# v4.0.4
+
+## Fixed
+
+ - Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as a
+   breaking change. See #136 / #137.
+
+# v4.0.3
+
+## Changed
+
+ - Allow unmarshalling JSONWebKeySets with unsupported key types (#130)
+ - Document that OpaqueKeyEncrypter can't be implemented (for now) (#129)
+ - Dependency updates
+
+# v4.0.2
+
+## Changed
+
+ - Improved documentation of Verify() to note that JSONWebKeySet is a supported
+   argument type (#104)
+ - Defined exported error values for missing x5c header and unsupported elliptic
+   curves error cases (#117)
+
 # v4.0.1

 ## Fixed
@@ -459,7 +459,10 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
 		return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
 	}

-	key := tryJWKS(decryptionKey, obj.Header)
+	key, err := tryJWKS(decryptionKey, obj.Header)
+	if err != nil {
+		return nil, err
+	}
 	decrypter, err := newDecrypter(key)
 	if err != nil {
 		return nil, err

@@ -529,7 +532,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade
 		return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
 	}

-	key := tryJWKS(decryptionKey, obj.Header)
+	key, err := tryJWKS(decryptionKey, obj.Header)
+	if err != nil {
+		return -1, Header{}, nil, err
+	}
 	decrypter, err := newDecrypter(key)
 	if err != nil {
 		return -1, Header{}, nil, err
@@ -779,7 +779,13 @@ func (key rawJSONWebKey) symmetricKey() ([]byte, error) {
 	return key.K.bytes(), nil
 }

-func tryJWKS(key interface{}, headers ...Header) interface{} {
+var (
+	// ErrJWKSKidNotFound is returned when a JWKS does not contain a JWK with a
+	// key ID which matches one in the provided token's headers.
+	ErrJWKSKidNotFound = errors.New("go-jose/go-jose: JWK with matching kid not found in JWK Set")
+)
+
+func tryJWKS(key interface{}, headers ...Header) (interface{}, error) {
 	var jwks JSONWebKeySet

 	switch jwksType := key.(type) {

@@ -788,9 +794,11 @@ func tryJWKS(key interface{}, headers ...Header) interface{} {
 	case JSONWebKeySet:
 		jwks = jwksType
 	default:
-		return key
+		// If the specified key is not a JWKS, return as is.
+		return key, nil
 	}

+	// Determine the KID to search for from the headers.
 	var kid string
 	for _, header := range headers {
 		if header.KeyID != "" {

@@ -799,14 +807,17 @@ func tryJWKS(key interface{}, headers ...Header) interface{} {
 		}
 	}

+	// If no KID is specified in the headers, reject.
 	if kid == "" {
-		return key
+		return nil, ErrJWKSKidNotFound
 	}

+	// Find the JWK with the matching KID. If no JWK with the specified KID is
+	// found, reject.
 	keys := jwks.Key(kid)
 	if len(keys) == 0 {
-		return key
+		return nil, ErrJWKSKidNotFound
 	}

-	return keys[0].Key
+	return keys[0].Key, nil
 }
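With the new exported error, callers that pass a JSONWebKeySet can distinguish "no usable key" from a cryptographic failure. A sketch of that handling, assuming the v4 module path and the JSONWebSignature.Verify API shown elsewhere in this diff:

```go
package main

import (
	"errors"
	"fmt"

	jose "github.com/go-jose/go-jose/v4"
)

func verifyWithSet(obj *jose.JSONWebSignature, set jose.JSONWebKeySet) ([]byte, error) {
	payload, err := obj.Verify(set)
	if errors.Is(err, jose.ErrJWKSKidNotFound) {
		// Key-selection failure, not a signature failure: the set has no JWK
		// whose kid matches the token's headers.
		return nil, fmt.Errorf("no usable key in the JWKS: %w", err)
	}
	return payload, err
}

func main() {
	// An empty signature object and key set trip the kid lookup immediately.
	_, err := verifyWithSet(&jose.JSONWebSignature{}, jose.JSONWebKeySet{})
	fmt.Println(err)
}
```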
@@ -83,6 +83,9 @@ func (o *opaqueVerifier) verifyPayload(payload []byte, signature []byte, alg Sig
 }

 // OpaqueKeyEncrypter is an interface that supports encrypting keys with an opaque key.
+//
+// Note: this cannot currently be implemented outside this package because of its
+// unexported method.
 type OpaqueKeyEncrypter interface {
 	// KeyID returns the kid
 	KeyID() string
@@ -390,7 +390,10 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte {
 // The verificationKey argument must have one of the types allowed for the
 // verificationKey argument of JSONWebSignature.Verify().
 func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
-	key := tryJWKS(verificationKey, obj.headers()...)
+	key, err := tryJWKS(verificationKey, obj.headers()...)
+	if err != nil {
+		return err
+	}
 	verifier, err := newVerifier(key)
 	if err != nil {
 		return err

@@ -455,7 +458,10 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa
 // The verificationKey argument must have one of the types allowed for the
 // verificationKey argument of JSONWebSignature.Verify().
 func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) {
-	key := tryJWKS(verificationKey, obj.headers()...)
+	key, err := tryJWKS(verificationKey, obj.headers()...)
+	if err != nil {
+		return -1, Signature{}, err
+	}
 	verifier, err := newVerifier(key)
 	if err != nil {
 		return -1, Signature{}, err
@@ -157,58 +157,44 @@ type ValidationRecord struct {
 	UsedRSAKEX bool `json:"-"`
 }

-func looksLikeKeyAuthorization(str string) error {
-	parts := strings.Split(str, ".")
-	if len(parts) != 2 {
-		return fmt.Errorf("Invalid key authorization: does not look like a key authorization")
-	} else if !LooksLikeAToken(parts[0]) {
-		return fmt.Errorf("Invalid key authorization: malformed token")
-	} else if !LooksLikeAToken(parts[1]) {
-		// Thumbprints have the same syntax as tokens in boulder
-		// Both are base64-encoded and 32 octets
-		return fmt.Errorf("Invalid key authorization: malformed key thumbprint")
-	}
-	return nil
-}
-
 // Challenge is an aggregate of all data needed for any challenges.
 //
 // Rather than define individual types for different types of
 // challenge, we just throw all the elements into one bucket,
 // together with the common metadata elements.
 type Challenge struct {
-	// The type of challenge
+	// Type is the type of challenge encoded in this object.
 	Type AcmeChallenge `json:"type"`

-	// The status of this challenge
-	Status AcmeStatus `json:"status,omitempty"`
-
-	// Contains the error that occurred during challenge validation, if any
-	Error *probs.ProblemDetails `json:"error,omitempty"`
-
-	// A URI to which a response can be POSTed
-	URI string `json:"uri,omitempty"`
-
-	// For the V2 API the "URI" field is deprecated in favour of URL.
+	// URL is the URL to which a response can be posted. Required for all types.
 	URL string `json:"url,omitempty"`

-	// Used by http-01, tls-sni-01, tls-alpn-01 and dns-01 challenges
+	// Status is the status of this challenge. Required for all types.
+	Status AcmeStatus `json:"status,omitempty"`
+
+	// Validated is the time at which the server validated the challenge. Required
+	// if status is valid.
+	Validated *time.Time `json:"validated,omitempty"`
+
+	// Error contains the error that occurred during challenge validation, if any.
+	// If set, the Status must be "invalid".
+	Error *probs.ProblemDetails `json:"error,omitempty"`
+
+	// Token is a random value that uniquely identifies the challenge. It is used
+	// by all current challenges (http-01, tls-alpn-01, and dns-01).
 	Token string `json:"token,omitempty"`

-	// The expected KeyAuthorization for validation of the challenge. Populated by
-	// the RA prior to passing the challenge to the VA. For legacy reasons this
-	// field is called "ProvidedKeyAuthorization" because it was initially set by
-	// the content of the challenge update POST from the client. It is no longer
-	// set that way and should be renamed to "KeyAuthorization".
-	// TODO(@cpu): Rename `ProvidedKeyAuthorization` to `KeyAuthorization`.
+	// ProvidedKeyAuthorization used to carry the expected key authorization from
+	// the RA to the VA. However, since this field is never presented to the user
+	// via the ACME API, it should not be on this type.
+	//
+	// Deprecated: use vapb.PerformValidationRequest.ExpectedKeyAuthorization instead.
+	// TODO(#7514): Remove this.
 	ProvidedKeyAuthorization string `json:"keyAuthorization,omitempty"`

 	// Contains information about URLs used or redirected to and IPs resolved and
 	// used
 	ValidationRecord []ValidationRecord `json:"validationRecord,omitempty"`
-	// The time at which the server validated the challenge. Required by
-	// RFC8555 if status is valid.
-	Validated *time.Time `json:"validated,omitempty"`
 }

 // ExpectedKeyAuthorization computes the expected KeyAuthorization value for
|
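For orientation, the value that looksLikeKeyAuthorization validates is the RFC 8555 key authorization, token || "." || JWK thumbprint. A hedged sketch of how such a value is computed with go-jose (the function name is illustrative; Thumbprint is the real go-jose method):

package main

import (
	"crypto"
	_ "crypto/sha256" // registers SHA-256 so Thumbprint can use it
	"encoding/base64"

	jose "github.com/go-jose/go-jose/v4"
)

// keyAuthorization computes token || "." || base64url(SHA-256 JWK thumbprint),
// per RFC 8555 section 8.1. Both halves are 43 unpadded base64url characters,
// which is why boulder can reuse its token syntax check for the thumbprint.
func keyAuthorization(token string, accountKey *jose.JSONWebKey) (string, error) {
	thumb, err := accountKey.Thumbprint(crypto.SHA256)
	if err != nil {
		return "", err
	}
	return token + "." + base64.RawURLEncoding.EncodeToString(thumb), nil
}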
@@ -273,43 +259,18 @@ func (ch Challenge) RecordsSane() bool {
	return true
}

// CheckConsistencyForClientOffer checks the fields of a challenge object before it is
// given to the client.
func (ch Challenge) CheckConsistencyForClientOffer() error {
	err := ch.checkConsistency()
	if err != nil {
		return err
	}

	// Before completion, the key authorization field should be empty
	if ch.ProvidedKeyAuthorization != "" {
		return fmt.Errorf("A response to this challenge was already submitted.")
	}
	return nil
}

// CheckConsistencyForValidation checks the fields of a challenge object before it is
// given to the VA.
func (ch Challenge) CheckConsistencyForValidation() error {
	err := ch.checkConsistency()
	if err != nil {
		return err
	}

	// If the challenge is completed, then there should be a key authorization
	return looksLikeKeyAuthorization(ch.ProvidedKeyAuthorization)
}

// checkConsistency checks the sanity of a challenge object before issued to the client.
func (ch Challenge) checkConsistency() error {
// CheckPending ensures that a challenge object is pending and has a token.
// This is used before offering the challenge to the client, and before actually
// validating a challenge.
func (ch Challenge) CheckPending() error {
	if ch.Status != StatusPending {
		return fmt.Errorf("The challenge is not pending.")
		return fmt.Errorf("challenge is not pending")
	}

	// There always needs to be a token
	if !LooksLikeAToken(ch.Token) {
		return fmt.Errorf("The token is missing.")
	if !looksLikeAToken(ch.Token) {
		return fmt.Errorf("token is missing or malformed")
	}

	return nil
}
@@ -76,9 +76,9 @@ func NewToken() string {

var tokenFormat = regexp.MustCompile(`^[\w-]{43}$`)

// LooksLikeAToken checks whether a string represents a 32-octet value in
// looksLikeAToken checks whether a string represents a 32-octet value in
// the URL-safe base64 alphabet.
func LooksLikeAToken(token string) bool {
func looksLikeAToken(token string) bool {
	return tokenFormat.MatchString(token)
}
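The 43 in the regexp follows from the encoding: 32 octets are 256 bits, and unpadded base64url emits one character per 6 bits, so ceil(256/6) = 43 characters, all drawn from [A-Za-z0-9_-]. A sketch of a generator producing values that match it (the assumed shape of boulder's NewToken, not a verbatim copy):

package main

import (
	"crypto/rand"
	"encoding/base64"
)

// newToken returns 32 random octets in the URL-safe base64 alphabet; the
// result is always exactly 43 characters, matching ^[\w-]{43}$.
func newToken() string {
	b := make([]byte, 32)
	if _, err := rand.Read(b); err != nil {
		panic(err) // crypto/rand is not expected to fail
	}
	return base64.RawURLEncoding.EncodeToString(b)
}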
@@ -39,6 +39,9 @@ var (
)

type Config struct {
	// AllowedKeys enables or disables specific key algorithms and sizes. If
	// nil, defaults to just those keys allowed by the Let's Encrypt CPS.
	AllowedKeys *AllowedKeys
	// WeakKeyFile is the path to a JSON file containing truncated modulus hashes
	// of known weak RSA keys. If this config value is empty, then RSA modulus
	// hash checking will be disabled.

@@ -54,6 +57,40 @@ type Config struct {
	FermatRounds int
}

// AllowedKeys is a map of six specific key algorithm and size combinations to
// booleans indicating whether keys of that type are considered good.
type AllowedKeys struct {
	// Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple
	// of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes
	// Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which
	// have a known method to easily compute their private key, such as Debian Weak
	// Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at
	// common key sizes, so we restrict all issuance to those common key sizes.
	RSA2048 bool
	RSA3072 bool
	RSA4096 bool
	// Baseline Requirements, Section 6.1.5 requires that ECDSA keys be valid
	// points on the NIST P-256, P-384, or P-521 elliptic curves.
	ECDSAP256 bool
	ECDSAP384 bool
	ECDSAP521 bool
}

// LetsEncryptCPS encodes the five key algorithms and sizes allowed by the Let's
// Encrypt CPS CV-SSL Subscriber Certificate Profile: RSA 2048, RSA 3072, RSA
// 4096, ECDSA 256 and ECDSA P384.
// https://github.com/letsencrypt/cp-cps/blob/main/CP-CPS.md#dv-ssl-subscriber-certificate
// If this is ever changed, the CP/CPS MUST be changed first.
func LetsEncryptCPS() AllowedKeys {
	return AllowedKeys{
		RSA2048:   true,
		RSA3072:   true,
		RSA4096:   true,
		ECDSAP256: true,
		ECDSAP384: true,
	}
}

// ErrBadKey represents an error with a key. It is distinct from the various
// ways in which an ACME request can have an erroneous key (BadPublicKeyError,
// BadCSRError) because this library is used to check both JWS signing keys and
@@ -74,28 +111,29 @@ type BlockedKeyCheckFunc func(ctx context.Context, keyHash []byte) (bool, error)
// KeyPolicy determines which types of key may be used with various boulder
// operations.
type KeyPolicy struct {
	AllowRSA           bool // Whether RSA keys should be allowed.
	AllowECDSANISTP256 bool // Whether ECDSA NISTP256 keys should be allowed.
	AllowECDSANISTP384 bool // Whether ECDSA NISTP384 keys should be allowed.
	weakRSAList        *WeakRSAKeys
	blockedList        *blockedKeys
	fermatRounds       int
	blockedCheck       BlockedKeyCheckFunc
	allowedKeys  AllowedKeys
	weakRSAList  *WeakRSAKeys
	blockedList  *blockedKeys
	fermatRounds int
	blockedCheck BlockedKeyCheckFunc
}

// NewKeyPolicy returns a KeyPolicy that allows RSA, ECDSA256 and ECDSA384.
// weakKeyFile contains the path to a JSON file containing truncated modulus
// hashes of known weak RSA keys. If this argument is empty RSA modulus hash
// checking will be disabled. blockedKeyFile contains the path to a YAML file
// containing Base64 encoded SHA256 hashes of pkix subject public keys that
// should be blocked. If this argument is empty then no blocked key checking is
// performed.
func NewKeyPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) {
// NewPolicy returns a key policy based on the given configuration, with sane
// defaults. If the config's AllowedKeys is nil, the LetsEncryptCPS AllowedKeys
// is used. If the config's WeakKeyFile or BlockedKeyFile paths are empty, those
// checks are disabled. If the config's FermatRounds is 0, Fermat Factorization
// is disabled.
func NewPolicy(config *Config, bkc BlockedKeyCheckFunc) (KeyPolicy, error) {
	if config == nil {
		config = &Config{}
	}
	kp := KeyPolicy{
		AllowRSA:           true,
		AllowECDSANISTP256: true,
		AllowECDSANISTP384: true,
		blockedCheck:       bkc,
		blockedCheck: bkc,
	}
	if config.AllowedKeys == nil {
		kp.allowedKeys = LetsEncryptCPS()
	} else {
		kp.allowedKeys = *config.AllowedKeys
	}
	if config.WeakKeyFile != "" {
		keyList, err := LoadWeakRSASuffixes(config.WeakKeyFile)
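A short usage sketch of the new constructor, assuming only the exported goodkey API shown above: a nil AllowedKeys falls back to LetsEncryptCPS(), and a nil BlockedKeyCheckFunc disables the external blocked-key lookup.

package main

import (
	"context"
	"crypto"

	"github.com/letsencrypt/boulder/goodkey"
)

// checkKey builds a policy with the CPS defaults plus Fermat checking and
// then vets a public key against it.
func checkKey(pub crypto.PublicKey) error {
	policy, err := goodkey.NewPolicy(&goodkey.Config{FermatRounds: 100}, nil)
	if err != nil {
		return err
	}
	return policy.GoodKey(context.Background(), pub)
}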
@@ -264,42 +302,28 @@ func (policy *KeyPolicy) goodCurve(c elliptic.Curve) (err error) {
	// Simply use a whitelist for now.
	params := c.Params()
	switch {
	case policy.AllowECDSANISTP256 && params == elliptic.P256().Params():
	case policy.allowedKeys.ECDSAP256 && params == elliptic.P256().Params():
		return nil
	case policy.AllowECDSANISTP384 && params == elliptic.P384().Params():
	case policy.allowedKeys.ECDSAP384 && params == elliptic.P384().Params():
		return nil
	case policy.allowedKeys.ECDSAP521 && params == elliptic.P521().Params():
		return nil
	default:
		return badKey("ECDSA curve %v not allowed", params.Name)
	}
}

// Baseline Requirements, Section 6.1.5 requires key size >= 2048 and a multiple
// of 8 bits: https://github.com/cabforum/servercert/blob/main/docs/BR.md#615-key-sizes
// Baseline Requirements, Section 6.1.1.3 requires that we reject any keys which
// have a known method to easily compute their private key, such as Debian Weak
// Keys. Our enforcement mechanism relies on enumerating all Debian Weak Keys at
// common key sizes, so we restrict all issuance to those common key sizes.
var acceptableRSAKeySizes = map[int]bool{
	2048: true,
	3072: true,
	4096: true,
}

// GoodKeyRSA determines if a RSA pubkey meets our requirements
func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) {
	if !policy.AllowRSA {
		return badKey("RSA keys are not allowed")
	}
	if policy.weakRSAList != nil && policy.weakRSAList.Known(key) {
		return badKey("key is on a known weak RSA key list")
	}

func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) error {
	modulus := key.N

	// See comment on acceptableRSAKeySizes above.
	modulusBitLen := modulus.BitLen()
	if !acceptableRSAKeySizes[modulusBitLen] {
		return badKey("key size not supported: %d", modulusBitLen)
	err := policy.goodRSABitLen(key)
	if err != nil {
		return err
	}

	if policy.weakRSAList != nil && policy.weakRSAList.Known(key) {
		return badKey("key is on a known weak RSA key list")
	}

	// Rather than support arbitrary exponents, which significantly increases
@@ -341,6 +365,21 @@ func (policy *KeyPolicy) goodKeyRSA(key *rsa.PublicKey) (err error) {
	return nil
}

func (policy *KeyPolicy) goodRSABitLen(key *rsa.PublicKey) error {
	// See comment on AllowedKeys above.
	modulusBitLen := key.N.BitLen()
	switch {
	case modulusBitLen == 2048 && policy.allowedKeys.RSA2048:
		return nil
	case modulusBitLen == 3072 && policy.allowedKeys.RSA3072:
		return nil
	case modulusBitLen == 4096 && policy.allowedKeys.RSA4096:
		return nil
	default:
		return badKey("key size not supported: %d", modulusBitLen)
	}
}

// Returns true iff integer i is divisible by any of the primes in smallPrimes.
//
// Short circuits; execution time is dependent on i. Do not use this on secret

@@ -400,7 +439,7 @@ func checkPrimeFactorsTooClose(n *big.Int, rounds int) error {
	b2 := new(big.Int)
	b2.Mul(a, a).Sub(b2, n)

	for i := 0; i < rounds; i++ {
	for range rounds {
		// To see if b2 is a perfect square, we take its square root, square that,
		// and check to see if we got the same result back.
		bb.Sqrt(b2).Mul(bb, bb)
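For context on the loop above: checkPrimeFactorsTooClose runs Fermat's factorization, which factors n = p*q quickly whenever p and q sit close to sqrt(n). A self-contained sketch of the method under that assumption (illustrative code, not boulder's implementation):

package main

import (
	"fmt"
	"math/big"
)

// fermatFactor looks for a just above sqrt(n) such that a*a - n is a perfect
// square b*b; then n = (a-b)*(a+b). Close prime factors fall out in very few
// rounds, which is exactly what the boulder check exploits.
func fermatFactor(n *big.Int, rounds int) (p, q *big.Int, ok bool) {
	one := big.NewInt(1)

	a := new(big.Int).Sqrt(n) // floor square root
	if new(big.Int).Mul(a, a).Cmp(n) < 0 {
		a.Add(a, one) // start at ceil(sqrt(n))
	}

	b2 := new(big.Int)
	b2.Mul(a, a).Sub(b2, n)

	bb := new(big.Int)
	for range rounds {
		// b2 is a perfect square iff squaring its integer square root
		// gives b2 back.
		bb.Sqrt(b2)
		if new(big.Int).Mul(bb, bb).Cmp(b2) == 0 {
			return new(big.Int).Sub(a, bb), new(big.Int).Add(a, bb), true
		}
		a.Add(a, one)
		b2.Mul(a, a).Sub(b2, n)
	}
	return nil, nil, false
}

func main() {
	// 101 and 103 are close primes, so their product factors in one round.
	p, q, ok := fermatFactor(big.NewInt(101*103), 10)
	fmt.Println(p, q, ok) // 101 103 true
}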
@@ -69,69 +69,69 @@ type Extensions struct {
	// Deprecated
	// Triggering event of the Github Workflow. Matches the `event_name` claim of ID
	// tokens from Github Actions
	GithubWorkflowTrigger string // OID 1.3.6.1.4.1.57264.1.2
	GithubWorkflowTrigger string `json:"GithubWorkflowTrigger,omitempty" yaml:"github-workflow-trigger,omitempty"` // OID 1.3.6.1.4.1.57264.1.2

	// Deprecated
	// SHA of git commit being built in Github Actions. Matches the `sha` claim of ID
	// tokens from Github Actions
	GithubWorkflowSHA string // OID 1.3.6.1.4.1.57264.1.3
	GithubWorkflowSHA string `json:"GithubWorkflowSHA,omitempty" yaml:"github-workflow-sha,omitempty"` // OID 1.3.6.1.4.1.57264.1.3

	// Deprecated
	// Name of Github Actions Workflow. Matches the `workflow` claim of the ID
	// tokens from Github Actions
	GithubWorkflowName string // OID 1.3.6.1.4.1.57264.1.4
	GithubWorkflowName string `json:"GithubWorkflowName,omitempty" yaml:"github-workflow-name,omitempty"` // OID 1.3.6.1.4.1.57264.1.4

	// Deprecated
	// Repository of the Github Actions Workflow. Matches the `repository` claim of the ID
	// tokens from Github Actions
	GithubWorkflowRepository string // OID 1.3.6.1.4.1.57264.1.5
	GithubWorkflowRepository string `json:"GithubWorkflowRepository,omitempty" yaml:"github-workflow-repository,omitempty"` // OID 1.3.6.1.4.1.57264.1.5

	// Deprecated
	// Git Ref of the Github Actions Workflow. Matches the `ref` claim of the ID tokens
	// from Github Actions
	GithubWorkflowRef string // 1.3.6.1.4.1.57264.1.6
	GithubWorkflowRef string `json:"GithubWorkflowRef,omitempty" yaml:"github-workflow-ref,omitempty"` // 1.3.6.1.4.1.57264.1.6

	// Reference to specific build instructions that are responsible for signing.
	BuildSignerURI string // 1.3.6.1.4.1.57264.1.9
	BuildSignerURI string `json:"BuildSignerURI,omitempty" yaml:"build-signer-uri,omitempty"` // 1.3.6.1.4.1.57264.1.9

	// Immutable reference to the specific version of the build instructions that is responsible for signing.
	BuildSignerDigest string // 1.3.6.1.4.1.57264.1.10
	BuildSignerDigest string `json:"BuildSignerDigest,omitempty" yaml:"build-signer-digest,omitempty"` // 1.3.6.1.4.1.57264.1.10

	// Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure.
	RunnerEnvironment string // 1.3.6.1.4.1.57264.1.11
	RunnerEnvironment string `json:"RunnerEnvironment,omitempty" yaml:"runner-environment,omitempty"` // 1.3.6.1.4.1.57264.1.11

	// Source repository URL that the build was based on.
	SourceRepositoryURI string // 1.3.6.1.4.1.57264.1.12
	SourceRepositoryURI string `json:"SourceRepositoryURI,omitempty" yaml:"source-repository-uri,omitempty"` // 1.3.6.1.4.1.57264.1.12

	// Immutable reference to a specific version of the source code that the build was based upon.
	SourceRepositoryDigest string // 1.3.6.1.4.1.57264.1.13
	SourceRepositoryDigest string `json:"SourceRepositoryDigest,omitempty" yaml:"source-repository-digest,omitempty"` // 1.3.6.1.4.1.57264.1.13

	// Source Repository Ref that the build run was based upon.
	SourceRepositoryRef string // 1.3.6.1.4.1.57264.1.14
	SourceRepositoryRef string `json:"SourceRepositoryRef,omitempty" yaml:"source-repository-ref,omitempty"` // 1.3.6.1.4.1.57264.1.14

	// Immutable identifier for the source repository the workflow was based upon.
	SourceRepositoryIdentifier string // 1.3.6.1.4.1.57264.1.15
	SourceRepositoryIdentifier string `json:"SourceRepositoryIdentifier,omitempty" yaml:"source-repository-identifier,omitempty"` // 1.3.6.1.4.1.57264.1.15

	// Source repository owner URL of the owner of the source repository that the build was based on.
	SourceRepositoryOwnerURI string // 1.3.6.1.4.1.57264.1.16
	SourceRepositoryOwnerURI string `json:"SourceRepositoryOwnerURI,omitempty" yaml:"source-repository-owner-uri,omitempty"` // 1.3.6.1.4.1.57264.1.16

	// Immutable identifier for the owner of the source repository that the workflow was based upon.
	SourceRepositoryOwnerIdentifier string // 1.3.6.1.4.1.57264.1.17
	SourceRepositoryOwnerIdentifier string `json:"SourceRepositoryOwnerIdentifier,omitempty" yaml:"source-repository-owner-identifier,omitempty"` // 1.3.6.1.4.1.57264.1.17

	// Build Config URL to the top-level/initiating build instructions.
	BuildConfigURI string // 1.3.6.1.4.1.57264.1.18
	BuildConfigURI string `json:"BuildConfigURI,omitempty" yaml:"build-config-uri,omitempty"` // 1.3.6.1.4.1.57264.1.18

	// Immutable reference to the specific version of the top-level/initiating build instructions.
	BuildConfigDigest string // 1.3.6.1.4.1.57264.1.19
	BuildConfigDigest string `json:"BuildConfigDigest,omitempty" yaml:"build-config-digest,omitempty"` // 1.3.6.1.4.1.57264.1.19

	// Event or action that initiated the build.
	BuildTrigger string // 1.3.6.1.4.1.57264.1.20
	BuildTrigger string `json:"BuildTrigger,omitempty" yaml:"build-trigger,omitempty"` // 1.3.6.1.4.1.57264.1.20

	// Run Invocation URL to uniquely identify the build execution.
	RunInvocationURI string // 1.3.6.1.4.1.57264.1.21
	RunInvocationURI string `json:"RunInvocationURI,omitempty" yaml:"run-invocation-uri,omitempty"` // 1.3.6.1.4.1.57264.1.21

	// Source repository visibility at the time of signing the certificate.
	SourceRepositoryVisibilityAtSigning string // 1.3.6.1.4.1.57264.1.22
	SourceRepositoryVisibilityAtSigning string `json:"SourceRepositoryVisibilityAtSigning,omitempty" yaml:"source-repository-visibility-at-signing,omitempty"` // 1.3.6.1.4.1.57264.1.22
}

func (e Extensions) Render() ([]pkix.Extension, error) {
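The change above only adds json/yaml struct tags, so the Fulcio extension fields serialize under stable names and empty values are dropped. A hedged illustration (certificate.Extensions is the fulcio type from this diff; output is sketched for the tagged fields only):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/sigstore/fulcio/pkg/certificate"
)

func main() {
	exts := certificate.Extensions{BuildTrigger: "push"}
	b, err := json.Marshal(exts)
	if err != nil {
		panic(err)
	}
	// Tagged empty fields are now omitted; any untagged fields in the
	// struct would still appear with their Go field names.
	fmt.Println(string(b))
}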
@@ -20,7 +20,6 @@ import (
	"crypto"
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/elliptic"
	"crypto/rsa"
	"crypto/sha1" // nolint:gosec
	"crypto/x509"

@@ -104,15 +103,15 @@ func EqualKeys(first, second crypto.PublicKey) error {
	switch pub := first.(type) {
	case *rsa.PublicKey:
		if !pub.Equal(second) {
			return fmt.Errorf(genErrMsg(first, second, "rsa"))
			return errors.New(genErrMsg(first, second, "rsa"))
		}
	case *ecdsa.PublicKey:
		if !pub.Equal(second) {
			return fmt.Errorf(genErrMsg(first, second, "ecdsa"))
			return errors.New(genErrMsg(first, second, "ecdsa"))
		}
	case ed25519.PublicKey:
		if !pub.Equal(second) {
			return fmt.Errorf(genErrMsg(first, second, "ed25519"))
			return errors.New(genErrMsg(first, second, "ed25519"))
		}
	default:
		return errors.New("unsupported key type")
@@ -137,47 +136,50 @@ func genErrMsg(first, second crypto.PublicKey, keyType string) string {

// ValidatePubKey validates the parameters of an RSA, ECDSA, or ED25519 public key.
func ValidatePubKey(pub crypto.PublicKey) error {
	// goodkey policy enforces:
	// * RSA
	//   * Size of key: 2048 <= size <= 4096, size % 8 = 0
	//   * Exponent E = 65537 (Default exponent for OpenSSL and Golang)
	//   * Small primes check for modulus
	//   * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17)
	//   * Key is easily factored with Fermat's factorization method
	// * EC
	//   * Public key Q is not the identity element (Ø)
	//   * Public key Q's x and y are within [0, p-1]
	//   * Public key Q is on the curve
	//   * Public key Q's order matches the subgroups (nQ = Ø)
	allowedKeys := &goodkey.AllowedKeys{
		RSA2048:   true,
		RSA3072:   true,
		RSA4096:   true,
		ECDSAP256: true,
		ECDSAP384: true,
		ECDSAP521: true,
	}
	cfg := &goodkey.Config{
		FermatRounds: 100,
		AllowedKeys:  allowedKeys,
	}
	p, err := goodkey.NewPolicy(cfg, nil)
	if err != nil {
		// Should not occur, only chances to return errors are if fermat rounds
		// are <0 or when loading blocked/weak keys from disk (not used here)
		return errors.New("unable to initialize key policy")
	}

	switch pk := pub.(type) {
	case *rsa.PublicKey:
		// goodkey policy enforces:
		// * Size of key: 2048 <= size <= 4096, size % 8 = 0
		// * Exponent E = 65537 (Default exponent for OpenSSL and Golang)
		// * Small primes check for modulus
		// * Weak keys generated by Infineon hardware (see https://crocs.fi.muni.cz/public/papers/rsa_ccs17)
		// * Key is easily factored with Fermat's factorization method
		p, err := goodkey.NewKeyPolicy(&goodkey.Config{FermatRounds: 100}, nil)
		if err != nil {
			// Should not occur, only chances to return errors are if fermat rounds
			// are <0 or when loading blocked/weak keys from disk (not used here)
			return errors.New("unable to initialize key policy")
		}
		// ctx is unused
		return p.GoodKey(context.Background(), pub)
	case *ecdsa.PublicKey:
		// Unable to use goodkey policy because P-521 curve is not supported
		return validateEcdsaKey(pk)
		// ctx is unused
		return p.GoodKey(context.Background(), pub)
	case ed25519.PublicKey:
		return validateEd25519Key(pk)
	}
	return errors.New("unsupported public key type")
}

// Enforce that the ECDSA key curve is one of:
// * NIST P-256 (secp256r1, prime256v1)
// * NIST P-384
// * NIST P-521.
// Other EC curves, like secp256k1, are not supported by Go.
func validateEcdsaKey(pub *ecdsa.PublicKey) error {
	switch pub.Curve {
	case elliptic.P224():
		return fmt.Errorf("unsupported ec curve, expected NIST P-256, P-384, or P-521")
	case elliptic.P256(), elliptic.P384(), elliptic.P521():
		return nil
	default:
		return fmt.Errorf("unexpected ec curve")
	}
}

// No validations currently, ED25519 supports only one key size.
func validateEd25519Key(_ ed25519.PublicKey) error {
	return nil
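A usage sketch of the rewritten function, assuming only the exported cryptoutils API: P-521 keys now pass through the same goodkey policy as everything else instead of a separate local curve check, because the policy enforces ECDSAP521 directly.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"

	"github.com/sigstore/sigstore/pkg/cryptoutils"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	// Previously P-521 took the validateEcdsaKey fallback; now goodkey vets it.
	if err := cryptoutils.ValidatePubKey(priv.Public()); err != nil {
		log.Fatal(err)
	}
}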
@@ -135,8 +135,9 @@ func (d *DeviceFlowTokenGetter) deviceFlow(p *oidc.Provider, clientID, redirectU
	// Some providers use a secret here, we don't need for sigstore oauth one so leave it off.
	data := url.Values{
		"grant_type":    []string{"urn:ietf:params:oauth:grant-type:device_code"},
		"client_id":     []string{clientID},
		"device_code":   []string{parsed.DeviceCode},
		"scope":         []string{"openid", "email"},
		"scope":         []string{"openid email"},
		"code_verifier": []string{pkce.Value},
	}
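The scope fix matters because url.Values emits one form parameter per slice element, while RFC 6749 section 3.3 expects a single space-delimited scope value. A quick illustration:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	before := url.Values{"scope": []string{"openid", "email"}}
	after := url.Values{"scope": []string{"openid email"}}
	fmt.Println(before.Encode()) // scope=openid&scope=email  (two parameters)
	fmt.Println(after.Encode())  // scope=openid+email        (one parameter)
}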
@@ -114,10 +114,24 @@ func OIDConnect(issuer, id, secret, redirectURL string, tg TokenGetter) (*OIDCID
	return tg.GetIDToken(provider, config)
}

type stringAsBool bool

func (sb *stringAsBool) UnmarshalJSON(b []byte) error {
	switch string(b) {
	case "true", `"true"`, "True", `"True"`:
		*sb = true
	case "false", `"false"`, "False", `"False"`:
		*sb = false
	default:
		return errors.New("invalid value for boolean")
	}
	return nil
}

type claims struct {
	Email    string `json:"email"`
	Verified bool   `json:"email_verified"`
	Subject  string `json:"sub"`
	Email    string       `json:"email"`
	Verified stringAsBool `json:"email_verified"`
	Subject  string       `json:"sub"`
}

// SubjectFromToken extracts the subject claim from an OIDC Identity Token
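stringAsBool exists because some OIDC providers serialize email_verified as the JSON string "true" rather than a boolean. A small demonstration mirroring the type above (the wrapper struct is illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

type stringAsBool bool

func (sb *stringAsBool) UnmarshalJSON(b []byte) error {
	switch string(b) {
	case "true", `"true"`, "True", `"True"`:
		*sb = true
	case "false", `"false"`, "False", `"False"`:
		*sb = false
	default:
		return fmt.Errorf("invalid value for boolean: %s", b)
	}
	return nil
}

func main() {
	var c struct {
		Verified stringAsBool `json:"email_verified"`
	}
	for _, doc := range []string{`{"email_verified":true}`, `{"email_verified":"true"}`} {
		if err := json.Unmarshal([]byte(doc), &c); err != nil {
			panic(err)
		}
		fmt.Println(bool(c.Verified)) // true both times
	}
}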
@@ -129,6 +143,16 @@ func SubjectFromToken(tok *oidc.IDToken) (string, error) {
	return subjectFromClaims(claims)
}

// SubjectFromUnverifiedToken extracts the subject claim from the raw bytes of
// an OIDC identity token.
func SubjectFromUnverifiedToken(tok []byte) (string, error) {
	claims := claims{}
	if err := json.Unmarshal(tok, &claims); err != nil {
		return "", err
	}
	return subjectFromClaims(claims)
}

func subjectFromClaims(c claims) (string, error) {
	if c.Email != "" {
		if !c.Verified {
@@ -12,33 +12,47 @@ import (
	"errors"
	"fmt"
	"io"
	"math"
	"os"
	"time"

	"github.com/google/uuid"
)

var errAlignmentOverflow = errors.New("integer overflow when calculating alignment")

// nextAligned finds the next offset that satisfies alignment.
func nextAligned(offset int64, alignment int) int64 {
func nextAligned(offset int64, alignment int) (int64, error) {
	align64 := uint64(alignment)
	offset64 := uint64(offset)

	if align64 != 0 && offset64%align64 != 0 {
		offset64 = (offset64 & ^(align64 - 1)) + align64
	if align64 <= 0 || offset64%align64 == 0 {
		return offset, nil
	}

	return int64(offset64)
	offset64 += (align64 - offset64%align64)

	if offset64 > math.MaxInt64 {
		return 0, errAlignmentOverflow
	}

	//nolint:gosec // Overflow handled above.
	return int64(offset64), nil
}

// writeDataObjectAt writes the data object described by di to ws, using time t, recording details
// in d. The object is written at the first position that satisfies the alignment requirements
// described by di following offsetUnaligned.
func writeDataObjectAt(ws io.WriteSeeker, offsetUnaligned int64, di DescriptorInput, t time.Time, d *rawDescriptor) error { //nolint:lll
	offset, err := ws.Seek(nextAligned(offsetUnaligned, di.opts.alignment), io.SeekStart)
	offset, err := nextAligned(offsetUnaligned, di.opts.alignment)
	if err != nil {
		return err
	}

	if _, err := ws.Seek(offset, io.SeekStart); err != nil {
		return err
	}

	n, err := io.Copy(ws, di.r)
	if err != nil {
		return err
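The rewrite above swaps the old power-of-two bit mask for plain modulo arithmetic and guards the cast back to int64. A standalone worked example (the helper is unexported in package sif, so this sketch duplicates its logic rather than importing it):

package main

import (
	"errors"
	"fmt"
	"math"
)

var errAlignmentOverflow = errors.New("integer overflow when calculating alignment")

// nextAligned mirrors the helper above: round offset up to the next multiple
// of alignment, failing if the result would not fit in an int64.
func nextAligned(offset int64, alignment int) (int64, error) {
	align64 := uint64(alignment)
	offset64 := uint64(offset)

	if align64 == 0 || offset64%align64 == 0 {
		return offset, nil
	}

	offset64 += align64 - offset64%align64

	if offset64 > math.MaxInt64 {
		return 0, errAlignmentOverflow
	}

	return int64(offset64), nil
}

func main() {
	fmt.Println(nextAligned(10, 8))             // 16 <nil>: 10%8 = 2, so 10+(8-2) = 16
	fmt.Println(nextAligned(9, 6))              // 12 <nil>: modulo handles non-power-of-two alignments too
	fmt.Println(nextAligned(math.MaxInt64, 64)) // 0 and the overflow error: rounding up would pass MaxInt64
}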
@@ -72,6 +86,7 @@ func (f *FileImage) calculatedDataSize() int64 {
var (
	errInsufficientCapacity = errors.New("insufficient descriptor capacity to add data object(s) to image")
	errPrimaryPartition     = errors.New("image already contains a primary partition")
	errObjectIDOverflow     = errors.New("object ID would overflow")
)

// writeDataObject writes the data object described by di to f, using time t, recording details in

@@ -81,6 +96,11 @@ func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) erro
		return errInsufficientCapacity
	}

	// We derive the ID from i, so make sure the ID will not overflow.
	if int64(i) >= math.MaxUint32 {
		return errObjectIDOverflow
	}

	// If this is a primary partition, verify there isn't another primary partition, and update the
	// architecture in the global header.
	if p, ok := di.opts.md.(partition); ok && p.Parttype == PartPrimSys {

@@ -92,7 +112,7 @@ func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) erro
	}

	d := &f.rds[i]
	d.ID = uint32(i) + 1
	d.ID = uint32(i) + 1 //nolint:gosec // Overflow handled above.

	f.h.DataSize = f.calculatedDataSize()
@@ -213,8 +233,16 @@ func OptCreateWithCloseOnUnload(b bool) CreateOpt {
	}
}

var errDescriptorCapacityNotSupported = errors.New("descriptor capacity not supported")

// createContainer creates a new SIF container file in rw, according to opts.
func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) {
	// The supported number of descriptors is limited by the unsigned 32-bit ID field in each
	// rawDescriptor.
	if co.descriptorCapacity >= math.MaxUint32 {
		return nil, errDescriptorCapacityNotSupported
	}

	rds := make([]rawDescriptor, co.descriptorCapacity)
	rdsSize := int64(binary.Size(rds))
@@ -49,6 +49,13 @@ type Token struct {
	// mechanisms for that TokenSource will not be used.
	Expiry time.Time `json:"expiry,omitempty"`

	// ExpiresIn is the OAuth2 wire format "expires_in" field,
	// which specifies how many seconds later the token expires,
	// relative to an unknown time base approximately around "now".
	// It is the application's responsibility to populate
	// `Expiry` from `ExpiresIn` when required.
	ExpiresIn int64 `json:"expires_in,omitempty"`

	// raw optionally contains extra metadata from the server
	// when updating a token.
	raw interface{}
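Since the comment above leaves populating Expiry to the application, a sketch of that bookkeeping (the helper is illustrative; tok is an *oauth2.Token freshly decoded from a token response, and received is when the response arrived):

package main

import (
	"time"

	"golang.org/x/oauth2"
)

// setExpiry fills Expiry from the wire-format ExpiresIn, anchored at the
// moment the token response was received.
func setExpiry(tok *oauth2.Token, received time.Time) {
	if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
		tok.Expiry = received.Add(time.Duration(tok.ExpiresIn) * time.Second)
	}
}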
@@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are

@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
   * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
@@ -1,4 +1,4 @@
# dario.cat/mergo v1.0.0
# dario.cat/mergo v1.0.1
## explicit; go 1.13
dario.cat/mergo
# github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161

@@ -244,8 +244,8 @@ github.com/containers/conmon/runner/config
# github.com/containers/gvisor-tap-vsock v0.7.5
## explicit; go 1.21
github.com/containers/gvisor-tap-vsock/pkg/types
# github.com/containers/image/v5 v5.32.1-0.20240806084436-e3e9287ca8e6
## explicit; go 1.21.0
# github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46
## explicit; go 1.22.6
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
github.com/containers/image/v5/directory/explicitfilepath

@@ -406,7 +406,7 @@ github.com/containers/storage/types
## explicit; go 1.19
github.com/containers/winquit/pkg/winquit
github.com/containers/winquit/pkg/winquit/win32
# github.com/coreos/go-oidc/v3 v3.10.0
# github.com/coreos/go-oidc/v3 v3.11.0
## explicit; go 1.21
github.com/coreos/go-oidc/v3/oidc
# github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f

@@ -551,7 +551,7 @@ github.com/gin-gonic/gin/render
github.com/go-jose/go-jose/v3
github.com/go-jose/go-jose/v3/cipher
github.com/go-jose/go-jose/v3/json
# github.com/go-jose/go-jose/v4 v4.0.2
# github.com/go-jose/go-jose/v4 v4.0.4
## explicit; go 1.21
github.com/go-jose/go-jose/v4
github.com/go-jose/go-jose/v4/cipher

@@ -655,7 +655,7 @@ github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
# github.com/google/go-containerregistry v0.20.1
# github.com/google/go-containerregistry v0.20.2
## explicit; go 1.18
github.com/google/go-containerregistry/pkg/name
github.com/google/go-containerregistry/pkg/v1

@@ -740,8 +740,8 @@ github.com/kr/fs
# github.com/leodido/go-urn v1.2.4
## explicit; go 1.16
github.com/leodido/go-urn
# github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0
## explicit; go 1.21
# github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec
## explicit; go 1.22.0
github.com/letsencrypt/boulder/core
github.com/letsencrypt/boulder/goodkey
github.com/letsencrypt/boulder/identifier

@@ -992,8 +992,8 @@ github.com/shirou/gopsutil/v4/process
# github.com/shoenig/go-m1cpu v0.1.6
## explicit; go 1.20
github.com/shoenig/go-m1cpu
# github.com/sigstore/fulcio v1.4.5
## explicit; go 1.21
# github.com/sigstore/fulcio v1.6.4
## explicit; go 1.22.6
github.com/sigstore/fulcio/pkg/api
github.com/sigstore/fulcio/pkg/certificate
# github.com/sigstore/rekor v1.3.6

@@ -1006,8 +1006,8 @@ github.com/sigstore/rekor/pkg/generated/client/pubkey
github.com/sigstore/rekor/pkg/generated/client/tlog
github.com/sigstore/rekor/pkg/generated/models
github.com/sigstore/rekor/pkg/util
# github.com/sigstore/sigstore v1.8.4
## explicit; go 1.21
# github.com/sigstore/sigstore v1.8.9
## explicit; go 1.22.5
github.com/sigstore/sigstore/pkg/cryptoutils
github.com/sigstore/sigstore/pkg/oauth
github.com/sigstore/sigstore/pkg/oauthflow

@@ -1034,8 +1034,8 @@ github.com/stefanberger/go-pkcs11uri
## explicit; go 1.17
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# github.com/sylabs/sif/v2 v2.18.0
## explicit; go 1.21.0
# github.com/sylabs/sif/v2 v2.19.1
## explicit; go 1.22.5
github.com/sylabs/sif/v2/pkg/sif
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
## explicit

@@ -1212,7 +1212,7 @@ golang.org/x/net/internal/socks
golang.org/x/net/internal/timeseries
golang.org/x/net/proxy
golang.org/x/net/trace
# golang.org/x/oauth2 v0.22.0
# golang.org/x/oauth2 v0.23.0
## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/internal

@@ -1253,15 +1253,15 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# golang.org/x/time v0.5.0
# golang.org/x/time v0.6.0
## explicit; go 1.18
golang.org/x/time/rate
# golang.org/x/tools v0.24.0
## explicit; go 1.19
golang.org/x/tools/cover
golang.org/x/tools/go/ast/inspector
# google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094
## explicit; go 1.20
# google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c
## explicit; go 1.21
google.golang.org/genproto/googleapis/rpc/status
# google.golang.org/grpc v1.65.0
## explicit; go 1.21