Mirror of https://github.com/containers/podman.git

Commit 121caa04ca (parent 2e1e7107b7)

Add newer c/i to support artifacts

Adding in a newer containers/image to make artifact a little easier to merge.

Signed-off-by: Brent Baude <bbaude@redhat.com>
go.mod (16 changed lines)

@@ -3,7 +3,7 @@ module github.com/containers/podman/v5
 // Warning: if there is a "toolchain" directive anywhere in this file (and most of the
 // time there shouldn't be), its version must be an exact match to the "go" directive.

-go 1.22.6
+go 1.22.8

 require (
 	github.com/BurntSushi/toml v1.4.0
@@ -17,7 +17,7 @@ require (
 	github.com/containers/common v0.61.1-0.20250106142059-514bf04d8e6a
 	github.com/containers/conmon v2.0.20+incompatible
 	github.com/containers/gvisor-tap-vsock v0.8.1
-	github.com/containers/image/v5 v5.33.0
+	github.com/containers/image/v5 v5.33.1-0.20250107140133-43c2a741242f
 	github.com/containers/libhvee v0.9.0
 	github.com/containers/ocicrypt v1.2.1
 	github.com/containers/psgo v1.9.0
@@ -104,7 +104,7 @@ require (
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/platforms v0.2.1 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
-	github.com/containerd/typeurl/v2 v2.2.0 // indirect
+	github.com/containerd/typeurl/v2 v2.2.3 // indirect
 	github.com/containernetworking/cni v1.2.3 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
 	github.com/containers/luksy v0.0.0-20241007190014-e2530d691420 // indirect
@@ -189,19 +189,19 @@ require (
 	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
-	github.com/proglottis/gpgme v0.1.3 // indirect
+	github.com/proglottis/gpgme v0.1.4 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/seccomp/libseccomp-golang v0.10.0 // indirect
-	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
 	github.com/segmentio/ksuid v1.0.4 // indirect
 	github.com/sigstore/fulcio v1.6.4 // indirect
 	github.com/sigstore/rekor v1.3.6 // indirect
-	github.com/sigstore/sigstore v1.8.9 // indirect
+	github.com/sigstore/sigstore v1.8.11 // indirect
 	github.com/skeema/knownhosts v1.3.0 // indirect
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/smallstep/pkcs7 v0.1.1 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
-	github.com/sylabs/sif/v2 v2.19.1 // indirect
+	github.com/sylabs/sif/v2 v2.20.2 // indirect
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
@@ -222,7 +222,7 @@ require (
 	go.opentelemetry.io/otel/trace v1.28.0 // indirect
 	golang.org/x/arch v0.8.0 // indirect
 	golang.org/x/mod v0.22.0 // indirect
-	golang.org/x/oauth2 v0.23.0 // indirect
+	golang.org/x/oauth2 v0.25.0 // indirect
 	golang.org/x/time v0.6.0 // indirect
 	golang.org/x/tools v0.28.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
go.sum (32 changed lines)

@@ -70,8 +70,8 @@ github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpS
 github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
 github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
 github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
-github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
-github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
+github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
+github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
 github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8FuJbEslXM=
 github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M=
 github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ=
@@ -84,8 +84,8 @@ github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6J
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/gvisor-tap-vsock v0.8.1 h1:88qkOjGMF9NmyoVG/orUw73mdwj3z4aOwEbRS01hF78=
 github.com/containers/gvisor-tap-vsock v0.8.1/go.mod h1:gjdY4JBWnynrXsxT8+OM7peEOd4FCZpoOWjSadHva0g=
-github.com/containers/image/v5 v5.33.0 h1:6oPEFwTurf7pDTGw7TghqGs8K0+OvPtY/UyzU0B2DfE=
-github.com/containers/image/v5 v5.33.0/go.mod h1:T7HpASmvnp2H1u4cyckMvCzLuYgpD18dSmabSw0AcHk=
+github.com/containers/image/v5 v5.33.1-0.20250107140133-43c2a741242f h1:QbsNC5PejcUiq01uksekZ2cqCUhhQQa0oB9LMk/eXPk=
+github.com/containers/image/v5 v5.33.1-0.20250107140133-43c2a741242f/go.mod h1:aUBwvcAgHNVsrU1uoei3H+RNAtJVnz65GRKAPUk5a0g=
 github.com/containers/libhvee v0.9.0 h1:5UxJMka1lDfxTeITA25Pd8QVVttJAG43eQS1Getw1tc=
 github.com/containers/libhvee v0.9.0/go.mod h1:p44VJd8jMIx3SRN1eM6PxfCEwXQE0lJ0dQppCAlzjPQ=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
@@ -129,8 +129,8 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
 github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ=
-github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v27.4.1+incompatible h1:VzPiUlRJ/xh+otB75gva3r05isHMo5wXDfPRi5/b4hI=
+github.com/docker/cli v27.4.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4=
@@ -424,8 +424,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
 github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
-github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
-github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
+github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M=
+github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM=
 github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
 github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -447,8 +447,8 @@ github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEW
 github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
 github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
 github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
-github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
-github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
+github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc=
+github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw=
 github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
 github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
 github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
@@ -459,8 +459,8 @@ github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY
 github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs=
 github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8=
 github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc=
-github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk=
-github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w=
+github.com/sigstore/sigstore v1.8.11 h1:tEqeQqbT+awtM87ec9KEeSUxT/AFvJNawneYJyAkFrQ=
+github.com/sigstore/sigstore v1.8.11/go.mod h1:fdrFQosxCQ4wTL5H1NrZcQkqQ72AQbPjtpcL2QOGKV0=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY=
@@ -489,8 +489,8 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/sylabs/sif/v2 v2.19.1 h1:1eeMmFc8elqJe60ZiWwXgL3gMheb0IP4GmNZ4q0IEA0=
-github.com/sylabs/sif/v2 v2.19.1/go.mod h1:U1SUhvl8X1JIxAylC0DYz1fa/Xba6EMZD1dGPGBH83E=
+github.com/sylabs/sif/v2 v2.20.2 h1:HGEPzauCHhIosw5o6xmT3jczuKEuaFzSfdjAsH33vYw=
+github.com/sylabs/sif/v2 v2.20.2/go.mod h1:WyYryGRaR4Wp21SAymm5pK0p45qzZCSRiZMFvUZiuhc=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
@@ -617,8 +617,8 @@ golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
 golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
-golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
+golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -18,3 +18,9 @@ As a containerd sub-project, you will find the:
 * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)

 information in our [`containerd/project`](https://github.com/containerd/project) repository.
+
+## Optional
+
+By default, support for gogoproto is available along side the standard Google
+protobuf types.
+You can choose to leave gogo support out by using the `!no_gogo` build tag.
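Note: the `!no_gogo` constraint mentioned above is an ordinary Go build tag: the gogo-specific file is compiled by default and excluded when the tag is set, e.g. with `go build -tags no_gogo ./...`. A minimal sketch, not part of this commit, of a file guarded the same way:

// Hypothetical sketch (not from the diff): this file is compiled by default
// and skipped when building with the no_gogo tag (go build -tags no_gogo ./...).
//go:build !no_gogo

package typeurl

// Anything gogo-specific (such as a handler registered in an init function)
// lives behind this constraint, so setting the tag drops the gogo/protobuf
// dependency from the build.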
@@ -24,7 +24,6 @@ import (
 	"reflect"
 	"sync"

-	gogoproto "github.com/gogo/protobuf/proto"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/reflect/protoregistry"
 	"google.golang.org/protobuf/types/known/anypb"
@@ -33,8 +32,16 @@ import (
 var (
 	mu       sync.RWMutex
 	registry = make(map[reflect.Type]string)
+	handlers []handler
 )

+type handler interface {
+	Marshaller(interface{}) func() ([]byte, error)
+	Unmarshaller(interface{}) func([]byte) error
+	TypeURL(interface{}) string
+	GetType(url string) (reflect.Type, bool)
+}
+
 // Definitions of common error types used throughout typeurl.
 //
 // These error types are used with errors.Wrap and errors.Wrapf to add context
@@ -112,9 +119,12 @@ func TypeURL(v interface{}) (string, error) {
 	switch t := v.(type) {
 	case proto.Message:
 		return string(t.ProtoReflect().Descriptor().FullName()), nil
-	case gogoproto.Message:
-		return gogoproto.MessageName(t), nil
 	default:
+		for _, h := range handlers {
+			if u := h.TypeURL(v); u != "" {
+				return u, nil
+			}
+		}
 		return "", fmt.Errorf("type %s: %w", reflect.TypeOf(v), ErrNotFound)
 	}
 }
@@ -149,13 +159,20 @@ func MarshalAny(v interface{}) (Any, error) {
 		marshal = func(v interface{}) ([]byte, error) {
 			return proto.Marshal(t)
 		}
-	case gogoproto.Message:
-		marshal = func(v interface{}) ([]byte, error) {
-			return gogoproto.Marshal(t)
-		}
 	default:
-		marshal = json.Marshal
+		for _, h := range handlers {
+			if m := h.Marshaller(v); m != nil {
+				marshal = func(v interface{}) ([]byte, error) {
+					return m()
+				}
+				break
+			}
+		}
+
+		if marshal == nil {
+			marshal = json.Marshal
+		}
 	}

 	url, err := TypeURL(v)
 	if err != nil {
@@ -223,13 +240,13 @@ func MarshalAnyToProto(from interface{}) (*anypb.Any, error) {
 }

 func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
-	t, err := getTypeByUrl(typeURL)
+	t, isProto, err := getTypeByUrl(typeURL)
 	if err != nil {
 		return nil, err
 	}

 	if v == nil {
-		v = reflect.New(t.t).Interface()
+		v = reflect.New(t).Interface()
 	} else {
 		// Validate interface type provided by client
 		vURL, err := TypeURL(v)
@@ -241,51 +258,45 @@ func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error)
 		}
 	}

-	if t.isProto {
-		switch t := v.(type) {
-		case proto.Message:
-			err = proto.Unmarshal(value, t)
-		case gogoproto.Message:
-			err = gogoproto.Unmarshal(value, t)
-		}
-	} else {
-		err = json.Unmarshal(value, v)
-	}
-
-	return v, err
-}
+	if isProto {
+		pm, ok := v.(proto.Message)
+		if ok {
+			return v, proto.Unmarshal(value, pm)
+		}
+
+		for _, h := range handlers {
+			if unmarshal := h.Unmarshaller(v); unmarshal != nil {
+				return v, unmarshal(value)
+			}
+		}
+	}
+
+	// fallback to json unmarshaller
+	return v, json.Unmarshal(value, v)
+}

-type urlType struct {
-	t       reflect.Type
-	isProto bool
-}
-
-func getTypeByUrl(url string) (urlType, error) {
+func getTypeByUrl(url string) (_ reflect.Type, isProto bool, _ error) {
 	mu.RLock()
 	for t, u := range registry {
 		if u == url {
 			mu.RUnlock()
-			return urlType{
-				t: t,
-			}, nil
+			return t, false, nil
 		}
 	}
 	mu.RUnlock()
-	// fallback to proto registry
-	t := gogoproto.MessageType(url)
-	if t != nil {
-		return urlType{
-			// get the underlying Elem because proto returns a pointer to the type
-			t:       t.Elem(),
-			isProto: true,
-		}, nil
-	}
 	mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
 	if err != nil {
-		return urlType{}, fmt.Errorf("type with url %s: %w", url, ErrNotFound)
+		if errors.Is(err, protoregistry.NotFound) {
+			for _, h := range handlers {
+				if t, isProto := h.GetType(url); t != nil {
+					return t, isProto, nil
+				}
+			}
+		}
+		return nil, false, fmt.Errorf("type with url %s: %w", url, ErrNotFound)
 	}
 	empty := mt.New().Interface()
-	return urlType{t: reflect.TypeOf(empty).Elem(), isProto: true}, nil
+	return reflect.TypeOf(empty).Elem(), true, nil
 }

 func tryDereference(v interface{}) reflect.Type {
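Note: the hunks above change internal plumbing only; callers keep using Register, MarshalAny and UnmarshalAny. A small usage sketch (the Demo struct and URL are made up, and a plain struct like this takes the JSON fallback path shown above):

package main

import (
	"fmt"

	typeurl "github.com/containerd/typeurl/v2"
)

// Demo is a plain struct, so it is handled by the JSON fallback rather than
// the proto or gogo handlers.
type Demo struct {
	Name string `json:"name"`
}

func main() {
	// Associate the Go type with a URL (the URL here is made up).
	typeurl.Register(&Demo{}, "example.com/typeurl.Demo")

	a, err := typeurl.MarshalAny(&Demo{Name: "artifact"})
	if err != nil {
		panic(err)
	}

	v, err := typeurl.UnmarshalAny(a)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.(*Demo).Name) // artifact
}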
@ -0,0 +1,68 @@
|
||||||
|
//go:build !no_gogo
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package typeurl
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
gogoproto "github.com/gogo/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
handlers = append(handlers, gogoHandler{})
|
||||||
|
}
|
||||||
|
|
||||||
|
type gogoHandler struct{}
|
||||||
|
|
||||||
|
func (gogoHandler) Marshaller(v interface{}) func() ([]byte, error) {
|
||||||
|
pm, ok := v.(gogoproto.Message)
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return func() ([]byte, error) {
|
||||||
|
return gogoproto.Marshal(pm)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (gogoHandler) Unmarshaller(v interface{}) func([]byte) error {
|
||||||
|
pm, ok := v.(gogoproto.Message)
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return func(dt []byte) error {
|
||||||
|
return gogoproto.Unmarshal(dt, pm)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (gogoHandler) TypeURL(v interface{}) string {
|
||||||
|
pm, ok := v.(gogoproto.Message)
|
||||||
|
if !ok {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return gogoproto.MessageName(pm)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (gogoHandler) GetType(url string) (reflect.Type, bool) {
|
||||||
|
t := gogoproto.MessageType(url)
|
||||||
|
if t == nil {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
return t.Elem(), true
|
||||||
|
}
|
|
@@ -109,7 +109,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
 		}
 	}

-	if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
+	if err := prepareImageConfigForDest(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
 		return copySingleImageResult{}, err
 	}

@@ -316,12 +316,15 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
 	return res, nil
 }

-// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
-func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
+// prepareImageConfigForDest enforces dest.MustMatchRuntimeOS and handles dest.NoteOriginalOCIConfig, if necessary.
+func prepareImageConfigForDest(ctx context.Context, sys *types.SystemContext, src types.Image, dest private.ImageDestination) error {
+	ociConfig, configErr := src.OCIConfig(ctx)
+	// Do not fail on configErr here, this might be an artifact
+	// and maybe nothing needs this to be a container image and to process the config.
+
 	if dest.MustMatchRuntimeOS() {
-		c, err := src.OCIConfig(ctx)
-		if err != nil {
-			return fmt.Errorf("parsing image configuration: %w", err)
+		if configErr != nil {
+			return fmt.Errorf("parsing image configuration: %w", configErr)
 		}
 		wantedPlatforms := platform.WantedPlatforms(sys)

@@ -331,7 +334,7 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
 			// For a transitional period, this might trigger warnings because the Variant
 			// field was added to OCI config only recently. If this turns out to be too noisy,
 			// revert this check to only look for (OS, Architecture).
-			if platform.MatchesPlatform(c.Platform, wantedPlatform) {
+			if platform.MatchesPlatform(ociConfig.Platform, wantedPlatform) {
 				match = true
 				break
 			}
@@ -339,9 +342,14 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
 		}
 		if !match {
 			logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q",
-				c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", "))
+				ociConfig.OS, ociConfig.Architecture, ociConfig.Variant, strings.Join(options.list, ", "))
 		}
 	}
+
+	if err := dest.NoteOriginalOCIConfig(ociConfig, configErr); err != nil {
+		return err
+	}
+
 	return nil
 }

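Note: the reworked prepareImageConfigForDest fetches the OCI config once, keeps both the value and the error, and only fails when something actually needs a valid config, because the source may be an OCI artifact without a parseable image config. A minimal standalone sketch of that deferred-error pattern, with hypothetical names:

package main

import (
	"errors"
	"fmt"
)

// config stands in for *imgspecv1.Image; loadConfig stands in for src.OCIConfig(ctx).
type config struct{ OS string }

func loadConfig(isArtifact bool) (*config, error) {
	if isArtifact {
		return nil, errors.New("config is not an image config")
	}
	return &config{OS: "linux"}, nil
}

func main() {
	cfg, cfgErr := loadConfig(true) // do not fail yet: the source may be an artifact

	mustMatchRuntimeOS := false
	if mustMatchRuntimeOS {
		// Only a consumer that truly needs the config turns cfgErr into a failure.
		if cfgErr != nil {
			panic(fmt.Errorf("parsing image configuration: %w", cfgErr))
		}
		fmt.Println("image OS:", cfg.OS)
	}

	// Other consumers (like NoteOriginalOCIConfig) receive both the value and
	// the error and decide for themselves whether the error matters.
	fmt.Println("config:", cfg, "err:", cfgErr)
}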
@@ -29,6 +29,7 @@ var ErrNotContainerImageDir = errors.New("not a containers image directory, don'
 type dirImageDestination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 	stubs.AlwaysSupportsSignatures

@@ -3,6 +3,7 @@ package daemon
 import (
 	"net/http"
 	"path/filepath"
+	"time"

 	"github.com/containers/image/v5/types"
 	dockerclient "github.com/docker/docker/client"
@@ -47,6 +48,7 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
 	}
 	switch serverURL.Scheme {
 	case "unix": // Nothing
+	case "npipe": // Nothing
 	case "http":
 		hc := httpConfig()
 		opts = append(opts, dockerclient.WithHTTPClient(hc))
@@ -82,6 +84,11 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
 		Transport: &http.Transport{
 			Proxy:           http.ProxyFromEnvironment,
 			TLSClientConfig: tlsc,
+			// In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility.
+			// These idle connection limits really only apply to long-running clients, which is not our case here;
+			// we include the same values purely for symmetry.
+			MaxIdleConns:    6,
+			IdleConnTimeout: 30 * time.Second,
 		},
 		CheckRedirect: dockerclient.CheckRedirect,
 	}, nil
@@ -92,6 +99,11 @@ func httpConfig() *http.Client {
 		Transport: &http.Transport{
 			Proxy:           http.ProxyFromEnvironment,
 			TLSClientConfig: nil,
+			// In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility.
+			// These idle connection limits really only apply to long-running clients, which is not our case here;
+			// we include the same values purely for symmetry.
+			MaxIdleConns:    6,
+			IdleConnTimeout: 30 * time.Second,
 		},
 		CheckRedirect: dockerclient.CheckRedirect,
 	}
@@ -24,7 +24,6 @@ import (
 	"slices"

 	"github.com/docker/distribution/registry/api/errcode"
-	dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
 )

 // errNoErrorsInBody is returned when an HTTP response body parses to an empty
@@ -114,10 +113,11 @@ func mergeErrors(err1, err2 error) error {
 // UnexpectedHTTPStatusError returned for response code outside of expected
 // range.
 func handleErrorResponse(resp *http.Response) error {
-	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
+	switch {
+	case resp.StatusCode == http.StatusUnauthorized:
 		// Check for OAuth errors within the `WWW-Authenticate` header first
 		// See https://tools.ietf.org/html/rfc6750#section-3
-		for _, c := range dockerChallenge.ResponseChallenges(resp) {
+		for _, c := range parseAuthHeader(resp.Header) {
 			if c.Scheme == "bearer" {
 				var err errcode.Error
 				// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
@@ -138,6 +138,8 @@ func handleErrorResponse(resp *http.Response) error {
 				return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
 			}
 		}
+		fallthrough
+	case resp.StatusCode >= 400 && resp.StatusCode < 500:
 		err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
 		if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
 			return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
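Note: the rewritten handleErrorResponse relies on Go's fallthrough: the 401 case inspects the WWW-Authenticate challenges first and then falls through into the generic 4xx case. A tiny standalone sketch of that control flow (status handling simplified for illustration):

package main

import (
	"fmt"
	"net/http"
)

func classify(status int) string {
	switch {
	case status == http.StatusUnauthorized:
		fmt.Println("check WWW-Authenticate challenges first")
		fallthrough // then treat it like any other 4xx response
	case status >= 400 && status < 500:
		return "client error"
	default:
		return "something else"
	}
}

func main() {
	fmt.Println(classify(401)) // client error (after the challenge check)
	fmt.Println(classify(404)) // client error
	fmt.Println(classify(500)) // something else
}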
@@ -41,6 +41,7 @@ import (
 type dockerImageDestination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize

 	ref dockerReference
@@ -340,6 +340,10 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
 			}
 			return
 		}
+		if parts >= len(chunks) {
+			errs <- errors.New("too many parts returned by the server")
+			break
+		}
 		s := signalCloseReader{
 			closed: make(chan struct{}),
 			stream: p,
@@ -24,6 +24,7 @@ import (
 type Destination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 	stubs.NoSignaturesInitialize

vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go (new file, 16 lines, generated, vendored)

@@ -0,0 +1,16 @@
+package stubs
+
+import (
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// IgnoresOriginalOCIConfig implements NoteOriginalOCIConfig() that does nothing.
+type IgnoresOriginalOCIConfig struct{}
+
+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (stub IgnoresOriginalOCIConfig) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	return nil
+}
@@ -14,6 +14,7 @@ import (
 // wrapped provides the private.ImageDestination operations
 // for a destination that only implements types.ImageDestination
 type wrapped struct {
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize

 	types.ImageDestination
vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go (32 changed lines, generated, vendored)

@@ -74,20 +74,20 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat

 // UpdateInstances updates the sizes, digests, and media types of the manifests
 // which the list catalogs.
-func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
+func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
 	editInstances := []ListEdit{}
 	for i, instance := range updates {
 		editInstances = append(editInstances, ListEdit{
-			UpdateOldDigest: index.Manifests[i].Digest,
+			UpdateOldDigest: list.Manifests[i].Digest,
 			UpdateDigest:    instance.Digest,
 			UpdateSize:      instance.Size,
 			UpdateMediaType: instance.MediaType,
 			ListOperation:   ListOpUpdate})
 	}
-	return index.editInstances(editInstances)
+	return list.editInstances(editInstances)
 }

-func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
+func (list *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
 	addedEntries := []Schema2ManifestDescriptor{}
 	for i, editInstance := range editInstances {
 		switch editInstance.ListOperation {
@@ -98,21 +98,21 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
 			if err := editInstance.UpdateDigest.Validate(); err != nil {
 				return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
 			}
-			targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool {
+			targetIndex := slices.IndexFunc(list.Manifests, func(m Schema2ManifestDescriptor) bool {
 				return m.Digest == editInstance.UpdateOldDigest
 			})
 			if targetIndex == -1 {
 				return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
 			}
-			index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
+			list.Manifests[targetIndex].Digest = editInstance.UpdateDigest
 			if editInstance.UpdateSize < 0 {
 				return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
 			}
-			index.Manifests[targetIndex].Size = editInstance.UpdateSize
+			list.Manifests[targetIndex].Size = editInstance.UpdateSize
 			if editInstance.UpdateMediaType == "" {
-				return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
+				return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), list.Manifests[i].MediaType)
 			}
-			index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
+			list.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
 		case ListOpAdd:
 			if editInstance.AddPlatform == nil {
 				// Should we create a struct with empty fields instead?
@@ -135,13 +135,13 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
 	if len(addedEntries) != 0 {
 		// slices.Clone() here to ensure a private backing array;
 		// an external caller could have manually created Schema2ListPublic with a slice with extra capacity.
-		index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
+		list.Manifests = append(slices.Clone(list.Manifests), addedEntries...)
 	}
 	return nil
 }

-func (index *Schema2List) EditInstances(editInstances []ListEdit) error {
-	return index.editInstances(editInstances)
+func (list *Schema2List) EditInstances(editInstances []ListEdit) error {
+	return list.editInstances(editInstances)
 }

 func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
@@ -280,12 +280,12 @@ func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
 	return &Schema2List{*public}
 }

-func (index *Schema2List) CloneInternal() List {
-	return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic))
+func (list *Schema2List) CloneInternal() List {
+	return schema2ListFromPublic(Schema2ListPublicClone(&list.Schema2ListPublic))
 }

-func (index *Schema2List) Clone() ListPublic {
-	return index.CloneInternal()
+func (list *Schema2List) Clone() ListPublic {
+	return list.CloneInternal()
 }

 // Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
@@ -10,6 +10,7 @@ import (
 	compression "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 )

 // ImageSourceInternalOnly is the part of private.ImageSource that is not
@@ -41,6 +42,12 @@ type ImageDestinationInternalOnly interface {
 	// FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures
 	// on unsupported formats.

+	// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+	// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+	// The destination can use it in its TryReusingBlob/PutBlob implementations
+	// (otherwise it only obtains the final config after all layers are written).
+	NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error
+
 	// PutBlobWithOptions writes contents of stream and returns data representing the result.
 	// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 	// inputInfo.Size is the expected length of stream, if known.
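Note: destinations that do not care about the original config embed stubs.IgnoresOriginalOCIConfig, as several hunks in this commit show; a destination that does care can record the value instead. A hedged sketch of such an implementation (myDest is hypothetical; only the method signature comes from the interface above):

package mydest

import (
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// myDest is a hypothetical private.ImageDestination implementation; the rest
// of the interface is omitted here.
type myDest struct {
	origConfig *imgspecv1.Image
}

// NoteOriginalOCIConfig records the source's config so TryReusingBlob/PutBlob
// can consult it before the final config is written.
func (d *myDest) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
	if configErr != nil {
		// The source may be an OCI artifact rather than a container image;
		// that is not an error for a destination that can store artifacts.
		return nil
	}
	d.origConfig = ociConfig
	return nil
}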
vendor/github.com/containers/image/v5/internal/reflink/reflink_linux.go (new file, 22 lines, generated, vendored)

@@ -0,0 +1,22 @@
+//go:build linux
+
+package reflink
+
+import (
+	"io"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// LinkOrCopy attempts to reflink the source to the destination fd.
+// If reflinking fails or is unsupported, it falls back to io.Copy().
+func LinkOrCopy(src, dst *os.File) error {
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, dst.Fd(), unix.FICLONE, src.Fd())
+	if errno == 0 {
+		return nil
+	}
+
+	_, err := io.Copy(dst, src)
+	return err
+}
vendor/github.com/containers/image/v5/internal/reflink/reflink_unsupported.go (new file, 15 lines, generated, vendored)

@@ -0,0 +1,15 @@
+//go:build !linux
+
+package reflink
+
+import (
+	"io"
+	"os"
+)
+
+// LinkOrCopy attempts to reflink the source to the destination fd.
+// If reflinking fails or is unsupported, it falls back to io.Copy().
+func LinkOrCopy(src, dst *os.File) error {
+	_, err := io.Copy(dst, src)
+	return err
+}
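Note: the reflink helper above is internal to containers/image, so the following is a standalone, Linux-only sketch of the same pattern: try a FICLONE ioctl and fall back to a plain byte copy when the filesystem cannot reflink.

//go:build linux

package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/sys/unix"
)

// linkOrCopy mirrors the vendored helper: reflink if possible, copy otherwise.
func linkOrCopy(src, dst *os.File) error {
	if _, _, errno := unix.Syscall(unix.SYS_IOCTL, dst.Fd(), unix.FICLONE, src.Fd()); errno == 0 {
		return nil
	}
	_, err := io.Copy(dst, src)
	return err
}

func main() {
	src, err := os.Open("/etc/hostname")
	if err != nil {
		panic(err)
	}
	defer src.Close()

	dst, err := os.CreateTemp("", "reflink-demo-")
	if err != nil {
		panic(err)
	}
	defer dst.Close()

	if err := linkOrCopy(src, dst); err != nil {
		panic(err)
	}
	fmt.Println("copied to", dst.Name())
}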
@@ -14,6 +14,7 @@ import (
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/idtools"
 	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/sirupsen/logrus"
 )

@@ -103,6 +104,14 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
 	return d.unpackedDest.SupportsPutBlobPartial()
 }

+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (d *ociArchiveImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	return d.unpackedDest.NoteOriginalOCIConfig(ociConfig, configErr)
+}
+
 // PutBlobWithOptions writes contents of stream and returns data representing the result.
 // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 // inputInfo.Size is the expected length of stream, if known.
@@ -6,6 +6,7 @@ import (
 	"path/filepath"
 	"regexp"
 	"runtime"
+	"strconv"
 	"strings"
 )

@@ -98,7 +99,7 @@ func ValidateScope(scope string) error {
 }

 func validateScopeWindows(scope string) error {
-	matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope))
+	matched, _ := regexp.MatchString(`^[a-zA-Z]:\\`, scope)
 	if !matched {
 		return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
 	}
@@ -119,3 +120,31 @@ func validateScopeNonWindows(scope string) error {

 	return nil
 }
+
+// parseOCIReferenceName parses the image from the oci reference.
+func parseOCIReferenceName(image string) (img string, index int, err error) {
+	index = -1
+	if strings.HasPrefix(image, "@") {
+		idx, err := strconv.Atoi(image[1:])
+		if err != nil {
+			return "", index, fmt.Errorf("Invalid source index @%s: not an integer: %w", image[1:], err)
+		}
+		if idx < 0 {
+			return "", index, fmt.Errorf("Invalid source index @%d: must not be negative", idx)
+		}
+		index = idx
+	} else {
+		img = image
+	}
+	return img, index, nil
+}
+
+// ParseReferenceIntoElements splits the oci reference into location, image name and source index if exists
+func ParseReferenceIntoElements(reference string) (string, string, int, error) {
+	dir, image := SplitPathAndImage(reference)
+	image, index, err := parseOCIReferenceName(image)
+	if err != nil {
+		return "", "", -1, err
+	}
+	return dir, image, index, nil
+}
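Note: ParseReferenceIntoElements lives in an internal package, so the sketch below only illustrates the accepted name forms with a hypothetical standalone copy of the parsing: a plain name keeps index -1, while "@N" selects the N-th entry of the layout's index.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseName is a hypothetical standalone copy of parseOCIReferenceName above.
func parseName(image string) (string, int, error) {
	if !strings.HasPrefix(image, "@") {
		return image, -1, nil
	}
	idx, err := strconv.Atoi(image[1:])
	if err != nil || idx < 0 {
		return "", -1, fmt.Errorf("invalid source index %q", image)
	}
	return "", idx, nil
}

func main() {
	for _, in := range []string{"busybox", "@2", "@-1", "@x"} {
		name, idx, err := parseName(in)
		fmt.Printf("%-8s -> name=%q index=%d err=%v\n", in, name, idx, err)
	}
}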
@@ -17,6 +17,7 @@ import (
 	"github.com/containers/image/v5/internal/manifest"
 	"github.com/containers/image/v5/internal/private"
 	"github.com/containers/image/v5/internal/putblobdigest"
+	"github.com/containers/image/v5/internal/reflink"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage/pkg/fileutils"
 	digest "github.com/opencontainers/go-digest"
@@ -27,6 +28,7 @@ import (
 type ociImageDestination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 	stubs.NoSignaturesInitialize

@@ -37,6 +39,9 @@ type ociImageDestination struct {

 // newImageDestination returns an ImageDestination for writing to an existing directory.
 func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) {
+	if ref.sourceIndex != -1 {
+		return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
+	}
 	var index *imgspecv1.Index
 	if indexExists(ref) {
 		var err error
@@ -137,9 +142,21 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 	if inputInfo.Size != -1 && size != inputInfo.Size {
 		return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
 	}
-	if err := blobFile.Sync(); err != nil {
+	if err := d.blobFileSyncAndRename(blobFile, blobDigest, &explicitClosed); err != nil {
 		return private.UploadedBlob{}, err
 	}
+	succeeded = true
+	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
+}
+
+// blobFileSyncAndRename syncs the specified blobFile on the filesystem and renames it to the
+// specific blob path determined by the blobDigest. The closed pointer indicates to the caller
+// whether blobFile has been closed or not.
+func (d *ociImageDestination) blobFileSyncAndRename(blobFile *os.File, blobDigest digest.Digest, closed *bool) error {
+	if err := blobFile.Sync(); err != nil {
+		return err
+	}

 	// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
 	// On Windows, the “permissions of newly created files” argument to syscall.Open is
@@ -147,26 +164,27 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 	// always fails on Windows.
 	if runtime.GOOS != "windows" {
 		if err := blobFile.Chmod(0644); err != nil {
-			return private.UploadedBlob{}, err
+			return err
 		}
 	}

 	blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
 	if err != nil {
-		return private.UploadedBlob{}, err
+		return err
 	}
 	if err := ensureParentDirectoryExists(blobPath); err != nil {
-		return private.UploadedBlob{}, err
+		return err
 	}

-	// need to explicitly close the file, since a rename won't otherwise not work on Windows
+	// need to explicitly close the file, since a rename won't otherwise work on Windows
 	blobFile.Close()
-	explicitClosed = true
+	*closed = true

 	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
-		return private.UploadedBlob{}, err
+		return err
 	}
-	succeeded = true
-	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
+	return nil
 }

 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
@@ -299,6 +317,67 @@ func (d *ociImageDestination) CommitWithOptions(ctx context.Context, options pri
 	return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
 }

+// PutBlobFromLocalFileOption is unused but may receive functionality in the future.
+type PutBlobFromLocalFileOption struct{}
+
+// PutBlobFromLocalFile arranges the data from path to be used as blob with digest.
+// It computes, and returns, the digest and size of the used file.
+//
+// This function can be used instead of dest.PutBlob() where the ImageDestination requires PutBlob() to be called.
+func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (digest.Digest, int64, error) {
+	d, ok := dest.(*ociImageDestination)
+	if !ok {
+		return "", -1, errors.New("internal error: PutBlobFromLocalFile called with a non-oci: destination")
+	}
+
+	succeeded := false
+	blobFileClosed := false
+	blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
+	if err != nil {
+		return "", -1, err
+	}
+	defer func() {
+		if !blobFileClosed {
+			blobFile.Close()
+		}
+		if !succeeded {
+			os.Remove(blobFile.Name())
+		}
+	}()
+
+	srcFile, err := os.Open(file)
+	if err != nil {
+		return "", -1, err
+	}
+	defer srcFile.Close()
+
+	err = reflink.LinkOrCopy(srcFile, blobFile)
+	if err != nil {
+		return "", -1, err
+	}
+
+	_, err = blobFile.Seek(0, io.SeekStart)
+	if err != nil {
+		return "", -1, err
+	}
+	blobDigest, err := digest.FromReader(blobFile)
+	if err != nil {
+		return "", -1, err
+	}
+
+	fileInfo, err := blobFile.Stat()
+	if err != nil {
+		return "", -1, err
+	}
+
+	if err := d.blobFileSyncAndRename(blobFile, blobDigest, &blobFileClosed); err != nil {
+		return "", -1, err
+	}
+
+	succeeded = true
+	return blobDigest, fileInfo.Size(), nil
+}
+
 func ensureDirectoryExists(path string) error {
 	if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
 		if err := os.MkdirAll(path, 0755); err != nil {
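A minimal usage sketch of the new PutBlobFromLocalFile helper. The layout directory and blob path are placeholders, error handling is mostly elided, and the snippet assumes the directory already contains an OCI layout that the destination can write into.

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	ctx := context.Background()
	// Assumed paths; any existing layout directory and local file would do.
	ref, _ := layout.NewReference("/tmp/oci-layout", "example:latest")
	dest, _ := ref.NewImageDestination(ctx, nil)
	defer dest.Close()

	// Reflinks or copies the file into the layout's blob store and returns
	// its digest and size, instead of streaming it through PutBlob.
	dgst, size, err := layout.PutBlobFromLocalFile(ctx, dest, "/tmp/layer.tar.gz")
	fmt.Println(dgst, size, err)
}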
@@ -61,22 +61,31 @@ type ociReference struct {
 	// (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
 	dir         string // As specified by the user. May be relative, contain symlinks, etc.
 	resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
-	// If image=="", it means the "only image" in the index.json is used in the case it is a source
-	// for destinations, the image name annotation "image.ref.name" is not added to the index.json
+	// If image=="" && sourceIndex==-1, it means the "only image" in the index.json is used in the case it is a source
+	// for destinations, the image name annotation "image.ref.name" is not added to the index.json.
+	//
+	// Must not be set if sourceIndex is set (the value is not -1).
 	image string
+	// If not -1, a zero-based index of an image in the manifest index. Valid only for sources.
+	// Must not be set if image is set.
+	sourceIndex int
 }

 // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
 func ParseReference(reference string) (types.ImageReference, error) {
-	dir, image := internal.SplitPathAndImage(reference)
-	return NewReference(dir, image)
+	dir, image, index, err := internal.ParseReferenceIntoElements(reference)
+	if err != nil {
+		return nil, err
+	}
+	return newReference(dir, image, index)
 }

-// NewReference returns an OCI reference for a directory and a image.
+// newReference returns an OCI reference for a directory, and an image name annotation or sourceIndex.
 //
+// If sourceIndex==-1, the index will not be valid to point out the source image, only image will be used.
 // We do not expose an API supplying the resolvedDir; we could, but recomputing it
 // is generally cheap enough that we prefer being confident about the properties of resolvedDir.
-func NewReference(dir, image string) (types.ImageReference, error) {
+func newReference(dir, image string, sourceIndex int) (types.ImageReference, error) {
 	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
 	if err != nil {
 		return nil, err
@@ -90,7 +99,26 @@ func NewReference(dir, image string) (types.ImageReference, error) {
 		return nil, err
 	}

-	return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
+	if sourceIndex != -1 && sourceIndex < 0 {
+		return nil, fmt.Errorf("Invalid oci: layout reference: index @%d must not be negative", sourceIndex)
+	}
+	if sourceIndex != -1 && image != "" {
+		return nil, fmt.Errorf("Invalid oci: layout reference: cannot use both an image %s and a source index @%d", image, sourceIndex)
+	}
+	return ociReference{dir: dir, resolvedDir: resolved, image: image, sourceIndex: sourceIndex}, nil
+}
+
+// NewIndexReference returns an OCI reference for a path and a zero-based source manifest index.
+func NewIndexReference(dir string, sourceIndex int) (types.ImageReference, error) {
+	return newReference(dir, "", sourceIndex)
+}
+
+// NewReference returns an OCI reference for a directory and a image.
+//
+// We do not expose an API supplying the resolvedDir; we could, but recomputing it
+// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
+func NewReference(dir, image string) (types.ImageReference, error) {
+	return newReference(dir, image, -1)
 }

 func (ref ociReference) Transport() types.ImageTransport {
@@ -103,8 +131,11 @@ func (ref ociReference) Transport() types.ImageTransport {
 // e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
 // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
 func (ref ociReference) StringWithinTransport() string {
+	if ref.sourceIndex == -1 {
 		return fmt.Sprintf("%s:%s", ref.dir, ref.image)
 	}
+	return fmt.Sprintf("%s:@%d", ref.dir, ref.sourceIndex)
+}

 // DockerReference returns a Docker reference associated with this reference
 // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
@@ -187,14 +218,18 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro
 		return imgspecv1.Descriptor{}, -1, err
 	}

-	if ref.image == "" {
-		// return manifest if only one image is in the oci directory
-		if len(index.Manifests) != 1 {
-			// ask user to choose image when more than one image in the oci directory
-			return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
+	switch {
+	case ref.image != "" && ref.sourceIndex != -1: // Coverage: newReference refuses to create such references.
+		return imgspecv1.Descriptor{}, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d",
+			ref.image, ref.sourceIndex)
+	case ref.sourceIndex != -1:
+		if ref.sourceIndex >= len(index.Manifests) {
+			return imgspecv1.Descriptor{}, -1, fmt.Errorf("index %d is too large, only %d entries available", ref.sourceIndex, len(index.Manifests))
 		}
-		return index.Manifests[0], 0, nil
-	} else {
+		return index.Manifests[ref.sourceIndex], ref.sourceIndex, nil
+	case ref.image != "":
 		// if image specified, look through all manifests for a match
 		var unsupportedMIMETypes []string
 		for i, md := range index.Manifests {
@@ -208,8 +243,16 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro
 		if len(unsupportedMIMETypes) != 0 {
 			return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
 		}
-	}
 		return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}
+
+	default:
+		// return manifest if only one image is in the oci directory
+		if len(index.Manifests) != 1 {
+			// ask user to choose image when more than one image in the oci directory
+			return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
+		}
+		return index.Manifests[0], 0, nil
+	}
 }

 // LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
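A short sketch of how the new source-index form is reached from the public API. The path is a placeholder and must point at an existing layout directory for ParseReference to resolve it; printed values are illustrative.

package main

import (
	"fmt"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	// "dir:@N" selects the N-th (zero-based) manifest in index.json...
	ref, err := layout.ParseReference("/tmp/oci-layout:@1")
	if err == nil {
		fmt.Println(ref.StringWithinTransport()) // e.g. /tmp/oci-layout:@1
	}

	// ...and is equivalent to constructing the reference directly.
	ref2, err := layout.NewIndexReference("/tmp/oci-layout", 1)
	if err == nil {
		fmt.Println(ref2.StringWithinTransport())
	}
}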
@@ -0,0 +1,52 @@
+package layout
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/containers/image/v5/types"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// This file is named reader.go for consistency with other transports’
+// handling of “image containers”, but we don’t actually need a stateful reader object.
+
+// ListResult wraps the image reference and the manifest for loading
+type ListResult struct {
+	Reference          types.ImageReference
+	ManifestDescriptor imgspecv1.Descriptor
+}
+
+// List returns a slice of manifests included in the archive
+func List(dir string) ([]ListResult, error) {
+	var res []ListResult
+
+	indexJSON, err := os.ReadFile(filepath.Join(dir, imgspecv1.ImageIndexFile))
+	if err != nil {
+		return nil, err
+	}
+	var index imgspecv1.Index
+	if err := json.Unmarshal(indexJSON, &index); err != nil {
+		return nil, err
+	}
+
+	for manifestIndex, md := range index.Manifests {
+		refName := md.Annotations[imgspecv1.AnnotationRefName]
+		index := -1
+		if refName == "" {
+			index = manifestIndex
+		}
+		ref, err := newReference(dir, refName, index)
+		if err != nil {
+			return nil, fmt.Errorf("error creating image reference: %w", err)
+		}
+		reference := ListResult{
+			Reference:          ref,
+			ManifestDescriptor: md,
+		}
+		res = append(res, reference)
+	}
+	return res, nil
+}
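A hedged usage sketch for the new List helper: enumerate every manifest in an existing layout directory (placeholder path) and print the ref-name annotation, if any, next to its digest.

package main

import (
	"fmt"

	"github.com/containers/image/v5/oci/layout"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	results, err := layout.List("/tmp/oci-layout")
	if err != nil {
		panic(err)
	}
	for _, r := range results {
		name := r.ManifestDescriptor.Annotations[imgspecv1.AnnotationRefName]
		// r.Reference is ready to be used as a copy source, by name or by index.
		fmt.Println(r.ManifestDescriptor.Digest, name, r.Reference.StringWithinTransport())
	}
}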
@@ -22,6 +22,7 @@ import (
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 )

 type openshiftImageDestination struct {
@@ -111,6 +112,14 @@ func (d *openshiftImageDestination) SupportsPutBlobPartial() bool {
 	return d.docker.SupportsPutBlobPartial()
 }

+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (d *openshiftImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	return d.docker.NoteOriginalOCIConfig(ociConfig, configErr)
+}
+
 // PutBlobWithOptions writes contents of stream and returns data representing the result.
 // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 // inputInfo.Size is the expected length of stream, if known.
@@ -19,6 +19,7 @@ import (
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/ioutils"
 	digest "github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/sirupsen/logrus"
 )

@@ -138,6 +139,14 @@ func (d *blobCacheDestination) HasThreadSafePutBlob() bool {
 	return d.destination.HasThreadSafePutBlob()
 }

+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (d *blobCacheDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	return d.destination.NoteOriginalOCIConfig(ociConfig, configErr)
+}
+
 // PutBlobWithOptions writes contents of stream and returns data representing the result.
 // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 // inputInfo.Size is the expected length of stream, if known.
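Wrapping destinations simply forward the note to whatever they wrap, as the openshift and blobcache hunks above do. A minimal sketch of that pattern, with a locally declared interface so the snippet stands alone; the wrapper type and interface name are illustrative, not c/image API.

package example

import imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"

// ociConfigNoter is the minimal shape assumed here.
type ociConfigNoter interface {
	NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error
}

// forwardingDestination stands in for a wrapper around another destination.
type forwardingDestination struct {
	wrapped ociConfigNoter
}

// NoteOriginalOCIConfig passes the (possibly failed) converted config straight through,
// so the innermost destination can use values such as RootFS.DiffIDs before layers are written.
func (d *forwardingDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
	return d.wrapped.NoteOriginalOCIConfig(ociConfig, configErr)
}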
@@ -20,7 +20,7 @@ func (f *fulcioTrustRoot) validate() error {
 	return errors.New("fulcio disabled at compile-time")
 }

-func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
+func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
 	untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
 	untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
 	return nil, errors.New("fulcio disabled at compile-time")
@@ -13,3 +13,12 @@ func (err InvalidSignatureError) Error() string {
 func NewInvalidSignatureError(msg string) InvalidSignatureError {
 	return InvalidSignatureError{msg: msg}
 }
+
+// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError.
+// All other errors are returned as is.
+func JSONFormatToInvalidSignatureError(err error) error {
+	if formatErr, ok := err.(JSONFormatError); ok {
+		err = NewInvalidSignatureError(formatErr.Error())
+	}
+	return err
+}
|
@ -40,15 +40,6 @@ type UntrustedRekorPayload struct {
|
||||||
// A compile-time check that UntrustedRekorSET implements json.Unmarshaler
|
// A compile-time check that UntrustedRekorSET implements json.Unmarshaler
|
||||||
var _ json.Unmarshaler = (*UntrustedRekorSET)(nil)
|
var _ json.Unmarshaler = (*UntrustedRekorSET)(nil)
|
||||||
|
|
||||||
// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError.
|
|
||||||
// All other errors are returned as is.
|
|
||||||
func JSONFormatToInvalidSignatureError(err error) error {
|
|
||||||
if formatErr, ok := err.(JSONFormatError); ok {
|
|
||||||
err = NewInvalidSignatureError(formatErr.Error())
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON implements the json.Unmarshaler interface
|
// UnmarshalJSON implements the json.Unmarshaler interface
|
||||||
func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
|
func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
|
||||||
return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
|
return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
|
||||||
|
|
|
@@ -10,6 +10,6 @@ import (

 // VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
 // Returns bundle upload time on success.
-func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
+func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
 	return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time")
 }
@@ -17,11 +17,13 @@ import (
 	"sync/atomic"

 	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/image"
 	"github.com/containers/image/v5/internal/imagedestination/impl"
 	"github.com/containers/image/v5/internal/imagedestination/stubs"
+	srcImpl "github.com/containers/image/v5/internal/imagesource/impl"
+	srcStubs "github.com/containers/image/v5/internal/imagesource/stubs"
 	"github.com/containers/image/v5/internal/private"
 	"github.com/containers/image/v5/internal/putblobdigest"
-	"github.com/containers/image/v5/internal/set"
 	"github.com/containers/image/v5/internal/signature"
 	"github.com/containers/image/v5/internal/tmpdir"
 	"github.com/containers/image/v5/manifest"
@@ -57,8 +59,9 @@ type storageImageDestination struct {
 	imageRef              storageReference
 	directory             string        // Temporary directory where we store blobs until Commit() time
 	nextTempFileID        atomic.Int32  // A counter that we use for computing filenames to assign to blobs
-	manifest              []byte        // Manifest contents, temporary
-	manifestDigest        digest.Digest // Valid if len(manifest) != 0
+	manifest              []byte        // (Per-instance) manifest contents, or nil if not yet known.
+	manifestMIMEType      string        // Valid if manifest != nil
+	manifestDigest        digest.Digest // Valid if manifest != nil
 	untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs (not even validated to be valid digest.Digest!); or nil if not read yet
 	signatures            []byte        // Signature contents, temporary
 	signatureses          map[digest.Digest][]byte // Instance signature contents, temporary
@@ -121,6 +124,9 @@ type storageImageDestinationLockProtected struct {
 	filenames map[digest.Digest]string
 	// Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set.
 	fileSizes map[digest.Digest]int64
+
+	// Config
+	configDigest digest.Digest // "" if N/A or not known yet.
 }

 // addedLayerInfo records data about a layer to use in this image.
@@ -201,6 +207,18 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
 	return filepath.Join(s.directory, fmt.Sprintf("%d", s.nextTempFileID.Add(1)))
 }

+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (s *storageImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	if configErr != nil {
+		return fmt.Errorf("writing to c/storage without a valid image config: %w", configErr)
+	}
+	s.setUntrustedDiffIDValuesFromOCIConfig(ociConfig)
+	return nil
+}
+
 // PutBlobWithOptions writes contents of stream and returns data representing the result.
 // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 // inputInfo.Size is the expected length of stream, if known.
@@ -214,7 +232,17 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream
 		return info, err
 	}

-	if options.IsConfig || options.LayerIndex == nil {
+	if options.IsConfig {
+		s.lock.Lock()
+		defer s.lock.Unlock()
+		if s.lockProtected.configDigest != "" {
+			return private.UploadedBlob{}, fmt.Errorf("after config %q, refusing to record another config %q",
+				s.lockProtected.configDigest.String(), info.Digest.String())
+		}
+		s.lockProtected.configDigest = info.Digest
+		return info, nil
+	}
+	if options.LayerIndex == nil {
 		return info, nil
 	}

@@ -351,6 +379,9 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
 	blobDigest := srcInfo.Digest

 	s.lock.Lock()
+	if err := func() error { // A scope for defer
+		defer s.lock.Unlock()
+
 		if out.UncompressedDigest != "" {
 			s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest
 			if out.TOCDigest != "" {
@@ -364,7 +395,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
 			// The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
 			// responsible for ensuring blobDigest has been validated.
 			if out.CompressedDigest != blobDigest {
-				return private.UploadedBlob{}, fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
+				return fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
 					out.CompressedDigest, blobDigest)
 			}
 			// So, record also information about blobDigest, that might benefit reuse.
@@ -379,7 +410,10 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
 			s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
 		}
 		s.lockProtected.diffOutputs[options.LayerIndex] = out
-	s.lock.Unlock()
+		return nil
+	}(); err != nil {
+		return private.UploadedBlob{}, err
+	}

 	succeeded = true
 	return private.UploadedBlob{
@@ -975,9 +1009,11 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
 		return nil, fmt.Errorf("internal inconsistency: layer (%d, %q) not found", index, layerDigest)
 	}
 	var trustedOriginalDigest digest.Digest // For storage.LayerOptions
+	var trustedOriginalSize *int64
 	if gotFilename {
 		// The code setting .filenames[trusted.blobDigest] is responsible for ensuring that the file contents match trusted.blobDigest.
 		trustedOriginalDigest = trusted.blobDigest
+		trustedOriginalSize = nil // It’s s.lockProtected.fileSizes[trusted.blobDigest], but we don’t hold the lock now, and the consumer can compute it at trivial cost.
 	} else {
 		// Try to find the layer with contents matching the data we use.
 		var layer *storage.Layer // = nil
@@ -1032,22 +1068,36 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
 		if trusted.diffID == "" && layer.UncompressedDigest != "" {
 			trusted.diffID = layer.UncompressedDigest // This data might have been unavailable in tryReusingBlobAsPending, and is only known now.
 		}
-		// The stream we have is uncompressed, and it matches trusted.diffID (if known).
+		// Set the layer’s CompressedDigest/CompressedSize to relevant values if known, to allow more layer reuse.
+		// But we don’t want to just use the size from the manifest if we never saw the compressed blob,
+		// so that we don’t propagate mistakes / attacks.
 		//
-		// FIXME? trustedOriginalDigest could be set to trusted.blobDigest if known, to allow more layer reuse.
-		// But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created
-		// layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream).
+		// s.lockProtected.fileSizes[trusted.blobDigest] is not set, otherwise we would have found gotFilename.
+		// So, check if the layer we found contains that metadata. (If that layer continues to exist, there’s no benefit
+		// to us propagating the metadata; but that layer could be removed, and in that case propagating the metadata to
+		// this new layer copy can help.)
+		if trusted.blobDigest != "" && layer.CompressedDigest == trusted.blobDigest && layer.CompressedSize > 0 {
+			trustedOriginalDigest = trusted.blobDigest
+			sizeCopy := layer.CompressedSize
+			trustedOriginalSize = &sizeCopy
+		} else {
+			// The stream we have is uncompressed, and it matches trusted.diffID (if known).
 			//
 			// We can legitimately set storage.LayerOptions.OriginalDigest to "",
 			// but that would just result in PutLayer computing the digest of the input stream == trusted.diffID.
 			// So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
 			trustedOriginalDigest = trusted.diffID
+			trustedOriginalSize = nil // Probably layer.UncompressedSize, but the consumer can compute it at trivial cost.
+		}

 		// Allow using the already-collected layer contents without extracting the layer again.
 		//
 		// This only matches against the uncompressed digest.
-		// We don’t have the original compressed data here to trivially set filenames[layerDigest].
-		// In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API.
+		// If we have trustedOriginalDigest == trusted.blobDigest, we could arrange to reuse the
+		// same uncompressed stream for future calls of createNewLayer; but for the non-layer blobs (primarily the config),
+		// we assume that the file at filenames[someDigest] matches someDigest _exactly_; we would need to differentiate
+		// between “original files” and “possibly uncompressed files”.
 		// Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity.
 		if trusted.diffID != "" {
 			s.lock.Lock()
@@ -1067,6 +1117,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
 	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
 	layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{
 		OriginalDigest: trustedOriginalDigest,
+		OriginalSize:   trustedOriginalSize, // nil in many cases
 		// This might be "" if trusted.layerIdentifiedByTOC; in that case PutLayer will compute the value from the stream.
 		UncompressedDigest: trusted.diffID,
 	}, file)
@@ -1076,52 +1127,110 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
 	return layer, nil
 }

+// uncommittedImageSource allows accessing an image’s metadata (not layers) before it has been committed,
+// to allow using image.FromUnparsedImage.
+type uncommittedImageSource struct {
+	srcImpl.Compat
+	srcImpl.PropertyMethodsInitialize
+	srcImpl.NoSignatures
+	srcImpl.DoesNotAffectLayerInfosForCopy
+	srcStubs.NoGetBlobAtInitialize
+
+	d *storageImageDestination
+}
+
+func newUncommittedImageSource(d *storageImageDestination) *uncommittedImageSource {
+	s := &uncommittedImageSource{
+		PropertyMethodsInitialize: srcImpl.PropertyMethods(srcImpl.Properties{
+			HasThreadSafeGetBlob: true,
+		}),
+		NoGetBlobAtInitialize: srcStubs.NoGetBlobAt(d.Reference()),
+
+		d: d,
+	}
+	s.Compat = srcImpl.AddCompat(s)
+	return s
+}
+
+func (u *uncommittedImageSource) Reference() types.ImageReference {
+	return u.d.Reference()
+}
+
+func (u *uncommittedImageSource) Close() error {
+	return nil
+}
+
+func (u *uncommittedImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
+	return u.d.manifest, u.d.manifestMIMEType, nil
+}
+
+func (u *uncommittedImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+	blob, err := u.d.getConfigBlob(info)
+	if err != nil {
+		return nil, -1, err
+	}
+	return io.NopCloser(bytes.NewReader(blob)), int64(len(blob)), nil
+}
+
 // untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config.
 // If the value is not yet available (but it can be available after s.manifets is set), it returns ("", nil).
 // WARNING: We don’t validate the DiffID value against the layer contents; it must not be used for any deduplication.
 func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) {
-	// At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, and
-	// nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil.
+	// At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob,
+	// nothing is writing to s.manifest yet, and s.untrustedDiffIDValues might have been set
+	// by NoteOriginalOCIConfig and are not being updated any more;
+	// or PutManifest has been called and s.manifest != nil.
 	// Either way this function does not need the protection of s.lock.

+	if s.untrustedDiffIDValues == nil {
+		// Typically, we expect untrustedDiffIDValues to be set by the generic copy code
+		// via NoteOriginalOCIConfig; this is a compatibility fallback for external callers
+		// of the public types.ImageDestination.
 		if s.manifest == nil {
 			return "", nil
 		}

-	if s.untrustedDiffIDValues == nil {
-		mt := manifest.GuessMIMEType(s.manifest)
-		if mt != imgspecv1.MediaTypeImageManifest {
-			// We could, in principle, build an ImageSource, support arbitrary image formats using image.FromUnparsedImage,
-			// and then use types.Image.OCIConfig so that we can parse the image.
-			//
-			// In practice, this should, right now, only matter for pulls of OCI images (this code path implies that a layer has annotation),
-			// while converting to a non-OCI formats, using a manual (skopeo copy) or something similar, not (podman pull).
-			// So it is not implemented yet.
-			return "", fmt.Errorf("determining DiffID for manifest type %q is not yet supported", mt)
-		}
-		man, err := manifest.FromBlob(s.manifest, mt)
+		ctx := context.Background() // This is all happening in memory, no need to worry about cancellation.
+		unparsed := image.UnparsedInstance(newUncommittedImageSource(s), nil)
+		sourced, err := image.FromUnparsedImage(ctx, nil, unparsed)
 		if err != nil {
-			return "", fmt.Errorf("parsing manifest: %w", err)
+			return "", fmt.Errorf("parsing image to be committed: %w", err)
 		}
-
-		cb, err := s.getConfigBlob(man.ConfigInfo())
+		configOCI, err := sourced.OCIConfig(ctx)
 		if err != nil {
-			return "", err
+			return "", fmt.Errorf("obtaining config of image to be committed: %w", err)
 		}

-		// retrieve the expected uncompressed digest from the config blob.
-		configOCI := &imgspecv1.Image{}
-		if err := json.Unmarshal(cb, configOCI); err != nil {
-			return "", err
-		}
-		s.untrustedDiffIDValues = slices.Clone(configOCI.RootFS.DiffIDs)
-		if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory…
-			s.untrustedDiffIDValues = []digest.Digest{}
-		}
+		s.setUntrustedDiffIDValuesFromOCIConfig(configOCI)
 	}
+
 	if layerIndex >= len(s.untrustedDiffIDValues) {
 		return "", fmt.Errorf("image config has only %d DiffID values, but a layer with index %d exists", len(s.untrustedDiffIDValues), layerIndex)
 	}
-	return s.untrustedDiffIDValues[layerIndex], nil
+	res := s.untrustedDiffIDValues[layerIndex]
+	if res == "" {
+		// In practice, this should, right now, only matter for pulls of OCI images
+		// (this code path implies that we did a partial pull because a layer has an annotation),
+		// So, DiffIDs should always be set.
+		//
+		// It is, though, reachable by pulling an OCI image while converting to schema1,
+		// using a manual (skopeo copy) or something similar, not (podman pull).
+		//
+		// Our schema1.OCIConfig code produces non-empty DiffID arrays of empty values.
+		// The current semantics of this function are that ("", nil) means "try again later",
+		// which is not what we want to happen; for now, turn that into an explicit error.
+		return "", fmt.Errorf("DiffID value for layer %d is unknown or explicitly empty", layerIndex)
+	}
+	return res, nil
+}
+
+// setUntrustedDiffIDValuesFromOCIConfig updates s.untrustedDiffIDvalues from config.
+// The caller must ensure s.lock does not need to be held.
+func (s *storageImageDestination) setUntrustedDiffIDValuesFromOCIConfig(config *imgspecv1.Image) {
+	s.untrustedDiffIDValues = slices.Clone(config.RootFS.DiffIDs)
+	if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory…
+		s.untrustedDiffIDValues = []digest.Digest{}
+	}
 }

 // CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
@@ -1131,7 +1240,7 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D
 func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
 	// This function is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.

-	if len(s.manifest) == 0 {
+	if s.manifest == nil {
 		return errors.New("Internal error: storageImageDestination.CommitWithOptions() called without PutManifest()")
 	}
 	toplevelManifest, _, err := options.UnparsedToplevel.Manifest(ctx)
@@ -1159,7 +1268,7 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
 		}
 	}
 	// Find the list of layer blobs.
-	man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
+	man, err := manifest.FromBlob(s.manifest, s.manifestMIMEType)
 	if err != nil {
 		return fmt.Errorf("parsing manifest: %w", err)
 	}
@@ -1193,29 +1302,21 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
 		imgOptions.CreationDate = *inspect.Created
 	}

-	// Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so
-	// we just need to screen out the ones that are actually layers to get the list of non-layers.
-	dataBlobs := set.New[digest.Digest]()
-	for blob := range s.lockProtected.filenames {
-		dataBlobs.Add(blob)
-	}
-	for _, layerBlob := range layerBlobs {
-		dataBlobs.Delete(layerBlob.Digest)
-	}
-	for _, blob := range dataBlobs.Values() {
-		v, err := os.ReadFile(s.lockProtected.filenames[blob])
+	// Set up to save the config as a data item. Since we only share layers, the config should be in a file.
+	if s.lockProtected.configDigest != "" {
+		v, err := os.ReadFile(s.lockProtected.filenames[s.lockProtected.configDigest])
 		if err != nil {
-			return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
+			return fmt.Errorf("copying config blob %q to image: %w", s.lockProtected.configDigest, err)
 		}
 		imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
-			Key:    blob.String(),
+			Key:    s.lockProtected.configDigest.String(),
 			Data:   v,
 			Digest: digest.Canonical.FromBytes(v),
 		})
 	}
 	// Set up to save the options.UnparsedToplevel's manifest if it differs from
 	// the per-platform one, which is saved below.
-	if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
+	if !bytes.Equal(toplevelManifest, s.manifest) {
 		manifestDigest, err := manifest.Digest(toplevelManifest)
 		if err != nil {
 			return fmt.Errorf("digesting top-level manifest: %w", err)
@@ -1370,6 +1471,10 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob
 		return err
 	}
 	s.manifest = bytes.Clone(manifestBlob)
+	if s.manifest == nil { // Make sure PutManifest can never succeed with s.manifest == nil
+		s.manifest = []byte{}
+	}
+	s.manifestMIMEType = manifest.GuessMIMEType(s.manifest)
 	s.manifestDigest = digest
 	return nil
 }
@@ -1392,7 +1497,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s
 	if instanceDigest == nil {
 		s.signatures = sigblob
 		s.metadata.SignatureSizes = sizes
-		if len(s.manifest) > 0 {
+		if s.manifest != nil {
 			manifestDigest := s.manifestDigest
 			instanceDigest = &manifestDigest
 		}
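The fallback path above ultimately reduces to reading RootFS.DiffIDs out of the image's OCI config. A minimal, hedged sketch of that step in isolation; the helper name is illustrative, not part of the package.

package example

import (
	"encoding/json"
	"slices"

	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// diffIDsFromConfig parses an OCI config blob and returns the per-layer
// uncompressed digests, the same values the destination records as untrusted DiffIDs.
func diffIDsFromConfig(configJSON []byte) ([]digest.Digest, error) {
	var cfg imgspecv1.Image
	if err := json.Unmarshal(configJSON, &cfg); err != nil {
		return nil, err
	}
	// Clone so callers can hold the slice independently of cfg.
	return slices.Clone(cfg.RootFS.DiffIDs), nil
}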
@@ -153,7 +153,9 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
 	}
 	if s.id == "" {
 		logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport())
-		return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage)
+		// %.0w makes the error visible to error.Unwrap() without including any text.
+		// ErrNoSuchImage ultimately is “identifier is not an image”, which is not helpful for identifying the root cause.
+		return nil, fmt.Errorf("reference %q does not resolve to an image ID%.0w", s.StringWithinTransport(), ErrNoSuchImage)
 	}
 	if loadedImage == nil {
 		img, err := s.transport.store.Image(s.id)
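A self-contained illustration of the %.0w trick used above: the sentinel remains matchable with errors.Is, but contributes no text of its own to the message. The error variable and message here are placeholders.

package main

import (
	"errors"
	"fmt"
)

var errNoSuchImage = errors.New("identifier is not an image")

func main() {
	// %.0w wraps errNoSuchImage but prints it with zero precision, i.e. as nothing.
	err := fmt.Errorf("reference %q does not resolve to an image ID%.0w", "example", errNoSuchImage)
	fmt.Println(err)                            // reference "example" does not resolve to an image ID
	fmt.Println(errors.Is(err, errNoSuchImage)) // true
}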
@@ -40,6 +40,7 @@ type storageImageSource struct {
 	systemContext          *types.SystemContext // SystemContext used in GetBlob() to create temporary files
 	metadata               storageImageMetadata
 	cachedManifest         []byte // A cached copy of the manifest, if already known, or nil
+	cachedManifestMIMEType string // Valid if cachedManifest != nil
 	getBlobMutex           sync.Mutex // Mutex to sync state for parallel GetBlob executions
 	getBlobMutexProtected  getBlobMutexProtected
 }
@@ -247,7 +248,7 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
 		}
 		return blob, manifest.GuessMIMEType(blob), err
 	}
-	if len(s.cachedManifest) == 0 {
+	if s.cachedManifest == nil {
 		// The manifest is stored as a big data item.
 		// Prefer the manifest corresponding to the user-specified digest, if available.
 		if s.imageRef.named != nil {
@@ -267,15 +268,16 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
 		}
 		// If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
 		// Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
-		if len(s.cachedManifest) == 0 {
+		if s.cachedManifest == nil {
 			cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
 			if err != nil {
 				return nil, "", err
 			}
 			s.cachedManifest = cachedBlob
 		}
+		s.cachedManifestMIMEType = manifest.GuessMIMEType(s.cachedManifest)
 	}
-	return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
+	return s.cachedManifest, s.cachedManifestMIMEType, err
 }

 // LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
@@ -6,12 +6,12 @@ const (
     // VersionMajor is for an API incompatible changes
     VersionMajor = 5
    // VersionMinor is for functionality in a backwards-compatible manner
-    VersionMinor = 33
+    VersionMinor = 34
     // VersionPatch is for backwards-compatible bug fixes
     VersionPatch = 0

     // VersionDev indicates development branch. Releases will be empty string.
-    VersionDev = ""
+    VersionDev = "-dev"
 )

 // Version is the specification version that the package types support.
@@ -1,27 +0,0 @@
-package challenge
-
-import (
-    "net/url"
-    "strings"
-)
-
-// FROM: https://golang.org/src/net/http/http.go
-// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
-// return true if the string includes a port.
-func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
-
-// FROM: http://golang.org/src/net/http/transport.go
-var portMap = map[string]string{
-    "http":  "80",
-    "https": "443",
-}
-
-// canonicalAddr returns url.Host but always with a ":port" suffix
-// FROM: http://golang.org/src/net/http/transport.go
-func canonicalAddr(url *url.URL) string {
-    addr := url.Host
-    if !hasPort(addr) {
-        return addr + ":" + portMap[url.Scheme]
-    }
-    return addr
-}
237 vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go generated vendored
@@ -1,237 +0,0 @@
-package challenge
-
-import (
-    "fmt"
-    "net/http"
-    "net/url"
-    "strings"
-    "sync"
-)
-
-// Challenge carries information from a WWW-Authenticate response header.
-// See RFC 2617.
-type Challenge struct {
-    // Scheme is the auth-scheme according to RFC 2617
-    Scheme string
-
-    // Parameters are the auth-params according to RFC 2617
-    Parameters map[string]string
-}
-
-// Manager manages the challenges for endpoints.
-// The challenges are pulled out of HTTP responses. Only
-// responses which expect challenges should be added to
-// the manager, since a non-unauthorized request will be
-// viewed as not requiring challenges.
-type Manager interface {
-    // GetChallenges returns the challenges for the given
-    // endpoint URL.
-    GetChallenges(endpoint url.URL) ([]Challenge, error)
-
-    // AddResponse adds the response to the challenge
-    // manager. The challenges will be parsed out of
-    // the WWW-Authenicate headers and added to the
-    // URL which was produced the response. If the
-    // response was authorized, any challenges for the
-    // endpoint will be cleared.
-    AddResponse(resp *http.Response) error
-}
-
-// NewSimpleManager returns an instance of
-// Manger which only maps endpoints to challenges
-// based on the responses which have been added the
-// manager. The simple manager will make no attempt to
-// perform requests on the endpoints or cache the responses
-// to a backend.
-func NewSimpleManager() Manager {
-    return &simpleManager{
-        Challenges: make(map[string][]Challenge),
-    }
-}
-
-type simpleManager struct {
-    sync.RWMutex
-    Challenges map[string][]Challenge
-}
-
-func normalizeURL(endpoint *url.URL) {
-    endpoint.Host = strings.ToLower(endpoint.Host)
-    endpoint.Host = canonicalAddr(endpoint)
-}
-
-func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
-    normalizeURL(&endpoint)
-
-    m.RLock()
-    defer m.RUnlock()
-    challenges := m.Challenges[endpoint.String()]
-    return challenges, nil
-}
-
-func (m *simpleManager) AddResponse(resp *http.Response) error {
-    challenges := ResponseChallenges(resp)
-    if resp.Request == nil {
-        return fmt.Errorf("missing request reference")
-    }
-    urlCopy := url.URL{
-        Path: resp.Request.URL.Path,
-        Host: resp.Request.URL.Host,
-        Scheme: resp.Request.URL.Scheme,
-    }
-    normalizeURL(&urlCopy)
-
-    m.Lock()
-    defer m.Unlock()
-    m.Challenges[urlCopy.String()] = challenges
-    return nil
-}
-
-// Octet types from RFC 2616.
-type octetType byte
-
-var octetTypes [256]octetType
-
-const (
-    isToken octetType = 1 << iota
-    isSpace
-)
-
-func init() {
-    // OCTET      = <any 8-bit sequence of data>
-    // CHAR       = <any US-ASCII character (octets 0 - 127)>
-    // CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
-    // CR         = <US-ASCII CR, carriage return (13)>
-    // LF         = <US-ASCII LF, linefeed (10)>
-    // SP         = <US-ASCII SP, space (32)>
-    // HT         = <US-ASCII HT, horizontal-tab (9)>
-    // <">        = <US-ASCII double-quote mark (34)>
-    // CRLF       = CR LF
-    // LWS        = [CRLF] 1*( SP | HT )
-    // TEXT       = <any OCTET except CTLs, but including LWS>
-    // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
-    //              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
-    // token      = 1*<any CHAR except CTLs or separators>
-    // qdtext     = <any TEXT except <">>
-
-    for c := 0; c < 256; c++ {
-        var t octetType
-        isCtl := c <= 31 || c == 127
-        isChar := 0 <= c && c <= 127
-        isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
-        if strings.ContainsRune(" \t\r\n", rune(c)) {
-            t |= isSpace
-        }
-        if isChar && !isCtl && !isSeparator {
-            t |= isToken
-        }
-        octetTypes[c] = t
-    }
-}
-
-// ResponseChallenges returns a list of authorization challenges
-// for the given http Response. Challenges are only checked if
-// the response status code was a 401.
-func ResponseChallenges(resp *http.Response) []Challenge {
-    if resp.StatusCode == http.StatusUnauthorized {
-        // Parse the WWW-Authenticate Header and store the challenges
-        // on this endpoint object.
-        return parseAuthHeader(resp.Header)
-    }
-
-    return nil
-}
-
-func parseAuthHeader(header http.Header) []Challenge {
-    challenges := []Challenge{}
-    for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
-        v, p := parseValueAndParams(h)
-        if v != "" {
-            challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
-        }
-    }
-    return challenges
-}
-
-func parseValueAndParams(header string) (value string, params map[string]string) {
-    params = make(map[string]string)
-    value, s := expectToken(header)
-    if value == "" {
-        return
-    }
-    value = strings.ToLower(value)
-    s = "," + skipSpace(s)
-    for strings.HasPrefix(s, ",") {
-        var pkey string
-        pkey, s = expectToken(skipSpace(s[1:]))
-        if pkey == "" {
-            return
-        }
-        if !strings.HasPrefix(s, "=") {
-            return
-        }
-        var pvalue string
-        pvalue, s = expectTokenOrQuoted(s[1:])
-        if pvalue == "" {
-            return
-        }
-        pkey = strings.ToLower(pkey)
-        params[pkey] = pvalue
-        s = skipSpace(s)
-    }
-    return
-}
-
-func skipSpace(s string) (rest string) {
-    i := 0
-    for ; i < len(s); i++ {
-        if octetTypes[s[i]]&isSpace == 0 {
-            break
-        }
-    }
-    return s[i:]
-}
-
-func expectToken(s string) (token, rest string) {
-    i := 0
-    for ; i < len(s); i++ {
-        if octetTypes[s[i]]&isToken == 0 {
-            break
-        }
-    }
-    return s[:i], s[i:]
-}
-
-func expectTokenOrQuoted(s string) (value string, rest string) {
-    if !strings.HasPrefix(s, "\"") {
-        return expectToken(s)
-    }
-    s = s[1:]
-    for i := 0; i < len(s); i++ {
-        switch s[i] {
-        case '"':
-            return s[:i], s[i+1:]
-        case '\\':
-            p := make([]byte, len(s)-1)
-            j := copy(p, s[:i])
-            escape := true
-            for i = i + 1; i < len(s); i++ {
-                b := s[i]
-                switch {
-                case escape:
-                    escape = false
-                    p[j] = b
-                    j++
-                case b == '\\':
-                    escape = true
-                case b == '"':
-                    return string(p[:j]), s[i+1:]
-                default:
-                    p[j] = b
-                    j++
-                }
-            }
-            return "", ""
-        }
-    }
-    return "", ""
-}
@@ -1 +1,3 @@
 testdata/gpghome/random_seed
+testdata/gpghome/.gpg-v21-migrated
+testdata/gpghome/private-keys-v1.d/
@@ -1,42 +0,0 @@
-package gpgme
-
-import (
-    "sync"
-)
-
-var callbacks struct {
-    sync.Mutex
-    m map[uintptr]interface{}
-    c uintptr
-}
-
-func callbackAdd(v interface{}) uintptr {
-    callbacks.Lock()
-    defer callbacks.Unlock()
-    if callbacks.m == nil {
-        callbacks.m = make(map[uintptr]interface{})
-    }
-    callbacks.c++
-    ret := callbacks.c
-    callbacks.m[ret] = v
-    return ret
-}
-
-func callbackLookup(c uintptr) interface{} {
-    callbacks.Lock()
-    defer callbacks.Unlock()
-    ret := callbacks.m[c]
-    if ret == nil {
-        panic("callback pointer not found")
-    }
-    return ret
-}
-
-func callbackDelete(c uintptr) {
-    callbacks.Lock()
-    defer callbacks.Unlock()
-    if callbacks.m[c] == nil {
-        panic("callback pointer not found")
-    }
-    delete(callbacks.m, c)
-}
@@ -10,6 +10,7 @@ import (
     "io"
     "os"
     "runtime"
+    "runtime/cgo"
     "unsafe"
 )

@@ -19,30 +20,32 @@ const (
     SeekEnd = C.SEEK_END
 )

+var dataCallbacks = C.struct_gpgme_data_cbs{
+    read:  C.gpgme_data_read_cb_t(C.gogpgme_readfunc),
+    write: C.gpgme_data_write_cb_t(C.gogpgme_writefunc),
+    seek:  C.gpgme_data_seek_cb_t(C.gogpgme_seekfunc),
+}
+
 //export gogpgme_readfunc
 func gogpgme_readfunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t {
-    d := callbackLookup(uintptr(handle)).(*Data)
-    if len(d.buf) < int(size) {
-        d.buf = make([]byte, size)
-    }
-    n, err := d.r.Read(d.buf[:size])
+    h := *(*cgo.Handle)(handle)
+    d := h.Value().(*Data)
+    n, err := d.r.Read(unsafe.Slice((*byte)(buffer), size))
     if err != nil && err != io.EOF {
+        d.err = err
         C.gpgme_err_set_errno(C.EIO)
         return -1
     }
-    C.memcpy(buffer, unsafe.Pointer(&d.buf[0]), C.size_t(n))
     return C.ssize_t(n)
 }

 //export gogpgme_writefunc
 func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t {
-    d := callbackLookup(uintptr(handle)).(*Data)
-    if len(d.buf) < int(size) {
-        d.buf = make([]byte, size)
-    }
-    C.memcpy(unsafe.Pointer(&d.buf[0]), buffer, C.size_t(size))
-    n, err := d.w.Write(d.buf[:size])
+    h := *(*cgo.Handle)(handle)
+    d := h.Value().(*Data)
+    n, err := d.w.Write(unsafe.Slice((*byte)(buffer), size))
     if err != nil && err != io.EOF {
+        d.err = err
         C.gpgme_err_set_errno(C.EIO)
         return -1
     }

@@ -51,9 +54,11 @@ func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t {

 //export gogpgme_seekfunc
 func gogpgme_seekfunc(handle unsafe.Pointer, offset C.gpgme_off_t, whence C.int) C.gpgme_off_t {
-    d := callbackLookup(uintptr(handle)).(*Data)
+    h := *(*cgo.Handle)(handle)
+    d := h.Value().(*Data)
     n, err := d.s.Seek(int64(offset), int(whence))
     if err != nil {
+        d.err = err
         C.gpgme_err_set_errno(C.EIO)
         return -1
     }

@@ -63,12 +68,11 @@ func gogpgme_seekfunc(handle unsafe.Pointer, offset C.gpgme_off_t, whence C.int)
 // The Data buffer used to communicate with GPGME
 type Data struct {
     dh C.gpgme_data_t // WARNING: Call runtime.KeepAlive(d) after ANY passing of d.dh to C
-    buf []byte
-    cbs C.struct_gpgme_data_cbs
     r   io.Reader
     w   io.Writer
     s   io.Seeker
-    cbc uintptr // WARNING: Call runtime.KeepAlive(d) after ANY use of d.cbc in C (typically via d.dh)
+    cbc cgo.Handle // WARNING: Call runtime.KeepAlive(d) after ANY use of d.cbc in C (typically via d.dh)
+    err error
 }

 func newData() *Data {
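The hunks above replace gpgme's hand-rolled `callbackAdd`/`callbackLookup` registry with `runtime/cgo.Handle`, and read/write directly into the C buffer via `unsafe.Slice` instead of an intermediate `d.buf` plus `memcpy`. A minimal, cgo-free sketch of the handle round trip (here `passToC` merely stands in for the C side receiving an opaque `void *`):

```go
package main

import (
    "fmt"
    "runtime/cgo"
    "unsafe"
)

type payload struct{ name string }

// passToC plays the role of a C callback that receives an opaque pointer:
// it recovers the handle and, through it, the original Go value.
func passToC(p unsafe.Pointer) {
    h := *(*cgo.Handle)(p)
    d := h.Value().(*payload)
    fmt.Println("callback saw:", d.name)
}

func main() {
    d := &payload{name: "example"}
    h := cgo.NewHandle(d) // pin the Go value behind an opaque handle
    defer h.Delete()      // release it once the C side is done with it

    // The diff passes &d.cbc the same way; the handle's storage just has to
    // stay alive for as long as C may dereference it (hence the KeepAlive calls).
    passToC(unsafe.Pointer(&h))
}
```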
@@ -86,6 +90,7 @@ func NewData() (*Data, error) {
 // NewDataFile returns a new file based data buffer
 func NewDataFile(f *os.File) (*Data, error) {
     d := newData()
+    d.r = f
     return d, handleError(C.gpgme_data_new_from_fd(&d.dh, C.int(f.Fd())))
 }

@@ -103,20 +108,22 @@ func NewDataBytes(b []byte) (*Data, error) {
 func NewDataReader(r io.Reader) (*Data, error) {
     d := newData()
     d.r = r
-    d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc)
-    cbc := callbackAdd(d)
-    d.cbc = cbc
-    return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc)))
+    if s, ok := r.(io.Seeker); ok {
+        d.s = s
+    }
+    d.cbc = cgo.NewHandle(d)
+    return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc)))
 }

 // NewDataWriter returns a new callback based data buffer
 func NewDataWriter(w io.Writer) (*Data, error) {
     d := newData()
     d.w = w
-    d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc)
-    cbc := callbackAdd(d)
-    d.cbc = cbc
-    return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc)))
+    if s, ok := w.(io.Seeker); ok {
+        d.s = s
+    }
+    d.cbc = cgo.NewHandle(d)
+    return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc)))
 }

 // NewDataReadWriter returns a new callback based data buffer
@@ -124,11 +131,11 @@ func NewDataReadWriter(rw io.ReadWriter) (*Data, error) {
     d := newData()
     d.r = rw
     d.w = rw
-    d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc)
-    d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc)
-    cbc := callbackAdd(d)
-    d.cbc = cbc
-    return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc)))
+    if s, ok := rw.(io.Seeker); ok {
+        d.s = s
+    }
+    d.cbc = cgo.NewHandle(d)
+    return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc)))
 }

 // NewDataReadWriteSeeker returns a new callback based data buffer
@@ -137,12 +144,8 @@ func NewDataReadWriteSeeker(rw io.ReadWriteSeeker) (*Data, error) {
     d.r = rw
     d.w = rw
     d.s = rw
-    d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc)
-    d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc)
-    d.cbs.seek = C.gpgme_data_seek_cb_t(C.gogpgme_seekfunc)
-    cbc := callbackAdd(d)
-    d.cbc = cbc
-    return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc)))
+    d.cbc = cgo.NewHandle(d)
+    return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc)))
 }

 // Close releases any resources associated with the data buffer
@@ -151,7 +154,7 @@ func (d *Data) Close() error {
         return nil
     }
     if d.cbc > 0 {
-        callbackDelete(d.cbc)
+        d.cbc.Delete()
     }
     _, err := C.gpgme_data_release(d.dh)
     runtime.KeepAlive(d)

@@ -160,24 +163,42 @@ func (d *Data) Close() error {
 }

 func (d *Data) Write(p []byte) (int, error) {
-    n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p)))
-    runtime.KeepAlive(d)
-    if err != nil {
-        return 0, err
+    var buffer *byte
+    if len(p) > 0 {
+        buffer = &p[0]
     }
-    if n == 0 {
+
+    n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(buffer), C.size_t(len(p)))
+    runtime.KeepAlive(d)
+    switch {
+    case d.err != nil:
+        defer func() { d.err = nil }()
+
+        return 0, d.err
+    case err != nil:
+        return 0, err
+    case len(p) > 0 && n == 0:
         return 0, io.EOF
     }
     return int(n), nil
 }

 func (d *Data) Read(p []byte) (int, error) {
-    n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p)))
-    runtime.KeepAlive(d)
-    if err != nil {
-        return 0, err
+    var buffer *byte
+    if len(p) > 0 {
+        buffer = &p[0]
     }
-    if n == 0 {
+
+    n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(buffer), C.size_t(len(p)))
+    runtime.KeepAlive(d)
+    switch {
+    case d.err != nil:
+        defer func() { d.err = nil }()
+
+        return 0, d.err
+    case err != nil:
+        return 0, err
+    case len(p) > 0 && n == 0:
         return 0, io.EOF
     }
     return int(n), nil

@@ -186,7 +207,15 @@ func (d *Data) Read(p []byte) (int, error) {
 func (d *Data) Seek(offset int64, whence int) (int64, error) {
     n, err := C.gogpgme_data_seek(d.dh, C.gpgme_off_t(offset), C.int(whence))
     runtime.KeepAlive(d)
-    return int64(n), err
+    switch {
+    case d.err != nil:
+        defer func() { d.err = nil }()
+
+        return 0, d.err
+    case err != nil:
+        return 0, err
+    }
+    return int64(n), nil
 }

 // Name returns the associated filename if any
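The Write/Read/Seek hunks above all follow the same pattern: the exported callback cannot return a Go error through C, so it stashes the error on the struct and signals a generic EIO, and the Go-facing method prefers the stashed error once the C call returns, clearing it afterwards. A rough, cgo-free sketch of that flow (all names here are illustrative, not the gpgme API):

```go
package main

import (
    "errors"
    "fmt"
)

type data struct {
    err error // last error recorded by a C-invoked callback
}

// readCallback stands in for an exported cgo callback: it can only signal
// failure to C as -1, so the real cause is parked on d.err.
func (d *data) readCallback(fail error) int {
    if fail != nil {
        d.err = fail
        return -1
    }
    return 0
}

// Read is the Go-facing wrapper: after the (simulated) C call it surfaces the
// stashed callback error in preference to the generic failure, then clears it.
func (d *data) Read(fail error) error {
    rc := d.readCallback(fail) // in gpgme this happens inside the C library call
    switch {
    case d.err != nil:
        defer func() { d.err = nil }()
        return d.err
    case rc < 0:
        return errors.New("input/output error")
    }
    return nil
}

func main() {
    d := &data{}
    fmt.Println(d.Read(errors.New("underlying reader failed"))) // original error survives
    fmt.Println(d.Read(nil))                                    // <nil>
}
```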
@@ -1,13 +1,5 @@
 #include "go_gpgme.h"

-gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle) {
-    return gpgme_data_new_from_cbs(dh, cbs, (void *)handle);
-}
-
-void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle) {
-    gpgme_set_passphrase_cb(ctx, cb, (void *)handle);
-}
-
 gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence) {
     return gpgme_data_seek(dh, offset, whence);
 }

@@ -15,17 +7,17 @@ gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence) {
 gpgme_error_t gogpgme_op_assuan_transact_ext(
     gpgme_ctx_t ctx,
     char* cmd,
-    uintptr_t data_h,
-    uintptr_t inquiry_h,
-    uintptr_t status_h,
+    void* data_h,
+    void* inquiry_h,
+    void* status_h,
     gpgme_error_t *operr
 ){
     return gpgme_op_assuan_transact_ext(
         ctx,
         cmd,
-        (gpgme_assuan_data_cb_t) gogpgme_assuan_data_callback, (void *)data_h,
-        (gpgme_assuan_inquire_cb_t) gogpgme_assuan_inquiry_callback, (void *)inquiry_h,
-        (gpgme_assuan_status_cb_t) gogpgme_assuan_status_callback, (void *)status_h,
+        (gpgme_assuan_data_cb_t) gogpgme_assuan_data_callback, data_h,
+        (gpgme_assuan_inquire_cb_t) gogpgme_assuan_inquiry_callback, inquiry_h,
+        (gpgme_assuan_status_cb_t) gogpgme_assuan_status_callback, status_h,
         operr
     );
 }
@@ -10,11 +10,9 @@ extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size);
 extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size);
 extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence);
 extern gpgme_error_t gogpgme_passfunc(void *hook, char *uid_hint, char *passphrase_info, int prev_was_bad, int fd);
-extern gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle);
-extern void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle);
 extern gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence);

-extern gpgme_error_t gogpgme_op_assuan_transact_ext(gpgme_ctx_t ctx, char *cmd, uintptr_t data_h, uintptr_t inquiry_h , uintptr_t status_h, gpgme_error_t *operr);
+extern gpgme_error_t gogpgme_op_assuan_transact_ext(gpgme_ctx_t ctx, char *cmd, void *data_h, void *inquiry_h , void *status_h, gpgme_error_t *operr);

 extern gpgme_error_t gogpgme_assuan_data_callback(void *opaque, void* data, size_t datalen );
 extern gpgme_error_t gogpgme_assuan_inquiry_callback(void *opaque, char* name, char* args);
@@ -7,11 +7,13 @@ package gpgme
 // #include <gpgme.h>
 // #include "go_gpgme.h"
 import "C"

 import (
     "fmt"
     "io"
     "os"
     "runtime"
+    "runtime/cgo"
     "time"
     "unsafe"
 )

@@ -27,7 +29,8 @@ type Callback func(uidHint string, prevWasBad bool, f *os.File) error

 //export gogpgme_passfunc
 func gogpgme_passfunc(hook unsafe.Pointer, uid_hint, passphrase_info *C.char, prev_was_bad, fd C.int) C.gpgme_error_t {
-    c := callbackLookup(uintptr(hook)).(*Context)
+    h := *(*cgo.Handle)(hook)
+    c := h.Value().(*Context)
     go_uid_hint := C.GoString(uid_hint)
     f := os.NewFile(uintptr(fd), go_uid_hint)
     defer f.Close()
@@ -233,6 +236,17 @@ func SetEngineInfo(proto Protocol, fileName, homeDir string) error {
     return handleError(C.gpgme_set_engine_info(C.gpgme_protocol_t(proto), cfn, chome))
 }

+func GetDirInfo(what string) string {
+    cwhat := C.CString(what)
+    defer C.free(unsafe.Pointer(cwhat))
+
+    cdir := C.gpgme_get_dirinfo(cwhat)
+    if cdir == nil {
+        return ""
+    }
+    return C.GoString(cdir)
+}
+
 func FindKeys(pattern string, secretOnly bool) ([]*Key, error) {
     var keys []*Key
     ctx, err := New()

@@ -243,7 +257,7 @@ func FindKeys(pattern string, secretOnly bool) ([]*Key, error) {
     if err := ctx.KeyListStart(pattern, secretOnly); err != nil {
         return keys, err
     }
-    defer ctx.KeyListEnd()
+    defer func() { _ = ctx.KeyListEnd() }()
     for ctx.KeyListNext() {
         keys = append(keys, ctx.Key)
     }

@@ -268,8 +282,10 @@ func Decrypt(r io.Reader) (*Data, error) {
     if err != nil {
         return nil, err
     }
-    err = ctx.Decrypt(cipher, plain)
-    plain.Seek(0, SeekSet)
+    if err := ctx.Decrypt(cipher, plain); err != nil {
+        return nil, err
+    }
+    _, err = plain.Seek(0, SeekSet)
     return plain, err
 }

@@ -278,7 +294,7 @@ type Context struct {
     KeyError error

     callback Callback
-    cbc      uintptr // WARNING: Call runtime.KeepAlive(c) after ANY use of c.cbc in C (typically via c.ctx)
+    cbc      cgo.Handle // WARNING: Call runtime.KeepAlive(c) after ANY use of c.cbc in C (typically via c.ctx)

     ctx C.gpgme_ctx_t // WARNING: Call runtime.KeepAlive(c) after ANY passing of c.ctx to C
 }

@@ -295,7 +311,7 @@ func (c *Context) Release() {
         return
     }
     if c.cbc > 0 {
-        callbackDelete(c.cbc)
+        c.cbc.Delete()
     }
     C.gpgme_release(c.ctx)
     runtime.KeepAlive(c)

@@ -364,15 +380,14 @@ func (c *Context) SetCallback(callback Callback) error {
     var err error
     c.callback = callback
     if c.cbc > 0 {
-        callbackDelete(c.cbc)
+        c.cbc.Delete()
     }
     if callback != nil {
-        cbc := callbackAdd(c)
-        c.cbc = cbc
-        _, err = C.gogpgme_set_passphrase_cb(c.ctx, C.gpgme_passphrase_cb_t(C.gogpgme_passfunc), C.uintptr_t(cbc))
+        c.cbc = cgo.NewHandle(c)
+        _, err = C.gpgme_set_passphrase_cb(c.ctx, C.gpgme_passphrase_cb_t(C.gogpgme_passfunc), unsafe.Pointer(&c.cbc))
     } else {
         c.cbc = 0
-        _, err = C.gogpgme_set_passphrase_cb(c.ctx, nil, 0)
+        _, err = C.gpgme_set_passphrase_cb(c.ctx, nil, nil)
     }
     runtime.KeepAlive(c)
     return err
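The `GetDirInfo` helper added a few hunks above wraps `gpgme_get_dirinfo`. A hedged usage sketch, assuming the GPGME C library is installed and the vendored `github.com/proglottis/gpgme` (v0.1.4 or later) is on the import path; `"homedir"` and `"gpg-name"` are standard gpgme_get_dirinfo keys, and an unknown key simply yields an empty string:

```go
package main

import (
    "fmt"

    "github.com/proglottis/gpgme"
)

func main() {
    // Where GPGME keeps its state, and which gpg binary it will drive.
    fmt.Println("homedir:", gpgme.GetDirInfo("homedir"))
    fmt.Println("gpg:", gpgme.GetDirInfo("gpg-name"))
}
```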
@@ -564,9 +579,11 @@ func (c *Context) Sign(signers []*Key, plain, sig *Data, mode SigMode) error {
     return err
 }

-type AssuanDataCallback func(data []byte) error
-type AssuanInquireCallback func(name, args string) error
-type AssuanStatusCallback func(status, args string) error
+type (
+    AssuanDataCallback    func(data []byte) error
+    AssuanInquireCallback func(name, args string) error
+    AssuanStatusCallback  func(status, args string) error
+)

 // AssuanSend sends a raw Assuan command to gpg-agent
 func (c *Context) AssuanSend(

@@ -577,17 +594,17 @@ func (c *Context) AssuanSend(
 ) error {
     var operr C.gpgme_error_t

-    dataPtr := callbackAdd(&data)
-    inquiryPtr := callbackAdd(&inquiry)
-    statusPtr := callbackAdd(&status)
+    dataPtr := cgo.NewHandle(&data)
+    inquiryPtr := cgo.NewHandle(&inquiry)
+    statusPtr := cgo.NewHandle(&status)
     cmdCStr := C.CString(cmd)
     defer C.free(unsafe.Pointer(cmdCStr))
     err := C.gogpgme_op_assuan_transact_ext(
         c.ctx,
         cmdCStr,
-        C.uintptr_t(dataPtr),
-        C.uintptr_t(inquiryPtr),
-        C.uintptr_t(statusPtr),
+        unsafe.Pointer(&dataPtr),
+        unsafe.Pointer(&inquiryPtr),
+        unsafe.Pointer(&statusPtr),
         &operr,
     )
     runtime.KeepAlive(c)

@@ -600,11 +617,14 @@ func (c *Context) AssuanSend(

 //export gogpgme_assuan_data_callback
 func gogpgme_assuan_data_callback(handle unsafe.Pointer, data unsafe.Pointer, datalen C.size_t) C.gpgme_error_t {
-    c := callbackLookup(uintptr(handle)).(*AssuanDataCallback)
+    h := *(*cgo.Handle)(handle)
+    c := h.Value().(*AssuanDataCallback)
     if *c == nil {
         return 0
     }
-    (*c)(C.GoBytes(data, C.int(datalen)))
+    if err := (*c)(C.GoBytes(data, C.int(datalen))); err != nil {
+        return C.gpgme_error(C.GPG_ERR_USER_1)
+    }
     return 0
 }

@@ -612,11 +632,14 @@ func gogpgme_assuan_data_callback(handle unsafe.Pointer, data unsafe.Pointer, da
 func gogpgme_assuan_inquiry_callback(handle unsafe.Pointer, cName *C.char, cArgs *C.char) C.gpgme_error_t {
     name := C.GoString(cName)
     args := C.GoString(cArgs)
-    c := callbackLookup(uintptr(handle)).(*AssuanInquireCallback)
+    h := *(*cgo.Handle)(handle)
+    c := h.Value().(*AssuanInquireCallback)
     if *c == nil {
         return 0
     }
-    (*c)(name, args)
+    if err := (*c)(name, args); err != nil {
+        return C.gpgme_error(C.GPG_ERR_USER_1)
+    }
     return 0
 }

@@ -624,11 +647,14 @@ func gogpgme_assuan_inquiry_callback(handle unsafe.Pointer, cName *C.char, cArgs
 func gogpgme_assuan_status_callback(handle unsafe.Pointer, cStatus *C.char, cArgs *C.char) C.gpgme_error_t {
     status := C.GoString(cStatus)
     args := C.GoString(cArgs)
-    c := callbackLookup(uintptr(handle)).(*AssuanStatusCallback)
+    h := *(*cgo.Handle)(handle)
+    c := h.Value().(*AssuanStatusCallback)
     if *c == nil {
         return 0
     }
-    (*c)(status, args)
+    if err := (*c)(status, args); err != nil {
+        return C.gpgme_error(C.GPG_ERR_USER_1)
+    }
     return 0
 }

@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows

 package gpgme

@@ -134,7 +134,7 @@ func (i *InteractiveIDTokenGetter) doOobFlow(cfg *oauth2.Config, stateToken stri
     fmt.Fprintln(i.GetOutput(), "Go to the following link in a browser:\n\n\t", authURL)
     fmt.Fprintf(i.GetOutput(), "Enter verification code: ")
     var code string
-    fmt.Fscanf(i.GetInput(), "%s", &code)
+    _, _ = fmt.Fscanf(i.GetInput(), "%s", &code)
     // New line in case read input doesn't move cursor to next line.
     fmt.Fprintln(i.GetOutput())
     return code
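The single added line in the gpgme unix.go hunk above is the Go 1.17+ build-constraint syntax; during the transition both spellings are typically kept so older toolchains still apply the constraint. For what it's worth, the newer form also allows boolean expressions that the old line-based form has to split across lines, e.g. (an illustrative file header only, not from this diff):

```go
//go:build (linux || freebsd) && !cgo
// +build linux freebsd
// +build !cgo

package example
```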
@@ -23,21 +23,19 @@ var errAlignmentOverflow = errors.New("integer overflow when calculating alignme

 // nextAligned finds the next offset that satisfies alignment.
 func nextAligned(offset int64, alignment int) (int64, error) {
-    align64 := uint64(alignment)
-    offset64 := uint64(offset)
+    align64 := int64(alignment)

-    if align64 <= 0 || offset64%align64 == 0 {
+    if align64 <= 0 || offset%align64 == 0 {
         return offset, nil
     }

-    offset64 += (align64 - offset64%align64)
+    align64 -= offset % align64

-    if offset64 > math.MaxInt64 {
+    if (math.MaxInt64 - offset) < align64 {
         return 0, errAlignmentOverflow
     }

-    return int64(offset64), nil
+    //nolint:gosec // Overflow handled above.
+    return offset + align64, nil
 }

 // writeDataObjectAt writes the data object described by di to ws, using time t, recording details

@@ -5,15 +5,6 @@

 oauth2 package contains a client implementation for OAuth 2.0 spec.

-## Installation
-
-~~~~
-go get golang.org/x/oauth2
-~~~~
-
-Or you can manually git clone the repository to
-`$(go env GOPATH)/src/golang.org/x/oauth2`.
-
 See pkg.go.dev for further documentation and examples.

 * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2)

@@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at
 https://github.com/golang/oauth2/issues.

 This repository uses Gerrit for code changes. To learn how to submit changes to
-this repository, see https://golang.org/doc/contribute.html. In particular:
+this repository, see https://go.dev/doc/contribute.
+
+The git repository is https://go.googlesource.com/oauth2.
+
+Note:

 * Excluding trivial changes, all contributions should be connected to an existing issue.
 * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted.

@@ -56,7 +56,7 @@ type Config struct {
     // the OAuth flow, after the resource owner's URLs.
     RedirectURL string

-    // Scope specifies optional requested permissions.
+    // Scopes specifies optional requested permissions.
     Scopes []string

     // authStyleCache caches which auth style to use when Endpoint.AuthStyle is
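To make the sif `nextAligned` change above concrete, here is a standalone copy of the patched logic with a couple of spot checks: the required padding is computed in int64, and the addition is only performed once `math.MaxInt64 - offset` is known to leave room for it.

```go
package main

import (
    "errors"
    "fmt"
    "math"
)

var errAlignmentOverflow = errors.New("integer overflow when calculating alignment")

// nextAligned mirrors the patched function: turn align64 into the needed
// padding and reject the addition only when it would overflow int64.
func nextAligned(offset int64, alignment int) (int64, error) {
    align64 := int64(alignment)
    if align64 <= 0 || offset%align64 == 0 {
        return offset, nil
    }
    align64 -= offset % align64
    if (math.MaxInt64 - offset) < align64 {
        return 0, errAlignmentOverflow
    }
    return offset + align64, nil
}

func main() {
    fmt.Println(nextAligned(10, 8))              // 16 <nil>
    fmt.Println(nextAligned(16, 8))              // 16 <nil> (already aligned)
    fmt.Println(nextAligned(math.MaxInt64-3, 8)) // 0 integer overflow when calculating alignment
}
```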
@@ -129,7 +129,7 @@ github.com/containerd/platforms
 ## explicit; go 1.19
 github.com/containerd/stargz-snapshotter/estargz
 github.com/containerd/stargz-snapshotter/estargz/errorutil
-# github.com/containerd/typeurl/v2 v2.2.0
+# github.com/containerd/typeurl/v2 v2.2.3
 ## explicit; go 1.21
 github.com/containerd/typeurl/v2
 # github.com/containernetworking/cni v1.2.3

@@ -251,8 +251,8 @@ github.com/containers/conmon/runner/config
 # github.com/containers/gvisor-tap-vsock v0.8.1
 ## explicit; go 1.22.0
 github.com/containers/gvisor-tap-vsock/pkg/types
-# github.com/containers/image/v5 v5.33.0
-## explicit; go 1.22.6
+# github.com/containers/image/v5 v5.33.1-0.20250107140133-43c2a741242f
+## explicit; go 1.22.8
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
 github.com/containers/image/v5/directory/explicitfilepath

@@ -277,6 +277,7 @@ github.com/containers/image/v5/internal/multierr
 github.com/containers/image/v5/internal/pkg/platform
 github.com/containers/image/v5/internal/private
 github.com/containers/image/v5/internal/putblobdigest
+github.com/containers/image/v5/internal/reflink
 github.com/containers/image/v5/internal/rootless
 github.com/containers/image/v5/internal/set
 github.com/containers/image/v5/internal/signature

@@ -468,7 +469,6 @@ github.com/distribution/reference
 ## explicit
 github.com/docker/distribution/registry/api/errcode
 github.com/docker/distribution/registry/api/v2
-github.com/docker/distribution/registry/client/auth/challenge
 # github.com/docker/docker v27.4.1+incompatible
 ## explicit
 github.com/docker/docker/api

@@ -966,8 +966,8 @@ github.com/pmezard/go-difflib/difflib
 # github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c
 ## explicit; go 1.14
 github.com/power-devops/perfstat
-# github.com/proglottis/gpgme v0.1.3
-## explicit; go 1.11
+# github.com/proglottis/gpgme v0.1.4
+## explicit; go 1.17
 github.com/proglottis/gpgme
 # github.com/rivo/uniseg v0.4.7
 ## explicit; go 1.18

@@ -989,7 +989,7 @@ github.com/rootless-containers/rootlesskit/v2/pkg/port/portutil
 # github.com/seccomp/libseccomp-golang v0.10.0
 ## explicit; go 1.14
 github.com/seccomp/libseccomp-golang
-# github.com/secure-systems-lab/go-securesystemslib v0.8.0
+# github.com/secure-systems-lab/go-securesystemslib v0.9.0
 ## explicit; go 1.20
 github.com/secure-systems-lab/go-securesystemslib/encrypted
 # github.com/segmentio/ksuid v1.0.4

@@ -1017,8 +1017,8 @@ github.com/sigstore/rekor/pkg/generated/client/pubkey
 github.com/sigstore/rekor/pkg/generated/client/tlog
 github.com/sigstore/rekor/pkg/generated/models
 github.com/sigstore/rekor/pkg/util
-# github.com/sigstore/sigstore v1.8.9
-## explicit; go 1.22.5
+# github.com/sigstore/sigstore v1.8.11
+## explicit; go 1.22.0
 github.com/sigstore/sigstore/pkg/cryptoutils
 github.com/sigstore/sigstore/pkg/oauth
 github.com/sigstore/sigstore/pkg/oauthflow

@@ -1053,8 +1053,8 @@ github.com/stefanberger/go-pkcs11uri
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/assert/yaml
 github.com/stretchr/testify/require
-# github.com/sylabs/sif/v2 v2.19.1
-## explicit; go 1.22.5
+# github.com/sylabs/sif/v2 v2.20.2
+## explicit; go 1.22.0
 github.com/sylabs/sif/v2/pkg/sif
 # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
 ## explicit

@@ -1228,7 +1228,7 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.23.0
+# golang.org/x/oauth2 v0.25.0
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/internal