build(deps): bump github.com/docker/docker from 23.0.1+incompatible to 23.0.2+incompatible

Bumps [github.com/docker/docker](https://github.com/docker/docker) from 23.0.1+incompatible to 23.0.2+incompatible.
- [Release notes](https://github.com/docker/docker/releases)
- [Commits](https://github.com/docker/docker/compare/v23.0.1...v23.0.2)

---
updated-dependencies:
- dependency-name: github.com/docker/docker
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
dependabot[bot] authored 2023-03-29 05:02:42 +00:00, committed by GitHub
parent 7c626032fa
commit b84daf20e7
161 changed files with 3633 additions and 6038 deletions
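
For reference, a bump like this can be reproduced locally with the standard Go toolchain (a sketch; the module path and target version come from the dependabot notes above):

    go get github.com/docker/docker@v23.0.2+incompatible
    go mod tidy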


@@ -8,7 +8,7 @@ require (
github.com/containerd/containerd v1.7.0
github.com/containernetworking/cni v1.1.2
github.com/containernetworking/plugins v1.2.0
github.com/containers/image/v5 v5.24.3-0.20230324204529-08b04b816eb8
github.com/containers/image/v5 v5.24.2
github.com/containers/ocicrypt v1.1.7
github.com/containers/storage v1.45.5-0.20230326103843-b1216421c44b
github.com/coreos/go-systemd/v22 v22.5.0
@@ -16,7 +16,7 @@ require (
github.com/davecgh/go-spew v1.1.1
github.com/disiqueira/gotree/v3 v3.0.2
github.com/docker/distribution v2.8.1+incompatible
github.com/docker/docker v23.0.1+incompatible
github.com/docker/docker v23.0.2+incompatible
github.com/docker/go-units v0.5.0
github.com/fsnotify/fsnotify v1.6.0
github.com/ghodss/yaml v1.0.0
@@ -115,12 +115,11 @@ require (
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/ulikunitz/xz v0.5.11 // indirect
github.com/vbatts/tar-split v0.11.2 // indirect
github.com/vbauerster/mpb/v8 v8.3.0 // indirect
github.com/vbauerster/mpb/v7 v7.5.3 // indirect
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect
go.mongodb.org/mongo-driver v1.11.1 // indirect
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.24.0 // indirect
golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/text v0.8.0 // indirect
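
The +incompatible suffix on these require lines is standard Go modules notation for a module at major version 2 or higher whose import path predates semantic import versioning (no /v23 path element). After applying the change, the selected version can be confirmed from the repository root (a sketch):

    go list -m github.com/docker/docker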


@@ -46,8 +46,8 @@ github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl3
github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP7mkPDoSfP9VU=
github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
github.com/containers/image/v5 v5.24.3-0.20230324204529-08b04b816eb8 h1:hL/KrmP4ZMRmokrz+YNBem1ECKtytLU4/kUn3mSSkz0=
github.com/containers/image/v5 v5.24.3-0.20230324204529-08b04b816eb8/go.mod h1:pquu2CUlF4i+OBB5MM6kb36ZkYGQze8Wqv91aYIe9eo=
github.com/containers/image/v5 v5.24.2 h1:QcMsHBAXBPPnVYo6iEFarvaIpym7sBlwsGHPJlucxN0=
github.com/containers/image/v5 v5.24.2/go.mod h1:oss5F6ssGQz8ZtC79oY+fuzYA3m3zBek9tq9gmhuvHc=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.1.7 h1:thhNr4fu2ltyGz8aMx8u48Ae0Pnbip3ePP9/mzkZ/3U=
@@ -70,8 +70,8 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh
github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY=
github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v23.0.2+incompatible h1:q81C2qQ/EhPm8COZMUGOQYh4qLv4Xu6CXELJ3WK/mlU=
github.com/docker/docker v23.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -262,6 +262,7 @@ github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYt
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
@@ -279,7 +280,7 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/term v0.0.0-20221120202655-abb19827d345 h1:J9c53/kxIH+2nTKBEfZYFMlhghtHpIHSXpm5VRGHSnU=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -406,8 +407,8 @@ github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
github.com/vbauerster/mpb/v8 v8.3.0 h1:xw2eMJ6v5NP8Rd7yOVzU6OqnRPrS1yWAoLTrWe7W4Nc=
github.com/vbauerster/mpb/v8 v8.3.0/go.mod h1:bngtYUAu25QGxcYYglsF6oyoHlC9Yhh582xF9LjfmL4=
github.com/vbauerster/mpb/v7 v7.5.3 h1:BkGfmb6nMrrBQDFECR/Q7RkKCw7ylMetCb4079CGs4w=
github.com/vbauerster/mpb/v7 v7.5.3/go.mod h1:i+h4QY6lmLvBNK2ah1fSreiw3ajskRlBp9AhY/PnuOE=
github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
@@ -455,8 +456,6 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 h1:LGJsf5LRplCck6jUCH3dBL2dmycNruWNF5xugkSlfXw=
golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -523,6 +522,7 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@@ -612,7 +612,7 @@ gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
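
The go.sum updates above pin an h1: hash for each module version and for its go.mod file; the toolchain re-checks these hashes on every download, and already-cached module copies can additionally be audited with (a sketch):

    go mod verify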


@@ -104,11 +104,12 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
if !isConfig {
options.LayerIndex = &layerIndex
}
destBlob, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
uploadedInfo, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
if err != nil {
return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err)
}
uploadedInfo := updatedBlobInfoFromUpload(stream.info, destBlob)
uploadedInfo.Annotations = stream.info.Annotations
compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations)
decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation)
@@ -168,20 +169,3 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
}
return n, err
}
// updatedBlobInfoFromUpload returns inputInfo updated with uploadedBlob which was created based on inputInfo.
func updatedBlobInfoFromUpload(inputInfo types.BlobInfo, uploadedBlob private.UploadedBlob) types.BlobInfo {
// The transport is only tasked with dealing with the raw blob, and possibly computing Digest/Size.
// Handling of compression, encryption, and the related MIME types and the like are all the responsibility
// of the generic code in this package.
return types.BlobInfo{
Digest: uploadedBlob.Digest,
Size: uploadedBlob.Size,
URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
Annotations: inputInfo.Annotations,
MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression/Crypto.
CompressionOperation: inputInfo.CompressionOperation, // Expected to be unset, and only updated by copyBlobFromStream.
CompressionAlgorithm: inputInfo.CompressionAlgorithm, // Expected to be unset, and only updated by copyBlobFromStream.
CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset, and only updated by copyBlobFromStream.
}
}


@@ -6,30 +6,10 @@ import (
"io"
internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)
var (
// defaultCompressionFormat is used if the destination transport requests
// compression, and the user does not explicitly instruct us to use an algorithm.
defaultCompressionFormat = &compression.Gzip
// compressionBufferSize is the buffer size used to compress a blob
compressionBufferSize = 1048576
// expectedCompressionFormats is used to check if a blob with a specified media type is compressed
// using the algorithm that the media type says it should be compressed with
expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip,
imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd,
manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
}
)
// bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
@@ -129,13 +109,13 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed {
logrus.Debugf("Compressing blob on the fly")
var uploadedAlgorithm *compressiontypes.Algorithm
if ic.compressionFormat != nil {
uploadedAlgorithm = ic.compressionFormat
if ic.c.compressionFormat != nil {
uploadedAlgorithm = ic.c.compressionFormat
} else {
uploadedAlgorithm = defaultCompressionFormat
}
reader, annotations := ic.compressedStream(stream.reader, *uploadedAlgorithm)
reader, annotations := ic.c.compressedStream(stream.reader, *uploadedAlgorithm)
// Note: reader must be closed on all return paths.
stream.reader = reader
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
@@ -157,7 +137,7 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
// bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed &&
ic.compressionFormat != nil && ic.compressionFormat.Name() != detected.format.Name() {
ic.c.compressionFormat != nil && ic.c.compressionFormat.Name() != detected.format.Name() {
// When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
// re-compressed using the desired format.
logrus.Debugf("Blob will be converted")
@@ -173,7 +153,7 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp
}
}()
recompressed, annotations := ic.compressedStream(decompressed, *ic.compressionFormat)
recompressed, annotations := ic.c.compressedStream(decompressed, *ic.c.compressionFormat)
// Note: recompressed must be closed on all return paths.
stream.reader = recompressed
stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
@@ -183,10 +163,10 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp
succeeded = true
return &bpCompressionStepData{
operation: types.PreserveOriginal,
uploadedAlgorithm: ic.compressionFormat,
uploadedAlgorithm: ic.c.compressionFormat,
uploadedAnnotations: annotations,
srcCompressorName: detected.srcCompressorName,
uploadedCompressorName: ic.compressionFormat.Name(),
uploadedCompressorName: ic.c.compressionFormat.Name(),
closers: []io.Closer{decompressed, recompressed},
}, nil
}
@@ -219,9 +199,7 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp
}
// bpcPreserveOriginal returns a *bpCompressionStepData for not changing the original blob.
// This does not change the sourceStream parameter; we include it for symmetry with other
// pipeline steps.
func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCompressionStepData,
func (ic *imageCopier) bpcPreserveOriginal(stream *sourceStream, detected bpDetectCompressionStepData,
layerCompressionChangeSupported bool) *bpCompressionStepData {
logrus.Debugf("Using original blob without modification")
// Remember if the original blob was compressed, and if so how, so that if
@@ -254,7 +232,9 @@ func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCom
if *annotations == nil {
*annotations = map[string]string{}
}
maps.Copy(*annotations, d.uploadedAnnotations)
for k, v := range d.uploadedAnnotations {
(*annotations)[k] = v
}
}
// recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo and the original srcInfo.
@@ -318,24 +298,24 @@ func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, co
}
// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
func (ic *imageCopier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
err := errors.New("Internal error: unexpected panic in compressGoroutine")
defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
_ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
}()
err = doCompression(dest, src, metadata, compressionFormat, ic.compressionLevel)
err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel)
}
// compressedStream returns a stream the input reader compressed using format, and a metadata map.
// The caller must close the returned reader.
// AFTER the stream is consumed, metadata will be updated with annotations to use on the data.
func (ic *imageCopier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
func (c *copier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
pipeReader, pipeWriter := io.Pipe()
annotations := map[string]string{}
// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
// we don't care.
go ic.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
go c.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
return pipeReader, annotations
}

File diff suppressed because it is too large.


@@ -7,8 +7,6 @@ import (
"github.com/containers/image/v5/types"
"github.com/containers/ocicrypt"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
// isOciEncrypted returns a bool indicating if a mediatype is encrypted
@@ -20,9 +18,12 @@ func isOciEncrypted(mediatype string) bool {
// isEncrypted checks if an image is encrypted
func isEncrypted(i types.Image) bool {
layers := i.LayerInfos()
return slices.ContainsFunc(layers, func(l types.BlobInfo) bool {
return isOciEncrypted(l.MediaType)
})
for _, l := range layers {
if isOciEncrypted(l.MediaType) {
return true
}
}
return false
}
// bpDecryptionStepData contains data that the copy pipeline needs about the decryption step.
@@ -46,9 +47,11 @@ func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.
stream.reader = reader
stream.info.Digest = decryptedDigest
stream.info.Size = -1
maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool {
return strings.HasPrefix(k, "org.opencontainers.image.enc")
})
for k := range stream.info.Annotations {
if strings.HasPrefix(k, "org.opencontainers.image.enc") {
delete(stream.info.Annotations, k)
}
}
return &bpDecryptionStepData{
decrypting: true,
}, nil
@@ -119,6 +122,8 @@ func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *ty
if *annotations == nil {
*annotations = map[string]string{}
}
maps.Copy(*annotations, encryptAnnotations)
for k, v := range encryptAnnotations {
(*annotations)[k] = v
}
return nil
}


@@ -6,11 +6,9 @@ import (
"fmt"
"strings"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
@@ -21,7 +19,7 @@ var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, man
// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
type orderedSet struct {
list []string
included *set.Set[string]
included map[string]struct{}
}
// newOrderedSet creates a correctly initialized orderedSet.
@@ -29,15 +27,15 @@ type orderedSet struct {
func newOrderedSet() *orderedSet {
return &orderedSet{
list: []string{},
included: set.New[string](),
included: map[string]struct{}{},
}
}
// append adds s to the end of os, only if it is not included already.
func (os *orderedSet) append(s string) {
if !os.included.Contains(s) {
if _, ok := os.included[s]; !ok {
os.list = append(os.list, s)
os.included.Add(s)
os.included[s] = struct{}{}
}
}
@@ -82,10 +80,10 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
otherMIMETypeCandidates: []string{},
}, nil
}
supportedByDest := set.New[string]()
supportedByDest := map[string]struct{}{}
for _, t := range destSupportedManifestMIMETypes {
if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) {
supportedByDest.Add(t)
supportedByDest[t] = struct{}{}
}
}
@@ -98,7 +96,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
prioritizedTypes := newOrderedSet()
// First of all, prefer to keep the original manifest unmodified.
if supportedByDest.Contains(srcType) {
if _, ok := supportedByDest[srcType]; ok {
prioritizedTypes.append(srcType)
}
if in.cannotModifyManifestReason != "" {
@@ -115,7 +113,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
// Then use our list of preferred types.
for _, t := range preferredManifestMIMETypes {
if supportedByDest.Contains(t) {
if _, ok := supportedByDest[t]; ok {
prioritizedTypes.append(t)
}
}
@@ -168,8 +166,11 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport
prioritizedTypes := newOrderedSet()
// The first priority is the current type, if it's in the list, since that lets us avoid a
// conversion that isn't strictly necessary.
if slices.Contains(destSupportedMIMETypes, currentListMIMEType) {
prioritizedTypes.append(currentListMIMEType)
for _, t := range destSupportedMIMETypes {
if t == currentListMIMEType {
prioritizedTypes.append(currentListMIMEType)
break
}
}
// Pick out the other list types that we support.
for _, t := range destSupportedMIMETypes {


@@ -1,198 +0,0 @@
package copy
import (
"bytes"
"context"
"errors"
"fmt"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/signature"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// copyMultipleImages copies some or all of an image list's instances, using
// policyContext to validate source image admissibility.
func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) {
// Parse the list and get a copy of the original value after it's re-encoded.
manifestList, manifestType, err := unparsedToplevel.Manifest(ctx)
if err != nil {
return nil, fmt.Errorf("reading manifest list: %w", err)
}
originalList, err := internalManifest.ListFromBlob(manifestList, manifestType)
if err != nil {
return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err)
}
updatedList := originalList.CloneInternal()
sigs, err := c.sourceSignatures(ctx, unparsedToplevel, options,
"Getting image list signatures",
"Checking if image list destination supports signatures")
if err != nil {
return nil, err
}
// If the destination is a digested reference, make a note of that, determine what digest value we're
// expecting, and check that the source manifest matches it.
destIsDigestedReference := false
if named := c.dest.Reference().DockerReference(); named != nil {
if digested, ok := named.(reference.Digested); ok {
destIsDigestedReference = true
matches, err := manifest.MatchesDigest(manifestList, digested.Digest())
if err != nil {
return nil, fmt.Errorf("computing digest of source image's manifest: %w", err)
}
if !matches {
return nil, errors.New("Digest of source image's manifest would not match destination reference")
}
}
}
// Determine if we're allowed to modify the manifest list.
// If we can, set to the empty string. If we can't, set to the reason why.
// Compare, and perhaps keep in sync with, the version in copySingleImage.
cannotModifyManifestListReason := ""
if len(sigs) > 0 {
cannotModifyManifestListReason = "Would invalidate signatures"
}
if destIsDigestedReference {
cannotModifyManifestListReason = "Destination specifies a digest"
}
if options.PreserveDigests {
cannotModifyManifestListReason = "Instructed to preserve digests"
}
// Determine if we'll need to convert the manifest list to a different format.
forceListMIMEType := options.ForceManifestMIMEType
switch forceListMIMEType {
case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
forceListMIMEType = manifest.DockerV2ListMediaType
case imgspecv1.MediaTypeImageManifest:
forceListMIMEType = imgspecv1.MediaTypeImageIndex
}
selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType)
if err != nil {
return nil, fmt.Errorf("determining manifest list type to write to destination: %w", err)
}
if selectedListType != originalList.MIMEType() {
if cannotModifyManifestListReason != "" {
return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason)
}
}
// Copy each image, or just the ones we want to copy, in turn.
instanceDigests := updatedList.Instances()
imagesToCopy := len(instanceDigests)
if options.ImageListSelection == CopySpecificImages {
imagesToCopy = len(options.Instances)
}
c.Printf("Copying %d of %d images in list\n", imagesToCopy, len(instanceDigests))
updates := make([]manifest.ListUpdate, len(instanceDigests))
instancesCopied := 0
for i, instanceDigest := range instanceDigests {
if options.ImageListSelection == CopySpecificImages &&
!slices.Contains(options.Instances, instanceDigest) {
update, err := updatedList.Instance(instanceDigest)
if err != nil {
return nil, err
}
logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
// Record the digest/size/type of the manifest that we didn't copy.
updates[i] = update
continue
}
logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy)
unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest)
updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest)
if err != nil {
return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", instancesCopied+1, imagesToCopy, err)
}
instancesCopied++
// Record the result of a possible conversion here.
update := manifest.ListUpdate{
Digest: updatedManifestDigest,
Size: int64(len(updatedManifest)),
MediaType: updatedManifestType,
}
updates[i] = update
}
// Now reset the digest/size/types of the manifests in the list to account for any conversions that we made.
if err = updatedList.UpdateInstances(updates); err != nil {
return nil, fmt.Errorf("updating manifest list: %w", err)
}
// Iterate through supported list types, preferred format first.
c.Printf("Writing manifest list to image destination\n")
var errs []string
for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) {
var attemptedList internalManifest.ListPublic = updatedList
logrus.Debugf("Trying to use manifest list type %s…", thisListType)
// Perform the list conversion, if we need one.
if thisListType != updatedList.MIMEType() {
attemptedList, err = updatedList.ConvertToMIMEType(thisListType)
if err != nil {
return nil, fmt.Errorf("converting manifest list to list with MIME type %q: %w", thisListType, err)
}
}
// Check if the updates or a type conversion meaningfully changed the list of images
// by serializing them both so that we can compare them.
attemptedManifestList, err := attemptedList.Serialize()
if err != nil {
return nil, fmt.Errorf("encoding updated manifest list (%q: %#v): %w", updatedList.MIMEType(), updatedList.Instances(), err)
}
originalManifestList, err := originalList.Serialize()
if err != nil {
return nil, fmt.Errorf("encoding original manifest list for comparison (%q: %#v): %w", originalList.MIMEType(), originalList.Instances(), err)
}
// If we can't just use the original value, but we have to change it, flag an error.
if !bytes.Equal(attemptedManifestList, originalManifestList) {
if cannotModifyManifestListReason != "" {
return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason)
}
logrus.Debugf("Manifest list has been updated")
} else {
// We can just use the original value, so use it instead of the one we just rebuilt, so that we don't change the digest.
attemptedManifestList = manifestList
}
// Save the manifest list.
err = c.dest.PutManifest(ctx, attemptedManifestList, nil)
if err != nil {
logrus.Debugf("Upload of manifest list type %s failed: %v", thisListType, err)
errs = append(errs, fmt.Sprintf("%s(%v)", thisListType, err))
continue
}
errs = nil
manifestList = attemptedManifestList
break
}
if errs != nil {
return nil, fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", "))
}
// Sign the manifest list.
newSigs, err := c.createSignatures(ctx, manifestList, options.SignIdentity)
if err != nil {
return nil, err
}
sigs = append(sigs, newSigs...)
c.Printf("Storing list signatures\n")
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil {
return nil, fmt.Errorf("writing signatures: %w", err)
}
return manifestList, nil
}


@@ -7,8 +7,8 @@ import (
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
"github.com/vbauerster/mpb/v8"
"github.com/vbauerster/mpb/v8/decor"
"github.com/vbauerster/mpb/v7"
"github.com/vbauerster/mpb/v7/decor"
)
// newProgressPool creates a *mpb.Progress.
@@ -120,7 +120,7 @@ func (bar *progressBar) mark100PercentComplete() {
bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
} else {
// -1 = unknown size
// 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known
// 0 is somewhat of a a special case: Unlike c/image, where 0 is a definite known
// size (possible at least in theory), in mpb, zero-sized progress bars are treated
// as unknown size, in particular they are not configured to be marked as
// complete on bar.Current() reaching bar.total (because that would happen already


@@ -1,818 +0,0 @@
package copy
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"reflect"
"strings"
"sync"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/vbauerster/mpb/v8"
"golang.org/x/exp/slices"
)
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
type imageCopier struct {
c *copier
manifestUpdates *types.ManifestUpdateOptions
src *image.SourcedImage
diffIDsAreNeeded bool
cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
canSubstituteBlobs bool
compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
compressionLevel *int
ociEncryptLayers *[]int
}
// copySingleImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
// source image admissibility.
func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest) (retManifest []byte, retManifestType string, retManifestDigest digest.Digest, retErr error) {
// The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
// Make sure we fail cleanly in such cases.
multiImage, err := isMultiImage(ctx, unparsedImage)
if err != nil {
// FIXME FIXME: How to name a reference for the sub-image?
return nil, "", "", fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err)
}
if multiImage {
return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
}
// Please keep this policy check BEFORE reading any other information about the image.
// (The multiImage check above only matches the MIME type, which we have received anyway.
// Actual parsing of anything should be deferred.)
if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
return nil, "", "", fmt.Errorf("Source image rejected: %w", err)
}
src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
if err != nil {
return nil, "", "", fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err)
}
// If the destination is a digested reference, make a note of that, determine what digest value we're
// expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's
// one item from a manifest list that matches it, accept that as a match.
destIsDigestedReference := false
if named := c.dest.Reference().DockerReference(); named != nil {
if digested, ok := named.(reference.Digested); ok {
destIsDigestedReference = true
matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest())
if err != nil {
return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
}
if !matches {
manifestList, _, err := unparsedToplevel.Manifest(ctx)
if err != nil {
return nil, "", "", fmt.Errorf("reading manifest from source image: %w", err)
}
matches, err = manifest.MatchesDigest(manifestList, digested.Digest())
if err != nil {
return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
}
if !matches {
return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference")
}
}
}
}
if err := checkImageDestinationForCurrentRuntime(ctx, options.DestinationCtx, src, c.dest); err != nil {
return nil, "", "", err
}
sigs, err := c.sourceSignatures(ctx, src, options,
"Getting image source signatures",
"Checking if image destination supports signatures")
if err != nil {
return nil, "", "", err
}
// Determine if we're allowed to modify the manifest.
// If we can, set to the empty string. If we can't, set to the reason why.
// Compare, and perhaps keep in sync with, the version in copyMultipleImages.
cannotModifyManifestReason := ""
if len(sigs) > 0 {
cannotModifyManifestReason = "Would invalidate signatures"
}
if destIsDigestedReference {
cannotModifyManifestReason = "Destination specifies a digest"
}
if options.PreserveDigests {
cannotModifyManifestReason = "Instructed to preserve digests"
}
ic := imageCopier{
c: c,
manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
src: src,
// diffIDsAreNeeded is computed later
cannotModifyManifestReason: cannotModifyManifestReason,
ociEncryptLayers: options.OciEncryptLayers,
}
if options.DestinationCtx != nil {
// Note that compressionFormat and compressionLevel can be nil.
ic.compressionFormat = options.DestinationCtx.CompressionFormat
ic.compressionLevel = options.DestinationCtx.CompressionLevel
}
// Decide whether we can substitute blobs with semantic equivalents:
// - Don't do that if we can't modify the manifest at all
// - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
// This may be too conservative, but for now, better safe than sorry, _especially_ on the len(c.signers) != 0 path:
// The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
// We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there's a risk
// that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
// and we would reuse and sign it.
ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0
if err := ic.updateEmbeddedDockerReference(); err != nil {
return nil, "", "", err
}
destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil
manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{
srcMIMEType: ic.src.ManifestMIMEType,
destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
forceManifestMIMEType: options.ForceManifestMIMEType,
requiresOCIEncryption: destRequiresOciEncryption,
cannotModifyManifestReason: ic.cannotModifyManifestReason,
})
if err != nil {
return nil, "", "", err
}
// We set up this part of ic.manifestUpdates quite early, not just around the
// code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code
// (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based
// on the expected destination format.
if manifestConversionPlan.preferredMIMETypeNeedsConversion {
ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType
}
// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
// If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal
if options.OptimizeDestinationImageAlreadyExists {
shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
noPendingManifestUpdates := ic.noPendingManifestUpdates()
logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates)
if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates {
isSrcDestManifestEqual, retManifest, retManifestType, retManifestDigest, err := compareImageDestinationManifestEqual(ctx, options, src, targetInstance, c.dest)
if err != nil {
logrus.Warnf("Failed to compare destination image manifest: %v", err)
return nil, "", "", err
}
if isSrcDestManifestEqual {
c.Printf("Skipping: image already present at destination\n")
return retManifest, retManifestType, retManifestDigest, nil
}
}
}
if err := ic.copyLayers(ctx); err != nil {
return nil, "", "", err
}
// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
// and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
// So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
// we're altering how they're compressed. If the process succeeds, fine…
manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
retManifestType = manifestConversionPlan.preferredMIMEType
if err != nil {
logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err)
// … if it fails, and the failure is either because the manifest is rejected by the registry, or
// because we failed to create a manifest of the specified type because the specific manifest type
// doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
// have other options available that could still succeed.
var manifestTypeRejectedError types.ManifestTypeRejectedError
var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError
isManifestRejected := errors.As(err, &manifestTypeRejectedError)
isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError)
if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 {
// We don't have other options.
// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
// Don't bother the user with MIME types if we have no choice.
return nil, "", "", err
}
// If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType.
// So if we are here, we will definitely be trying to convert the manifest.
// With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
// so let's bail out early and with a better error message.
if ic.cannotModifyManifestReason != "" {
return nil, "", "", fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err)
}
// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)}
for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates {
logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
if err != nil {
logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err)
errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err))
continue
}
// We have successfully uploaded a manifest.
manifestBytes = attemptedManifest
retManifestDigest = attemptedManifestDigest
retManifestType = manifestMIMEType
errs = nil // Mark this as a success so that we don't abort below.
break
}
if errs != nil {
return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
}
}
if targetInstance != nil {
targetInstance = &retManifestDigest
}
newSigs, err := c.createSignatures(ctx, manifestBytes, options.SignIdentity)
if err != nil {
return nil, "", "", err
}
sigs = append(sigs, newSigs...)
c.Printf("Storing signatures\n")
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
return nil, "", "", fmt.Errorf("writing signatures: %w", err)
}
return manifestBytes, retManifestType, retManifestDigest, nil
}
// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
if dest.MustMatchRuntimeOS() {
c, err := src.OCIConfig(ctx)
if err != nil {
return fmt.Errorf("parsing image configuration: %w", err)
}
wantedPlatforms, err := platform.WantedPlatforms(sys)
if err != nil {
return fmt.Errorf("getting current platform information %#v: %w", sys, err)
}
options := newOrderedSet()
match := false
for _, wantedPlatform := range wantedPlatforms {
// Waiting for https://github.com/opencontainers/image-spec/pull/777 :
// This currently can't use image.MatchesPlatform because we don't know what to use
// for image.Variant.
if wantedPlatform.OS == c.OS && wantedPlatform.Architecture == c.Architecture {
match = true
break
}
options.append(fmt.Sprintf("%s+%s", wantedPlatform.OS, wantedPlatform.Architecture))
}
if !match {
logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q, expecting one of %q",
c.OS, c.Architecture, strings.Join(options.list, ", "))
}
}
return nil
}
// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
func (ic *imageCopier) updateEmbeddedDockerReference() error {
if ic.c.dest.IgnoresEmbeddedDockerReference() {
return nil // Destination would prefer us not to update the embedded reference.
}
destRef := ic.c.dest.Reference().DockerReference()
if destRef == nil {
return nil // Destination does not care about Docker references
}
if !ic.src.EmbeddedDockerReferenceConflicts(destRef) {
return nil // No reference embedded in the manifest, or it matches destRef already.
}
if ic.cannotModifyManifestReason != "" {
return fmt.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q",
transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason)
}
ic.manifestUpdates.EmbeddedDockerReference = destRef
return nil
}
func (ic *imageCopier) noPendingManifestUpdates() bool {
return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly})
}
// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
// (possibly remote) destination). Returning true and the destination's manifest, type and digest if they compare equal.
func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
srcManifestDigest, err := manifest.Digest(src.ManifestBlob)
if err != nil {
return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
}
destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx)
if err != nil {
logrus.Debugf("Unable to create destination image %s source: %v", dest.Reference(), err)
return false, nil, "", "", nil
}
destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
if err != nil {
logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err)
return false, nil, "", "", nil
}
destManifestDigest, err := manifest.Digest(destManifest)
if err != nil {
return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
}
logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
if srcManifestDigest != destManifestDigest {
return false, nil, "", "", nil
}
// Destination and source manifests, types and digests should all be equivalent
return true, destManifest, destManifestType, destManifestDigest, nil
}
// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "".
func (ic *imageCopier) copyLayers(ctx context.Context) error {
srcInfos := ic.src.LayerInfos()
numLayers := len(srcInfos)
updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
if err != nil {
return err
}
srcInfosUpdated := false
if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
if ic.cannotModifyManifestReason != "" {
return fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
}
srcInfos = updatedSrcInfos
srcInfosUpdated = true
}
type copyLayerData struct {
destInfo types.BlobInfo
diffID digest.Digest
err error
}
// The manifest is used to extract the information whether a given
// layer is empty.
man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType)
if err != nil {
return err
}
manifestLayerInfos := man.LayerInfos()
// copyGroup is used to determine if all layers are copied
copyGroup := sync.WaitGroup{}
data := make([]copyLayerData, numLayers)
copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) {
defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
defer copyGroup.Done()
cld := copyLayerData{}
if !ic.c.downloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
// DiffIDs are, currently, needed only when converting from schema1.
// In which case src.LayerInfos will not have URLs because schema1
// does not support them.
if ic.diffIDsAreNeeded {
cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
} else {
cld.destInfo = srcLayer
logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
}
} else {
cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef, manifestLayerInfos[index].EmptyLayer)
}
data[index] = cld
}
// Decide which layers to encrypt
layersToEncrypt := set.New[int]()
var encryptAll bool
if ic.ociEncryptLayers != nil {
encryptAll = len(*ic.ociEncryptLayers) == 0
totalLayers := len(srcInfos)
for _, l := range *ic.ociEncryptLayers {
// if layer is negative, it is reverse indexed.
layersToEncrypt.Add((totalLayers + l) % totalLayers)
}
if encryptAll {
for i := 0; i < len(srcInfos); i++ {
layersToEncrypt.Add(i)
}
}
}
if err := func() error { // A scope for defer
progressPool := ic.c.newProgressPool()
defer progressPool.Wait()
// Ensure we wait for all layers to be copied. progressPool.Wait() must not be called while any of the copyLayerHelpers interact with the progressPool.
defer copyGroup.Wait()
for i, srcLayer := range srcInfos {
err = ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1)
if err != nil {
// This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
return fmt.Errorf("copying layer: %w", err)
}
copyGroup.Add(1)
go copyLayerHelper(i, srcLayer, layersToEncrypt.Contains(i), progressPool, ic.c.rawSource.Reference().DockerReference())
}
// A call to copyGroup.Wait() is done at this point by the defer above.
return nil
}(); err != nil {
return err
}
destInfos := make([]types.BlobInfo, numLayers)
diffIDs := make([]digest.Digest, numLayers)
for i, cld := range data {
if cld.err != nil {
return cld.err
}
destInfos[i] = cld.destInfo
diffIDs[i] = cld.diffID
}
// WARNING: If you are adding new reasons to change ic.manifestUpdates, also update the
// OptimizeDestinationImageAlreadyExists short-circuit conditions
ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
if ic.diffIDsAreNeeded {
ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
}
if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
ic.manifestUpdates.LayerInfos = destInfos
}
return nil
}
// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields)
func layerDigestsDiffer(a, b []types.BlobInfo) bool {
return !slices.EqualFunc(a, b, func(a, b types.BlobInfo) bool {
return a.Digest == b.Digest
})
}
// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary,
// stores the resulting config and manifest to the destination, and returns the stored manifest
// and its digest.
func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) {
var pendingImage types.Image = ic.src
if !ic.noPendingManifestUpdates() {
if ic.cannotModifyManifestReason != "" {
return nil, "", fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason)
}
if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) {
// We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
// So, this can only happen if we are trying to upload using one of the other MIME type candidates.
// Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise
// when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
// Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
// If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
return nil, "", fmt.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
}
pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
if err != nil {
return nil, "", fmt.Errorf("creating an updated image manifest: %w", err)
}
pendingImage = pi
}
man, _, err := pendingImage.Manifest(ctx)
if err != nil {
return nil, "", fmt.Errorf("reading manifest: %w", err)
}
if err := ic.copyConfig(ctx, pendingImage); err != nil {
return nil, "", err
}
ic.c.Printf("Writing manifest to image destination\n")
manifestDigest, err := manifest.Digest(man)
if err != nil {
return nil, "", err
}
if instanceDigest != nil {
instanceDigest = &manifestDigest
}
if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil {
logrus.Debugf("Error %v while writing manifest %q", err, string(man))
return nil, "", fmt.Errorf("writing manifest: %w", err)
}
return man, manifestDigest, nil
}
// copyConfig copies config.json, if any, from src to dest.
func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
srcInfo := src.ConfigInfo()
if srcInfo.Digest != "" {
if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
// This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
return fmt.Errorf("copying config: %w", err)
}
defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
progressPool := ic.c.newProgressPool()
defer progressPool.Wait()
bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
defer bar.Abort(false)
ic.c.printCopyInfo("config", srcInfo)
configBlob, err := src.ConfigBlob(ctx)
if err != nil {
return types.BlobInfo{}, fmt.Errorf("reading config blob %s: %w", srcInfo.Digest, err)
}
destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false)
if err != nil {
return types.BlobInfo{}, err
}
bar.mark100PercentComplete()
return destInfo, nil
}()
if err != nil {
return err
}
if destInfo.Digest != srcInfo.Digest {
return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
}
}
return nil
}
// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
type diffIDResult struct {
digest digest.Digest
err error
}
// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
// srcRef can be used as an additional hint to the destination during checking whether a layer can be reused but srcRef can be nil.
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) {
// If the srcInfo doesn't contain compression information, try to compute it from the
// MediaType, which was either read from a manifest by way of LayerInfos() or constructed
// by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob,
// the BlobInfo we return will be passed to UpdatedImage() and then to UpdateLayerInfos(),
// which uses the compression information to compute the updated MediaType values.
// (Sadly UpdatedImage() is documented to not update MediaTypes from
// ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
//
// This MIME type → compression mapping belongs in manifest-specific code in our manifest
// package (but we should preferably replace/change UpdatedImage instead of productizing
// this workaround).
if srcInfo.CompressionAlgorithm == nil {
switch srcInfo.MediaType {
case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
srcInfo.CompressionAlgorithm = &compression.Gzip
case imgspecv1.MediaTypeImageLayerZstd:
srcInfo.CompressionAlgorithm = &compression.Zstd
}
}
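// Illustration (not additional behavior): a layer with MediaType
// "application/vnd.docker.image.rootfs.diff.tar.gzip" is assumed to be gzip-compressed here,
// so if this copy later recompresses it to zstd, the recorded CompressionOperation/CompressionAlgorithm
// let UpdateLayerInfos() derive the matching new MediaType.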
ic.c.printCopyInfo("blob", srcInfo)
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
// When encrypting or decrypting, only use the simple code path. We might be able to optimize more
// (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
// but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
// Don’t read the layer from the source if we already have the blob, and optimizations are acceptable.
if canAvoidProcessingCompleteLayer {
canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
canSubstitute := ic.canSubstituteBlobs && canChangeLayerCompression
// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
// the ImageDestination interface lets us pass in.
reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
Cache: ic.c.blobInfoCache,
CanSubstitute: canSubstitute,
EmptyLayer: emptyLayer,
LayerIndex: &layerIndex,
SrcRef: srcRef,
})
if err != nil {
return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
}
if reused {
logrus.Debugf("Skipping blob %s (already present)", srcInfo.Digest)
func() { // A scope for defer
bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", "skipped: already exists")
defer bar.Abort(false)
bar.mark100PercentComplete()
}()
// Throw an event that the layer has been skipped
if ic.c.progress != nil && ic.c.progressInterval > 0 {
ic.c.progress <- types.ProgressProperties{
Event: types.ProgressEventSkipped,
Artifact: srcInfo,
}
}
return updatedBlobInfoFromReuse(srcInfo, reusedBlob), cachedDiffID, nil
}
}
// A partial pull is managed by the destination storage, which decides what portions
// of the source file are not yet known and must be fetched.
// Attempt a partial pull only when the source can return a blob partially and
// the destination supports it.
if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
hideProgressBar := true
defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
bar.Abort(hideProgressBar)
}()
proxy := blobChunkAccessorProxy{
wrapped: ic.c.rawSource,
bar: bar,
}
uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
if err == nil {
if srcInfo.Size != -1 {
bar.SetRefill(srcInfo.Size - bar.Current())
}
bar.mark100PercentComplete()
hideProgressBar = false
logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob)
}
logrus.Debugf("Failed to retrieve partial blob: %v", err)
return false, types.BlobInfo{}
}(); reused {
return blobInfo, cachedDiffID, nil
}
}
// Fallback: copy the layer, computing the diffID if we need to do so
return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
defer bar.Abort(false)
srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
if err != nil {
return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
}
defer srcStream.Close()
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
if err != nil {
return types.BlobInfo{}, "", err
}
diffID := cachedDiffID
if diffIDIsNeeded {
select {
case <-ctx.Done():
return types.BlobInfo{}, "", ctx.Err()
case diffIDResult := <-diffIDChan:
if diffIDResult.err != nil {
return types.BlobInfo{}, "", fmt.Errorf("computing layer DiffID: %w", diffIDResult.err)
}
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
// Don’t record any associations that involve encrypted data. This is a bit crude,
// some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
// might be safe, but it’s not trivially obvious, so let’s be conservative for now.
// This crude approach also means we don’t need to record whether a blob is encrypted
// in the blob info cache (which would probably be necessary for any more complex logic),
// and the simplicity is attractive.
if !encryptingOrDecrypting {
// This is safe because we have just computed diffIDResult.digest ourselves, and in the process
// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
}
diffID = diffIDResult.digest
}
}
bar.mark100PercentComplete()
return blobInfo, diffID, nil
}()
}
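// To summarize the flow above (a reading aid, not extra logic), copyLayer tries three strategies in order:
// 1. reuse a blob already present at the destination via TryReusingBlobWithOptions (no data transfer);
// 2. a partial pull via GetBlobAt/PutBlobPartial when both transports support it;
// 3. a full GetBlob + copyLayerFromStream copy, computing the DiffID on the side when needed.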
// updatedBlobInfoFromReuse returns inputInfo updated with reusedBlob which was created based on inputInfo.
func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.ReusedBlob) types.BlobInfo {
// The transport is only tasked with finding the blob, determining its size if necessary, and returning the right
// compression format if the blob was substituted.
// Handling of compression, encryption, and the related MIME types and the like are all the responsibility
// of the generic code in this package.
res := types.BlobInfo{
Digest: reusedBlob.Digest,
Size: reusedBlob.Size,
URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
Annotations: inputInfo.Annotations,
MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
CompressionOperation: reusedBlob.CompressionOperation,
CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset anyway.
}
// The transport is only expected to fill CompressionOperation and CompressionAlgorithm
// if the blob was substituted; otherwise, fill it in based
// on what we know from the srcInfos we were given.
if reusedBlob.Digest == inputInfo.Digest {
res.CompressionOperation = inputInfo.CompressionOperation
res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
}
return res
}
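// For example (hypothetical values): if the destination substituted a zstd copy for a gzip layer,
// reusedBlob carries the new Digest/Size and CompressionAlgorithm = zstd while Annotations and
// MediaType still come from inputInfo; if the digest is unchanged, the compression fields from
// inputInfo are kept instead.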
// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
// it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
// perhaps (de/re/)compressing the stream,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
var diffIDChan chan diffIDResult
err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError(err) below
if diffIDIsNeeded {
diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
pipeReader, pipeWriter := io.Pipe()
defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
_ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
}()
getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer {
// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
// reading from the pipe has failed, we don’t really care.
// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
// the return value includes an error indication, which we do check.
//
// If this never gets called, pipeReader will not be used anywhere, but pipeWriter will only be
// closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
return pipeWriter
}
}
blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
return blobInfo, diffIDChan, err
// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
}
// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) {
result := diffIDResult{
digest: "",
err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
}
defer func() { dest <- result }()
defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead.
result.digest, result.err = computeDiffID(layerStream, decompressor)
}
// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) {
if decompressor != nil {
s, err := decompressor(stream)
if err != nil {
return "", err
}
defer s.Close()
stream = s
}
return digest.Canonical.FromReader(stream)
}
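// A minimal usage sketch for computeDiffID (hypothetical caller; gzipDecompressor is an inline
// stand-in matching compressiontypes.DecompressorFunc, not an API of this package):
//
//	f, err := os.Open("layer.tar.gz")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	gzipDecompressor := func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) }
//	diffID, err := computeDiffID(f, gzipDecompressor) // sha256 digest of the uncompressed stream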


@ -132,11 +132,11 @@ func (d *dirImageDestination) Close() error {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob")
if err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
succeeded := false
explicitClosed := false
@ -153,14 +153,14 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(blobFile, stream)
if err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
@ -169,7 +169,7 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
// always fails on Windows.
if runtime.GOOS != "windows" {
if err := blobFile.Chmod(0644); err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
}
@ -178,30 +178,32 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
blobFile.Close()
explicitClosed = true
if err := os.Rename(blobFile.Name(), blobPath); err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
succeeded = true
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
return types.BlobInfo{Digest: blobDigest, Size: size}, nil
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil).
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest")
return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with unknown digest")
}
blobPath := d.ref.layerPath(info.Digest)
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
return false, private.ReusedBlob{}, nil
return false, types.BlobInfo{}, nil
}
if err != nil {
return false, private.ReusedBlob{}, err
return false, types.BlobInfo{}, err
}
return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
}
// PutManifest writes manifest to the destination.


@ -8,9 +8,9 @@ import (
"github.com/containers/image/v5/internal/imagesource/impl"
"github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)


@ -89,7 +89,7 @@ func (ref dirReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref dirReference) StringWithinTransport() string {
return ref.path


@ -9,7 +9,7 @@ import (
// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
// To do so, all elements of the input path must exist; as a special case, the final component may be
// a non-existent name (but not a symlink pointing to a non-existent name)
// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
// This is intended as a a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc.
func ResolvePathToFullyExplicit(path string) (string, error) {
switch _, err := os.Lstat(path); {
case err == nil:


@ -1,6 +1,8 @@
package archive
import (
"context"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
@ -13,7 +15,7 @@ type archiveImageSource struct {
// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) {
func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (private.ImageSource, error) {
var archive *tarfile.Reader
var closeArchive bool
if ref.archiveReader != nil {


@ -62,23 +62,24 @@ func ParseReference(refString string) (types.ImageReference, error) {
return nil, fmt.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
}
path, tagOrIndex, gotTagOrIndex := strings.Cut(refString, ":")
parts := strings.SplitN(refString, ":", 2)
path := parts[0]
var nt reference.NamedTagged
sourceIndex := -1
if gotTagOrIndex {
if len(parts) == 2 {
// A :tag or :@index was specified.
if len(tagOrIndex) > 0 && tagOrIndex[0] == '@' {
i, err := strconv.Atoi(tagOrIndex[1:])
if len(parts[1]) > 0 && parts[1][0] == '@' {
i, err := strconv.Atoi(parts[1][1:])
if err != nil {
return nil, fmt.Errorf("Invalid source index %s: %w", tagOrIndex, err)
return nil, fmt.Errorf("Invalid source index %s: %w", parts[1], err)
}
if i < 0 {
return nil, fmt.Errorf("Invalid source index @%d: must not be negative", i)
}
sourceIndex = i
} else {
ref, err := reference.ParseNormalizedNamed(tagOrIndex)
ref, err := reference.ParseNormalizedNamed(parts[1])
if err != nil {
return nil, fmt.Errorf("docker-archive parsing reference: %w", err)
}
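// For reference, the two parsing styles interleaved above are equivalent (sketch; strings.Cut needs Go 1.18+):
//
//	path, tagOrIndex, gotTagOrIndex := strings.Cut(refString, ":")
//	// behaves like:
//	parts := strings.SplitN(refString, ":", 2)       // ["path"] or ["path", "tagOrIndex"]
//	path, gotTagOrIndex := parts[0], len(parts) == 2 // parts[1] plays the role of tagOrIndex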
@ -136,7 +137,7 @@ func (ref archiveReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref archiveReference) StringWithinTransport() string {
switch {
@ -190,7 +191,7 @@ func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemConte
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
return newImageSource(sys, ref)
return newImageSource(ctx, sys, ref)
}
// NewImageDestination returns a types.ImageDestination for this reference.


@ -19,7 +19,7 @@ type Writer struct {
archive *tarfile.Writer
writer io.Closer
// The following state can only be accessed with the mutex held.
// The following state can only be acccessed with the mutex held.
mutex sync.Mutex
hadCommit bool // At least one successful commit has happened
}


@ -41,7 +41,7 @@ type bodyReader struct {
}
// newBodyReader creates a bodyReader for request path in c.
// firstBody is an already correctly opened body for the blob, returning the full blob from the start.
// firstBody is an already correctly opened body for the blob, returing the full blob from the start.
// If reading from firstBody fails, bodyReader may heuristically decide to resume.
func newBodyReader(ctx context.Context, c *dockerClient, path string, firstBody io.ReadCloser) (io.ReadCloser, error) {
logURL, err := c.resolveRequestURL(path)
@ -193,7 +193,7 @@ func (br *bodyReader) Read(p []byte) (int, error) {
return n, fmt.Errorf("%w (after reconnecting, fetching blob: %v)", originalErr, err)
}
logrus.Debugf("Successfully reconnected to %s", redactedURL)
logrus.Debugf("Succesfully reconnected to %s", redactedURL)
consumedBody = true
br.body = res.Body
br.lastRetryOffset = br.offset


@ -69,7 +69,6 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
// imageLoadGoroutine accepts a tar stream on reader, sends it to c, and reports error or success by writing to statusChannel
func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) {
defer c.Close()
err := errors.New("Internal error: unexpected panic in imageLoadGoroutine")
defer func() {
logrus.Debugf("docker-daemon: sending done, status %v", err)


@ -28,8 +28,6 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef
if err != nil {
return nil, fmt.Errorf("initializing docker engine client: %w", err)
}
defer c.Close()
// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
// Either way ImageSave should create a tarball with exactly one image.
inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})


@ -118,7 +118,7 @@ func (ref daemonReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
// instead, see transports.ImageName().
func (ref daemonReference) StringWithinTransport() string {


@ -213,7 +213,6 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
// signatureBase is always set in the return value
// The caller must call .Close() on the returned client when done.
func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, registryConfig *registryConfiguration, write bool, actions string) (*dockerClient, error) {
auth, err := config.GetCredentialsForRef(sys, ref.ref)
if err != nil {
@ -248,7 +247,6 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, regis
// (e.g., "registry.com[:5000][/some/namespace]/repo").
// Please note that newDockerClient does not set all members of dockerClient
// (e.g., username and password); those must be set by callers if necessary.
// The caller must call .Close() on the returned client when done.
func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
hostName := registry
if registry == dockerHostname {
@ -304,7 +302,6 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
if err != nil {
return fmt.Errorf("creating new docker client: %w", err)
}
defer client.Close()
client.auth = types.DockerAuthConfig{
Username: username,
Password: password,
@ -374,7 +371,6 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
if err != nil {
return nil, fmt.Errorf("creating new docker client: %w", err)
}
defer client.Close()
client.auth = auth
if sys != nil {
client.registryToken = sys.DockerBearerRegistryToken
@ -452,8 +448,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
if link == "" {
break
}
linkURLPart, _, _ := strings.Cut(link, ";")
linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>"))
linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
linkURL, err := url.Parse(linkURLStr)
if err != nil {
return searchRes, err
}
@ -600,7 +596,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri
case <-time.After(delay):
// Nothing
}
delay *= 2 // If the registry does not specify a delay, back off exponentially.
delay = delay * 2 // If the registry does not specify a delay, back off exponentially.
}
}
@ -989,7 +985,7 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty
return reconnectingReader, blobSize, nil
}
// getOCIDescriptorContents returns the contents a blob specified by descriptor in ref, which must fit within limit.
// getOCIDescriptorContents returns the contents a blob spcified by descriptor in ref, which must fit within limit.
func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) {
// Note that this copies all kinds of attachments: attestations, and whatever else is there,
// not just signatures. We leave the signature consumers to decide based on the MIME type.
@ -998,7 +994,7 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR
return nil, err
}
defer reader.Close()
payload, err := iolimits.ReadAtMost(reader, maxSize)
payload, err := iolimits.ReadAtMost(reader, iolimits.MaxSignatureBodySize)
if err != nil {
return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
}
@ -1088,11 +1084,3 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
func sigstoreAttachmentTag(d digest.Digest) string {
return strings.Replace(d.String(), ":", "-", 1) + ".sig"
}
// Close removes resources associated with an initialized dockerClient, if any.
func (c *dockerClient) Close() error {
if c.client != nil {
c.client.CloseIdleConnections()
}
return nil
}


@ -68,7 +68,6 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
if err != nil {
return nil, fmt.Errorf("failed to create client: %w", err)
}
defer client.Close()
tags := make([]string, 0)
@ -95,8 +94,8 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
break
}
linkURLPart, _, _ := strings.Cut(link, ";")
linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>"))
linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
linkURL, err := url.Parse(linkURLStr)
if err != nil {
return tags, err
}
@ -137,7 +136,6 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
if err != nil {
return "", fmt.Errorf("failed to create client: %w", err)
}
defer client.Close()
path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest)
headers := map[string][]string{


@ -21,7 +21,6 @@ import (
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/internal/streamdigest"
"github.com/containers/image/v5/internal/uploadreader"
@ -33,8 +32,6 @@ import (
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
type dockerImageDestination struct {
@ -93,7 +90,7 @@ func (d *dockerImageDestination) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *dockerImageDestination) Close() error {
return d.c.Close()
return nil
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
@ -132,8 +129,8 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
// If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
// This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
// the source blob is uncompressed, and the destination blob is being compressed "on the fly".
@ -141,7 +138,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref))
streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo)
if err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
defer cleanup()
stream = streamCopy
@ -152,10 +149,10 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, options.Cache)
if err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
if haveBlob {
return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil
return reusedInfo, nil
}
}
@ -164,16 +161,16 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
logrus.Debugf("Uploading %s", uploadPath)
res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil)
if err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res)
return private.UploadedBlob{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res))
return types.BlobInfo{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res))
}
uploadLocation, err := res.Location()
if err != nil {
return private.UploadedBlob{}, fmt.Errorf("determining upload URL: %w", err)
return types.BlobInfo{}, fmt.Errorf("determining upload URL: %w", err)
}
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
@ -201,7 +198,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
return uploadLocation, nil
}()
if err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
blobDigest := digester.Digest()
@ -212,17 +209,17 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
uploadLocation.RawQuery = locationQuery.Encode()
res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
if err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res)
return private.UploadedBlob{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res))
return types.BlobInfo{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res))
}
logrus.Debugf("Upload of layer %s complete", blobDigest)
options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref))
return private.UploadedBlob{Digest: blobDigest, Size: sizeCounter.size}, nil
return types.BlobInfo{Digest: blobDigest, Size: sizeCounter.size}, nil
}
// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
@ -299,32 +296,34 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
// tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified
// blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read.
// The caller must ensure info.Digest is set.
func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, private.ReusedBlob, error) {
func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, types.BlobInfo, error) {
exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
if err != nil {
return false, private.ReusedBlob{}, err
return false, types.BlobInfo{}, err
}
if exists {
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
}
return false, private.ReusedBlob{}, nil
return false, types.BlobInfo{}, nil
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil).
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
}
// First, check whether the blob happens to already exist at the destination.
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
if err != nil {
return false, private.ReusedBlob{}, err
return false, types.BlobInfo{}, err
}
if haveBlob {
return true, reusedInfo, nil
@ -394,14 +393,10 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
continue
}
return true, private.ReusedBlob{
Digest: candidate.Digest,
Size: size,
CompressionOperation: compressionOperation,
CompressionAlgorithm: compressionAlgorithm}, nil
return true, types.BlobInfo{Digest: candidate.Digest, MediaType: info.MediaType, Size: size, CompressionOperation: compressionOperation, CompressionAlgorithm: compressionAlgorithm}, nil
}
return false, private.ReusedBlob{}, nil
return false, types.BlobInfo{}, nil
}
// PutManifest writes manifest to the destination.
@ -736,15 +731,24 @@ func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string,
// But right now we don’t want to deal with corner cases like bad digest formats
// or unavailable algorithms; in the worst case we end up with duplicate signature
// entries.
layer.Digest.String() != digest.FromBytes(payloadBlob).String() ||
!maps.Equal(layer.Annotations, annotations) {
layer.Digest.String() != digest.FromBytes(payloadBlob).String() {
return false
}
if len(layer.Annotations) != len(annotations) {
return false
}
for k, v1 := range layer.Annotations {
if v2, ok := annotations[k]; !ok || v1 != v2 {
return false
}
}
// All annotations in layer exist in sig, and the number of annotations is the same, so all annotations
// in sig also exist in layer.
return true
}
// putBlobBytesAsOCI uploads a blob with the specified contents, and returns an appropriate
// OCI descriptor.
// OCI descriptior.
func (d *dockerImageDestination) putBlobBytesAsOCI(ctx context.Context, contents []byte, mimeType string, options private.PutBlobOptions) (imgspecv1.Descriptor, error) {
blobDigest := digest.FromBytes(contents)
info, err := d.PutBlobWithOptions(ctx, bytes.NewReader(contents),
@ -799,11 +803,12 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
if err != nil {
return err
}
existingSigNames := set.New[string]()
existingSigNames := map[string]struct{}{}
for _, sig := range existingSignatures.Signatures {
existingSigNames.Add(sig.Name)
existingSigNames[sig.Name] = struct{}{}
}
sigExists:
for _, newSigWithFormat := range signatures {
newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
if !ok {
@ -811,10 +816,10 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
}
newSig := newSigSimple.UntrustedSignature()
if slices.ContainsFunc(existingSignatures.Signatures, func(existingSig extensionSignature) bool {
return existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig)
}) {
continue
for _, existingSig := range existingSignatures.Signatures {
if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
continue sigExists
}
}
// The API expects us to invent a new unique name. This is racy, but hopefully good enough.
@ -826,7 +831,7 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
return fmt.Errorf("generating random signature len %d: %w", n, err)
}
signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes)
if !existingSigNames.Contains(signatureName) {
if _, ok := existingSigNames[signatureName]; !ok {
break
}
}
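// The set/map pair interleaved above is interchangeable (sketch): the generic helper from
// internal/set wraps the classic map-as-set idiom,
//
//	s := map[string]struct{}{}
//	s[name] = struct{}{}   // set.New[string]() + s.Add(name)
//	_, ok := s[name]       // s.Contains(name)
//
// and slices.ContainsFunc(sigs, pred) replaces the labeled continue-loop form.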


@ -153,7 +153,6 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logica
s.Compat = impl.AddCompat(s)
if err := s.ensureManifestIsLoaded(ctx); err != nil {
client.Close()
return nil, err
}
return s, nil
@ -167,7 +166,7 @@ func (s *dockerImageSource) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageSource, if any.
func (s *dockerImageSource) Close() error {
return s.c.Close()
return nil
}
// simplifyContentType drops parameters from an HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
@ -251,7 +250,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
currentOffset += toSkip
}
s := signalCloseReader{
closed: make(chan struct{}),
closed: make(chan interface{}),
stream: io.NopCloser(io.LimitReader(body, int64(c.Length))),
consumeStream: true,
}
@ -293,7 +292,7 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
return
}
s := signalCloseReader{
closed: make(chan struct{}),
closed: make(chan interface{}),
stream: p,
}
streams <- s
@ -336,7 +335,7 @@ func parseMediaType(contentType string) (string, map[string]string, error) {
func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
headers := make(map[string][]string)
rangeVals := make([]string, 0, len(chunks))
var rangeVals []string
for _, c := range chunks {
rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
}
@ -606,7 +605,6 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
if err != nil {
return err
}
defer c.Close()
headers := map[string][]string{
"Accept": manifest.DefaultRequestedManifestMIMETypes,
@ -768,7 +766,7 @@ func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint)
}
type signalCloseReader struct {
closed chan struct{}
closed chan interface{}
stream io.ReadCloser
consumeStream bool
}
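// Either element type works for closed: the channel is only ever closed to signal completion,
// never sent on, so chan struct{} is merely the conventional zero-size spelling and
// chan interface{} the pre-generics spelling used by the older code.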


@ -92,7 +92,7 @@ func (ref dockerReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref dockerReference) StringWithinTransport() string {
return "//" + reference.FamiliarString(ref.ref)


@ -40,7 +40,7 @@ func httpResponseToError(res *http.Response, context string) error {
return ErrUnauthorizedForCredentials{Err: err}
default:
if context != "" {
context += ": "
context = context + ": "
}
return fmt.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
}


@ -76,15 +76,15 @@ func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest == "" {
logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo)
if err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
defer cleanup()
stream = streamCopy
@ -92,45 +92,47 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,
}
if err := d.archive.lock(); err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
defer d.archive.unlock()
// Maybe the blob has already been sent
ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo)
if err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
if ok {
return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil
return reusedInfo, nil
}
if options.IsConfig {
buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
if err != nil {
return private.UploadedBlob{}, fmt.Errorf("reading Config file stream: %w", err)
return types.BlobInfo{}, fmt.Errorf("reading Config file stream: %w", err)
}
d.config = buf
if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
return private.UploadedBlob{}, fmt.Errorf("writing Config file: %w", err)
return types.BlobInfo{}, fmt.Errorf("writing Config file: %w", err)
}
} else {
if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
}
d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size})
return private.UploadedBlob{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil).
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
if err := d.archive.lock(); err != nil {
return false, private.ReusedBlob{}, err
return false, types.BlobInfo{}, err
}
defer d.archive.unlock()


@ -13,13 +13,10 @@ import (
"time"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// Writer allows creating a (docker save)-formatted tar archive containing one or more images.
@ -32,7 +29,7 @@ type Writer struct {
// Other state.
blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
repositories map[string]map[string]string
legacyLayers *set.Set[string] // A set of IDs of legacy layers that have been already sent.
legacyLayers map[string]struct{} // A set of IDs of legacy layers that have been already sent.
manifest []ManifestItem
manifestByConfig map[digest.Digest]int // A map from config digest to an entry index in manifest above.
}
@ -45,7 +42,7 @@ func NewWriter(dest io.Writer) *Writer {
tar: tar.NewWriter(dest),
blobs: make(map[digest.Digest]types.BlobInfo),
repositories: map[string]map[string]string{},
legacyLayers: set.New[string](),
legacyLayers: map[string]struct{}{},
manifestByConfig: map[digest.Digest]int{},
}
}
@ -70,17 +67,17 @@ func (w *Writer) unlock() {
// tryReusingBlobLocked checks whether the transport already contains a blob, and if so, returns its metadata.
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil).
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// The caller must have locked the Writer.
func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, private.ReusedBlob, error) {
func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
}
if blob, ok := w.blobs[info.Digest]; ok {
return true, private.ReusedBlob{Digest: info.Digest, Size: blob.Size}, nil
return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
}
return false, private.ReusedBlob{}, nil
return false, types.BlobInfo{}, nil
}
// recordBlobLocked records metadata of a recorded blob, which must contain at least a digest and size.
@ -92,7 +89,7 @@ func (w *Writer) recordBlobLocked(info types.BlobInfo) {
// ensureSingleLegacyLayerLocked writes legacy VERSION and configuration files for a single layer
// The caller must have locked the Writer.
func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest digest.Digest, configBytes []byte) error {
if !w.legacyLayers.Contains(layerID) {
if _, ok := w.legacyLayers[layerID]; !ok {
// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
// See also the comment in physicalLayerPath.
physicalLayerPath := w.physicalLayerPath(layerDigest)
@ -109,7 +106,7 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges
return fmt.Errorf("writing config json file: %w", err)
}
w.legacyLayers.Add(layerID)
w.legacyLayers[layerID] = struct{}{}
}
return nil
}
@ -120,7 +117,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
lastLayerID := ""
for i, l := range layerDescriptors {
// The legacy format requires a config file per layer
layerConfig := make(map[string]any)
layerConfig := make(map[string]interface{})
// The root layer doesn't have any parent
if lastLayerID != "" {
@ -191,9 +188,14 @@ func checkManifestItemsMatch(a, b *ManifestItem) error {
if a.Config != b.Config {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with configs %#v vs. %#v", a.Config, b.Config)
}
if !slices.Equal(a.Layers, b.Layers) {
if len(a.Layers) != len(b.Layers) {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers %#v vs. %#v", a.Layers, b.Layers)
}
for i := range a.Layers {
if a.Layers[i] != b.Layers[i] {
return fmt.Errorf("Internal error: Trying to reuse ManifestItem values with layers[i] %#v vs. %#v", a.Layers[i], b.Layers[i])
}
}
// Ignore RepoTags, that will be built later.
// Ignore Parent and LayerSources, which we don’t set to anything meaningful.
return nil
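// slices.Equal(a.Layers, b.Layers) (golang.org/x/exp/slices) is the generic equivalent of the
// explicit length check plus index loop above; both compare the layer lists element-wise.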
@ -227,9 +229,9 @@ func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des
item = &w.manifest[i]
}
knownRepoTags := set.New[string]()
knownRepoTags := map[string]struct{}{}
for _, repoTag := range item.RepoTags {
knownRepoTags.Add(repoTag)
knownRepoTags[repoTag] = struct{}{}
}
for _, tag := range repoTags {
// For github.com/docker/docker consumers, this works just as well as
@ -250,9 +252,9 @@ func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Des
// analysis and explanation.
refString := fmt.Sprintf("%s:%s", tag.Name(), tag.Tag())
if !knownRepoTags.Contains(refString) {
if _, ok := knownRepoTags[refString]; !ok {
item.RepoTags = append(item.RepoTags, refString)
knownRepoTags.Add(refString)
knownRepoTags[refString] = struct{}{}
}
}
@ -335,7 +337,7 @@ func (t *tarFI) ModTime() time.Time {
func (t *tarFI) IsDir() bool {
return false
}
func (t *tarFI) Sys() any {
func (t *tarFI) Sys() interface{} {
return nil
}


@ -104,7 +104,7 @@ func splitDockerDomain(name string) (domain, remainder string) {
}
// familiarizeName returns a shortened version of the name familiar
// to the Docker UI. Familiar names have the default domain
// to to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".


@ -8,8 +8,8 @@
// domain := domain-component ['.' domain-component]* [':' port-number]
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
// path-component := alphanumeric [separator alphanumeric]*
// alphanumeric := /[a-z0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]*
// alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/
@ -175,7 +175,7 @@ func splitDomain(name string) (string, string) {
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
// Deprecated: Use Domain or Path
// DEPRECATED: Use Domain or Path
func SplitHostname(named Named) (string, string) {
if r, ok := named.(namedRepository); ok {
return r.Domain(), r.Path()


@ -1,10 +1,9 @@
package reference
import (
storageRegexp "github.com/containers/storage/pkg/regexp"
"regexp"
"strings"
storageRegexp "github.com/containers/storage/pkg/regexp"
)
const (


@ -13,9 +13,9 @@ import (
"github.com/containers/image/v5/internal/rootless"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
"github.com/ghodss/yaml"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v3"
)
// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
@ -39,18 +39,18 @@ var defaultDockerDir = "/var/lib/containers/sigstore"
// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
// NOTE: Keep this in sync with docs/registries.d.md!
type registryConfiguration struct {
DefaultDocker *registryNamespace `yaml:"default-docker"`
DefaultDocker *registryNamespace `json:"default-docker"`
// The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*),
Docker map[string]registryNamespace `yaml:"docker"`
Docker map[string]registryNamespace `json:"docker"`
}
// registryNamespace defines lookaside locations for a single namespace.
type registryNamespace struct {
Lookaside string `yaml:"lookaside"` // For reading, and if LookasideStaging is not present, for writing.
LookasideStaging string `yaml:"lookaside-staging"` // For writing only.
SigStore string `yaml:"sigstore"` // For compatibility, deprecated in favor of Lookaside.
SigStoreStaging string `yaml:"sigstore-staging"` // For compatibility, deprecated in favor of LookasideStaging.
UseSigstoreAttachments *bool `yaml:"use-sigstore-attachments,omitempty"`
Lookaside string `json:"lookaside"` // For reading, and if LookasideStaging is not present, for writing.
LookasideStaging string `json:"lookaside-staging"` // For writing only.
SigStore string `json:"sigstore"` // For compatibility, deprecated in favor of Lookaside.
SigStoreStaging string `json:"sigstore-staging"` // For compatibility, deprecated in favor of LookasideStaging.
UseSigstoreAttachments *bool `json:"use-sigstore-attachments,omitempty"`
}
// lookasideStorageBase is an "opaque" type representing a lookaside Docker signature storage.
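Note the json struct tags above: this side of the diff parses registries.d with github.com/ghodss/yaml, which converts YAML to JSON before unmarshaling, so JSON tags drive the field mapping. A minimal sketch with a made-up lookaside URL:

package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

// namespace is a hypothetical cut-down analogue of registryNamespace.
type namespace struct {
	Lookaside string `json:"lookaside"`
}

func main() {
	data := []byte("lookaside: https://sigs.example.com/store\n")
	var ns namespace
	// ghodss/yaml translates YAML to JSON internally, so the json tag is honored.
	if err := yaml.Unmarshal(data, &ns); err != nil {
		panic(err)
	}
	fmt.Println(ns.Lookaside)
}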

View File

@ -149,7 +149,7 @@ func expectTokenOrQuoted(s string) (value string, rest string) {
p := make([]byte, len(s)-1)
j := copy(p, s[:i])
escape := true
for i++; i < len(s); i++ {
for i = i + 1; i < len(s); i++ {
b := s[i]
switch {
case escape:

View File

@ -4,7 +4,7 @@ import (
"context"
"fmt"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
)

View File

@ -381,7 +381,7 @@ func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwawa
delete(rawContents, "rootfs")
delete(rawContents, "history")
updates := map[string]any{"id": v1ID}
updates := map[string]interface{}{"id": v1ID}
if parentV1ID != "" {
updates["parent"] = parentV1ID
}
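The function above edits the config as a raw map[string]interface{} so that fields it does not know about survive the round trip. A self-contained sketch of that technique (all values are made up):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	configJSON := []byte(`{"architecture":"amd64","rootfs":{"type":"layers"},"history":[{}]}`)
	var raw map[string]interface{}
	if err := json.Unmarshal(configJSON, &raw); err != nil {
		panic(err)
	}
	delete(raw, "rootfs") // v2-only fields have no place in a v1 config
	delete(raw, "history")
	raw["id"] = "hypothetical-v1-id"
	raw["parent"] = "hypothetical-parent-id"
	out, err := json.Marshal(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}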

View File

@ -4,7 +4,7 @@ import (
"context"
"fmt"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
)

View File

@ -10,7 +10,7 @@ import (
)
// FromReference returns a types.ImageCloser implementation for the default instance reading from reference.
// If reference points to a manifest list, .Manifest() still returns the manifest list,
// If reference poitns to a manifest list, .Manifest() still returns the manifest list,
// but other methods transparently return data from an appropriate image instance.
//
// The caller must call .Close() on the returned ImageCloser.

View File

@ -43,17 +43,10 @@ func AddCompat(dest private.ImageDestinationInternalOnly) Compat {
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
res, err := c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
return c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
Cache: blobinfocache.FromBlobInfoCache(cache),
IsConfig: isConfig,
})
if err != nil {
return types.BlobInfo{}, err
}
return types.BlobInfo{
Digest: res.Digest,
Size: res.Size,
}, nil
}
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
@ -66,26 +59,10 @@ func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (c *Compat) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
reused, blob, err := c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{
return c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{
Cache: blobinfocache.FromBlobInfoCache(cache),
CanSubstitute: canSubstitute,
})
if !reused || err != nil {
return reused, types.BlobInfo{}, err
}
res := types.BlobInfo{
Digest: blob.Digest,
Size: blob.Size,
CompressionOperation: blob.CompressionOperation,
CompressionAlgorithm: blob.CompressionAlgorithm,
}
// This is probably not necessary; we preserve MediaType to decrease risks of breaking for external callers.
// Some transports were not setting the MediaType field anyway, and others were setting the old value on substitution;
// provide the value in cases where it is likely to be correct.
if blob.Digest == info.Digest {
res.MediaType = info.MediaType
}
return true, res, nil
}
// PutSignatures writes a set of signatures to the destination.

View File

@ -39,8 +39,8 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool {
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
}
// ImplementsPutBlobPartial implements SupportsPutBlobPartial() that returns true.

View File

@ -46,34 +46,20 @@ func FromPublic(dest types.ImageDestination) private.ImageDestination {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
res, err := w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
if err != nil {
return private.UploadedBlob{}, err
}
return private.UploadedBlob{
Digest: res.Digest,
Size: res.Size,
}, nil
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil).
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
if !reused || err != nil {
return reused, private.ReusedBlob{}, err
}
return true, private.ReusedBlob{
Digest: blob.Digest,
Size: blob.Size,
CompressionOperation: blob.CompressionOperation,
CompressionAlgorithm: blob.CompressionAlgorithm,
}, nil
func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
}
// PutSignaturesWithFormat writes a set of signatures to the destination.

View File

@ -1,72 +0,0 @@
package manifest
import (
"encoding/json"
"fmt"
)
// AllowedManifestFields is a bit mask of “essential” manifest fields that ValidateUnambiguousManifestFormat
// can expect to be present.
type AllowedManifestFields int
const (
AllowedFieldConfig AllowedManifestFields = 1 << iota
AllowedFieldFSLayers
AllowedFieldHistory
AllowedFieldLayers
AllowedFieldManifests
AllowedFieldFirstUnusedBit // Keep this at the end!
)
// ValidateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than
// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields
// other than the ones the caller specifically allows.
// expectedMIMEType is used only for diagnostics.
// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format
// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous
// data that just isn’t what was expected, as opposed to actually ambiguous data.
func ValidateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
allowed AllowedManifestFields) error {
if allowed >= AllowedFieldFirstUnusedBit {
return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed)
}
// Use a private type to decode, not just a map[string]any, because we want
// to also reject case-insensitive matches (which would be used by Go when really decoding
// the manifest).
// (It is expected that as manifest formats are added or extended over time, more fields will be added
// here.)
detectedFields := struct {
Config any `json:"config"`
FSLayers any `json:"fsLayers"`
History any `json:"history"`
Layers any `json:"layers"`
Manifests any `json:"manifests"`
}{}
if err := json.Unmarshal(manifest, &detectedFields); err != nil {
// The caller was supposed to already validate version numbers, so this should not happen;
// let’s not bother with making this error “nice”.
return err
}
unexpected := []string{}
// Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste.
if detectedFields.Config != nil && (allowed&AllowedFieldConfig) == 0 {
unexpected = append(unexpected, "config")
}
if detectedFields.FSLayers != nil && (allowed&AllowedFieldFSLayers) == 0 {
unexpected = append(unexpected, "fsLayers")
}
if detectedFields.History != nil && (allowed&AllowedFieldHistory) == 0 {
unexpected = append(unexpected, "history")
}
if detectedFields.Layers != nil && (allowed&AllowedFieldLayers) == 0 {
unexpected = append(unexpected, "layers")
}
if detectedFields.Manifests != nil && (allowed&AllowedFieldManifests) == 0 {
unexpected = append(unexpected, "manifests")
}
if len(unexpected) != 0 {
return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`,
unexpected, expectedMIMEType)
}
return nil
}
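A hedged usage sketch, for code with access to this package’s exported API (the manifest blob is contrived):

blob := []byte(`{"schemaVersion": 2, "config": {}, "manifests": []}`)
err := ValidateUnambiguousManifestFormat(blob, "application/example",
	AllowedFieldConfig|AllowedFieldLayers)
// err is non-nil: "manifests" is outside the allowed mask, so the blob is
// rejected as ambiguous between a single manifest and an index.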

View File

@ -1,15 +0,0 @@
package manifest
import (
"github.com/opencontainers/go-digest"
)
// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
//
// This is publicly visible as c/image/manifest.Schema2Descriptor.
type Schema2Descriptor struct {
MediaType string `json:"mediaType"`
Size int64 `json:"size"`
Digest digest.Digest `json:"digest"`
URLs []string `json:"urls,omitempty"`
}
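As an aside, such a descriptor marshals to the familiar JSON descriptor shape; a sketch using the public alias named above (the digest is a made-up value):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/image/v5/manifest"
)

func main() {
	d := manifest.Schema2Descriptor{
		MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
		Size:      1234,
		Digest:    "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
	}
	out, _ := json.Marshal(d)
	fmt.Println(string(out))
}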

View File

@ -1,260 +0,0 @@
package manifest
import (
"encoding/json"
"fmt"
platform "github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
// Schema2PlatformSpec describes the platform which a particular manifest is
// specialized for.
// This is publicly visible as c/image/manifest.Schema2PlatformSpec.
type Schema2PlatformSpec struct {
Architecture string `json:"architecture"`
OS string `json:"os"`
OSVersion string `json:"os.version,omitempty"`
OSFeatures []string `json:"os.features,omitempty"`
Variant string `json:"variant,omitempty"`
Features []string `json:"features,omitempty"` // removed in OCI
}
// Schema2ManifestDescriptor references a platform-specific manifest.
// This is publicly visible as c/image/manifest.Schema2ManifestDescriptor.
type Schema2ManifestDescriptor struct {
Schema2Descriptor
Platform Schema2PlatformSpec `json:"platform"`
}
// Schema2ListPublic is a list of platform-specific manifests.
// This is publicly visible as c/image/manifest.Schema2List.
// Internal users should usually use Schema2List instead.
type Schema2ListPublic struct {
SchemaVersion int `json:"schemaVersion"`
MediaType string `json:"mediaType"`
Manifests []Schema2ManifestDescriptor `json:"manifests"`
}
// MIMEType returns the MIME type of this particular manifest list.
func (list *Schema2ListPublic) MIMEType() string {
return list.MediaType
}
// Instances returns a slice of digests of the manifests that this list knows of.
func (list *Schema2ListPublic) Instances() []digest.Digest {
results := make([]digest.Digest, len(list.Manifests))
for i, m := range list.Manifests {
results[i] = m.Digest
}
return results
}
// Instance returns the ListUpdate of a particular instance in the list.
func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
for _, manifest := range list.Manifests {
if manifest.Digest == instanceDigest {
return ListUpdate{
Digest: manifest.Digest,
Size: manifest.Size,
MediaType: manifest.MediaType,
}, nil
}
}
return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
if len(updates) != len(list.Manifests) {
return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
}
for i := range updates {
if err := updates[i].Digest.Validate(); err != nil {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
}
list.Manifests[i].Digest = updates[i].Digest
if updates[i].Size < 0 {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
}
list.Manifests[i].Size = updates[i].Size
if updates[i].MediaType == "" {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
}
list.Manifests[i].MediaType = updates[i].MediaType
}
return nil
}
func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
// ChooseInstanceByCompression is the same as ChooseInstance for schema2 manifest lists.
return list.ChooseInstance(ctx)
}
// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
// of the image which is appropriate for the current environment.
func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range list.Manifests {
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: slices.Clone(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
return d.Digest, nil
}
}
}
return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
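A usage sketch via the public c/image/manifest package (the comments above note this type is publicly visible as manifest.Schema2List; the list content and platform are illustrative):

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
)

func main() {
	blob := []byte(`{"schemaVersion":2,
		"mediaType":"application/vnd.docker.distribution.manifest.list.v2+json",
		"manifests":[{"mediaType":"application/vnd.docker.distribution.manifest.v2+json",
			"size":1234,
			"digest":"sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
			"platform":{"architecture":"arm64","os":"linux"}}]}`)
	list, err := manifest.Schema2ListFromManifest(blob)
	if err != nil {
		panic(err)
	}
	ctx := &types.SystemContext{OSChoice: "linux", ArchitectureChoice: "arm64"}
	instance, err := list.ChooseInstance(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(instance)
}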
// Serialize returns the list in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (list *Schema2ListPublic) Serialize() ([]byte, error) {
buf, err := json.Marshal(list)
if err != nil {
return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err)
}
return buf, nil
}
// Schema2ListPublicFromComponents creates a Schema2 manifest list instance from the
// supplied data.
// This is publicly visible as c/image/manifest.Schema2ListFromComponents.
func Schema2ListPublicFromComponents(components []Schema2ManifestDescriptor) *Schema2ListPublic {
list := Schema2ListPublic{
SchemaVersion: 2,
MediaType: DockerV2ListMediaType,
Manifests: make([]Schema2ManifestDescriptor, len(components)),
}
for i, component := range components {
m := Schema2ManifestDescriptor{
Schema2Descriptor{
MediaType: component.MediaType,
Size: component.Size,
Digest: component.Digest,
URLs: slices.Clone(component.URLs),
},
Schema2PlatformSpec{
Architecture: component.Platform.Architecture,
OS: component.Platform.OS,
OSVersion: component.Platform.OSVersion,
OSFeatures: slices.Clone(component.Platform.OSFeatures),
Variant: component.Platform.Variant,
Features: slices.Clone(component.Platform.Features),
},
}
list.Manifests[i] = m
}
return &list
}
// Schema2ListPublicClone creates a deep copy of the passed-in list.
// This is publicly visible as c/image/manifest.Schema2ListClone.
func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic {
return Schema2ListPublicFromComponents(list.Manifests)
}
// ToOCI1Index returns the list encoded as an OCI1 index.
func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
for _, manifest := range list.Manifests {
converted := imgspecv1.Descriptor{
MediaType: manifest.MediaType,
Size: manifest.Size,
Digest: manifest.Digest,
URLs: slices.Clone(manifest.URLs),
Platform: &imgspecv1.Platform{
OS: manifest.Platform.OS,
Architecture: manifest.Platform.Architecture,
OSFeatures: slices.Clone(manifest.Platform.OSFeatures),
OSVersion: manifest.Platform.OSVersion,
Variant: manifest.Platform.Variant,
},
}
components = append(components, converted)
}
oci := OCI1IndexPublicFromComponents(components, nil)
return oci, nil
}
// ToSchema2List returns the list encoded as a Schema2 list.
func (list *Schema2ListPublic) ToSchema2List() (*Schema2ListPublic, error) {
return Schema2ListPublicClone(list), nil
}
// Schema2ListPublicFromManifest creates a Schema2 manifest list instance from marshalled
// JSON, presumably generated by encoding a Schema2 manifest list.
// This is publicly visible as c/image/manifest.Schema2ListFromManifest.
func Schema2ListPublicFromManifest(manifest []byte) (*Schema2ListPublic, error) {
list := Schema2ListPublic{
Manifests: []Schema2ManifestDescriptor{},
}
if err := json.Unmarshal(manifest, &list); err != nil {
return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err)
}
if err := ValidateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
AllowedFieldManifests); err != nil {
return nil, err
}
return &list, nil
}
// Clone returns a deep copy of this list and its contents.
func (list *Schema2ListPublic) Clone() ListPublic {
return Schema2ListPublicClone(list)
}
// ConvertToMIMEType converts the passed-in manifest list to a manifest
// list of the specified type.
func (list *Schema2ListPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) {
switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
case DockerV2ListMediaType:
return list.Clone(), nil
case imgspecv1.MediaTypeImageIndex:
return list.ToOCI1Index()
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType)
default:
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
}
}
// Schema2List is a list of platform-specific manifests.
type Schema2List struct {
Schema2ListPublic
}
func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
return &Schema2List{*public}
}
func (index *Schema2List) CloneInternal() List {
return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic))
}
func (index *Schema2List) Clone() ListPublic {
return index.CloneInternal()
}
// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
// JSON, presumably generated by encoding a Schema2 manifest list.
func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
public, err := Schema2ListPublicFromManifest(manifest)
if err != nil {
return nil, err
}
return schema2ListFromPublic(public), nil
}

View File

@ -2,10 +2,6 @@ package manifest
import "fmt"
// FIXME: This is a duplicate of c/image/manifest.DockerV2Schema2ConfigMediaType.
// Deduplicate that, depending on outcome of https://github.com/containers/image/pull/1791 .
const dockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
//
@ -32,9 +28,5 @@ func NewNonImageArtifactError(mimeType string) error {
}
func (e NonImageArtifactError) Error() string {
// Special-case these invalid mixed images, which show up from time to time:
if e.mimeType == dockerV2Schema2ConfigMediaType {
return fmt.Sprintf("invalid mixed OCI image with Docker v2s2 config (%q)", e.mimeType)
}
return fmt.Sprintf("unsupported image-specific operation on artifact with type %q", e.mimeType)
}
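A sketch of the errors.As detection described above, for caller code with access to this type:

// err is an error returned by some image-specific operation.
var naErr NonImageArtifactError
if errors.As(err, &naErr) {
	// The object is an OCI artifact rather than a container image;
	// fall back to artifact-aware handling instead of failing hard.
}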

View File

@ -1,90 +0,0 @@
package manifest
import (
"fmt"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// ListPublic is a subset of List which is a part of the public API;
// so no methods can be added, removed or changed.
//
// Internal users should usually use List instead.
type ListPublic interface {
// MIMEType returns the MIME type of this particular manifest list.
MIMEType() string
// Instances returns a list of the manifests that this list knows of, other than its own.
Instances() []digest.Digest
// Update information about the list's instances. The length of the passed-in slice must
// match the length of the list of instances which the list already contains, and every field
// must be specified.
UpdateInstances([]ListUpdate) error
// Instance returns the size and MIME type of a particular instance in the list.
Instance(digest.Digest) (ListUpdate, error)
// ChooseInstance selects which manifest is most appropriate for the platform described by the
// SystemContext, or for the current platform if the SystemContext doesn't specify any details.
ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
// Serialize returns the list in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
// from one, even if no modifications were made!
Serialize() ([]byte, error)
// ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
ConvertToMIMEType(mimeType string) (ListPublic, error)
// Clone returns a deep copy of this list and its contents.
Clone() ListPublic
}
// List is an interface for parsing, modifying lists of image manifests.
// Callers can either use this abstract interface without understanding the details of the formats,
// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
// directly.
type List interface {
ListPublic
// CloneInternal returns a deep copy of this list and its contents.
CloneInternal() List
// ChooseInstanceByCompression selects which manifest is most appropriate for the platform and compression described by the
// SystemContext (or for the current platform if the SystemContext doesn't specify any details). It prefers gzip-compressed
// instances when preferGzip is OptionalBoolTrue, and chooses the best available compression when it is OptionalBoolFalse or left OptionalBoolUndefined.
ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error)
}
// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
// This is publicly visible as c/image/manifest.ListUpdate.
type ListUpdate struct {
Digest digest.Digest
Size int64
MediaType string
}
// ListPublicFromBlob parses a list of manifests.
// This is publicly visible as c/image/manifest.ListFromBlob.
func ListPublicFromBlob(manifest []byte, manifestMIMEType string) (ListPublic, error) {
list, err := ListFromBlob(manifest, manifestMIMEType)
if err != nil {
return nil, err
}
return list, nil
}
// ListFromBlob parses a list of manifests.
func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
normalized := NormalizedMIMEType(manifestMIMEType)
switch normalized {
case DockerV2ListMediaType:
return Schema2ListFromManifest(manifest)
case imgspecv1.MediaTypeImageIndex:
return OCI1IndexFromManifest(manifest)
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
}
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
}

View File

@ -1,167 +0,0 @@
package manifest
import (
"encoding/json"
"github.com/containers/libtrust"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
// FIXME(runcom, mitr): should we have a mediatype pkg??
const (
// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
// DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
// DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
// DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
// DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
)
// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
// but we may not have such metadata available (e.g. when the manifest is a local file).
// This is publicly visible as c/image/manifest.GuessMIMEType.
func GuessMIMEType(manifest []byte) string {
// A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
// Also docker/distribution/manifest.Versioned.
meta := struct {
MediaType string `json:"mediaType"`
SchemaVersion int `json:"schemaVersion"`
Signatures any `json:"signatures"`
}{}
if err := json.Unmarshal(manifest, &meta); err != nil {
return ""
}
switch meta.MediaType {
case DockerV2Schema2MediaType, DockerV2ListMediaType,
imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
return meta.MediaType
}
// this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
switch meta.SchemaVersion {
case 1:
if meta.Signatures != nil {
return DockerV2Schema1SignedMediaType
}
return DockerV2Schema1MediaType
case 2:
// Best effort to understand if this is an OCI image since mediaType
// wasn't in the manifest for OCI image-spec < 1.0.2.
// For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
ociMan := struct {
Config struct {
MediaType string `json:"mediaType"`
} `json:"config"`
}{}
if err := json.Unmarshal(manifest, &ociMan); err != nil {
return ""
}
switch ociMan.Config.MediaType {
case imgspecv1.MediaTypeImageConfig:
return imgspecv1.MediaTypeImageManifest
case DockerV2Schema2ConfigMediaType:
// This case should not happen since a Docker image
// must declare a top-level media type and
// `meta.MediaType` has already been checked.
return DockerV2Schema2MediaType
}
// Maybe an image index or an OCI artifact.
ociIndex := struct {
Manifests []imgspecv1.Descriptor `json:"manifests"`
}{}
if err := json.Unmarshal(manifest, &ociIndex); err != nil {
return ""
}
if len(ociIndex.Manifests) != 0 {
if ociMan.Config.MediaType == "" {
return imgspecv1.MediaTypeImageIndex
}
// FIXME: this is mixing media types of manifests and configs.
return ociMan.Config.MediaType
}
// It's most likely an OCI artifact with a custom config media
// type which is not (and cannot be) covered by the media-type
// checks above.
return imgspecv1.MediaTypeImageManifest
}
return ""
}
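Since this is re-exported as c/image/manifest.GuessMIMEType, a minimal usage sketch:

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

func main() {
	v2 := []byte(`{"schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json"}`)
	fmt.Println(manifest.GuessMIMEType(v2)) // the declared v2s2 type

	// Schema 1 is recognized structurally; a "signatures" member implies the signed variant.
	v1 := []byte(`{"schemaVersion": 1, "signatures": []}`)
	fmt.Println(manifest.GuessMIMEType(v1)) // application/vnd.docker.distribution.manifest.v1+prettyjws
}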
// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
// This is publicly visible as c/image/manifest.Digest.
func Digest(manifest []byte) (digest.Digest, error) {
if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
if err != nil {
return "", err
}
manifest, err = sig.Payload()
if err != nil {
// Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
// that libtrust itself has josebase64UrlEncode()d
return "", err
}
}
return digest.FromBytes(manifest), nil
}
// MatchesDigest returns true iff the manifest matches expectedDigest.
// Error may be set if this returns false.
// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
// This is publicly visible as c/image/manifest.MatchesDigest.
func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
// This should eventually support various digest types.
actualDigest, err := Digest(manifest)
if err != nil {
return false, err
}
return expectedDigest == actualDigest, nil
}
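A usage sketch via the public re-exports manifest.Digest and manifest.MatchesDigest:

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

func main() {
	blob := []byte(`{"schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json"}`)
	dgst, err := manifest.Digest(blob)
	if err != nil {
		panic(err)
	}
	ok, err := manifest.MatchesDigest(blob, dgst)
	fmt.Println(dgst, ok, err) // sha256:…, true, <nil>
}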
// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
// centralizing various workarounds.
// This is publicly visible as c/image/manifest.NormalizedMIMEType.
func NormalizedMIMEType(input string) string {
switch input {
// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
// need to happen within the ImageSource.
case "application/json":
return DockerV2Schema1SignedMediaType
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
imgspecv1.MediaTypeImageManifest,
imgspecv1.MediaTypeImageIndex,
DockerV2Schema2MediaType,
DockerV2ListMediaType:
return input
default:
// If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
//
// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
// This makes no real sense, but it happens
// because requests for manifests are
// redirected to a content distribution
// network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
return DockerV2Schema1SignedMediaType
}
}

View File

@ -1,329 +0,0 @@
package manifest
import (
"encoding/json"
"fmt"
"math"
"runtime"
platform "github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
const (
// OCI1InstanceAnnotationCompressionZSTD is an annotation name that can be placed on a manifest descriptor in an OCI index.
// The value of the annotation must be the string "true".
// If this annotation is present on a manifest, consuming that image instance requires support for Zstd compression.
// That also suggests that this instance benefits from
// Zstd compression, so it can be preferred by compatible consumers over instances that
// use gzip, depending on their local policy.
OCI1InstanceAnnotationCompressionZSTD = "io.github.containers.compression.zstd"
)
// OCI1IndexPublic is just an alias for the OCI index type, but one which we can
// provide methods for.
// This is publicly visible as c/image/manifest.OCI1Index
// Internal users should usually use OCI1Index instead.
type OCI1IndexPublic struct {
imgspecv1.Index
}
// MIMEType returns the MIME type of this particular manifest index.
func (index *OCI1IndexPublic) MIMEType() string {
return imgspecv1.MediaTypeImageIndex
}
// Instances returns a slice of digests of the manifests that this index knows of.
func (index *OCI1IndexPublic) Instances() []digest.Digest {
results := make([]digest.Digest, len(index.Manifests))
for i, m := range index.Manifests {
results[i] = m.Digest
}
return results
}
// Instance returns the ListUpdate of a particular instance in the index.
func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
for _, manifest := range index.Manifests {
if manifest.Digest == instanceDigest {
return ListUpdate{
Digest: manifest.Digest,
Size: manifest.Size,
MediaType: manifest.MediaType,
}, nil
}
}
return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error {
if len(updates) != len(index.Manifests) {
return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
}
for i := range updates {
if err := updates[i].Digest.Validate(); err != nil {
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
}
index.Manifests[i].Digest = updates[i].Digest
if updates[i].Size < 0 {
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
}
index.Manifests[i].Size = updates[i].Size
if updates[i].MediaType == "" {
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
}
index.Manifests[i].MediaType = updates[i].MediaType
}
return nil
}
// instanceIsZstd returns true if instance is a zstd instance otherwise false.
func instanceIsZstd(manifest imgspecv1.Descriptor) bool {
if value, ok := manifest.Annotations[OCI1InstanceAnnotationCompressionZSTD]; ok && value == "true" {
return true
}
return false
}
type instanceCandidate struct {
platformIndex int // Index of the candidate in platform.WantedPlatforms: lower numbers are preferred; or math.MaxInt if the candidate doesn’t have a platform
isZstd bool // tells whether this particular instance is a zstd instance
manifestPosition int // A zero-based index of the instance in the manifest list
digest digest.Digest // Instance digest
}
func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip bool) bool {
switch {
case ic.platformIndex != other.platformIndex:
return ic.platformIndex < other.platformIndex
case ic.isZstd != other.isZstd:
if !preferGzip {
return ic.isZstd
} else {
return !ic.isZstd
}
case ic.manifestPosition != other.manifestPosition:
return ic.manifestPosition < other.manifestPosition
}
panic("internal error: invalid comparision between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition.
}
// chooseInstance is a private equivalent to ChooseInstanceByCompression,
// shared by ChooseInstance and ChooseInstanceByCompression.
func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
didPreferGzip := false
if preferGzip == types.OptionalBoolTrue {
didPreferGzip = true
}
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
var bestMatch *instanceCandidate
bestMatch = nil
for manifestIndex, d := range index.Manifests {
candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest}
if d.Platform != nil {
foundPlatform := false
for platformIndex, wantedPlatform := range wantedPlatforms {
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: slices.Clone(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
foundPlatform = true
candidate.platformIndex = platformIndex
break
}
}
if !foundPlatform {
continue
}
}
if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) {
bestMatch = &candidate
}
}
if bestMatch != nil {
return bestMatch.digest, nil
}
return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
func (index *OCI1Index) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
return index.chooseInstance(ctx, preferGzip)
}
// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
// of the image which is appropriate for the current environment.
func (index *OCI1IndexPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
return index.chooseInstance(ctx, types.OptionalBoolFalse)
}
// Serialize returns the index in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (index *OCI1IndexPublic) Serialize() ([]byte, error) {
buf, err := json.Marshal(index)
if err != nil {
return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err)
}
return buf, nil
}
// OCI1IndexPublicFromComponents creates an OCI1 image index instance from the
// supplied data.
// This is publicly visible as c/image/manifest.OCI1IndexFromComponents.
func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1IndexPublic {
index := OCI1IndexPublic{
imgspecv1.Index{
Versioned: imgspec.Versioned{SchemaVersion: 2},
MediaType: imgspecv1.MediaTypeImageIndex,
Manifests: make([]imgspecv1.Descriptor, len(components)),
Annotations: maps.Clone(annotations),
},
}
for i, component := range components {
var platform *imgspecv1.Platform
if component.Platform != nil {
platform = &imgspecv1.Platform{
Architecture: component.Platform.Architecture,
OS: component.Platform.OS,
OSVersion: component.Platform.OSVersion,
OSFeatures: slices.Clone(component.Platform.OSFeatures),
Variant: component.Platform.Variant,
}
}
m := imgspecv1.Descriptor{
MediaType: component.MediaType,
Size: component.Size,
Digest: component.Digest,
URLs: slices.Clone(component.URLs),
Annotations: maps.Clone(component.Annotations),
Platform: platform,
}
index.Manifests[i] = m
}
return &index
}
// OCI1IndexPublicClone creates a deep copy of the passed-in index.
// This is publicly visible as c/image/manifest.OCI1IndexClone.
func OCI1IndexPublicClone(index *OCI1IndexPublic) *OCI1IndexPublic {
return OCI1IndexPublicFromComponents(index.Manifests, index.Annotations)
}
// ToOCI1Index returns the index encoded as an OCI1 index.
func (index *OCI1IndexPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
return OCI1IndexPublicClone(index), nil
}
// ToSchema2List returns the index encoded as a Schema2 list.
func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) {
components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests))
for _, manifest := range index.Manifests {
platform := manifest.Platform
if platform == nil {
platform = &imgspecv1.Platform{
OS: runtime.GOOS,
Architecture: runtime.GOARCH,
}
}
converted := Schema2ManifestDescriptor{
Schema2Descriptor{
MediaType: manifest.MediaType,
Size: manifest.Size,
Digest: manifest.Digest,
URLs: slices.Clone(manifest.URLs),
},
Schema2PlatformSpec{
OS: platform.OS,
Architecture: platform.Architecture,
OSFeatures: slices.Clone(platform.OSFeatures),
OSVersion: platform.OSVersion,
Variant: platform.Variant,
},
}
components = append(components, converted)
}
s2 := Schema2ListPublicFromComponents(components)
return s2, nil
}
// OCI1IndexPublicFromManifest creates an OCI1 manifest index instance from marshalled
// JSON, presumably generated by encoding a OCI1 manifest index.
// This is publicly visible as c/image/manifest.OCI1IndexFromManifest.
func OCI1IndexPublicFromManifest(manifest []byte) (*OCI1IndexPublic, error) {
index := OCI1IndexPublic{
Index: imgspecv1.Index{
Versioned: imgspec.Versioned{SchemaVersion: 2},
MediaType: imgspecv1.MediaTypeImageIndex,
Manifests: []imgspecv1.Descriptor{},
Annotations: make(map[string]string),
},
}
if err := json.Unmarshal(manifest, &index); err != nil {
return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err)
}
if err := ValidateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
AllowedFieldManifests); err != nil {
return nil, err
}
return &index, nil
}
// Clone returns a deep copy of this list and its contents.
func (index *OCI1IndexPublic) Clone() ListPublic {
return OCI1IndexPublicClone(index)
}
// ConvertToMIMEType converts the passed-in image index to a manifest list of
// the specified type.
func (index *OCI1IndexPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) {
switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
case DockerV2ListMediaType:
return index.ToSchema2List()
case imgspecv1.MediaTypeImageIndex:
return index.Clone(), nil
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType)
default:
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType)
}
}
type OCI1Index struct {
OCI1IndexPublic
}
func oci1IndexFromPublic(public *OCI1IndexPublic) *OCI1Index {
return &OCI1Index{*public}
}
func (index *OCI1Index) CloneInternal() List {
return oci1IndexFromPublic(OCI1IndexPublicClone(&index.OCI1IndexPublic))
}
func (index *OCI1Index) Clone() ListPublic {
return index.CloneInternal()
}
// OCI1IndexFromManifest creates a OCI1 manifest list instance from marshalled
// JSON, presumably generated by encoding a OCI1 manifest list.
func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
public, err := OCI1IndexPublicFromManifest(manifest)
if err != nil {
return nil, err
}
return oci1IndexFromPublic(public), nil
}

View File

@ -25,7 +25,6 @@ import (
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
// For Linux, the kernel has already detected the ABI, ISA and Features.
@ -153,9 +152,13 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
if wantedVariant != "" {
// If the user requested a specific variant, we'll walk down
// the list from most to least compatible.
if variantOrder := compatibility[wantedArch]; variantOrder != nil {
if i := slices.Index(variantOrder, wantedVariant); i != -1 {
variants = variantOrder[i:]
if compatibility[wantedArch] != nil {
variantOrder := compatibility[wantedArch]
for i, v := range variantOrder {
if wantedVariant == v {
variants = variantOrder[i:]
break
}
}
}
if variants == nil {

View File

@ -7,7 +7,6 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/signature"
compression "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)
@ -47,22 +46,24 @@ type ImageDestinationInternalOnly interface {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (UploadedBlob, error)
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (types.BlobInfo, error)
// PutBlobPartial attempts to create a blob using the data that is already present
// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (UploadedBlob, error)
PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error)
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil).
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, ReusedBlob, error)
TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error)
// PutSignaturesWithFormat writes a set of signatures to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
@ -78,13 +79,6 @@ type ImageDestination interface {
ImageDestinationInternalOnly
}
// UploadedBlob is information about a blob written to a destination.
// It is the subset of types.BlobInfo fields the transport is responsible for setting; all fields must be provided.
type UploadedBlob struct {
Digest digest.Digest
Size int64
}
// PutBlobOptions are used in PutBlobWithOptions.
type PutBlobOptions struct {
Cache blobinfocache.BlobInfoCache2 // Cache to optionally update with the uploaded blob / look up blob infos.
@ -118,17 +112,6 @@ type TryReusingBlobOptions struct {
SrcRef reference.Named // A reference to the source image that contains the input blob.
}
// ReusedBlob is information about a blob reused in a destination.
// It is the subset of types.BlobInfo fields the transport is responsible for setting.
type ReusedBlob struct {
Digest digest.Digest // Must be provided
Size int64 // Must be provided
// The following compression fields should be set when the reuse substitutes
// a differently-compressed blob.
CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A
}
// ImageSourceChunk is a portion of a blob.
// This API is experimental and can be changed without bumping the major version number.
type ImageSourceChunk struct {

View File

@ -1,46 +0,0 @@
package set
import "golang.org/x/exp/maps"
// FIXME:
// - Docstrings
// - This should be in a public library somewhere
type Set[E comparable] struct {
m map[E]struct{}
}
func New[E comparable]() *Set[E] {
return &Set[E]{
m: map[E]struct{}{},
}
}
func NewWithValues[E comparable](values ...E) *Set[E] {
s := New[E]()
for _, v := range values {
s.Add(v)
}
return s
}
func (s Set[E]) Add(v E) {
s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
}
func (s Set[E]) Delete(v E) {
delete(s.m, v)
}
func (s *Set[E]) Contains(v E) bool {
_, ok := s.m[v]
return ok
}
func (s *Set[E]) Empty() bool {
return len(s.m) == 0
}
func (s *Set[E]) Values() []E {
return maps.Keys(s.m)
}
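For reference, a quick sketch of the API this internal helper package exposed (the downgrade deletes the file), as in-tree caller code:

s := set.New[string]()
s.Add("amd64")
s.Add("arm64")
s.Add("amd64")                   // adding twice is a no-op
fmt.Println(s.Contains("arm64")) // true
fmt.Println(s.Empty())           // false
fmt.Println(len(s.Values()))     // 2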

View File

@ -24,7 +24,7 @@ type Signature interface {
blobChunk() ([]byte, error)
}
// Blob returns a representation of sig as a []byte, suitable for long-term storage.
// BlobChunk returns a representation of sig as a []byte, suitable for long-term storage.
func Blob(sig Signature) ([]byte, error) {
chunk, err := sig.blobChunk()
if err != nil {
@ -66,20 +66,22 @@ func FromBlob(blob []byte) (Signature, error) {
// The newer format: binary 0, format name, newline, data
case 0x00:
blob = blob[1:]
formatBytes, blobChunk, foundNewline := bytes.Cut(blob, []byte{'\n'})
if !foundNewline {
newline := bytes.IndexByte(blob, '\n')
if newline == -1 {
return nil, fmt.Errorf("invalid signature format, missing newline")
}
formatBytes := blob[:newline]
for _, b := range formatBytes {
if b < 32 || b >= 0x7F {
return nil, fmt.Errorf("invalid signature format, non-ASCII byte %#x", b)
}
}
blobChunk := blob[newline+1:]
switch {
case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)):
return SimpleSigningFromBlob(blobChunk), nil
case bytes.Equal(formatBytes, []byte(SigstoreFormat)):
return sigstoreFromBlobChunk(blobChunk)
return SigstoreFromBlobChunk(blobChunk)
default:
return nil, fmt.Errorf("unrecognized signature format %q", string(formatBytes))
}
@ -100,3 +102,10 @@ func UnsupportedFormatError(sig Signature) error {
return fmt.Errorf("unsupported, and unrecognized, signature format %q", string(formatID))
}
}
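The framing parsed above is one 0x00 byte, an ASCII format name, a newline, then the payload. A hypothetical encoder for that layout (not part of this package):

package main

import (
	"bytes"
	"fmt"
)

// frameSignature writes the layout FromBlob expects for the newer format:
// 0x00, format name, '\n', payload.
func frameSignature(format string, payload []byte) []byte {
	var buf bytes.Buffer
	buf.WriteByte(0x00)
	buf.WriteString(format)
	buf.WriteByte('\n')
	buf.Write(payload)
	return buf.Bytes()
}

func main() {
	fmt.Printf("%q\n", frameSignature("simple-signing", []byte(`{"critical":{}}`)))
}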
// copyByteSlice returns a guaranteed-fresh copy of a byte slice.
// Use this to make sure the underlying data is not shared and can’t be unexpectedly modified.
func copyByteSlice(s []byte) []byte {
res := []byte{}
return append(res, s...)
}

View File

@ -1,11 +1,6 @@
package signature
import (
"encoding/json"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
import "encoding/json"
const (
// from sigstore/cosign/pkg/types.SimpleSigningMediaType
@ -45,13 +40,13 @@ type sigstoreJSONRepresentation struct {
func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore {
return Sigstore{
untrustedMIMEType: untrustedMimeType,
untrustedPayload: slices.Clone(untrustedPayload),
untrustedAnnotations: maps.Clone(untrustedAnnotations),
untrustedPayload: copyByteSlice(untrustedPayload),
untrustedAnnotations: copyStringMap(untrustedAnnotations),
}
}
// sigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
func sigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
// SigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
func SigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
var v sigstoreJSONRepresentation
if err := json.Unmarshal(blobChunk, &v); err != nil {
return Sigstore{}, err
@ -79,9 +74,17 @@ func (s Sigstore) UntrustedMIMEType() string {
return s.untrustedMIMEType
}
func (s Sigstore) UntrustedPayload() []byte {
return slices.Clone(s.untrustedPayload)
return copyByteSlice(s.untrustedPayload)
}
func (s Sigstore) UntrustedAnnotations() map[string]string {
return maps.Clone(s.untrustedAnnotations)
return copyStringMap(s.untrustedAnnotations)
}
func copyStringMap(m map[string]string) map[string]string {
res := map[string]string{}
for k, v := range m {
res[k] = v
}
return res
}

View File

@ -1,7 +1,5 @@
package signature
import "golang.org/x/exp/slices"
// SimpleSigning is a “simple signing” signature.
type SimpleSigning struct {
untrustedSignature []byte
@ -10,7 +8,7 @@ type SimpleSigning struct {
// SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object.
func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning {
return SimpleSigning{
untrustedSignature: slices.Clone(blobChunk),
untrustedSignature: copyByteSlice(blobChunk),
}
}
@ -21,9 +19,9 @@ func (s SimpleSigning) FormatID() FormatID {
// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
// Almost everyone should use signature.Blob() instead.
func (s SimpleSigning) blobChunk() ([]byte, error) {
return slices.Clone(s.untrustedSignature), nil
return copyByteSlice(s.untrustedSignature), nil
}
func (s SimpleSigning) UntrustedSignature() []byte {
return slices.Clone(s.untrustedSignature)
return copyByteSlice(s.untrustedSignature)
}

View File

@ -11,7 +11,7 @@ import (
// The net/http package uses a separate goroutine to upload data to a HTTP connection,
// and it is possible for the server to return a response (typically an error) before consuming
// the full body of the request. In that case http.Client.Do can return with an error while
// the body is still being read — regardless of the cancellation, if any, of http.Request.Context().
// the body is still being read — regardless of of the cancellation, if any, of http.Request.Context().
//
// As a result, any data used/updated by the io.Reader() provided as the request body may be
// used/updated even after http.Client.Do returns, causing races.

View File

@ -1,6 +1,7 @@
package manifest
import (
"encoding/json"
"fmt"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
@ -8,6 +9,96 @@ import (
"github.com/sirupsen/logrus"
)
// dupStringSlice returns a deep copy of a slice of strings, or nil if the
// source slice is empty.
func dupStringSlice(list []string) []string {
if len(list) == 0 {
return nil
}
dup := make([]string, len(list))
copy(dup, list)
return dup
}
// dupStringStringMap returns a deep copy of a map[string]string, or nil if the
// passed-in map is nil or has no keys.
func dupStringStringMap(m map[string]string) map[string]string {
if len(m) == 0 {
return nil
}
result := make(map[string]string)
for k, v := range m {
result[k] = v
}
return result
}
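Editor's note: unlike copyByteSlice/copyStringMap above, these dup helpers deliberately collapse empty input to nil, which keeps omitempty JSON fields absent on re-serialization. Illustratively (hypothetical statements, as if in a test in this package):

fmt.Println(dupStringSlice(nil) == nil)        // true
fmt.Println(dupStringSlice([]string{}) == nil) // true — empty collapses to nil
m := dupStringStringMap(map[string]string{"a": "b"})
fmt.Println(m["a"]) // "b", in a freshly allocated map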
// allowedManifestFields is a bit mask of “essential” manifest fields that validateUnambiguousManifestFormat
// can expect to be present.
type allowedManifestFields int
const (
allowedFieldConfig allowedManifestFields = 1 << iota
allowedFieldFSLayers
allowedFieldHistory
allowedFieldLayers
allowedFieldManifests
allowedFieldFirstUnusedBit // Keep this at the end!
)
// validateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than
// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields
// other than the ones the caller specifically allows.
// expectedMIMEType is used only for diagnostics.
// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format
// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous
// data that just isn’t what was expected, as opposed to actually ambiguous data.
func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
allowed allowedManifestFields) error {
if allowed >= allowedFieldFirstUnusedBit {
return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed)
}
// Use a private type to decode, not just a map[string]interface{}, because we want
// to also reject case-insensitive matches (which would be used by Go when really decoding
// the manifest).
// (It is expected that as manifest formats are added or extended over time, more fields will be added
// here.)
detectedFields := struct {
Config interface{} `json:"config"`
FSLayers interface{} `json:"fsLayers"`
History interface{} `json:"history"`
Layers interface{} `json:"layers"`
Manifests interface{} `json:"manifests"`
}{}
if err := json.Unmarshal(manifest, &detectedFields); err != nil {
// The caller was supposed to already validate version numbers, so this should not happen;
// let’s not bother with making this error “nice”.
return err
}
unexpected := []string{}
// Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste.
if detectedFields.Config != nil && (allowed&allowedFieldConfig) == 0 {
unexpected = append(unexpected, "config")
}
if detectedFields.FSLayers != nil && (allowed&allowedFieldFSLayers) == 0 {
unexpected = append(unexpected, "fsLayers")
}
if detectedFields.History != nil && (allowed&allowedFieldHistory) == 0 {
unexpected = append(unexpected, "history")
}
if detectedFields.Layers != nil && (allowed&allowedFieldLayers) == 0 {
unexpected = append(unexpected, "layers")
}
if detectedFields.Manifests != nil && (allowed&allowedFieldManifests) == 0 {
unexpected = append(unexpected, "manifests")
}
if len(unexpected) != 0 {
return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`,
unexpected, expectedMIMEType)
}
return nil
}
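Editor's note: for instance, a blob that declares schemaVersion 2 with config and layers but also smuggles in a schema 1 fsLayers field is rejected here. A hedged sketch (hypothetical blob, written as if in a test in this package):

blob := []byte(`{"schemaVersion": 2, "config": {}, "layers": [], "fsLayers": []}`)
err := validateUnambiguousManifestFormat(blob, DockerV2Schema2MediaType,
	allowedFieldConfig|allowedFieldLayers)
// err: rejecting ambiguous manifest, unexpected fields []string{"fsLayers"} in supposedly application/vnd.docker.distribution.manifest.v2+json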
// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
func layerInfosToStrings(infos []LayerInfo) []string {

View File

@ -8,13 +8,10 @@ import (
"time"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/regexp"
"github.com/docker/docker/api/types/versions"
"github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)
// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
@ -56,16 +53,16 @@ type Schema1V1Compatibility struct {
// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
// (NOTE: The instance is not necessarily a literal representation of the original blob,
// layers with duplicate IDs are eliminated.)
func Schema1FromManifest(manifestBlob []byte) (*Schema1, error) {
func Schema1FromManifest(manifest []byte) (*Schema1, error) {
s1 := Schema1{}
if err := json.Unmarshal(manifestBlob, &s1); err != nil {
if err := json.Unmarshal(manifest, &s1); err != nil {
return nil, err
}
if s1.SchemaVersion != 1 {
return nil, fmt.Errorf("unsupported schema version %d", s1.SchemaVersion)
}
if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema1SignedMediaType,
manifest.AllowedFieldFSLayers|manifest.AllowedFieldHistory); err != nil {
if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema1SignedMediaType,
allowedFieldFSLayers|allowedFieldHistory); err != nil {
return nil, err
}
if err := s1.initialize(); err != nil {
@ -186,22 +183,22 @@ func (m *Schema1) fixManifestLayers() error {
return errors.New("Invalid parent ID in the base layer of the image")
}
// check general duplicates to error instead of a deadlock
idmap := set.New[string]()
idmap := make(map[string]struct{})
var lastID string
for _, img := range m.ExtractedV1Compatibility {
// skip IDs that appear after each other, we handle those later
if img.ID != lastID && idmap.Contains(img.ID) {
if _, exists := idmap[img.ID]; img.ID != lastID && exists {
return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
}
lastID = img.ID
idmap.Add(lastID)
idmap[lastID] = struct{}{}
}
// backwards loop so that we keep the remaining indexes after removing items
for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- {
if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue
m.FSLayers = slices.Delete(m.FSLayers, i, i+1)
m.History = slices.Delete(m.History, i, i+1)
m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1)
m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
m.History = append(m.History[:i], m.History[i+1:]...)
m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...)
} else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID {
return fmt.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
}
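Editor's note: the restored append-based deletion removes element i in place, and the loop above runs backwards so that deleting index i never shifts the yet-unvisited lower indexes. The idiom in isolation (hypothetical values):

s := []string{"a", "b", "b", "c"}
i := 1
s = append(s[:i], s[i+1:]...) // drop s[1]; reuses the backing array
fmt.Println(s)                // [a b c]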

View File

@ -5,7 +5,6 @@ import (
"fmt"
"time"
"github.com/containers/image/v5/internal/manifest"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/pkg/strslice"
"github.com/containers/image/v5/types"
@ -13,7 +12,12 @@ import (
)
// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
type Schema2Descriptor = manifest.Schema2Descriptor
type Schema2Descriptor struct {
MediaType string `json:"mediaType"`
Size int64 `json:"size"`
Digest digest.Digest `json:"digest"`
URLs []string `json:"urls,omitempty"`
}
// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor.
func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo {
@ -155,13 +159,13 @@ type Schema2Image struct {
}
// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
func Schema2FromManifest(manifestBlob []byte) (*Schema2, error) {
func Schema2FromManifest(manifest []byte) (*Schema2, error) {
s2 := Schema2{}
if err := json.Unmarshal(manifestBlob, &s2); err != nil {
if err := json.Unmarshal(manifest, &s2); err != nil {
return nil, err
}
if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema2MediaType,
manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema2MediaType,
allowedFieldConfig|allowedFieldLayers); err != nil {
return nil, err
}
// Check manifest's and layers' media types.

View File

@ -1,32 +1,220 @@
package manifest
import (
"github.com/containers/image/v5/internal/manifest"
"encoding/json"
"fmt"
platform "github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// Schema2PlatformSpec describes the platform which a particular manifest is
// specialized for.
type Schema2PlatformSpec = manifest.Schema2PlatformSpec
type Schema2PlatformSpec struct {
Architecture string `json:"architecture"`
OS string `json:"os"`
OSVersion string `json:"os.version,omitempty"`
OSFeatures []string `json:"os.features,omitempty"`
Variant string `json:"variant,omitempty"`
Features []string `json:"features,omitempty"` // removed in OCI
}
// Schema2ManifestDescriptor references a platform-specific manifest.
type Schema2ManifestDescriptor = manifest.Schema2ManifestDescriptor
type Schema2ManifestDescriptor struct {
Schema2Descriptor
Platform Schema2PlatformSpec `json:"platform"`
}
// Schema2List is a list of platform-specific manifests.
type Schema2List = manifest.Schema2ListPublic
type Schema2List struct {
SchemaVersion int `json:"schemaVersion"`
MediaType string `json:"mediaType"`
Manifests []Schema2ManifestDescriptor `json:"manifests"`
}
// MIMEType returns the MIME type of this particular manifest list.
func (list *Schema2List) MIMEType() string {
return list.MediaType
}
// Instances returns a slice of digests of the manifests that this list knows of.
func (list *Schema2List) Instances() []digest.Digest {
results := make([]digest.Digest, len(list.Manifests))
for i, m := range list.Manifests {
results[i] = m.Digest
}
return results
}
// Instance returns the ListUpdate of a particular instance in the list.
func (list *Schema2List) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
for _, manifest := range list.Manifests {
if manifest.Digest == instanceDigest {
return ListUpdate{
Digest: manifest.Digest,
Size: manifest.Size,
MediaType: manifest.MediaType,
}, nil
}
}
return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (list *Schema2List) UpdateInstances(updates []ListUpdate) error {
if len(updates) != len(list.Manifests) {
return fmt.Errorf("incorrect number of update entries passed to Schema2List.UpdateInstances: expected %d, got %d", len(list.Manifests), len(updates))
}
for i := range updates {
if err := updates[i].Digest.Validate(); err != nil {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
}
list.Manifests[i].Digest = updates[i].Digest
if updates[i].Size < 0 {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
}
list.Manifests[i].Size = updates[i].Size
if updates[i].MediaType == "" {
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
}
list.Manifests[i].MediaType = updates[i].MediaType
}
return nil
}
// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
// of the image which is appropriate for the current environment.
func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range list.Manifests {
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: dupStringSlice(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
return d.Digest, nil
}
}
}
return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
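Editor's note: a hedged usage sketch (hypothetical list variable in scope; passing a nil or empty types.SystemContext selects for the current platform):

sys := &types.SystemContext{OSChoice: "linux", ArchitectureChoice: "arm64"}
instanceDigest, err := list.ChooseInstance(sys)
if err != nil {
	// no entry in the list matched the wanted platforms
}
_ = instanceDigest // digest of the per-platform manifest to fetch next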
// Serialize returns the list in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (list *Schema2List) Serialize() ([]byte, error) {
buf, err := json.Marshal(list)
if err != nil {
return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err)
}
return buf, nil
}
// Schema2ListFromComponents creates a Schema2 manifest list instance from the
// supplied data.
func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List {
return manifest.Schema2ListPublicFromComponents(components)
list := Schema2List{
SchemaVersion: 2,
MediaType: DockerV2ListMediaType,
Manifests: make([]Schema2ManifestDescriptor, len(components)),
}
for i, component := range components {
m := Schema2ManifestDescriptor{
Schema2Descriptor{
MediaType: component.MediaType,
Size: component.Size,
Digest: component.Digest,
URLs: dupStringSlice(component.URLs),
},
Schema2PlatformSpec{
Architecture: component.Platform.Architecture,
OS: component.Platform.OS,
OSVersion: component.Platform.OSVersion,
OSFeatures: dupStringSlice(component.Platform.OSFeatures),
Variant: component.Platform.Variant,
Features: dupStringSlice(component.Platform.Features),
},
}
list.Manifests[i] = m
}
return &list
}
// Schema2ListClone creates a deep copy of the passed-in list.
func Schema2ListClone(list *Schema2List) *Schema2List {
return manifest.Schema2ListPublicClone(list)
return Schema2ListFromComponents(list.Manifests)
}
// ToOCI1Index returns the list encoded as an OCI1 index.
func (list *Schema2List) ToOCI1Index() (*OCI1Index, error) {
components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
for _, manifest := range list.Manifests {
converted := imgspecv1.Descriptor{
MediaType: manifest.MediaType,
Size: manifest.Size,
Digest: manifest.Digest,
URLs: dupStringSlice(manifest.URLs),
Platform: &imgspecv1.Platform{
OS: manifest.Platform.OS,
Architecture: manifest.Platform.Architecture,
OSFeatures: dupStringSlice(manifest.Platform.OSFeatures),
OSVersion: manifest.Platform.OSVersion,
Variant: manifest.Platform.Variant,
},
}
components = append(components, converted)
}
oci := OCI1IndexFromComponents(components, nil)
return oci, nil
}
// ToSchema2List returns the list encoded as a Schema2 list.
func (list *Schema2List) ToSchema2List() (*Schema2List, error) {
return Schema2ListClone(list), nil
}
// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
// JSON, presumably generated by encoding a Schema2 manifest list.
func Schema2ListFromManifest(manifestBlob []byte) (*Schema2List, error) {
return manifest.Schema2ListPublicFromManifest(manifestBlob)
func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
list := Schema2List{
Manifests: []Schema2ManifestDescriptor{},
}
if err := json.Unmarshal(manifest, &list); err != nil {
return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err)
}
if err := validateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
allowedFieldManifests); err != nil {
return nil, err
}
return &list, nil
}
// Clone returns a deep copy of this list and its contents.
func (list *Schema2List) Clone() List {
return Schema2ListClone(list)
}
// ConvertToMIMEType converts the passed-in manifest list to a manifest
// list of the specified type.
func (list *Schema2List) ConvertToMIMEType(manifestMIMEType string) (List, error) {
switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
case DockerV2ListMediaType:
return list.Clone(), nil
case imgspecv1.MediaTypeImageIndex:
return list.ToOCI1Index()
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType)
default:
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
}
}

View File

@ -1,7 +1,10 @@
package manifest
import (
"github.com/containers/image/v5/internal/manifest"
"fmt"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
@ -18,14 +21,56 @@ var (
// Callers can either use this abstract interface without understanding the details of the formats,
// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
// directly.
type List = manifest.ListPublic
type List interface {
// MIMEType returns the MIME type of this particular manifest list.
MIMEType() string
// Instances returns a list of the manifests that this list knows of, other than its own.
Instances() []digest.Digest
// Update information about the list's instances. The length of the passed-in slice must
// match the length of the list of instances which the list already contains, and every field
// must be specified.
UpdateInstances([]ListUpdate) error
// Instance returns the size and MIME type of a particular instance in the list.
Instance(digest.Digest) (ListUpdate, error)
// ChooseInstance selects which manifest is most appropriate for the platform described by the
// SystemContext, or for the current platform if the SystemContext doesn't specify any details.
ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
// Serialize returns the list in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
// from one, even if no modifications were made!
Serialize() ([]byte, error)
// ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
ConvertToMIMEType(mimeType string) (List, error)
// Clone returns a deep copy of this list and its contents.
Clone() List
}
// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
type ListUpdate = manifest.ListUpdate
type ListUpdate struct {
Digest digest.Digest
Size int64
MediaType string
}
// ListFromBlob parses a list of manifests.
func ListFromBlob(manifestBlob []byte, manifestMIMEType string) (List, error) {
return manifest.ListPublicFromBlob(manifestBlob, manifestMIMEType)
func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
normalized := NormalizedMIMEType(manifestMIMEType)
switch normalized {
case DockerV2ListMediaType:
return Schema2ListFromManifest(manifest)
case imgspecv1.MediaTypeImageIndex:
return OCI1IndexFromManifest(manifest)
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
}
return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized)
}
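Editor's note: tying the interface together, a typical call sequence for a raw index blob fetched from a registry (hypothetical blob and sys variables in scope):

list, err := ListFromBlob(blob, GuessMIMEType(blob))
if err != nil {
	return err
}
instanceDigest, err := list.ChooseInstance(sys) // sys may be nil for the current platform
if err != nil {
	return err
}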
// ConvertListToMIMEType converts the passed-in manifest list to a manifest

View File

@ -1,9 +1,10 @@
package manifest
import (
"encoding/json"
"fmt"
"github.com/containers/image/v5/internal/manifest"
internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/types"
"github.com/containers/libtrust"
digest "github.com/opencontainers/go-digest"
@ -15,28 +16,28 @@ import (
// FIXME(runcom, mitr): should we have a mediatype pkg??
const (
// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType
DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
// DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType
DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType
DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
// DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
DockerV2Schema2ConfigMediaType = manifest.DockerV2Schema2ConfigMediaType
DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
DockerV2Schema2LayerMediaType = manifest.DockerV2Schema2LayerMediaType
DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
// DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
DockerV2SchemaLayerMediaTypeUncompressed = manifest.DockerV2SchemaLayerMediaTypeUncompressed
DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
DockerV2ListMediaType = manifest.DockerV2ListMediaType
DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
DockerV2Schema2ForeignLayerMediaType = manifest.DockerV2Schema2ForeignLayerMediaType
DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
// DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
DockerV2Schema2ForeignLayerMediaTypeGzip = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip
DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
)
// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
type NonImageArtifactError = manifest.NonImageArtifactError
type NonImageArtifactError = internalManifest.NonImageArtifactError
// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type.
func SupportedSchema2MediaType(m string) error {
@ -101,21 +102,102 @@ type LayerInfo struct {
// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
// but we may not have such metadata available (e.g. when the manifest is a local file).
func GuessMIMEType(manifestBlob []byte) string {
return manifest.GuessMIMEType(manifestBlob)
func GuessMIMEType(manifest []byte) string {
// A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
// Also docker/distribution/manifest.Versioned.
meta := struct {
MediaType string `json:"mediaType"`
SchemaVersion int `json:"schemaVersion"`
Signatures interface{} `json:"signatures"`
}{}
if err := json.Unmarshal(manifest, &meta); err != nil {
return ""
}
switch meta.MediaType {
case DockerV2Schema2MediaType, DockerV2ListMediaType,
imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
return meta.MediaType
}
// this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
switch meta.SchemaVersion {
case 1:
if meta.Signatures != nil {
return DockerV2Schema1SignedMediaType
}
return DockerV2Schema1MediaType
case 2:
// Best effort to understand if this is an OCI image since mediaType
// wasn't in the manifest for OCI image-spec < 1.0.2.
// For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
ociMan := struct {
Config struct {
MediaType string `json:"mediaType"`
} `json:"config"`
}{}
if err := json.Unmarshal(manifest, &ociMan); err != nil {
return ""
}
switch ociMan.Config.MediaType {
case imgspecv1.MediaTypeImageConfig:
return imgspecv1.MediaTypeImageManifest
case DockerV2Schema2ConfigMediaType:
// This case should not happen since a Docker image
// must declare a top-level media type and
// `meta.MediaType` has already been checked.
return DockerV2Schema2MediaType
}
// Maybe an image index or an OCI artifact.
ociIndex := struct {
Manifests []imgspecv1.Descriptor `json:"manifests"`
}{}
if err := json.Unmarshal(manifest, &ociIndex); err != nil {
return ""
}
if len(ociIndex.Manifests) != 0 {
if ociMan.Config.MediaType == "" {
return imgspecv1.MediaTypeImageIndex
}
// FIXME: this is mixing media types of manifests and configs.
return ociMan.Config.MediaType
}
// It's most likely an OCI artifact with a custom config media
// type which is not (and cannot be) covered by the media-type
// checks above.
return imgspecv1.MediaTypeImageManifest
}
return ""
}
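Editor's note: the heuristics above in action (hypothetical blobs):

fmt.Println(GuessMIMEType([]byte(`{"schemaVersion": 1}`)))
// application/vnd.docker.distribution.manifest.v1+json
fmt.Println(GuessMIMEType([]byte(`{"schemaVersion": 1, "signatures": []}`)))
// application/vnd.docker.distribution.manifest.v1+prettyjws
fmt.Println(GuessMIMEType([]byte(`{"mediaType": "application/vnd.oci.image.index.v1+json"}`)))
// application/vnd.oci.image.index.v1+json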
// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
func Digest(manifestBlob []byte) (digest.Digest, error) {
return manifest.Digest(manifestBlob)
func Digest(manifest []byte) (digest.Digest, error) {
if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
if err != nil {
return "", err
}
manifest, err = sig.Payload()
if err != nil {
// Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
// that libtrust itself has josebase64UrlEncode()d
return "", err
}
}
return digest.FromBytes(manifest), nil
}
// MatchesDigest returns true iff the manifest matches expectedDigest.
// Error may be set if this returns false.
// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
func MatchesDigest(manifestBlob []byte, expectedDigest digest.Digest) (bool, error) {
return manifest.MatchesDigest(manifestBlob, expectedDigest)
func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
// This should eventually support various digest types.
actualDigest, err := Digest(manifest)
if err != nil {
return false, err
}
return expectedDigest == actualDigest, nil
}
// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
@ -149,7 +231,30 @@ func MIMETypeSupportsEncryption(mimeType string) bool {
// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
// centralizing various workarounds.
func NormalizedMIMEType(input string) string {
return manifest.NormalizedMIMEType(input)
switch input {
// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
// need to happen within the ImageSource.
case "application/json":
return DockerV2Schema1SignedMediaType
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
imgspecv1.MediaTypeImageManifest,
imgspecv1.MediaTypeImageIndex,
DockerV2Schema2MediaType,
DockerV2ListMediaType:
return input
default:
// If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
//
// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
// This makes no real sense, but it happens
// because requests for manifests are
// redirected to a content distribution
// network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
return DockerV2Schema1SignedMediaType
}
}
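Editor's note: so, concretely (values per the switch above):

fmt.Println(NormalizedMIMEType("application/json"))
// application/vnd.docker.distribution.manifest.v1+prettyjws
fmt.Println(NormalizedMIMEType("text/plain")) // Crane-style junk falls through to the default
// application/vnd.docker.distribution.manifest.v1+prettyjws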
// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type

View File

@ -5,14 +5,13 @@ import (
"fmt"
"strings"
"github.com/containers/image/v5/internal/manifest"
internalManifest "github.com/containers/image/v5/internal/manifest"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
ociencspec "github.com/containers/ocicrypt/spec"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
@ -50,13 +49,13 @@ func SupportedOCI1MediaType(m string) error {
}
// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob.
func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) {
func OCI1FromManifest(manifest []byte) (*OCI1, error) {
oci1 := OCI1{}
if err := json.Unmarshal(manifestBlob, &oci1); err != nil {
if err := json.Unmarshal(manifest, &oci1); err != nil {
return nil, err
}
if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageIndex,
manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil {
if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
allowedFieldConfig|allowedFieldLayers); err != nil {
return nil, err
}
return &oci1, nil
@ -161,8 +160,10 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
// an error if the mediatype does not support encryption
func getEncryptedMediaType(mediatype string) (string, error) {
if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") {
return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype)
for _, s := range strings.Split(mediatype, "+")[1:] {
if s == "encrypted" {
return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype)
}
}
unsuffixedMediatype := strings.Split(mediatype, "+")[0]
switch unsuffixedMediatype {
@ -177,7 +178,7 @@ func getEncryptedMediaType(mediatype string) (string, error) {
// an error if the mediatype does not support decryption
func getDecryptedMediaType(mediatype string) (string, error) {
if !strings.HasSuffix(mediatype, "+encrypted") {
return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype)
return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype)
}
return strings.TrimSuffix(mediatype, "+encrypted"), nil
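Editor's note: taken together, the two helpers act as inverses on the +encrypted suffix. A hedged sketch (as if in a test in this package; the OCI layer media type is one the encrypt path accepts):

enc, _ := getEncryptedMediaType("application/vnd.oci.image.layer.v1.tar+gzip")
// enc == "application/vnd.oci.image.layer.v1.tar+gzip+encrypted"
dec, _ := getDecryptedMediaType(enc)
// dec == "application/vnd.oci.image.layer.v1.tar+gzip"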
@ -196,7 +197,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
// Most software calling this without human intervention is going to expect the values to be realistic and relevant,
// and is probably better served by failing; we can always re-visit that later if we fail now, but
// if we started returning some data for OCI artifacts now, we couldn’t start failing in this function later.
return nil, manifest.NewNonImageArtifactError(m.Config.MediaType)
return nil, internalManifest.NewNonImageArtifactError(m.Config.MediaType)
}
config, err := configGetter(m.ConfigInfo())
@ -247,7 +248,7 @@ func (m *OCI1) ImageID([]digest.Digest) (string, error) {
// (The only known caller of ImageID is storage/storageImageDestination.computeID,
// which can’t work with non-image artifacts.)
if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
return "", manifest.NewNonImageArtifactError(m.Config.MediaType)
return "", internalManifest.NewNonImageArtifactError(m.Config.MediaType)
}
if err := m.Config.Digest.Validate(); err != nil {

View File

@ -1,27 +1,232 @@
package manifest
import (
"github.com/containers/image/v5/internal/manifest"
"encoding/json"
"fmt"
"runtime"
platform "github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// OCI1Index is just an alias for the OCI index type, but one which we can
// provide methods for.
type OCI1Index = manifest.OCI1IndexPublic
type OCI1Index struct {
imgspecv1.Index
}
// MIMEType returns the MIME type of this particular manifest index.
func (index *OCI1Index) MIMEType() string {
return imgspecv1.MediaTypeImageIndex
}
// Instances returns a slice of digests of the manifests that this index knows of.
func (index *OCI1Index) Instances() []digest.Digest {
results := make([]digest.Digest, len(index.Manifests))
for i, m := range index.Manifests {
results[i] = m.Digest
}
return results
}
// Instance returns the ListUpdate of a particular instance in the index.
func (index *OCI1Index) Instance(instanceDigest digest.Digest) (ListUpdate, error) {
for _, manifest := range index.Manifests {
if manifest.Digest == instanceDigest {
return ListUpdate{
Digest: manifest.Digest,
Size: manifest.Size,
MediaType: manifest.MediaType,
}, nil
}
}
return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest)
}
// UpdateInstances updates the sizes, digests, and media types of the manifests
// which the list catalogs.
func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error {
if len(updates) != len(index.Manifests) {
return fmt.Errorf("incorrect number of update entries passed to OCI1Index.UpdateInstances: expected %d, got %d", len(index.Manifests), len(updates))
}
for i := range updates {
if err := updates[i].Digest.Validate(); err != nil {
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances contained an invalid digest: %w", i+1, len(updates), err)
}
index.Manifests[i].Digest = updates[i].Digest
if updates[i].Size < 0 {
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(updates), updates[i].Size)
}
index.Manifests[i].Size = updates[i].Size
if updates[i].MediaType == "" {
return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
}
index.Manifests[i].MediaType = updates[i].MediaType
}
return nil
}
// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
// of the image which is appropriate for the current environment.
func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range index.Manifests {
if d.Platform == nil {
continue
}
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: dupStringSlice(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
return d.Digest, nil
}
}
}
for _, d := range index.Manifests {
if d.Platform == nil {
return d.Digest, nil
}
}
return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the index in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (index *OCI1Index) Serialize() ([]byte, error) {
buf, err := json.Marshal(index)
if err != nil {
return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err)
}
return buf, nil
}
// OCI1IndexFromComponents creates an OCI1 image index instance from the
// supplied data.
func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index {
return manifest.OCI1IndexPublicFromComponents(components, annotations)
index := OCI1Index{
imgspecv1.Index{
Versioned: imgspec.Versioned{SchemaVersion: 2},
MediaType: imgspecv1.MediaTypeImageIndex,
Manifests: make([]imgspecv1.Descriptor, len(components)),
Annotations: dupStringStringMap(annotations),
},
}
for i, component := range components {
var platform *imgspecv1.Platform
if component.Platform != nil {
platform = &imgspecv1.Platform{
Architecture: component.Platform.Architecture,
OS: component.Platform.OS,
OSVersion: component.Platform.OSVersion,
OSFeatures: dupStringSlice(component.Platform.OSFeatures),
Variant: component.Platform.Variant,
}
}
m := imgspecv1.Descriptor{
MediaType: component.MediaType,
Size: component.Size,
Digest: component.Digest,
URLs: dupStringSlice(component.URLs),
Annotations: dupStringStringMap(component.Annotations),
Platform: platform,
}
index.Manifests[i] = m
}
return &index
}
// OCI1IndexClone creates a deep copy of the passed-in index.
func OCI1IndexClone(index *OCI1Index) *OCI1Index {
return manifest.OCI1IndexPublicClone(index)
return OCI1IndexFromComponents(index.Manifests, index.Annotations)
}
// ToOCI1Index returns the index encoded as an OCI1 index.
func (index *OCI1Index) ToOCI1Index() (*OCI1Index, error) {
return OCI1IndexClone(index), nil
}
// ToSchema2List returns the index encoded as a Schema2 list.
func (index *OCI1Index) ToSchema2List() (*Schema2List, error) {
components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests))
for _, manifest := range index.Manifests {
platform := manifest.Platform
if platform == nil {
platform = &imgspecv1.Platform{
OS: runtime.GOOS,
Architecture: runtime.GOARCH,
}
}
converted := Schema2ManifestDescriptor{
Schema2Descriptor{
MediaType: manifest.MediaType,
Size: manifest.Size,
Digest: manifest.Digest,
URLs: dupStringSlice(manifest.URLs),
},
Schema2PlatformSpec{
OS: platform.OS,
Architecture: platform.Architecture,
OSFeatures: dupStringSlice(platform.OSFeatures),
OSVersion: platform.OSVersion,
Variant: platform.Variant,
},
}
components = append(components, converted)
}
s2 := Schema2ListFromComponents(components)
return s2, nil
}
// OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled
// JSON, presumably generated by encoding an OCI1 manifest index.
func OCI1IndexFromManifest(manifestBlob []byte) (*OCI1Index, error) {
return manifest.OCI1IndexPublicFromManifest(manifestBlob)
func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
index := OCI1Index{
Index: imgspecv1.Index{
Versioned: imgspec.Versioned{SchemaVersion: 2},
MediaType: imgspecv1.MediaTypeImageIndex,
Manifests: []imgspecv1.Descriptor{},
Annotations: make(map[string]string),
},
}
if err := json.Unmarshal(manifest, &index); err != nil {
return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err)
}
if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
allowedFieldManifests); err != nil {
return nil, err
}
return &index, nil
}
// Clone returns a deep copy of this list and its contents.
func (index *OCI1Index) Clone() List {
return OCI1IndexClone(index)
}
// ConvertToMIMEType converts the passed-in image index to a manifest list of
// the specified type.
func (index *OCI1Index) ConvertToMIMEType(manifestMIMEType string) (List, error) {
switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
case DockerV2ListMediaType:
return index.ToSchema2List()
case imgspecv1.MediaTypeImageIndex:
return index.Clone(), nil
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType)
default:
// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType)
}
}

View File

@ -109,8 +109,8 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
return d.unpackedDest.PutBlobWithOptions(ctx, stream, inputInfo, options)
}
@ -119,16 +119,18 @@ func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, str
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil).
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
return d.unpackedDest.TryReusingBlobWithOptions(ctx, info, options)
}

View File

@ -58,7 +58,13 @@ func splitPathAndImageWindows(reference string) (string, string) {
}
func splitPathAndImageNonWindows(reference string) (string, string) {
path, image, _ := strings.Cut(reference, ":") // image is set to "" if there is no ":"
sep := strings.SplitN(reference, ":", 2)
path := sep[0]
var image string
if len(sep) == 2 {
image = sep[1]
}
return path, image
}
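Editor's note: for example (hedged, as if in a test in this package):

path, image := splitPathAndImageNonWindows("/var/lib/oci/busybox:latest")
// path == "/var/lib/oci/busybox", image == "latest"
path, image = splitPathAndImageNonWindows("/var/lib/oci/busybox")
// image == "" when no ":" is present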

View File

@ -12,9 +12,9 @@ import (
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
@ -107,11 +107,11 @@ func (d *ociImageDestination) Close() error {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
if err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
succeeded := false
explicitClosed := false
@ -128,14 +128,14 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(blobFile, stream)
if err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
@ -144,50 +144,52 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
// always fails on Windows.
if runtime.GOOS != "windows" {
if err := blobFile.Chmod(0644); err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
}
blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
if err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
if err := ensureParentDirectoryExists(blobPath); err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
// need to explicitly close the file, since a rename won't otherwise work on Windows
blobFile.Close()
explicitClosed = true
if err := os.Rename(blobFile.Name(), blobPath); err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
succeeded = true
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
return types.BlobInfo{Digest: blobDigest, Size: size}, nil
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil).
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
}
blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
if err != nil {
return false, private.ReusedBlob{}, err
return false, types.BlobInfo{}, err
}
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
return false, private.ReusedBlob{}, nil
return false, types.BlobInfo{}, nil
}
if err != nil {
return false, private.ReusedBlob{}, err
return false, types.BlobInfo{}, err
}
return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
}
// PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types,

View File

@ -12,8 +12,8 @@ import (
"github.com/containers/image/v5/internal/imagesource/impl"
"github.com/containers/image/v5/internal/imagesource/stubs"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
"github.com/docker/go-connections/tlsconfig"
@ -94,7 +94,6 @@ func (s *ociImageSource) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageSource, if any.
func (s *ociImageSource) Close() error {
s.client.CloseIdleConnections()
return nil
}

View File

@ -100,7 +100,7 @@ func (ref ociReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref ociReference) StringWithinTransport() string {
return fmt.Sprintf("%s:%s", ref.dir, ref.image)

View File

@ -3,12 +3,11 @@ package openshift
import (
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"net"
"net/http"
"net/netip"
"net/url"
"os"
"path"
@ -18,10 +17,10 @@ import (
"time"
"github.com/containers/storage/pkg/homedir"
"github.com/ghodss/yaml"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
"gopkg.in/yaml.v3"
"golang.org/x/net/http2"
)
// restTLSClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.TLSClientConfig.
@ -206,12 +205,15 @@ func (config *directClientConfig) ClientConfig() (*restConfig, error) {
if isConfigTransportTLS(*clientConfig) {
var err error
// REMOVED: Support for interactive fallback.
userAuthPartialConfig := getUserIdentificationPartialConfig(configAuthInfo)
userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo)
if err != nil {
return nil, err
}
if err = mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig); err != nil {
return nil, err
}
serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configClusterInfo)
serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
if err != nil {
return nil, err
}
@ -230,7 +232,7 @@ func (config *directClientConfig) ClientConfig() (*restConfig, error) {
// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
// 3. load the ~/.kubernetes_auth file as a default
func getServerIdentificationPartialConfig(configClusterInfo clientcmdCluster) (*restConfig, error) {
func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) {
mergedConfig := &restConfig{}
// configClusterInfo holds the information identifying the server provided by .kubeconfig
@ -253,7 +255,7 @@ func getServerIdentificationPartialConfig(configClusterInfo clientcmdCluster) (*
// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
// 4. if there is not enough information to identify the user, prompt if possible
func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) *restConfig {
func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
mergedConfig := &restConfig{}
// blindly overwrite existing values based on precedence
@ -272,7 +274,7 @@ func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) *restC
}
// REMOVED: prompting for missing information.
return mergedConfig
return mergedConfig, nil
}
// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
@ -670,7 +672,11 @@ func load(data []byte) (*clientcmdConfig, error) {
return config, nil
}
// Note: This does absolutely no kind/version checking or conversions.
if err := yaml.Unmarshal(data, config); err != nil {
data, err := yaml.YAMLToJSON(data)
if err != nil {
return nil, err
}
if err := json.Unmarshal(data, config); err != nil {
return nil, err
}
return config, nil
@ -871,11 +877,11 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
noProxyEnv := os.Getenv("NO_PROXY")
noProxyRules := strings.Split(noProxyEnv, ",")
cidrs := []netip.Prefix{}
cidrs := []*net.IPNet{}
for _, noProxyRule := range noProxyRules {
prefix, err := netip.ParsePrefix(noProxyRule)
if err == nil {
cidrs = append(cidrs, prefix)
_, cidr, _ := net.ParseCIDR(noProxyRule)
if cidr != nil {
cidrs = append(cidrs, cidr)
}
}
@ -886,7 +892,7 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
return func(req *http.Request) (*url.URL, error) {
host := req.URL.Host
// for some urls, the Host is already the host, not the host:port
if _, err := netip.ParseAddr(host); err != nil {
if net.ParseIP(host) == nil {
var err error
host, _, err = net.SplitHostPort(req.URL.Host)
if err != nil {
@ -894,15 +900,15 @@ func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error
}
}
ip, err := netip.ParseAddr(host)
if err != nil {
ip := net.ParseIP(host)
if ip == nil {
return delegate(req)
}
if slices.ContainsFunc(cidrs, func(cidr netip.Prefix) bool {
return cidr.Contains(ip)
}) {
return nil, nil
for _, cidr := range cidrs {
if cidr.Contains(ip) {
return nil, nil
}
}
return delegate(req)
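Editor's note: the restored matching logic reduces to net.ParseCIDR plus IPNet.Contains; in isolation (hypothetical NO_PROXY entry):

_, cidr, _ := net.ParseCIDR("10.0.0.0/8")
fmt.Println(cidr.Contains(net.ParseIP("10.1.2.3")))    // true  → bypass the proxy (return nil, nil)
fmt.Println(cidr.Contains(net.ParseIP("192.168.1.1"))) // false → fall back to the delegate proxier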
@ -930,14 +936,14 @@ func tlsCacheGet(config *restConfig) (http.RoundTripper, error) {
Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment),
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: tlsConfig,
DialContext: (&net.Dialer{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
}).Dial,
}
// Allow clients to disable http2 if needed.
if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 {
t.ForceAttemptHTTP2 = true
_ = http2.ConfigureTransport(t)
}
return t, nil
}
@ -1051,20 +1057,20 @@ func (c *restConfig) HasCertAuth() bool {
// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
type clientcmdConfig struct {
// Clusters is a map of referenceable names to cluster configs
Clusters clustersMap `yaml:"clusters"`
Clusters clustersMap `json:"clusters"`
// AuthInfos is a map of referenceable names to user configs
AuthInfos authInfosMap `yaml:"users"`
AuthInfos authInfosMap `json:"users"`
// Contexts is a map of referenceable names to context configs
Contexts contextsMap `yaml:"contexts"`
Contexts contextsMap `json:"contexts"`
// CurrentContext is the name of the context that you would like to use by default
CurrentContext string `yaml:"current-context"`
CurrentContext string `json:"current-context"`
}
type clustersMap map[string]*clientcmdCluster
func (m *clustersMap) UnmarshalYAML(value *yaml.Node) error {
func (m *clustersMap) UnmarshalJSON(data []byte) error {
var a []v1NamedCluster
if err := value.Decode(&a); err != nil {
if err := json.Unmarshal(data, &a); err != nil {
return err
}
for _, e := range a {
@ -1076,9 +1082,9 @@ func (m *clustersMap) UnmarshalYAML(value *yaml.Node) error {
type authInfosMap map[string]*clientcmdAuthInfo
func (m *authInfosMap) UnmarshalYAML(value *yaml.Node) error {
func (m *authInfosMap) UnmarshalJSON(data []byte) error {
var a []v1NamedAuthInfo
if err := value.Decode(&a); err != nil {
if err := json.Unmarshal(data, &a); err != nil {
return err
}
for _, e := range a {
@ -1090,9 +1096,9 @@ func (m *authInfosMap) UnmarshalYAML(value *yaml.Node) error {
type contextsMap map[string]*clientcmdContext
func (m *contextsMap) UnmarshalYAML(value *yaml.Node) error {
func (m *contextsMap) UnmarshalJSON(data []byte) error {
var a []v1NamedContext
if err := value.Decode(&a); err != nil {
if err := json.Unmarshal(data, &a); err != nil {
return err
}
for _, e := range a {
@ -1112,32 +1118,19 @@ func clientcmdNewConfig() *clientcmdConfig {
}
}
// yamlBinaryAsBase64String is a []byte that can be stored in yaml as a !!str, not a !!binary
type yamlBinaryAsBase64String []byte
func (bin *yamlBinaryAsBase64String) UnmarshalText(text []byte) error {
res := make([]byte, base64.StdEncoding.DecodedLen(len(text)))
n, err := base64.StdEncoding.Decode(res, text)
if err != nil {
return err
}
*bin = res[:n]
return nil
}
// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster.
// Cluster contains information about how to communicate with a kubernetes cluster
type clientcmdCluster struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
LocationOfOrigin string
// Server is the address of the kubernetes cluster (https://hostname:port).
Server string `yaml:"server"`
Server string `json:"server"`
// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
InsecureSkipTLSVerify bool `yaml:"insecure-skip-tls-verify,omitempty"`
InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
// CertificateAuthority is the path to a cert file for the certificate authority.
CertificateAuthority string `yaml:"certificate-authority,omitempty"`
CertificateAuthority string `json:"certificate-authority,omitempty"`
// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
CertificateAuthorityData yamlBinaryAsBase64String `yaml:"certificate-authority-data,omitempty"`
CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
}
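
Dropping yamlBinaryAsBase64String works because encoding/json already base64-decodes JSON strings into []byte fields, so the certificate-authority-data path needs no custom decoder once the tags are JSON. A small demonstration (the struct and input are illustrative):

package main

import (
    "encoding/json"
    "fmt"
)

type cluster struct {
    // encoding/json transparently decodes the base64 string into raw bytes.
    CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
}

func main() {
    in := []byte(`{"certificate-authority-data":"aGVsbG8="}`)
    var c cluster
    if err := json.Unmarshal(in, &c); err != nil {
        panic(err)
    }
    fmt.Printf("%s\n", c.CertificateAuthorityData) // hello
}
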
// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo.
@ -1146,19 +1139,19 @@ type clientcmdAuthInfo struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
LocationOfOrigin string
// ClientCertificate is the path to a client cert file for TLS.
ClientCertificate string `yaml:"client-certificate,omitempty"`
ClientCertificate string `json:"client-certificate,omitempty"`
// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
ClientCertificateData yamlBinaryAsBase64String `yaml:"client-certificate-data,omitempty"`
ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
// ClientKey is the path to a client key file for TLS.
ClientKey string `yaml:"client-key,omitempty"`
ClientKey string `json:"client-key,omitempty"`
// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
ClientKeyData yamlBinaryAsBase64String `yaml:"client-key-data,omitempty"`
ClientKeyData []byte `json:"client-key-data,omitempty"`
// Token is the bearer token for authentication to the kubernetes cluster.
Token string `yaml:"token,omitempty"`
Token string `json:"token,omitempty"`
// Username is the username for basic authentication to the kubernetes cluster.
Username string `yaml:"username,omitempty"`
Username string `json:"username,omitempty"`
// Password is the password for basic authentication to the kubernetes cluster.
Password string `yaml:"password,omitempty"`
Password string `json:"password,omitempty"`
}
// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context.
@ -1167,36 +1160,36 @@ type clientcmdContext struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
LocationOfOrigin string
// Cluster is the name of the cluster for this context
Cluster string `yaml:"cluster"`
Cluster string `json:"cluster"`
// AuthInfo is the name of the authInfo for this context
AuthInfo string `yaml:"user"`
AuthInfo string `json:"user"`
// Namespace is the default namespace to use on unspecified requests
Namespace string `yaml:"namespace,omitempty"`
Namespace string `json:"namespace,omitempty"`
}
// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster.
// NamedCluster relates nicknames to cluster information
type v1NamedCluster struct {
// Name is the nickname for this Cluster
Name string `yaml:"name"`
Name string `json:"name"`
// Cluster holds the cluster information
Cluster clientcmdCluster `yaml:"cluster"`
Cluster clientcmdCluster `json:"cluster"`
}
// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext.
// NamedContext relates nicknames to context information
type v1NamedContext struct {
// Name is the nickname for this Context
Name string `yaml:"name"`
Name string `json:"name"`
// Context holds the context information
Context clientcmdContext `yaml:"context"`
Context clientcmdContext `json:"context"`
}
// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo.
// NamedAuthInfo relates nicknames to auth information
type v1NamedAuthInfo struct {
// Name is the nickname for this AuthInfo
Name string `yaml:"name"`
Name string `json:"name"`
// AuthInfo holds the auth information
AuthInfo clientcmdAuthInfo `yaml:"user"`
AuthInfo clientcmdAuthInfo `json:"user"`
}
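
All three UnmarshalJSON methods above share one shape: decode a JSON array of name/value pairs, then index it by name. A compact sketch of that pattern with hypothetical types:

package main

import (
    "encoding/json"
    "fmt"
)

type namedEntry struct {
    Name  string `json:"name"`
    Value string `json:"value"`
}

type entryMap map[string]string

// UnmarshalJSON converts a []namedEntry JSON array into a map keyed by Name,
// mirroring clustersMap/authInfosMap/contextsMap above.
func (m *entryMap) UnmarshalJSON(data []byte) error {
    var list []namedEntry
    if err := json.Unmarshal(data, &list); err != nil {
        return err
    }
    if *m == nil {
        *m = entryMap{}
    }
    for _, e := range list {
        (*m)[e.Name] = e.Value
    }
    return nil
}

func main() {
    var m entryMap
    _ = json.Unmarshal([]byte(`[{"name":"a","value":"1"},{"name":"b","value":"2"}]`), &m)
    fmt.Println(m["a"], m["b"]) // 1 2
}
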


@ -146,11 +146,11 @@ func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName str
// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use;
// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside.
func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
_, repo, gotRepo := strings.Cut(ref, "/")
if !gotRepo {
parts := strings.SplitN(ref, "/", 2)
if len(parts) != 2 {
return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
}
return reference.Domain(c.ref.dockerReference) + "/" + repo, nil
return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil
}
// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.


@ -17,12 +17,10 @@ import (
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)
type openshiftImageDestination struct {
@ -116,8 +114,8 @@ func (d *openshiftImageDestination) SupportsPutBlobPartial() bool {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
return d.docker.PutBlobWithOptions(ctx, stream, inputInfo, options)
}
@ -126,16 +124,18 @@ func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stre
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil).
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
return d.docker.TryReusingBlobWithOptions(ctx, info, options)
}
@ -180,11 +180,12 @@ func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context,
if err != nil {
return err
}
existingSigNames := set.New[string]()
existingSigNames := map[string]struct{}{}
for _, sig := range image.Signatures {
existingSigNames.Add(sig.objectMeta.Name)
existingSigNames[sig.objectMeta.Name] = struct{}{}
}
sigExists:
for _, newSigWithFormat := range signatures {
newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning)
if !ok {
@ -192,10 +193,10 @@ func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context,
}
newSig := newSigSimple.UntrustedSignature()
if slices.ContainsFunc(image.Signatures, func(existingSig imageSignature) bool {
return existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig)
}) {
continue
for _, existingSig := range image.Signatures {
if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
continue sigExists
}
}
// The API expects us to invent a new unique name. This is racy, but hopefully good enough.
@ -207,7 +208,7 @@ func (d *openshiftImageDestination) PutSignaturesWithFormat(ctx context.Context,
return fmt.Errorf("generating random signature len %d: %w", n, err)
}
signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes)
if !existingSigNames.Contains(signatureName) {
if _, ok := existingSigNames[signatureName]; !ok {
break
}
}
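
The loop above invents a random, collision-checked signature name of the form <image>@<32 hex chars>. A standalone sketch of that naming scheme (the image name and retry bound are invented for the example):

package main

import (
    "crypto/rand"
    "fmt"
)

func main() {
    existing := map[string]struct{}{} // names already attached to the image
    imageStreamImageName := "sha256:1234"
    var signatureName string
    for i := 0; i < 3; i++ { // retry on the (unlikely) collision
        randBytes := make([]byte, 16)
        if _, err := rand.Read(randBytes); err != nil {
            panic(err)
        }
        // 16 random bytes render as exactly 32 hex characters.
        signatureName = fmt.Sprintf("%s@%032x", imageStreamImageName, randBytes)
        if _, ok := existing[signatureName]; !ok {
            break
        }
    }
    fmt.Println(signatureName)
}
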


@ -89,7 +89,7 @@ func (ref openshiftReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref openshiftReference) StringWithinTransport() string {
return reference.FamiliarString(ref.dockerReference)


@ -69,7 +69,7 @@ type manifestSchema struct {
}
type ostreeImageDestination struct {
impl.Compat
compat impl.Compat
impl.PropertyMethodsInitialize
stubs.NoPutBlobPartialInitialize
stubs.AlwaysSupportsSignatures
@ -135,16 +135,16 @@ func (d *ostreeImageDestination) Close() error {
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob")
if err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
blobPath := filepath.Join(tmpDir, "content")
blobFile, err := os.Create(blobPath)
if err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
defer blobFile.Close()
@ -152,19 +152,19 @@ func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(blobFile, stream)
if err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return private.UploadedBlob{}, err
return types.BlobInfo{}, err
}
hash := blobDigest.Hex()
d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath}
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
return types.BlobInfo{Digest: blobDigest, Size: size}, nil
}
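
PutBlobWithOptions streams the blob to a temp file while feeding a digester, then cross-checks the byte count. One way to fold those two steps together, using github.com/opencontainers/go-digest (the input and expected size here are illustrative):

package main

import (
    "bytes"
    "fmt"
    "io"
    "os"

    digest "github.com/opencontainers/go-digest"
)

func main() {
    stream := bytes.NewReader([]byte("layer data"))
    expectedSize := int64(10)

    f, err := os.CreateTemp("", "blob")
    if err != nil {
        panic(err)
    }
    defer os.Remove(f.Name())

    // Hash the bytes as they are written, without a second read pass.
    digester := digest.Canonical.Digester()
    size, err := io.Copy(io.MultiWriter(f, digester.Hash()), stream)
    if err != nil {
        panic(err)
    }
    if expectedSize != -1 && size != expectedSize {
        panic(fmt.Sprintf("size mismatch: expected %d, got %d", expectedSize, size))
    }
    fmt.Println(digester.Digest(), size)
}
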
func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
@ -334,11 +334,11 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
if d.repo == nil {
repo, err := openRepo(d.ref.repo)
if err != nil {
return false, private.ReusedBlob{}, err
return false, types.BlobInfo{}, err
}
d.repo = repo
}
@ -346,25 +346,25 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context,
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
return found, private.ReusedBlob{}, err
return found, types.BlobInfo{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
if err != nil || !found {
return found, private.ReusedBlob{}, err
return found, types.BlobInfo{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.size")
if err != nil || !found {
return found, private.ReusedBlob{}, err
return found, types.BlobInfo{}, err
}
size, err := strconv.ParseInt(data, 10, 64)
if err != nil {
return false, private.ReusedBlob{}, err
return false, types.BlobInfo{}, err
}
return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
}
// PutManifest writes manifest to the destination.


@ -75,11 +75,12 @@ type ostreeImageCloser struct {
func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
var repo = ""
image, repoPart, gotRepoPart := strings.Cut(ref, "@/")
if !gotRepoPart {
repo = defaultOSTreeRepo
var image = ""
s := strings.SplitN(ref, "@/", 2)
if len(s) == 1 {
image, repo = s[0], defaultOSTreeRepo
} else {
repo = "/" + repoPart
image, repo = s[0], "/"+s[1]
}
return NewReference(image, repo)
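
ParseReference splits the "image@/repo" form with strings.SplitN and falls back to the default repo when there is no "@/" separator. The same branching in isolation (the default path and sample inputs are assumptions for the sketch):

package main

import (
    "fmt"
    "strings"
)

const defaultOSTreeRepo = "/ostree/repo" // assumed default, as in the transport

func parse(ref string) (image, repo string) {
    s := strings.SplitN(ref, "@/", 2)
    if len(s) == 1 {
        return s[0], defaultOSTreeRepo // no explicit repo given
    }
    return s[0], "/" + s[1] // SplitN consumed the leading '/', put it back
}

func main() {
    fmt.Println(parse("busybox:latest"))           // busybox:latest /ostree/repo
    fmt.Println(parse("busybox:latest@/tmp/repo")) // busybox:latest /tmp/repo
}
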
@ -133,7 +134,7 @@ func (ref ostreeReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref ostreeReference) StringWithinTransport() string {
return fmt.Sprintf("%s@%s", ref.image, ref.repo)
@ -156,11 +157,11 @@ func (ref ostreeReference) PolicyConfigurationIdentity() string {
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
repo, _, gotTag := strings.Cut(ref.image, ":")
if !gotTag { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
s := strings.SplitN(ref.image, ":", 2)
if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag.
panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image))
}
name := repo
name := s[0]
res := []string{}
for {
res = append(res, fmt.Sprintf("%s:%s", ref.repo, name))


@ -6,7 +6,6 @@ import (
"time"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
@ -20,12 +19,12 @@ type locationKey struct {
blobDigest digest.Digest
}
// cache implements an in-memory-only BlobInfoCache.
type cache struct {
mutex sync.Mutex
// The following fields can only be accessed with mutex held.
uncompressedDigests map[digest.Digest]digest.Digest
digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown, for each digest
}
@ -45,7 +44,7 @@ func New() types.BlobInfoCache {
func new2() *cache {
return &cache{
uncompressedDigests: map[digest.Digest]digest.Digest{},
digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{},
digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
compressors: map[digest.Digest]string{},
}
@ -68,7 +67,7 @@ func (mem *cache) uncompressedDigestLocked(anyDigest digest.Digest) digest.Diges
// Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
// when we already record a (compressed, uncompressed) pair.
if s, ok := mem.digestsByUncompressed[anyDigest]; ok && !s.Empty() {
if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
return anyDigest
}
return ""
@ -89,10 +88,10 @@ func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
if !ok {
anyDigestSet = set.New[digest.Digest]()
anyDigestSet = map[digest.Digest]struct{}{}
mem.digestsByUncompressed[uncompressed] = anyDigestSet
}
anyDigestSet.Add(anyDigest)
anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
}
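
With the internal set package gone, the cache falls back to the idiomatic map[T]struct{} set. The three operations it needs, in miniature:

package main

import "fmt"

func main() {
    set := map[string]struct{}{}

    // Add; re-adding just rewrites the same zero-byte presence marker.
    set["sha256:aaa"] = struct{}{}
    set["sha256:aaa"] = struct{}{}

    // Membership test.
    if _, ok := set["sha256:aaa"]; ok {
        fmt.Println("present")
    }

    // Iteration (order is unspecified).
    for d := range set {
        fmt.Println(d)
    }
}
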
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
@ -172,11 +171,10 @@ func (mem *cache) candidateLocations(transport types.ImageTransport, scope types
var uncompressedDigest digest.Digest // = ""
if canSubstitute {
if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
if otherDigests, ok := mem.digestsByUncompressed[uncompressedDigest]; ok {
for _, d := range otherDigests.Values() {
if d != primaryDigest && d != uncompressedDigest {
res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
}
otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
for d := range otherDigests {
if d != primaryDigest && d != uncompressedDigest {
res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
}
}
if uncompressedDigest != primaryDigest {


@ -30,7 +30,7 @@ var (
// Zstd compression.
Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName,
[]byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
// ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files.
// Zstd:chunked compression.
ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */
nil, ZstdDecompressor, compressor.ZstdCompressor)


@ -12,7 +12,6 @@ import (
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
@ -140,7 +139,10 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// possible sources, and then call `GetCredentials` on them. That
// prevents us from having to reverse engineer the logic in
// `GetCredentials`.
allKeys := set.New[string]()
allKeys := make(map[string]bool)
addKey := func(s string) {
allKeys[s] = true
}
// To use GetCredentials, we must at least convert the URL forms into host names.
// While we're at it, we'll also canonicalize docker.io to the standard format.
@ -164,14 +166,14 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// direct mapping to a registry, so we can just
// walk the map.
for registry := range auths.CredHelpers {
allKeys.Add(registry)
addKey(registry)
}
for key := range auths.AuthConfigs {
key := normalizeAuthFileKey(key, path.legacyFormat)
if key == normalizedDockerIORegistry {
key = "docker.io"
}
allKeys.Add(key)
addKey(key)
}
}
// External helpers.
@ -186,7 +188,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
}
}
for registry := range creds {
allKeys.Add(registry)
addKey(registry)
}
}
}
@ -194,7 +196,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// Now use `GetCredentials` to the specific auth configs for each
// previously listed registry.
authConfigs := make(map[string]types.DockerAuthConfig)
for _, key := range allKeys.Values() {
for key := range allKeys {
authConf, err := GetCredentials(sys, key)
if err != nil {
// Note: we rely on the logging in `GetCredentials`.
@ -392,16 +394,17 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
if isNamespaced {
logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper)
return
}
err := deleteAuthFromCredHelper(helper, key)
if err == nil {
logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
isLoggedIn = true
return
}
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
return
} else {
err := deleteAuthFromCredHelper(helper, key)
if err == nil {
logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper)
isLoggedIn = true
return
}
if credentials.IsErrCredentialsNotFoundMessage(err.Error()) {
logrus.Debugf("Not logged in to %s with credential helper %s", key, helper)
return
}
}
multiErr = multierror.Append(multiErr, fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err))
}
@ -756,8 +759,8 @@ func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuth
return types.DockerAuthConfig{}, err
}
user, passwordPart, valid := strings.Cut(string(decoded), ":")
if !valid {
parts := strings.SplitN(string(decoded), ":", 2)
if len(parts) != 2 {
// if it's invalid just skip, as docker does
if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, don't warn about those
logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing semicolon`, key, path) // Don't include the text of decoded, because that might put secrets into a log.
@ -767,7 +770,8 @@ func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuth
return types.DockerAuthConfig{}, nil
}
password := strings.Trim(passwordPart, "\x00")
user := parts[0]
password := strings.Trim(parts[1], "\x00")
return types.DockerAuthConfig{
Username: user,
Password: password,
@ -782,7 +786,7 @@ func normalizeAuthFileKey(key string, legacyFormat bool) string {
stripped = strings.TrimPrefix(stripped, "https://")
if legacyFormat || stripped != key {
stripped, _, _ = strings.Cut(stripped, "/")
stripped = strings.SplitN(stripped, "/", 2)[0]
}
return normalizeRegistry(stripped)
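
decodeDockerAuth above splits the base64 "user:password" pair on the first colon and strips trailing NULs from the password. That decode step on its own (the sample credential is fabricated):

package main

import (
    "encoding/base64"
    "fmt"
    "strings"
)

func main() {
    // base64("user:pass")
    auth := "dXNlcjpwYXNz"
    decoded, err := base64.StdEncoding.DecodeString(auth)
    if err != nil {
        panic(err)
    }
    parts := strings.SplitN(string(decoded), ":", 2)
    if len(parts) != 2 {
        panic("missing ':' separator") // real code skips such entries, as docker does
    }
    user := parts[0]
    password := strings.Trim(parts[1], "\x00") // legacy files may pad with NULs
    fmt.Println(user, password)                // user pass
}
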


@ -14,7 +14,6 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/lockfile"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)
// defaultShortNameMode is the default mode of registries.conf files if the
@ -309,7 +308,9 @@ func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAl
// updateWithConfigurationFrom updates c with configuration from updates.
// In case of conflict, updates is preferred.
func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) {
maps.Copy(c.namedAliases, updates.namedAliases)
for name, value := range updates.namedAliases {
c.namedAliases[name] = value
}
}
func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) {


@ -16,7 +16,6 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/regexp"
"github.com/sirupsen/logrus"
"golang.org/x/exp/maps"
)
// systemRegistriesConfPath is the path to the system-wide registry
@ -1020,9 +1019,12 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) {
// Go maps have a non-deterministic order when iterating the keys, so
// we dump them in a slice and sort it to enforce some order in
// Registries slice. Some consumers of c/image (e.g., CRI-O) log the
// configuration where a non-deterministic order could easily cause
// confusion.
prefixes := maps.Keys(registryMap)
prefixes := []string{}
for prefix := range registryMap {
prefixes = append(prefixes, prefix)
}
sort.Strings(prefixes)
c.partialV2.Registries = []Registry{}
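
Replacing maps.Keys with a manual collect-and-sort keeps the Registries order stable, as the comment above explains. The general recipe:

package main

import (
    "fmt"
    "sort"
)

func main() {
    registryMap := map[string]int{"quay.io": 1, "docker.io": 2}

    // Collect keys, then sort, so iteration order is deterministic.
    prefixes := make([]string, 0, len(registryMap))
    for prefix := range registryMap {
        prefixes = append(prefixes, prefix)
    }
    sort.Strings(prefixes)

    for _, p := range prefixes {
        fmt.Println(p, registryMap[p]) // docker.io 2, then quay.io 1
    }
}
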


@ -12,7 +12,6 @@ import (
"time"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
@ -81,9 +80,12 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
}
func hasFile(files []os.DirEntry, name string) bool {
return slices.ContainsFunc(files, func(f os.DirEntry) bool {
return f.Name() == name
})
for _, f := range files {
if f.Name() == name {
return true
}
}
return false
}
// NewTransport creates a default transport
@ -91,13 +93,14 @@ func NewTransport() *http.Transport {
direct := &net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: direct.DialContext,
TLSHandshakeTimeout: 10 * time.Second,
IdleConnTimeout: 90 * time.Second,
MaxIdleConns: 100,
// TODO(dmcgowan): Call close idle connections when complete and use keep alive
DisableKeepAlives: true,
}
return tr
}


@ -87,7 +87,7 @@ func (ref sifReference) Transport() types.ImageTransport {
// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
// instead, see transports.ImageName().
func (ref sifReference) StringWithinTransport() string {


@ -12,7 +12,6 @@ import (
"github.com/containers/image/v5/signature/internal"
"github.com/sigstore/fulcio/pkg/certificate"
"github.com/sigstore/sigstore/pkg/cryptoutils"
"golang.org/x/exp/slices"
)
// fulcioTrustRoot contains policy allow validating Fulcio-issued certificates.
@ -105,12 +104,12 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,
// log of approved Fulcio invocations, and it's not clear where that would come from, especially human users manually
// logging in using OpenID are not going to maintain a record of those actions.
//
// Also, the SCT does not help reveal _what_ was maliciously signed, nor does it protect against malicious signatures
// by correctly-issued certificates.
//
// So, pragmatically, the ideal design seems to be to only do signatures from a trusted build system (which is, by definition,
// the arbiter of desired vs. malicious signatures) that maintains an audit log of performed signature operations; and that seems to
// make the SCT (and all of Rekor apart from the trusted timestamp) unnecessary.
// == Validate the recorded OIDC issuer
gotOIDCIssuer := false
@ -137,7 +136,15 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time,
}
// == Validate the OIDC subject
if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) {
foundEmail := false
// TODO: Use slices.Contains after we update to Go 1.18
for _, certEmail := range untrustedCertificate.EmailAddresses {
if certEmail == f.subjectEmail {
foundEmail = true
break
}
}
if !foundEmail {
return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %s not found (got %#v)",
f.subjectEmail,
untrustedCertificate.EmailAddresses))
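
The foundEmail loop is the pre-generics stand-in for slices.Contains. As a reusable helper it is just:

package main

import "fmt"

// containsString reports whether want occurs in list; a stand-in for
// slices.Contains on toolchains before Go 1.18/x/exp.
func containsString(list []string, want string) bool {
    for _, s := range list {
        if s == want {
            return true
        }
    }
    return false
}

func main() {
    emails := []string{"dev@example.com", "release@example.com"}
    fmt.Println(containsString(emails, "release@example.com")) // true
}
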


@ -5,8 +5,6 @@ import (
"encoding/json"
"fmt"
"io"
"github.com/containers/image/v5/internal/set"
)
// JSONFormatError is returned when JSON does not match expected format.
@ -22,8 +20,8 @@ func (err JSONFormatError) Error() string {
//
// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
// we could use reflection to automate this. Later?
func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) error {
seenKeys := set.New[string]()
func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
seenKeys := map[string]struct{}{}
dec := json.NewDecoder(bytes.NewReader(data))
t, err := dec.Token()
@ -47,10 +45,10 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) er
// Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
}
if seenKeys.Contains(key) {
if _, ok := seenKeys[key]; ok {
return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
}
seenKeys.Add(key)
seenKeys[key] = struct{}{}
valuePtr := fieldResolver(key)
if valuePtr == nil {
@ -70,11 +68,11 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) er
// ParanoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, but failing on the slightest unexpected aspect
// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
// must be present exactly once, and no other fields are accepted.
func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]any) error {
seenKeys := set.New[string]()
if err := ParanoidUnmarshalJSONObject(data, func(key string) any {
func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
seenKeys := map[string]struct{}{}
if err := ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
if valuePtr, ok := exactFields[key]; ok {
seenKeys.Add(key)
seenKeys[key] = struct{}{}
return valuePtr
}
return nil
@ -82,7 +80,7 @@ func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]
return err
}
for key := range exactFields {
if !seenKeys.Contains(key) {
if _, ok := seenKeys[key]; !ok {
return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
}
}
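
Underneath its field dispatch, ParanoidUnmarshalJSONObject rejects duplicate keys by walking the object with a streaming decoder. A sketch of just that duplicate-key scan over a flat object (the helper name is invented):

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
)

// firstDuplicateKey scans one JSON object token by token and reports the
// first repeated key, the check the function above layers under its
// fieldResolver dispatch.
func firstDuplicateKey(data []byte) (string, bool) {
    dec := json.NewDecoder(bytes.NewReader(data))
    if _, err := dec.Token(); err != nil { // consume the opening '{'
        return "", false
    }
    seen := map[string]struct{}{}
    for dec.More() {
        t, err := dec.Token() // next key
        if err != nil {
            return "", false
        }
        key, ok := t.(string)
        if !ok {
            return "", false
        }
        if _, dup := seen[key]; dup {
            return key, true
        }
        seen[key] = struct{}{}
        // Skip the value; RawMessage swallows any JSON value.
        var v json.RawMessage
        if err := dec.Decode(&v); err != nil {
            return "", false
        }
    }
    return "", false
}

func main() {
    fmt.Println(firstDuplicateKey([]byte(`{"a":1,"b":2,"a":3}`))) // a true
}
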


@ -28,7 +28,7 @@ type UntrustedRekorSET struct {
}
type UntrustedRekorPayload struct {
Body []byte // In cosign, this is an any, but only a string works
Body []byte // In cosign, this is an interface{}, but only a string works
IntegratedTime int64
LogIndex int64
LogID string
@ -51,7 +51,7 @@ func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
func (s *UntrustedRekorSET) strictUnmarshalJSON(data []byte) error {
return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
return ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"SignedEntryTimestamp": &s.UntrustedSignedEntryTimestamp,
"Payload": &s.UntrustedPayload,
})
@ -63,7 +63,7 @@ var _ json.Marshaler = (*UntrustedRekorSET)(nil)
// MarshalJSON implements the json.Marshaler interface.
func (s UntrustedRekorSET) MarshalJSON() ([]byte, error) {
return json.Marshal(map[string]any{
return json.Marshal(map[string]interface{}{
"SignedEntryTimestamp": s.UntrustedSignedEntryTimestamp,
"Payload": s.UntrustedPayload,
})
@ -86,7 +86,7 @@ func (p *UntrustedRekorPayload) UnmarshalJSON(data []byte) error {
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal JSONFormatError error type.
// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
func (p *UntrustedRekorPayload) strictUnmarshalJSON(data []byte) error {
return ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
return ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"body": &p.Body,
"integratedTime": &p.IntegratedTime,
"logIndex": &p.LogIndex,
@ -100,7 +100,7 @@ var _ json.Marshaler = (*UntrustedRekorPayload)(nil)
// MarshalJSON implements the json.Marshaler interface.
func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) {
return json.Marshal(map[string]any{
return json.Marshal(map[string]interface{}{
"body": p.Body,
"integratedTime": p.IntegratedTime,
"logIndex": p.LogIndex,
@ -159,7 +159,7 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver
}
hashedRekordV001Bytes, err := json.Marshal(hashedRekord.Spec)
if err != nil {
// Coverage: hashedRekord.Spec is an any that was just unmarshaled,
// Coverage: hashedRekord.Spec is an interface{} that was just unmarshaled,
// so this should never fail.
return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("re-creating hashedrekord spec: %v", err))
}


@ -21,14 +21,14 @@ const (
// UntrustedSigstorePayload is a parsed content of a sigstore signature payload (not the full signature)
type UntrustedSigstorePayload struct {
untrustedDockerManifestDigest digest.Digest
untrustedDockerReference string // FIXME: more precise type?
untrustedCreatorID *string
UntrustedDockerManifestDigest digest.Digest
UntrustedDockerReference string // FIXME: more precise type?
UntrustedCreatorID *string
// This is intentionally an int64; the native JSON float64 type would allow representing _some_ sub-second precision,
// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
// we would add another field, UntrustedTimestampNS int64.
untrustedTimestamp *int64
UntrustedTimestamp *int64
}
// NewUntrustedSigstorePayload returns an UntrustedSigstorePayload object with
@ -39,10 +39,10 @@ func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerRefer
creatorID := "containers/image " + version.Version
timestamp := time.Now().Unix()
return UntrustedSigstorePayload{
untrustedDockerManifestDigest: dockerManifestDigest,
untrustedDockerReference: dockerReference,
untrustedCreatorID: &creatorID,
untrustedTimestamp: &timestamp,
UntrustedDockerManifestDigest: dockerManifestDigest,
UntrustedDockerReference: dockerReference,
UntrustedCreatorID: &creatorID,
UntrustedTimestamp: &timestamp,
}
}
@ -52,22 +52,22 @@ var _ json.Marshaler = (*UntrustedSigstorePayload)(nil)
// MarshalJSON implements the json.Marshaler interface.
func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) {
if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
return nil, errors.New("Unexpected empty signature content")
}
critical := map[string]any{
critical := map[string]interface{}{
"type": sigstoreSignatureType,
"image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.untrustedDockerReference},
"image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
}
optional := map[string]any{}
if s.untrustedCreatorID != nil {
optional["creator"] = *s.untrustedCreatorID
optional := map[string]interface{}{}
if s.UntrustedCreatorID != nil {
optional["creator"] = *s.UntrustedCreatorID
}
if s.untrustedTimestamp != nil {
optional["timestamp"] = *s.untrustedTimestamp
if s.UntrustedTimestamp != nil {
optional["timestamp"] = *s.UntrustedTimestamp
}
signature := map[string]any{
signature := map[string]interface{}{
"critical": critical,
"optional": optional,
}
@ -92,7 +92,7 @@ func (s *UntrustedSigstorePayload) UnmarshalJSON(data []byte) error {
// Splitting it into a separate function allows us to do the JSONFormatError → InvalidSignatureError in a single place, the caller.
func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
var critical, optional json.RawMessage
if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
if err := ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"critical": &critical,
"optional": &optional,
}); err != nil {
@ -104,7 +104,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
var gotCreatorID, gotTimestamp = false, false
// /usr/bin/cosign generates "optional": null if there are no user-specified annotations.
if !bytes.Equal(optional, []byte("null")) {
if err := ParanoidUnmarshalJSONObject(optional, func(key string) any {
if err := ParanoidUnmarshalJSONObject(optional, func(key string) interface{} {
switch key {
case "creator":
gotCreatorID = true
@ -113,7 +113,7 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
gotTimestamp = true
return &timestamp
default:
var ignore any
var ignore interface{}
return &ignore
}
}); err != nil {
@ -121,19 +121,19 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
}
}
if gotCreatorID {
s.untrustedCreatorID = &creatorID
s.UntrustedCreatorID = &creatorID
}
if gotTimestamp {
intTimestamp := int64(timestamp)
if float64(intTimestamp) != timestamp {
return NewInvalidSignatureError("Field optional.timestamp is not an integer")
}
s.untrustedTimestamp = &intTimestamp
s.UntrustedTimestamp = &intTimestamp
}
var t string
var image, identity json.RawMessage
if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
if err := ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
"type": &t,
"image": &image,
"identity": &identity,
@ -145,15 +145,15 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
}
var digestString string
if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
if err := ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
"docker-manifest-digest": &digestString,
}); err != nil {
return err
}
s.untrustedDockerManifestDigest = digest.Digest(digestString)
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
"docker-reference": &s.untrustedDockerReference,
return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
"docker-reference": &s.UntrustedDockerReference,
})
}
@ -191,10 +191,10 @@ func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte,
if err := json.Unmarshal(unverifiedPayload, &unmatchedPayload); err != nil {
return nil, NewInvalidSignatureError(err.Error())
}
if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.untrustedDockerManifestDigest); err != nil {
if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.UntrustedDockerManifestDigest); err != nil {
return nil, err
}
if err := rules.ValidateSignedDockerReference(unmatchedPayload.untrustedDockerReference); err != nil {
if err := rules.ValidateSignedDockerReference(unmatchedPayload.UntrustedDockerReference); err != nil {
return nil, err
}
// SigstorePayloadAcceptanceRules have accepted this value.


@ -104,7 +104,7 @@ var _ json.Unmarshaler = (*Policy)(nil)
func (p *Policy) UnmarshalJSON(data []byte) error {
*p = Policy{}
transports := policyTransportsMap{}
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "default":
return &p.Default
@ -135,7 +135,7 @@ func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
// So, use a temporary map of pointers-to-slices and convert.
tmpMap := map[string]*PolicyTransportScopes{}
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
// transport can be nil
transport := transports.Get(key)
// internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
@ -181,7 +181,7 @@ func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
// We can't unmarshal directly into map values because it is not possible to take an address of a map value.
// So, use a temporary map of pointers-to-slices and convert.
tmpMap := map[string]*PolicyRequirements{}
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
// internal.ParanoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
if _, ok := tmpMap[key]; ok {
return nil
@ -271,7 +271,7 @@ var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil)
func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
*pr = prInsecureAcceptAnything{}
var tmp prInsecureAcceptAnything
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
}); err != nil {
return err
@ -301,7 +301,7 @@ var _ json.Unmarshaler = (*prReject)(nil)
func (pr *prReject) UnmarshalJSON(data []byte) error {
*pr = prReject{}
var tmp prReject
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
}); err != nil {
return err
@ -384,7 +384,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
var tmp prSignedBy
var gotKeyPath, gotKeyPaths, gotKeyData = false, false, false
var signedIdentity json.RawMessage
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "type":
return &tmp.Type
@ -495,7 +495,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
*pr = prSignedBaseLayer{}
var tmp prSignedBaseLayer
var baseLayerIdentity json.RawMessage
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
"baseLayerIdentity": &baseLayerIdentity,
}); err != nil {
@ -564,7 +564,7 @@ var _ json.Unmarshaler = (*prmMatchExact)(nil)
func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
*prm = prmMatchExact{}
var tmp prmMatchExact
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
}); err != nil {
return err
@ -594,7 +594,7 @@ var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
*prm = prmMatchRepoDigestOrExact{}
var tmp prmMatchRepoDigestOrExact
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
}); err != nil {
return err
@ -624,7 +624,7 @@ var _ json.Unmarshaler = (*prmMatchRepository)(nil)
func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
*prm = prmMatchRepository{}
var tmp prmMatchRepository
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
}); err != nil {
return err
@ -664,7 +664,7 @@ var _ json.Unmarshaler = (*prmExactReference)(nil)
func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
*prm = prmExactReference{}
var tmp prmExactReference
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
"dockerReference": &tmp.DockerReference,
}); err != nil {
@ -706,7 +706,7 @@ var _ json.Unmarshaler = (*prmExactRepository)(nil)
func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
*prm = prmExactRepository{}
var tmp prmExactRepository
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
"dockerRepository": &tmp.DockerRepository,
}); err != nil {
@ -778,7 +778,7 @@ var _ json.Unmarshaler = (*prmRemapIdentity)(nil)
func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error {
*prm = prmRemapIdentity{}
var tmp prmRemapIdentity
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"type": &tmp.Type,
"prefix": &tmp.Prefix,
"signedPrefix": &tmp.SignedPrefix,


@ -147,7 +147,7 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error {
var gotKeyPath, gotKeyData, gotFulcio, gotRekorPublicKeyPath, gotRekorPublicKeyData bool
var fulcio prSigstoreSignedFulcio
var signedIdentity json.RawMessage
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "type":
return &tmp.Type
@ -298,7 +298,7 @@ func (f *prSigstoreSignedFulcio) UnmarshalJSON(data []byte) error {
*f = prSigstoreSignedFulcio{}
var tmp prSigstoreSignedFulcio
var gotCAPath, gotCAData, gotOIDCIssuer, gotSubjectEmail bool // = false...
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any {
if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "caPath":
gotCAPath = true


@ -46,7 +46,7 @@ type PolicyRequirement interface {
// - sarRejected if the signature has not been verified;
// in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
// succeeded but the result was rejection.
// - sarUnknown if this PolicyRequirement does not deal with signatures.
// NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
// Returning sarUnknown and a non-nil error value is invalid.
// WARNING: This makes the signature contents acceptable for further processing,


@ -12,7 +12,6 @@ import (
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/manifest"
digest "github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)
func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
@ -68,8 +67,10 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva
signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
validateKeyIdentity: func(keyIdentity string) error {
if slices.Contains(trustedIdentities, keyIdentity) {
return nil
for _, trustedIdentity := range trustedIdentities {
if keyIdentity == trustedIdentity {
return nil
}
}
// Coverage: We use a private GPG home directory and only import trusted keys, so this should
// not be reachable.


@ -33,12 +33,12 @@ import (
// limitations under the License.
const (
// from sigstore/cosign/pkg/cosign.sigstorePrivateKeyPemType.
sigstorePrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY"
)
// from sigstore/cosign/pkg/cosign.loadPrivateKey
// FIXME: Do we need all of these key formats?
func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) {
// Decrypt first
p, _ := pem.Decode(key)


@ -31,14 +31,14 @@ type Signature struct {
// untrustedSignature is a parsed content of a signature.
type untrustedSignature struct {
untrustedDockerManifestDigest digest.Digest
untrustedDockerReference string // FIXME: more precise type?
untrustedCreatorID *string
UntrustedDockerManifestDigest digest.Digest
UntrustedDockerReference string // FIXME: more precise type?
UntrustedCreatorID *string
// This is intentionally an int64; the native JSON float64 type would allow representing _some_ sub-second precision,
// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
// we would add another field, UntrustedTimestampNS int64.
untrustedTimestamp *int64
UntrustedTimestamp *int64
}
// UntrustedSignatureInformation is information available in an untrusted signature.
@ -65,10 +65,10 @@ func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference s
creatorID := "atomic " + version.Version
timestamp := time.Now().Unix()
return untrustedSignature{
untrustedDockerManifestDigest: dockerManifestDigest,
untrustedDockerReference: dockerReference,
untrustedCreatorID: &creatorID,
untrustedTimestamp: &timestamp,
UntrustedDockerManifestDigest: dockerManifestDigest,
UntrustedDockerReference: dockerReference,
UntrustedCreatorID: &creatorID,
UntrustedTimestamp: &timestamp,
}
}
@ -78,22 +78,22 @@ var _ json.Marshaler = (*untrustedSignature)(nil)
// MarshalJSON implements the json.Marshaler interface.
func (s untrustedSignature) MarshalJSON() ([]byte, error) {
if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
return nil, errors.New("Unexpected empty signature content")
}
critical := map[string]any{
critical := map[string]interface{}{
"type": signatureType,
"image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.untrustedDockerReference},
"image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
}
optional := map[string]any{}
if s.untrustedCreatorID != nil {
optional["creator"] = *s.untrustedCreatorID
optional := map[string]interface{}{}
if s.UntrustedCreatorID != nil {
optional["creator"] = *s.UntrustedCreatorID
}
if s.untrustedTimestamp != nil {
optional["timestamp"] = *s.untrustedTimestamp
if s.UntrustedTimestamp != nil {
optional["timestamp"] = *s.UntrustedTimestamp
}
signature := map[string]any{
signature := map[string]interface{}{
"critical": critical,
"optional": optional,
}
@ -118,7 +118,7 @@ func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
// Splitting it into a separate function allows us to do the internal.JSONFormatError → InvalidSignatureError in a single place, the caller.
func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
var critical, optional json.RawMessage
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]any{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
"critical": &critical,
"optional": &optional,
}); err != nil {
@ -128,7 +128,7 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
var creatorID string
var timestamp float64
var gotCreatorID, gotTimestamp = false, false
if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) any {
if err := internal.ParanoidUnmarshalJSONObject(optional, func(key string) interface{} {
switch key {
case "creator":
gotCreatorID = true
@ -137,26 +137,26 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
gotTimestamp = true
return &timestamp
default:
var ignore any
var ignore interface{}
return &ignore
}
}); err != nil {
return err
}
if gotCreatorID {
s.untrustedCreatorID = &creatorID
s.UntrustedCreatorID = &creatorID
}
if gotTimestamp {
intTimestamp := int64(timestamp)
if float64(intTimestamp) != timestamp {
return internal.NewInvalidSignatureError("Field optional.timestamp is not is not an integer")
}
s.untrustedTimestamp = &intTimestamp
s.UntrustedTimestamp = &intTimestamp
}
var t string
var image, identity json.RawMessage
if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]any{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
"type": &t,
"image": &image,
"identity": &identity,
@ -168,15 +168,15 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
}
var digestString string
if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]any{
if err := internal.ParanoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
"docker-manifest-digest": &digestString,
}); err != nil {
return err
}
s.untrustedDockerManifestDigest = digest.Digest(digestString)
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
"docker-reference": &s.untrustedDockerReference,
return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
"docker-reference": &s.UntrustedDockerReference,
})
}
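
The optional.timestamp handling above decodes the JSON number into a float64 and then insists on an exact int64 round-trip, rejecting fractional values rather than silently truncating them. A standalone sketch of that check (the helper name is hypothetical, not the vendored code):

package main

import "fmt"

// rejectFractional applies the same round-trip test as
// strictUnmarshalJSON: a timestamp survives the
// float64 -> int64 -> float64 conversion only if it was integral.
func rejectFractional(timestamp float64) (int64, error) {
    intTimestamp := int64(timestamp)
    if float64(intTimestamp) != timestamp {
        return 0, fmt.Errorf("timestamp %v is not an integer", timestamp)
    }
    return intTimestamp, nil
}

func main() {
    fmt.Println(rejectFractional(1680066162))   // accepted
    fmt.Println(rejectFractional(1680066162.5)) // rejected with an error
}
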
@ -229,16 +229,16 @@ func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte
if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
return nil, internal.NewInvalidSignatureError(err.Error())
}
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.untrustedDockerManifestDigest); err != nil {
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
return nil, err
}
if err := rules.validateSignedDockerReference(unmatchedSignature.untrustedDockerReference); err != nil {
if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
return nil, err
}
// signatureAcceptanceRules have accepted this value.
return &Signature{
DockerManifestDigest: unmatchedSignature.untrustedDockerManifestDigest,
DockerReference: unmatchedSignature.untrustedDockerReference,
DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
DockerReference: unmatchedSignature.UntrustedDockerReference,
}, nil
}
@ -269,14 +269,14 @@ func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []
}
var timestamp *time.Time // = nil
if untrustedDecodedContents.untrustedTimestamp != nil {
ts := time.Unix(*untrustedDecodedContents.untrustedTimestamp, 0)
if untrustedDecodedContents.UntrustedTimestamp != nil {
ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
timestamp = &ts
}
return &UntrustedSignatureInformation{
UntrustedDockerManifestDigest: untrustedDecodedContents.untrustedDockerManifestDigest,
UntrustedDockerReference: untrustedDecodedContents.untrustedDockerReference,
UntrustedCreatorID: untrustedDecodedContents.untrustedCreatorID,
UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
UntrustedDockerReference: untrustedDecodedContents.UntrustedDockerReference,
UntrustedCreatorID: untrustedDecodedContents.UntrustedCreatorID,
UntrustedTimestamp: timestamp,
UntrustedShortKeyIdentifier: shortKeyIdentifier,
}, nil
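
The timestamp conversion above is a pointer-preserving wrap of time.Unix; a small sketch under the same nil-means-absent convention:

package main

import (
    "fmt"
    "time"
)

// toTime mirrors the conversion above: a nil timestamp stays nil, and
// anything else becomes a time.Time for that many seconds since the
// Unix epoch (time.Unix interprets it in the local time zone).
func toTime(unixSeconds *int64) *time.Time {
    if unixSeconds == nil {
        return nil
    }
    ts := time.Unix(*unixSeconds, 0)
    return &ts
}

func main() {
    sec := int64(1680066162)
    fmt.Println(toTime(&sec)) // 2023-03-29... in local time
    fmt.Println(toTime(nil))  // <nil>
}
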


@ -21,7 +21,6 @@ import (
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/manifest"
@ -35,7 +34,6 @@ import (
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
var (
@ -77,19 +75,13 @@ type storageImageDestination struct {
indexToStorageID map[int]*string
// All accesses to below data are protected by `lock` which is made
// *explicit* in the code.
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
}
// addedLayerInfo records data about a layer to use in this image.
type addedLayerInfo struct {
digest digest.Digest
emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToPulledLayerInfo map[int]*manifest.LayerInfo // Mapping from layer (by index) to pulled down blob
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
}
// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
@ -117,18 +109,18 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
HasThreadSafePutBlob: true,
}),
imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
indexToStorageID: make(map[int]*string),
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
indexToStorageID: make(map[int]*string),
indexToPulledLayerInfo: make(map[int]*manifest.LayerInfo),
diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
}
dest.Compat = impl.AddCompat(dest)
return dest, nil
@ -164,8 +156,8 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
info, err := s.putBlobToPendingFile(stream, blobinfo, &options)
func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
info, err := s.putBlobToPendingFile(ctx, stream, blobinfo, &options)
if err != nil {
return info, err
}
@ -174,20 +166,21 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream
return info, nil
}
return info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
digest: info.Digest,
emptyLayer: options.EmptyLayer,
})
return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
}
// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (private.UploadedBlob, error) {
func (s *storageImageDestination) putBlobToPendingFile(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
// Stores a layer or data blob in our temporary directory, checking that any information
// in the blobinfo matches the incoming data.
errorBlobInfo := types.BlobInfo{
Digest: "",
Size: -1,
}
if blobinfo.Digest != "" {
if err := blobinfo.Digest.Validate(); err != nil {
return private.UploadedBlob{}, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
return errorBlobInfo, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
}
}
@ -195,7 +188,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
filename := s.computeNextBlobCacheFile()
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
if err != nil {
return private.UploadedBlob{}, fmt.Errorf("creating temporary file %q: %w", filename, err)
return errorBlobInfo, fmt.Errorf("creating temporary file %q: %w", filename, err)
}
defer file.Close()
counter := ioutils.NewWriteCounter(file)
@ -203,16 +196,16 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
decompressed, err := archive.DecompressStream(stream)
if err != nil {
return private.UploadedBlob{}, fmt.Errorf("setting up to decompress blob: %w", err)
return errorBlobInfo, fmt.Errorf("setting up to decompress blob: %w", err)
}
diffID := digest.Canonical.Digester()
// Copy the data to the file.
// TODO: This can take quite some time, and should ideally be cancellable using context.Context.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
_, err = io.Copy(diffID.Hash(), decompressed)
decompressed.Close()
if err != nil {
return private.UploadedBlob{}, fmt.Errorf("storing blob to file %q: %w", filename, err)
return errorBlobInfo, fmt.Errorf("storing blob to file %q: %w", filename, err)
}
// Determine blob properties, and fail if information that we were given about the blob
@ -222,7 +215,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
if blobSize < 0 {
blobSize = counter.Count
} else if blobinfo.Size != counter.Count {
return private.UploadedBlob{}, ErrBlobSizeMismatch
return errorBlobInfo, ErrBlobSizeMismatch
}
// Record information about the blob.
@ -234,9 +227,10 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
// This is safe because we have just computed diffID, and blobDigest was either computed
// by us, or validated by the caller (usually copy.digestingReader).
options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
return private.UploadedBlob{
Digest: blobDigest,
Size: blobSize,
return types.BlobInfo{
Digest: blobDigest,
Size: blobSize,
MediaType: blobinfo.MediaType,
}, nil
}
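
putBlobToPendingFile hashes the blob twice in one pass: the compressed bytes on their way to the cache file (the blobsum) and the decompressed bytes (the DiffID). A standard-library-only sketch of that tee-and-decompress shape, using gzip where the vendored code autodetects the compression:

package main

import (
    "bytes"
    "compress/gzip"
    "crypto/sha256"
    "fmt"
    "io"
)

func main() {
    // Build a small gzip "layer blob" in memory.
    var blob bytes.Buffer
    zw := gzip.NewWriter(&blob)
    io.WriteString(zw, "layer contents")
    zw.Close()

    compressed := sha256.New()
    // TeeReader lets the compressed-digest hash observe every byte the
    // decompressor consumes, so one read of the stream feeds both hashes.
    zr, err := gzip.NewReader(io.TeeReader(&blob, compressed))
    if err != nil {
        panic(err)
    }
    diffID := sha256.New()
    if _, err := io.Copy(diffID, zr); err != nil {
        panic(err)
    }
    fmt.Printf("blobsum: sha256:%x\n", compressed.Sum(nil))
    fmt.Printf("diffID:  sha256:%x\n", diffID.Sum(nil))
}
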
@ -248,7 +242,7 @@ type zstdFetcher struct {
// GetBlobAt converts from chunked.GetBlobAt to BlobChunkAccessor.GetBlobAt.
func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
newChunks := make([]private.ImageSourceChunk, 0, len(chunks))
var newChunks []private.ImageSourceChunk
for _, v := range chunks {
i := private.ImageSourceChunk{
Offset: v.Offset,
@ -269,7 +263,7 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
fetcher := zstdFetcher{
chunkAccessor: chunkAccessor,
ctx: ctx,
@ -278,12 +272,12 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
if err != nil {
return private.UploadedBlob{}, err
return srcInfo, err
}
out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
if err != nil {
return private.UploadedBlob{}, err
return srcInfo, err
}
blobDigest := srcInfo.Digest
@ -295,126 +289,124 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
s.diffOutputs[blobDigest] = out
s.lock.Unlock()
return private.UploadedBlob{
Digest: blobDigest,
Size: srcInfo.Size,
}, nil
return srcInfo, nil
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil).
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
reused, info, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options)
func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
reused, info, err := s.tryReusingBlobAsPending(ctx, blobinfo, &options)
if err != nil || !reused || options.LayerIndex == nil {
return reused, info, err
}
return reused, info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
digest: info.Digest,
emptyLayer: options.EmptyLayer,
})
return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
}
// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.blobDiffIDs and other metadata.
// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
// lock the entire method as it executes fairly quickly
s.lock.Lock()
defer s.lock.Unlock()
if options.SrcRef != nil {
// Check if we have the layer in the underlying additional layer store.
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(digest, options.SrcRef.String())
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String())
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err)
return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobinfo.Digest, err)
} else if err == nil {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.blobDiffIDs[digest] = aLayer.UncompressedDigest()
s.blobAdditionalLayer[digest] = aLayer
return true, private.ReusedBlob{
Digest: digest,
Size: aLayer.CompressedSize(),
s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest()
s.blobAdditionalLayer[blobinfo.Digest] = aLayer
return true, types.BlobInfo{
Digest: blobinfo.Digest,
Size: aLayer.CompressedSize(),
MediaType: blobinfo.MediaType,
}, nil
}
}
if digest == "" {
return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
if blobinfo.Digest == "" {
return false, types.BlobInfo{}, errors.New(`Can not check for a blob with unknown digest`)
}
if err := digest.Validate(); err != nil {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
if err := blobinfo.Digest.Validate(); err != nil {
return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
}
// Check if we've already cached it in a file.
if size, ok := s.fileSizes[digest]; ok {
return true, private.ReusedBlob{
Digest: digest,
Size: size,
if size, ok := s.fileSizes[blobinfo.Digest]; ok {
return true, types.BlobInfo{
Digest: blobinfo.Digest,
Size: size,
MediaType: blobinfo.MediaType,
}, nil
}
// Check if we have a wasn't-compressed layer in storage that's based on that blob.
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(digest)
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, digest, err)
return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, blobinfo.Digest, err)
}
if len(layers) > 0 {
// Save this for completeness.
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Size: layers[0].UncompressedSize,
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
return true, types.BlobInfo{
Digest: blobinfo.Digest,
Size: layers[0].UncompressedSize,
MediaType: blobinfo.MediaType,
}, nil
}
// Check if we have a was-compressed layer in storage that's based on that blob.
layers, err = s.imageRef.transport.store.LayersByCompressedDigest(digest)
layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, digest, err)
return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, blobinfo.Digest, err)
}
if len(layers) > 0 {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Size: layers[0].CompressedSize,
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
return true, types.BlobInfo{
Digest: blobinfo.Digest,
Size: layers[0].CompressedSize,
MediaType: blobinfo.MediaType,
}, nil
}
// Does the blob correspond to a known DiffID which we already have available?
// Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
// uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size.
if options.CanSubstitute || size != -1 {
if uncompressedDigest := options.Cache.UncompressedDigest(digest); uncompressedDigest != "" && uncompressedDigest != digest {
if options.CanSubstitute || blobinfo.Size != -1 {
if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
}
if len(layers) > 0 {
if size != -1 {
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Size: size,
}, nil
if blobinfo.Size != -1 {
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
return true, blobinfo, nil
}
if !options.CanSubstitute {
return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest)
return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo)
}
s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: uncompressedDigest,
Size: layers[0].UncompressedSize,
return true, types.BlobInfo{
Digest: uncompressedDigest,
Size: layers[0].UncompressedSize,
MediaType: blobinfo.MediaType,
}, nil
}
}
}
// Nope, we don't have it.
return false, private.ReusedBlob{}, nil
return false, types.BlobInfo{}, nil
}
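
The CanSubstitute branch above is the only lookup that may answer with a different digest than the caller asked for: a compressed blob is mapped through the blob-info cache to its uncompressed DiffID, and an existing layer with that DiffID is reused. A toy sketch of the mapping (plain maps standing in for the BlobInfoCache and the layer store; all names hypothetical):

package main

import "fmt"

type layer struct {
    diffID string
    size   int64
}

func main() {
    // compressed digest -> uncompressed digest, as recorded by
    // RecordDigestUncompressedPair during earlier pulls
    cache := map[string]string{
        "sha256:compressed123": "sha256:diffid456",
    }
    // layers already committed to storage, keyed by uncompressed digest
    store := map[string]layer{
        "sha256:diffid456": {diffID: "sha256:diffid456", size: 1024},
    }

    want := "sha256:compressed123"
    if diffID, ok := cache[want]; ok && diffID != want {
        if l, ok := store[diffID]; ok {
            // Substitution succeeded: reuse the uncompressed layer and
            // report its digest/size instead of the compressed blob's.
            fmt.Printf("reuse layer %s (size %d) for blob %s\n", l.diffID, l.size, want)
        }
    }
}
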
// computeID computes a recommended image ID based on information we have so far. If
@ -476,10 +468,10 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er
return nil, errors.New("blob not found")
}
// queueOrCommit queues the specified layer to be committed to the storage.
// queueOrCommit queues in the specified blob to be committed to the storage.
// If no other goroutine is already committing layers, the layer and all
// subsequent layers (if already queued) will be committed to the storage.
func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) error {
func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.BlobInfo, index int, emptyLayer bool) error {
// NOTE: whenever the code below is touched, make sure that all code
// paths unlock the lock and to unlock it exactly once.
//
@ -499,7 +491,10 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
// caller is the "worker" routine committing layers. All other routines
// can continue pulling and queuing in layers.
s.lock.Lock()
s.indexToAddedLayerInfo[index] = info
s.indexToPulledLayerInfo[index] = &manifest.LayerInfo{
BlobInfo: blob,
EmptyLayer: emptyLayer,
}
// We're still waiting for at least one previous/parent layer to be
// committed, so there's nothing to do.
@ -508,14 +503,10 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
return nil
}
for {
info, ok := s.indexToAddedLayerInfo[index]
if !ok {
break
}
for info := s.indexToPulledLayerInfo[index]; info != nil; info = s.indexToPulledLayerInfo[index] {
s.lock.Unlock()
// Note: commitLayer locks on-demand.
if err := s.commitLayer(index, info, -1); err != nil {
if err := s.commitLayer(ctx, *info, index); err != nil {
return err
}
s.lock.Lock()
@ -529,15 +520,13 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
return nil
}
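
queueOrCommit implements in-order commits over out-of-order arrivals: every goroutine parks its layer under its index, and only the goroutine that delivered the next expected index drains the queue, dropping the lock around each (slow) commit. A minimal, self-contained sketch of the same pattern (not the vendored implementation; fmt.Printf stands in for commitLayer):

package main

import (
    "fmt"
    "sync"
)

type committer struct {
    mu     sync.Mutex
    queued map[int]string // index -> queued layer, awaiting its turn
    next   int            // index of the next layer to commit
}

func (c *committer) queueOrCommit(index int, layer string) {
    c.mu.Lock()
    c.queued[index] = layer
    if index != c.next {
        // An earlier layer is still missing; whichever goroutine
        // delivers it will drain the queue past this entry.
        c.mu.Unlock()
        return
    }
    for {
        l, ok := c.queued[c.next]
        if !ok {
            break
        }
        i := c.next
        c.mu.Unlock() // commit without holding the lock
        fmt.Printf("committing layer %d: %s\n", i, l)
        c.mu.Lock()
        delete(c.queued, i)
        c.next++
    }
    c.mu.Unlock()
}

func main() {
    c := &committer{queued: make(map[int]string)}
    var wg sync.WaitGroup
    for i, l := range []string{"base", "middle", "top"} {
        wg.Add(1)
        go func(i int, l string) {
            defer wg.Done()
            c.queueOrCommit(i, l)
        }(i, l)
    }
    wg.Wait() // layers always print as 0, 1, 2 regardless of arrival order
}
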
// commitLayer commits the specified layer with the given index to the storage.
// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
//
// commitLayer commits the specified blob with the given index to the storage.
// Note that the previous layer is expected to already be committed.
//
// Caution: this function must be called without holding `s.lock`. Callers
// must guarantee that, at any given time, at most one goroutine may execute
// `commitLayer()`.
func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) error {
func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error {
// Already committed? Return early.
if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
return nil
@ -552,7 +541,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
}
// Carry over the previous ID for empty non-base layers.
if info.emptyLayer {
if blob.EmptyLayer {
s.indexToStorageID[index] = &lastLayer
return nil
}
@ -560,7 +549,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
// Check if there's already a layer with the ID that we'd give to the result of applying
// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
s.lock.Lock()
diffID, haveDiffID := s.blobDiffIDs[info.digest]
diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
s.lock.Unlock()
if !haveDiffID {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
@ -569,21 +558,18 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
// that relies on using a blob digest that has never been seen by the store had better call
// TryReusingBlob; not calling PutBlob already violates the documented API, so there's only
// so far we are going to accommodate that (if we should be doing that at all).
logrus.Debugf("looking for diffID for blob %+v", info.digest)
// Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit.
has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{
Cache: none.NoCache,
CanSubstitute: false,
})
logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
// NOTE: use `TryReusingBlob` to prevent recursion.
has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
if err != nil {
return fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
return fmt.Errorf("checking for a layer based on blob %q: %w", blob.Digest.String(), err)
}
if !has {
return fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
return fmt.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
}
diffID, haveDiffID = s.blobDiffIDs[info.digest]
diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
if !haveDiffID {
return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", info.digest.String())
return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
}
}
id := diffID.Hex()
@ -598,7 +584,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
}
s.lock.Lock()
diffOutput, ok := s.diffOutputs[info.digest]
diffOutput, ok := s.diffOutputs[blob.Digest]
s.lock.Unlock()
if ok {
layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
@ -607,7 +593,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
}
// FIXME: what to do with the uncompressed digest?
diffOutput.UncompressedDigest = info.digest
diffOutput.UncompressedDigest = blob.Digest
if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil {
_ = s.imageRef.transport.store.Delete(layer.ID)
@ -619,7 +605,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
}
s.lock.Lock()
al, ok := s.blobAdditionalLayer[info.digest]
al, ok := s.blobAdditionalLayer[blob.Digest]
s.lock.Unlock()
if ok {
layer, err := al.PutAs(id, lastLayer, nil)
@ -634,7 +620,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
// Check if we previously cached a file with that blob's contents. If we didn't,
// then we need to read the desired contents from a layer.
s.lock.Lock()
filename, ok := s.filenames[info.digest]
filename, ok := s.filenames[blob.Digest]
s.lock.Unlock()
if !ok {
// Try to find the layer with contents matching that blobsum.
@ -643,13 +629,13 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
if err2 == nil && len(layers) > 0 {
layer = layers[0].ID
} else {
layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(info.digest)
layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
if err2 == nil && len(layers) > 0 {
layer = layers[0].ID
}
}
if layer == "" {
return fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
return fmt.Errorf("locating layer for blob %q: %w", blob.Digest, err2)
}
// Read the layer's contents.
noCompression := archive.Uncompressed
@ -658,7 +644,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
}
diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
if err2 != nil {
return fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
return fmt.Errorf("reading layer %q for blob %q: %w", layer, blob.Digest, err2)
}
// Copy the layer diff to a file. Diff() takes a lock that it holds
// until the ReadCloser that it returns is closed, and PutLayer() wants
@ -682,7 +668,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
// Make sure that we can find this file later, should we need the layer's
// contents again.
s.lock.Lock()
s.filenames[info.digest] = filename
s.filenames[blob.Digest] = filename
s.lock.Unlock()
}
// Read the cached blob and use it as a diff.
@ -694,11 +680,11 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
// Build the new layer using the diff, regardless of where it came from.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
OriginalDigest: info.digest,
OriginalDigest: blob.Digest,
UncompressedDigest: diffID,
}, file)
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
return fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
return fmt.Errorf("adding layer with blob %q: %w", blob.Digest, err)
}
s.indexToStorageID[index] = &layer.ID
@ -749,10 +735,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
// Extract, commit, or find the layers.
for i, blob := range layerBlobs {
if err := s.commitLayer(i, addedLayerInfo{
digest: blob.Digest,
emptyLayer: blob.EmptyLayer,
}, blob.Size); err != nil {
if err := s.commitLayer(ctx, blob, i); err != nil {
return err
}
}
@ -812,14 +795,14 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
// Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so
// we just need to screen out the ones that are actually layers to get the list of non-layers.
dataBlobs := set.New[digest.Digest]()
dataBlobs := make(map[digest.Digest]struct{})
for blob := range s.filenames {
dataBlobs.Add(blob)
dataBlobs[blob] = struct{}{}
}
for _, layerBlob := range layerBlobs {
dataBlobs.Delete(layerBlob.Digest)
delete(dataBlobs, layerBlob.Digest)
}
for _, blob := range dataBlobs.Values() {
for blob := range dataBlobs {
v, err := os.ReadFile(s.filenames[blob])
if err != nil {
return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
@ -900,7 +883,9 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob
if err != nil {
return err
}
s.manifest = slices.Clone(manifestBlob)
newBlob := make([]byte, len(manifestBlob))
copy(newBlob, manifestBlob)
s.manifest = newBlob
s.manifestDigest = digest
return nil
}
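
Both sides of the PutManifest hunk make a defensive copy of the caller's buffer before storing it: slices.Clone from golang.org/x/exp (dropped by the downgrade) and the explicit make-plus-copy are equivalent. A sketch of why the copy matters:

package main

import "fmt"

// cloneBytes is the pre-generics spelling of slices.Clone for []byte.
func cloneBytes(b []byte) []byte {
    nb := make([]byte, len(b))
    copy(nb, b)
    return nb
}

func main() {
    orig := []byte("manifest-v1")
    saved := cloneBytes(orig)
    orig[len(orig)-1] = '2'    // caller reuses its buffer afterwards
    fmt.Println(string(saved)) // still "manifest-v1"
}
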


@ -43,7 +43,7 @@ func (s *storageImageCloser) Size() (int64, error) {
// newImage creates an image that also knows its size
func newImage(ctx context.Context, sys *types.SystemContext, s storageReference) (types.ImageCloser, error) {
src, err := newImageSource(sys, s)
src, err := newImageSource(ctx, sys, s)
if err != nil {
return nil, err
}


@ -14,7 +14,6 @@ import (
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte
@ -53,15 +52,17 @@ func newReference(transport storageTransport, named reference.Named, id string)
// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref
func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
repo := ref.Name()
return slices.ContainsFunc(image.Names, func(name string) bool {
if named, err := reference.ParseNormalizedNamed(name); err == nil && named.Name() == repo {
return true
for _, name := range image.Names {
if named, err := reference.ParseNormalizedNamed(name); err == nil {
if named.Name() == repo {
return true
}
}
return false
})
}
return false
}
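
slices.ContainsFunc (from golang.org/x/exp, later the standard slices package) and the explicit loop it replaces here are interchangeable; the downgrade only sheds the x/exp dependency. A sketch of both spellings, assuming a Go 1.21+ toolchain and with strings.HasPrefix as a simplified stand-in for the reference.ParseNormalizedNamed comparison:

package main

import (
    "fmt"
    "slices"
    "strings"
)

func main() {
    names := []string{"docker.io/library/busybox:latest", "quay.io/foo/bar:1.0"}
    repo := "quay.io/foo/bar"
    match := func(name string) bool { return strings.HasPrefix(name, repo+":") }

    viaHelper := slices.ContainsFunc(names, match)

    viaLoop := false
    for _, name := range names {
        if match(name) {
            viaLoop = true
            break
        }
    }
    fmt.Println(viaHelper, viaLoop) // true true
}
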
// multiArchImageMatchesSystemContext returns true if the passed-in image both contains a
// multiArchImageMatchesSystemContext returns true if if the passed-in image both contains a
// multi-arch manifest that matches the passed-in digest, and the image is the per-platform
// image instance that matches sys.
//
@ -169,9 +170,11 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
// sake of older consumers that don't know there's a whole list in there now.
if s.named != nil {
if digested, ok := s.named.(reference.Digested); ok {
digest := digested.Digest()
if slices.Contains(loadedImage.Digests, digest) {
loadedImage.Digest = digest
for _, digest := range loadedImage.Digests {
if digest == digested.Digest() {
loadedImage.Digest = digest
break
}
}
}
}
@ -204,10 +207,10 @@ func (s storageReference) StringWithinTransport() string {
}
res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
if s.named != nil {
res += s.named.String()
res = res + s.named.String()
}
if s.id != "" {
res += "@" + s.id
res = res + "@" + s.id
}
return res
}
@ -215,10 +218,10 @@ func (s storageReference) StringWithinTransport() string {
func (s storageReference) PolicyConfigurationIdentity() string {
res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
if s.named != nil {
res += s.named.String()
res = res + s.named.String()
}
if s.id != "" {
res += "@" + s.id
res = res + "@" + s.id
}
return res
}
@ -277,7 +280,7 @@ func (s storageReference) DeleteImage(ctx context.Context, sys *types.SystemCont
}
func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
return newImageSource(sys, s)
return newImageSource(ctx, sys, s)
}
func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {


@ -44,7 +44,7 @@ type storageImageSource struct {
}
// newImageSource sets up an image for reading.
func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
func newImageSource(ctx context.Context, sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
// First, locate the image.
img, err := imageRef.resolveImage(sys)
if err != nil {
@ -186,7 +186,7 @@ func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []st
}
// GetManifest() reads the image's manifest.
func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, mimeType string, err error) {
func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
if instanceDigest != nil {
key := manifestBigDataKey(*instanceDigest)
blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)


@ -234,30 +234,35 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
reference = reference[closeIndex+1:]
// Peel off a "driver@" from the start.
driverInfo := ""
driverPart1, driverPart2, gotDriver := strings.Cut(storeSpec, "@")
if !gotDriver {
storeSpec = driverPart1
driverSplit := strings.SplitN(storeSpec, "@", 2)
if len(driverSplit) != 2 {
if storeSpec == "" {
return nil, ErrInvalidReference
}
} else {
driverInfo = driverPart1
driverInfo = driverSplit[0]
if driverInfo == "" {
return nil, ErrInvalidReference
}
storeSpec = driverPart2
storeSpec = driverSplit[1]
if storeSpec == "" {
return nil, ErrInvalidReference
}
}
// Peel off a ":options" from the end.
var options []string
storeSpec, optionsPart, gotOptions := strings.Cut(storeSpec, ":")
if gotOptions {
options = strings.Split(optionsPart, ",")
optionsSplit := strings.SplitN(storeSpec, ":", 2)
if len(optionsSplit) == 2 {
options = strings.Split(optionsSplit[1], ",")
storeSpec = optionsSplit[0]
}
// Peel off a "+runroot" from the new end.
storeSpec, runRootInfo, _ := strings.Cut(storeSpec, "+") // runRootInfo is "" if there is no "+"
runRootInfo := ""
runRootSplit := strings.SplitN(storeSpec, "+", 2)
if len(runRootSplit) == 2 {
runRootInfo = runRootSplit[1]
storeSpec = runRootSplit[0]
}
// The rest is our graph root.
rootInfo := storeSpec
// Check that any paths are absolute paths.


@ -9,8 +9,8 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
)
// ConfigUpdater is an interface that ImageReferences for "tarball" images also
@ -35,7 +35,9 @@ func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[
if r.annotations == nil {
r.annotations = make(map[string]string)
}
maps.Copy(r.annotations, annotations)
for k, v := range annotations {
r.annotations[k] = v
}
return nil
}
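
maps.Copy (golang.org/x/exp/maps, later the standard maps package) and the range loop that replaces it here perform the same shallow merge, with src entries overwriting dst on key collisions. A sketch, assuming a Go 1.21+ toolchain for the standard-library import:

package main

import (
    "fmt"
    "maps"
)

func main() {
    dst := map[string]string{"a": "1"}
    src := map[string]string{"b": "2", "a": "override"}

    maps.Copy(dst, src) // same as: for k, v := range src { dst[k] = v }
    fmt.Println(dst)    // map[a:override b:2]
}
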
@ -71,7 +73,7 @@ func (r *tarballReference) NewImage(ctx context.Context, sys *types.SystemContex
func (r *tarballReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
for _, filename := range r.filenames {
if err := os.Remove(filename); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("error removing %q: %w", filename, err)
return fmt.Errorf("error removing %q: %v", filename, err)
}
}
return nil


@ -18,8 +18,6 @@ import (
digest "github.com/opencontainers/go-digest"
imgspecs "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
type tarballImageSource struct {
@ -64,13 +62,13 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
} else {
file, err = os.Open(filename)
if err != nil {
return nil, fmt.Errorf("error opening %q for reading: %w", filename, err)
return nil, fmt.Errorf("error opening %q for reading: %v", filename, err)
}
defer file.Close()
reader = file
fileinfo, err := file.Stat()
if err != nil {
return nil, fmt.Errorf("error reading size of %q: %w", filename, err)
return nil, fmt.Errorf("error reading size of %q: %v", filename, err)
}
blobSize = fileinfo.Size()
blobTime = fileinfo.ModTime()
@ -170,6 +168,10 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
MediaType: blobTypes[i],
})
}
annotations := make(map[string]string)
for k, v := range r.annotations {
annotations[k] = v
}
manifest := imgspecv1.Manifest{
Versioned: imgspecs.Versioned{
SchemaVersion: 2,
@ -180,7 +182,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
MediaType: imgspecv1.MediaTypeImageConfig,
},
Layers: layerDescriptors,
Annotations: maps.Clone(r.annotations),
Annotations: annotations,
}
// Encode the manifest.
@ -226,19 +228,20 @@ func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobIn
return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
}
// Maybe one of the layer blobs.
i := slices.Index(is.blobIDs, blobinfo.Digest)
if i == -1 {
return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String())
for i := range is.blobIDs {
if blobinfo.Digest == is.blobIDs[i] {
// We want to read that layer: open the file or memory block and hand it back.
if is.filenames[i] == "-" {
return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
}
reader, err := os.Open(is.filenames[i])
if err != nil {
return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err)
}
return reader, is.blobSizes[i], nil
}
}
// We want to read that layer: open the file or memory block and hand it back.
if is.filenames[i] == "-" {
return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil
}
reader, err := os.Open(is.filenames[i])
if err != nil {
return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err)
}
return reader, is.blobSizes[i], nil
return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String())
}
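
GetBlob keeps parallel slices (blobIDs, filenames, blobSizes) aligned by index, so finding a blob is an index search: slices.Index on one side of the hunk, the explicit loop on the other. A sketch of the lookup, assuming Go 1.21+ for the standard slices package:

package main

import (
    "fmt"
    "slices"
)

func main() {
    blobIDs := []string{"sha256:aaa", "sha256:bbb"}
    blobSizes := []int64{100, 200}

    i := slices.Index(blobIDs, "sha256:bbb")
    if i == -1 {
        fmt.Println("no blob with that digest found")
        return
    }
    fmt.Println("size:", blobSizes[i]) // size: 200
}
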
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).


@ -25,24 +25,24 @@ import (
// ParseImageName converts a URL-like image name to a types.ImageReference.
func ParseImageName(imgName string) (types.ImageReference, error) {
// Keep this in sync with TransportFromImageName!
transportName, withinTransport, valid := strings.Cut(imgName, ":")
if !valid {
parts := strings.SplitN(imgName, ":", 2)
if len(parts) != 2 {
return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
}
transport := transports.Get(transportName)
transport := transports.Get(parts[0])
if transport == nil {
return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, transportName)
return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
}
return transport.ParseReference(withinTransport)
return transport.ParseReference(parts[1])
}
// TransportFromImageName converts an URL-like name to a types.ImageTransport or nil when
// the transport is unknown or when the input is invalid.
func TransportFromImageName(imageName string) types.ImageTransport {
// Keep this in sync with ParseImageName!
transportName, _, valid := strings.Cut(imageName, ":")
if valid {
return transports.Get(transportName)
parts := strings.SplitN(imageName, ":", 2)
if len(parts) == 2 {
return transports.Get(parts[0])
}
return nil
}
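
Both ParseImageName and TransportFromImageName split only on the first colon, so everything after the transport name, tags included, stays intact. A sketch of the two spellings in this hunk:

package main

import (
    "fmt"
    "strings"
)

func main() {
    img := "docker://docker.io/library/busybox:latest"

    // Go 1.18+ spelling.
    name, rest, ok := strings.Cut(img, ":")

    // Pre-1.18 spelling from the 5.24.2 side.
    parts := strings.SplitN(img, ":", 2)

    fmt.Println(ok, name == parts[0], rest == parts[1]) // true true true
    fmt.Println(name, rest) // docker //docker.io/library/busybox:latest
}
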


@ -5,7 +5,6 @@ import (
"sort"
"sync"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/types"
)
@ -67,21 +66,22 @@ func Register(t types.ImageTransport) {
// This is the generally recommended way to refer to images in the UI.
//
// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
func ImageName(ref types.ImageReference) string {
return ref.Transport().Name() + ":" + ref.StringWithinTransport()
}
var deprecatedTransports = set.NewWithValues("atomic")
// ListNames returns a list of non deprecated transport names.
// Deprecated transports can be used, but are not presented to users.
func ListNames() []string {
kt.mu.Lock()
defer kt.mu.Unlock()
deprecated := map[string]bool{
"atomic": true,
}
var names []string
for _, transport := range kt.transports {
if !deprecatedTransports.Contains(transport.Name()) {
if !deprecated[transport.Name()] {
names = append(names, transport.Name())
}
}
