Call .Validate() before digest.Hex() / digest.Encoded()

... to prevent panics if the value does not contain a ":", and to reject other unexpected
values (e.g. a path traversal).

Don't bother on code paths where we computed the digest ourselves, or where it is already
trusted for other reasons.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
Miloslav Trmač 2024-04-17 22:26:46 +02:00
parent fdb044b5b1
commit 04deef6fe6
18 changed files with 210 additions and 59 deletions
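
For background, a minimal sketch of the pattern this commit applies (the helper name and directory below are made up for illustration, not taken from this repository): go-digest's Encoded()/Hex() panic when the value contains no ":", and for a value such as "sha256:../hello" they return "../hello" unchecked, so a path built from an unvalidated digest could escape its directory. Validating first turns both cases into an ordinary error:

package main

import (
	"fmt"
	"path/filepath"

	digest "github.com/opencontainers/go-digest"
)

// layerPathFor is a hypothetical helper mirroring the validate-before-Encoded() pattern.
func layerPathFor(dir string, d digest.Digest) (string, error) {
	if err := d.Validate(); err != nil { // rejects values with no ":" and non-hex "encoded" parts such as "../hello"
		return "", err
	}
	return filepath.Join(dir, d.Encoded()), nil
}

func main() {
	if _, err := layerPathFor("/var/lib/blobs", digest.Digest("sha256:../hello")); err != nil {
		fmt.Println("rejected:", err)
	}
}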

View File

@@ -49,10 +49,13 @@ type progressBar struct {
 // As a convention, most users of progress bars should call mark100PercentComplete on full success;
 // by convention, we don't leave progress bars in partial state when fully done
 // (even if we copied much less data than anticipated).
-func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar {
+func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) (*progressBar, error) {
 	// shortDigestLen is the length of the digest used for blobs.
 	const shortDigestLen = 12
+	if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+		return nil, err
+	}
 	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
 	// Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column.
 	maxPrefixLen := len("Copying blob ") + shortDigestLen
@@ -105,7 +108,7 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
 	return &progressBar{
 		Bar:          bar,
 		originalSize: info.Size,
-	}
+	}, nil
 }
 // printCopyInfo prints a "Copying ..." message on the copier if the output is

View File

@@ -606,7 +606,10 @@ func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
 		destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
 			progressPool := ic.c.newProgressPool()
 			defer progressPool.Wait()
-			bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
+			bar, err := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
+			if err != nil {
+				return types.BlobInfo{}, err
+			}
 			defer bar.Abort(false)
 			ic.c.printCopyInfo("config", srcInfo)
@@ -738,15 +741,21 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 		}
 		if reused {
 			logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
-			func() { // A scope for defer
+			if err := func() error { // A scope for defer
 				label := "skipped: already exists"
 				if reusedBlob.MatchedByTOCDigest {
 					label = "skipped: already exists (found by TOC)"
 				}
-				bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", label)
+				bar, err := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", label)
+				if err != nil {
+					return err
+				}
 				defer bar.Abort(false)
 				bar.mark100PercentComplete()
-			}()
+				return nil
+			}(); err != nil {
+				return types.BlobInfo{}, "", err
+			}
 			// Throw an event that the layer has been skipped
 			if ic.c.options.Progress != nil && ic.c.options.ProgressInterval > 0 {
@@ -765,8 +774,11 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 	// Attempt a partial only when the source allows to retrieve a blob partially and
 	// the destination has support for it.
 	if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
-		if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
-			bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
+		reused, blobInfo, err := func() (bool, types.BlobInfo, error) { // A scope for defer
+			bar, err := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
+			if err != nil {
+				return false, types.BlobInfo{}, err
+			}
 			hideProgressBar := true
 			defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
 				bar.Abort(hideProgressBar)
@@ -789,18 +801,25 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
 				bar.mark100PercentComplete()
 				hideProgressBar = false
 				logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
-				return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob)
+				return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob), nil
 			}
 			logrus.Debugf("Failed to retrieve partial blob: %v", err)
-			return false, types.BlobInfo{}
-		}(); reused {
+			return false, types.BlobInfo{}, nil
+		}()
+		if err != nil {
+			return types.BlobInfo{}, "", err
+		}
+		if reused {
 			return blobInfo, cachedDiffID, nil
 		}
 	}
 	// Fallback: copy the layer, computing the diffID if we need to do so
 	return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
-		bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
+		bar, err := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
+		if err != nil {
+			return types.BlobInfo{}, "", err
+		}
 		defer bar.Abort(false)
 		srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)

View File

@@ -174,7 +174,10 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 		}
 	}
-	blobPath := d.ref.layerPath(blobDigest)
+	blobPath, err := d.ref.layerPath(blobDigest)
+	if err != nil {
+		return private.UploadedBlob{}, err
+	}
 	// need to explicitly close the file, since a rename won't otherwise not work on Windows
 	blobFile.Close()
 	explicitClosed = true
@@ -197,7 +200,10 @@ func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, inf
 	if info.Digest == "" {
 		return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest")
 	}
-	blobPath := d.ref.layerPath(info.Digest)
+	blobPath, err := d.ref.layerPath(info.Digest)
+	if err != nil {
+		return false, private.ReusedBlob{}, err
+	}
 	finfo, err := os.Stat(blobPath)
 	if err != nil && os.IsNotExist(err) {
 		return false, private.ReusedBlob{}, nil
@@ -217,7 +223,11 @@ func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, inf
 // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
 // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
 func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
-	return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
+	path, err := d.ref.manifestPath(instanceDigest)
+	if err != nil {
+		return err
+	}
+	return os.WriteFile(path, manifest, 0644)
 }
 // PutSignaturesWithFormat writes a set of signatures to the destination.
@@ -230,7 +240,11 @@ func (d *dirImageDestination) PutSignaturesWithFormat(ctx context.Context, signa
 		if err != nil {
 			return err
 		}
-		if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), blob, 0644); err != nil {
+		path, err := d.ref.signaturePath(i, instanceDigest)
+		if err != nil {
+			return err
+		}
+		if err := os.WriteFile(path, blob, 0644); err != nil {
 			return err
 		}
 	}

View File

@@ -55,7 +55,11 @@ func (s *dirImageSource) Close() error {
 // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
 // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
 func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
-	m, err := os.ReadFile(s.ref.manifestPath(instanceDigest))
+	path, err := s.ref.manifestPath(instanceDigest)
+	if err != nil {
+		return nil, "", err
+	}
+	m, err := os.ReadFile(path)
 	if err != nil {
 		return nil, "", err
 	}
@@ -66,7 +70,11 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest
 // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
 // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
 func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
-	r, err := os.Open(s.ref.layerPath(info.Digest))
+	path, err := s.ref.layerPath(info.Digest)
+	if err != nil {
+		return nil, -1, err
+	}
+	r, err := os.Open(path)
 	if err != nil {
 		return nil, -1, err
 	}
@@ -84,7 +92,10 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache
 func (s *dirImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
 	signatures := []signature.Signature{}
 	for i := 0; ; i++ {
-		path := s.ref.signaturePath(i, instanceDigest)
+		path, err := s.ref.signaturePath(i, instanceDigest)
+		if err != nil {
+			return nil, err
+		}
 		sigBlob, err := os.ReadFile(path)
 		if err != nil {
 			if os.IsNotExist(err) {

View File

@@ -64,7 +64,7 @@ func TestGetPutManifest(t *testing.T) {
 func TestGetPutBlob(t *testing.T) {
 	computedBlob := []byte("test-blob")
 	providedBlob := []byte("provided-blob")
-	providedDigest := digest.Digest("sha256:provided-test-digest")
+	providedDigest := digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
 	ref, _ := refToTempDir(t)
 	cache := memory.New()
@@ -113,12 +113,13 @@ func (fn readerFromFunc) Read(p []byte) (int, error) {
 // TestPutBlobDigestFailure simulates behavior on digest verification failure.
 func TestPutBlobDigestFailure(t *testing.T) {
 	const digestErrorString = "Simulated digest error"
-	const blobDigest = digest.Digest("sha256:test-digest")
+	const blobDigest = digest.Digest("sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
 	ref, _ := refToTempDir(t)
 	dirRef, ok := ref.(dirReference)
 	require.True(t, ok)
-	blobPath := dirRef.layerPath(blobDigest)
+	blobPath, err := dirRef.layerPath(blobDigest)
+	require.NoError(t, err)
 	cache := memory.New()
 	firstRead := true

View File

@@ -161,25 +161,34 @@ func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContex
 }
 // manifestPath returns a path for the manifest within a directory using our conventions.
-func (ref dirReference) manifestPath(instanceDigest *digest.Digest) string {
+func (ref dirReference) manifestPath(instanceDigest *digest.Digest) (string, error) {
 	if instanceDigest != nil {
-		return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json")
+		if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+			return "", err
+		}
+		return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json"), nil
 	}
-	return filepath.Join(ref.path, "manifest.json")
+	return filepath.Join(ref.path, "manifest.json"), nil
 }
 // layerPath returns a path for a layer tarball within a directory using our conventions.
-func (ref dirReference) layerPath(digest digest.Digest) string {
+func (ref dirReference) layerPath(digest digest.Digest) (string, error) {
+	if err := digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+		return "", err
+	}
 	// FIXME: Should we keep the digest identification?
-	return filepath.Join(ref.path, digest.Encoded())
+	return filepath.Join(ref.path, digest.Encoded()), nil
 }
 // signaturePath returns a path for a signature within a directory using our conventions.
-func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) string {
+func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) (string, error) {
 	if instanceDigest != nil {
-		return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1))
+		if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+			return "", err
+		}
+		return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1)), nil
 	}
-	return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
+	return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)), nil
 }
 // versionPath returns a path for the version file within a directory using our conventions.

View File

@@ -197,8 +197,15 @@ func TestReferenceManifestPath(t *testing.T) {
 	ref, tmpDir := refToTempDir(t)
 	dirRef, ok := ref.(dirReference)
 	require.True(t, ok)
-	assert.Equal(t, tmpDir+"/manifest.json", dirRef.manifestPath(nil))
-	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".manifest.json", dirRef.manifestPath(&dhex))
+	res, err := dirRef.manifestPath(nil)
+	require.NoError(t, err)
+	assert.Equal(t, tmpDir+"/manifest.json", res)
+	res, err = dirRef.manifestPath(&dhex)
+	require.NoError(t, err)
+	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".manifest.json", res)
+	invalidDigest := digest.Digest("sha256:../hello")
+	_, err = dirRef.manifestPath(&invalidDigest)
+	assert.Error(t, err)
 }
 func TestReferenceLayerPath(t *testing.T) {
@@ -207,7 +214,11 @@ func TestReferenceLayerPath(t *testing.T) {
 	ref, tmpDir := refToTempDir(t)
 	dirRef, ok := ref.(dirReference)
 	require.True(t, ok)
-	assert.Equal(t, tmpDir+"/"+hex, dirRef.layerPath("sha256:"+hex))
+	res, err := dirRef.layerPath("sha256:" + hex)
+	require.NoError(t, err)
+	assert.Equal(t, tmpDir+"/"+hex, res)
+	_, err = dirRef.layerPath(digest.Digest("sha256:../hello"))
+	assert.Error(t, err)
 }
 func TestReferenceSignaturePath(t *testing.T) {
@@ -216,10 +227,21 @@ func TestReferenceSignaturePath(t *testing.T) {
 	ref, tmpDir := refToTempDir(t)
 	dirRef, ok := ref.(dirReference)
 	require.True(t, ok)
-	assert.Equal(t, tmpDir+"/signature-1", dirRef.signaturePath(0, nil))
-	assert.Equal(t, tmpDir+"/signature-10", dirRef.signaturePath(9, nil))
-	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".signature-1", dirRef.signaturePath(0, &dhex))
-	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".signature-10", dirRef.signaturePath(9, &dhex))
+	res, err := dirRef.signaturePath(0, nil)
+	require.NoError(t, err)
+	assert.Equal(t, tmpDir+"/signature-1", res)
+	res, err = dirRef.signaturePath(9, nil)
+	require.NoError(t, err)
+	assert.Equal(t, tmpDir+"/signature-10", res)
+	res, err = dirRef.signaturePath(0, &dhex)
+	require.NoError(t, err)
+	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".signature-1", res)
+	res, err = dirRef.signaturePath(9, &dhex)
+	require.NoError(t, err)
+	assert.Equal(t, tmpDir+"/"+dhex.Encoded()+".signature-10", res)
+	invalidDigest := digest.Digest("sha256:../hello")
+	_, err = dirRef.signaturePath(0, &invalidDigest)
+	assert.Error(t, err)
 }
 func TestReferenceVersionPath(t *testing.T) {

View File

@@ -624,11 +624,13 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature
 	// NOTE: Keep this in sync with docs/signature-protocols.md!
 	for i, signature := range signatures {
-		sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
-		err := d.putOneSignature(sigURL, signature)
+		sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
 		if err != nil {
 			return err
 		}
+		if err := d.putOneSignature(sigURL, signature); err != nil {
+			return err
+		}
 	}
 	// Remove any other signatures, if present.
 	// We stop at the first missing signature; if a previous deleting loop aborted
@@ -636,7 +638,10 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature
 	// is enough for dockerImageSource to stop looking for other signatures, so that
 	// is sufficient.
 	for i := len(signatures); ; i++ {
-		sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
+		sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
+		if err != nil {
+			return err
+		}
 		missing, err := d.c.deleteOneSignature(sigURL)
 		if err != nil {
 			return err

View File

@@ -462,7 +462,10 @@ func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, inst
 			return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
 		}
-		sigURL := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
+		sigURL, err := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
+		if err != nil {
+			return nil, err
+		}
 		signature, missing, err := s.getOneSignature(ctx, sigURL)
 		if err != nil {
 			return nil, err
@@ -660,7 +663,10 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
 	}
 	for i := 0; ; i++ {
-		sigURL := lookasideStorageURL(c.signatureBase, manifestDigest, i)
+		sigURL, err := lookasideStorageURL(c.signatureBase, manifestDigest, i)
+		if err != nil {
+			return err
+		}
 		missing, err := c.deleteOneSignature(sigURL)
 		if err != nil {
 			return err

View File

@@ -111,11 +111,19 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,
 			return private.UploadedBlob{}, fmt.Errorf("reading Config file stream: %w", err)
 		}
 		d.config = buf
-		if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
+		configPath, err := d.archive.configPath(inputInfo.Digest)
+		if err != nil {
+			return private.UploadedBlob{}, err
+		}
+		if err := d.archive.sendFileLocked(configPath, inputInfo.Size, bytes.NewReader(buf)); err != nil {
 			return private.UploadedBlob{}, fmt.Errorf("writing Config file: %w", err)
 		}
 	} else {
-		if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
+		layerPath, err := d.archive.physicalLayerPath(inputInfo.Digest)
+		if err != nil {
+			return private.UploadedBlob{}, err
+		}
+		if err := d.archive.sendFileLocked(layerPath, inputInfo.Size, stream); err != nil {
 			return private.UploadedBlob{}, err
 		}
 	}

View File

@@ -95,7 +95,10 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges
 	if !w.legacyLayers.Contains(layerID) {
 		// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
 		// See also the comment in physicalLayerPath.
-		physicalLayerPath := w.physicalLayerPath(layerDigest)
+		physicalLayerPath, err := w.physicalLayerPath(layerDigest)
+		if err != nil {
+			return err
+		}
 		if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
 			return fmt.Errorf("creating layer symbolic link: %w", err)
 		}
@@ -204,12 +207,20 @@ func checkManifestItemsMatch(a, b *ManifestItem) error {
 func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error {
 	layerPaths := []string{}
 	for _, l := range layerDescriptors {
-		layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest))
+		p, err := w.physicalLayerPath(l.Digest)
+		if err != nil {
+			return err
+		}
+		layerPaths = append(layerPaths, p)
 	}
 	var item *ManifestItem
+	configPath, err := w.configPath(configDigest)
+	if err != nil {
+		return err
+	}
 	newItem := ManifestItem{
-		Config:   w.configPath(configDigest),
+		Config:   configPath,
 		RepoTags: []string{},
 		Layers:   layerPaths,
 		Parent:   "", // We dont have this information
@@ -294,21 +305,27 @@ func (w *Writer) Close() error {
 // configPath returns a path we choose for storing a config with the specified digest.
 // NOTE: This is an internal implementation detail, not a format property, and can change
 // any time.
-func (w *Writer) configPath(configDigest digest.Digest) string {
-	return configDigest.Hex() + ".json"
+func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
+	if err := configDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
+		return "", err
+	}
+	return configDigest.Hex() + ".json", nil
 }
 // physicalLayerPath returns a path we choose for storing a layer with the specified digest
 // (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format).
 // NOTE: This is an internal implementation detail, not a format property, and can change
 // any time.
-func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string {
+func (w *Writer) physicalLayerPath(layerDigest digest.Digest) (string, error) {
+	if err := layerDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
+		return "", err
+	}
 	// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
 	// writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
 	// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
 	// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
 	// in the root of the tarball.
-	return layerDigest.Hex() + ".tar"
+	return layerDigest.Hex() + ".tar", nil
 }
 type tarFI struct {

View File

@@ -287,8 +287,11 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
 // lookasideStorageURL returns an URL usable for accessing signature index in base with known manifestDigest.
 // base is not nil from the caller
 // NOTE: Keep this in sync with docs/signature-protocols.md!
-func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) *url.URL {
+func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) {
+	if err := manifestDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+		return nil, err
+	}
 	sigURL := *base
 	sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
-	return &sigURL
+	return &sigURL, nil
 }

View File

@@ -8,6 +8,7 @@ import (
 	"testing"
 	"github.com/containers/image/v5/types"
+	digest "github.com/opencontainers/go-digest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -320,9 +321,15 @@ func TestLookasideStorageURL(t *testing.T) {
 		require.NoError(t, err)
 		expectedURL, err := url.Parse(c.expected)
 		require.NoError(t, err)
-		res := lookasideStorageURL(baseURL, mdInput, c.index)
+		res, err := lookasideStorageURL(baseURL, mdInput, c.index)
+		require.NoError(t, err)
 		assert.Equal(t, expectedURL, res, c.expected)
 	}
+	baseURL, err := url.Parse("file:///tmp")
+	require.NoError(t, err)
+	_, err = lookasideStorageURL(baseURL, digest.Digest("sha256:../hello"), 0)
+	assert.Error(t, err)
 }
 func TestBuiltinDefaultLookasideStorageDir(t *testing.T) {

View File

@@ -347,6 +347,10 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		}
 		d.repo = repo
 	}
+	if err := info.Digest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, so validate explicitly.
+		return false, private.ReusedBlob{}, err
+	}
 	branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
 	found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
@@ -472,12 +476,18 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
 		return nil
 	}
 	for _, layer := range d.schema.LayersDescriptors {
+		if err := layer.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+			return err
+		}
 		hash := layer.Digest.Hex()
 		if err = checkLayer(hash); err != nil {
 			return err
 		}
 	}
 	for _, layer := range d.schema.FSLayers {
+		if err := layer.BlobSum.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+			return err
+		}
 		hash := layer.BlobSum.Hex()
 		if err = checkLayer(hash); err != nil {
 			return err

View File

@@ -286,7 +286,9 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
 // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
 // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
 func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+	if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+		return nil, -1, err
+	}
 	blob := info.Digest.Hex()
 	// Ensure s.compressed is initialized. It is build by LayerInfosForCopy.

View File

@@ -1097,8 +1097,12 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
 		})
 	}
 	for instanceDigest, signatures := range s.signatureses {
+		key, err := signatureBigDataKey(instanceDigest)
+		if err != nil {
+			return err
+		}
 		options.BigData = append(options.BigData, storage.ImageBigDataOption{
-			Key:    signatureBigDataKey(instanceDigest),
+			Key:    key,
 			Data:   signatures,
 			Digest: digest.Canonical.FromBytes(signatures),
 		})

View File

@@ -27,8 +27,11 @@ func manifestBigDataKey(digest digest.Digest) string {
 // signatureBigDataKey returns a key suitable for recording the signatures associated with the manifest with the specified digest using storage.Store.ImageBigData and related functions.
 // If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
-func signatureBigDataKey(digest digest.Digest) string {
-	return "signature-" + digest.Encoded()
+func signatureBigDataKey(digest digest.Digest) (string, error) {
+	if err := digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+		return "", err
+	}
+	return "signature-" + digest.Encoded(), nil
 }
 // storageImageMetadata is stored, as JSON, in storage.Image.Metadata

View File

@@ -385,7 +385,14 @@ func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instan
 	instance := "default instance"
 	if instanceDigest != nil {
 		signatureSizes = s.metadata.SignaturesSizes[*instanceDigest]
-		key = signatureBigDataKey(*instanceDigest)
+		k, err := signatureBigDataKey(*instanceDigest)
+		if err != nil {
+			return nil, err
+		}
+		key = k
+		if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+			return nil, err
+		}
 		instance = instanceDigest.Encoded()
 	}
 	if len(signatureSizes) > 0 {