Teach images to hold multiple manifests

Change how we compute digests for big data items with names that start
with "manifest": use the image library's manifest.Digest() function,
which knows how to preprocess schema1 manifests to arrive at the right
value, instead of just hashing the stored bytes directly.

Track the digests of all manifest-named big data items for each image,
and expose them in a new Digests field on the Image structure.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
Nalin Dahyabhai 2019-01-30 10:50:50 -05:00
parent 488134ff6a
commit ccf8bef6fa
6 changed files with 229 additions and 62 deletions
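
To make the change concrete, here is a minimal, self-contained sketch of the digest selection described above. It is not part of the commit; the helper name pickDigest is invented, and the logic is condensed from the SetBigData changes to images.go below.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/containers/image/manifest"
	digest "github.com/opencontainers/go-digest"
)

// pickDigest is a hypothetical helper, not part of this commit; it condenses
// the choice SetBigData makes in the diff below: items whose names start with
// "manifest" are digested with manifest.Digest(), which canonicalizes schema1
// manifests before hashing, while other items are hashed as-is.
func pickDigest(name string, data []byte) (digest.Digest, error) {
	if strings.HasPrefix(name, "manifest") {
		return manifest.Digest(data)
	}
	return digest.Canonical.FromBytes(data), nil
}

func main() {
	d, err := pickDigest("manifest-list", []byte(`{"schemaVersion": 2}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // e.g. sha256:...
}
```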


@@ -33,6 +33,9 @@ func image(flags *mflag.FlagSet, action string, m storage.Store, args []string)
for _, layerID := range image.MappedTopLayers {
fmt.Printf("Top Layer: %s\n", layerID)
}
for _, digest := range image.Digests {
fmt.Printf("Digest: %s\n", digest.String())
}
for _, name := range image.BigDataNames {
fmt.Printf("Data: %s\n", name)
}


@@ -31,6 +31,9 @@ func images(flags *mflag.FlagSet, action string, m storage.Store, args []string)
for _, name := range image.Names {
fmt.Printf("\tname: %s\n", name)
}
for _, digest := range image.Digests {
fmt.Printf("\tdigest: %s\n", digest.String())
}
for _, name := range image.BigDataNames {
fmt.Printf("\tdata: %s\n", name)
}
@@ -67,6 +70,9 @@ func imagesByDigest(flags *mflag.FlagSet, action string, m storage.Store, args [
for _, name := range image.Names {
fmt.Printf("\tname: %s\n", name)
}
for _, digest := range image.Digests {
fmt.Printf("\tdigest: %s\n", digest.String())
}
for _, name := range image.BigDataNames {
fmt.Printf("\tdata: %s\n", name)
}


@@ -0,0 +1,16 @@
## containers-storage-images-by-digest 1 "February 2019"
## NAME
containers-storage images-by-digest - List known images by digest
## SYNOPSIS
**containers-storage** **images-by-digest** *digest*
## DESCRIPTION
Retrieves information about images which match a specified digest.
## EXAMPLE
**containers-storage images-by-digest sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855**
## SEE ALSO
containers-storage-image(1)

images.go

@@ -5,8 +5,10 @@ import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/containers/image/manifest"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
@@ -15,9 +17,13 @@ import (
)
const (
// ImageDigestBigDataKey is the name of the big data item whose
// contents we consider useful for computing a "digest" of the
// image, by which we can locate the image later.
// ImageDigestManifestBigDataNamePrefix is a prefix of big data item
// names which we consider to be manifests, used for computing a
// "digest" value for the image as a whole, by which we can locate the
// image later.
ImageDigestManifestBigDataNamePrefix = "manifest"
// ImageDigestBigDataKey is provided for compatibility with older
// versions of the image library. It will be removed in the future.
ImageDigestBigDataKey = "manifest"
)
@@ -27,12 +33,19 @@ type Image struct {
// value which was generated by the library.
ID string `json:"id"`
// Digest is a digest value that we can use to locate the image.
// Digest is a digest value that we can use to locate the image, if one
// was specified at creation-time.
Digest digest.Digest `json:"digest,omitempty"`
// Digests is a list of digest values of the image's manifests, and
// possibly a manually-specified value, that we can use to locate the
// image. If Digest is set, its value is also in this list.
Digests []digest.Digest `json:"-"`
// Names is an optional set of user-defined convenience values. The
// image can be referred to by its ID or any of its names. Names are
// unique among images.
// unique among images, and are often the text representation of tagged
// or canonical references.
Names []string `json:"names,omitempty"`
// TopLayer is the ID of the topmost layer of the image itself, if the
@@ -92,8 +105,10 @@ type ROImageStore interface {
// Images returns a slice enumerating the known images.
Images() ([]Image, error)
// Images returns a slice enumerating the images which have a big data
// item with the name ImageDigestBigDataKey and the specified digest.
// ByDigest returns a slice enumerating the images whose explicitly-set
// digest, or whose digest for a big data item with a name that starts
// with ImageDigestManifestBigDataNamePrefix, matches the specified
// digest.
ByDigest(d digest.Digest) ([]*Image, error)
}
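
As an aside, a short sketch (not part of this diff) of how a caller might consume the ByDigest contract documented in the hunk above; the package name and the helper findByManifestDigest are invented for illustration:

```go
package example

import (
	storage "github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

// findByManifestDigest is a hypothetical helper showing how ByDigest reads
// for a caller: it returns the IDs of every image whose explicitly-set
// digest, or whose manifest-named big data item, matches d.
func findByManifestDigest(images storage.ROImageStore, d digest.Digest) ([]string, error) {
	matches, err := images.ByDigest(d)
	if err != nil {
		// ErrImageUnknown is returned when nothing matches the digest.
		return nil, err
	}
	ids := make([]string, 0, len(matches))
	for _, image := range matches {
		ids = append(ids, image.ID)
	}
	return ids, nil
}
```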
@@ -111,7 +126,8 @@ type ImageStore interface {
Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)
// SetNames replaces the list of names associated with an image with the
// supplied values.
// supplied values. The values are expected to be valid normalized
// named image references.
SetNames(id string, names []string) error
// Delete removes the record of the image.
@@ -135,6 +151,7 @@ func copyImage(i *Image) *Image {
return &Image{
ID: i.ID,
Digest: i.Digest,
Digests: copyDigestSlice(i.Digests),
Names: copyStringSlice(i.Names),
TopLayer: i.TopLayer,
MappedTopLayers: copyStringSlice(i.MappedTopLayers),
@@ -147,6 +164,17 @@ func copyImage(i *Image) *Image {
}
}
func copyImageSlice(slice []*Image) []*Image {
if len(slice) > 0 {
cp := make([]*Image, len(slice))
for i := range slice {
cp[i] = copyImage(slice[i])
}
return cp
}
return nil
}
func (r *imageStore) Images() ([]Image, error) {
images := make([]Image, len(r.images))
for i := range r.images {
@@ -167,6 +195,43 @@ func (r *imageStore) datapath(id, key string) string {
return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
}
// bigDataNameIsManifest determines if a big data item with the specified name
// is considered to be representative of the image, in that its digest can be
// said to also be the image's digest. Currently, if its name is, or begins
// with, "manifest", we say that it is.
func bigDataNameIsManifest(name string) bool {
return strings.HasPrefix(name, ImageDigestManifestBigDataNamePrefix)
}
// recomputeDigests rebuilds the image's list of unique digest values from its
// explicitly-set digest, if any, and from the digests of its big data items
// whose names mark them as manifests.
func (image *Image) recomputeDigests() error {
validDigests := make([]digest.Digest, 0, len(image.BigDataDigests)+1)
digests := make(map[digest.Digest]struct{})
if image.Digest != "" {
if err := image.Digest.Validate(); err != nil {
return errors.Wrapf(err, "error validating image digest %q", string(image.Digest))
}
digests[image.Digest] = struct{}{}
validDigests = append(validDigests, image.Digest)
}
for name, digest := range image.BigDataDigests {
if !bigDataNameIsManifest(name) {
continue
}
if err := digest.Validate(); err != nil {
return errors.Wrapf(err, "error validating digest %q for big data item %q", string(digest), name)
}
// Deduplicate the digest values.
if _, known := digests[digest]; !known {
digests[digest] = struct{}{}
validDigests = append(validDigests, digest)
}
}
image.Digests = validDigests
return nil
}
func (r *imageStore) Load() error {
shouldSave := false
rpath := r.imagespath()
@@ -189,17 +254,18 @@ func (r *imageStore) Load() error {
r.removeName(conflict, name)
shouldSave = true
}
names[name] = images[n]
}
// Implicit digest
if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
digests[digest] = append(digests[digest], images[n])
// Compute the digest list.
err = image.recomputeDigests()
if err != nil {
return errors.Wrapf(err, "error computing digests for image with ID %q (%v)", image.ID, image.Names)
}
// Explicit digest
if image.Digest == "" {
image.Digest = image.BigDataDigests[ImageDigestBigDataKey]
} else if image.Digest != image.BigDataDigests[ImageDigestBigDataKey] {
digests[image.Digest] = append(digests[image.Digest], images[n])
for _, name := range image.Names {
names[name] = image
}
for _, digest := range image.Digests {
list := digests[digest]
digests[digest] = append(list, image)
}
}
}
@@ -333,12 +399,12 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
}
}
if _, idInUse := r.byid[id]; idInUse {
return nil, ErrDuplicateID
return nil, errors.Wrapf(ErrDuplicateID, "an image with ID %q already exists", id)
}
names = dedupeNames(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
return nil, ErrDuplicateName
if image, nameInUse := r.byname[name]; nameInUse {
return nil, errors.Wrapf(ErrDuplicateName, "image name %q is already associated with image %q", name, image.ID)
}
}
if created.IsZero() {
@@ -348,6 +414,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
image = &Image{
ID: id,
Digest: searchableDigest,
Digests: nil,
Names: names,
TopLayer: layer,
Metadata: metadata,
@@ -357,16 +424,20 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
Created: created,
Flags: make(map[string]interface{}),
}
err := image.recomputeDigests()
if err != nil {
return nil, errors.Wrapf(err, "error validating digests for new image")
}
r.images = append(r.images, image)
r.idindex.Add(id)
r.byid[id] = image
if searchableDigest != "" {
list := r.bydigest[searchableDigest]
r.bydigest[searchableDigest] = append(list, image)
}
for _, name := range names {
r.byname[name] = image
}
for _, digest := range image.Digests {
list := r.bydigest[digest]
r.bydigest[digest] = append(list, image)
}
err = r.Save()
image = copyImage(image)
}
@@ -444,6 +515,14 @@ func (r *imageStore) Delete(id string) error {
for _, name := range image.Names {
delete(r.byname, name)
}
for _, digest := range image.Digests {
prunedList := imageSliceWithoutValue(r.bydigest[digest], image)
if len(prunedList) == 0 {
delete(r.bydigest, digest)
} else {
r.bydigest[digest] = prunedList
}
}
if toDeleteIndex != -1 {
// delete the image at toDeleteIndex
if toDeleteIndex == len(r.images)-1 {
@@ -452,28 +531,6 @@ func (r *imageStore) Delete(id string) error {
r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
}
}
if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
// remove the image from the digest-based index
if list, ok := r.bydigest[digest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, digest)
} else {
r.bydigest[digest] = prunedList
}
}
}
if image.Digest != "" {
// remove the image's hard-coded digest from the digest-based index
if list, ok := r.bydigest[image.Digest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, image.Digest)
} else {
r.bydigest[image.Digest] = prunedList
}
}
}
if err := r.Save(); err != nil {
return err
}
@@ -504,7 +561,7 @@ func (r *imageStore) Exists(id string) bool {
func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) {
if images, ok := r.bydigest[d]; ok {
return images, nil
return copyImageSlice(images), nil
}
return nil, ErrImageUnknown
}
@@ -606,10 +663,19 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
if !ok {
return ErrImageUnknown
}
if err := os.MkdirAll(r.datadir(image.ID), 0700); err != nil {
err := os.MkdirAll(r.datadir(image.ID), 0700)
if err != nil {
return err
}
err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
var newDigest digest.Digest
if bigDataNameIsManifest(key) {
if newDigest, err = manifest.Digest(data); err != nil {
return errors.Wrapf(err, "error digesting manifest")
}
} else {
newDigest = digest.Canonical.FromBytes(data)
}
err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
if err == nil {
save := false
if image.BigDataSizes == nil {
@@ -621,7 +687,6 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
image.BigDataDigests = make(map[string]digest.Digest)
}
oldDigest, digestOk := image.BigDataDigests[key]
newDigest := digest.Canonical.FromBytes(data)
image.BigDataDigests[key] = newDigest
if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
save = true
@@ -637,20 +702,21 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
image.BigDataNames = append(image.BigDataNames, key)
save = true
}
if key == ImageDigestBigDataKey {
if oldDigest != "" && oldDigest != newDigest && oldDigest != image.Digest {
// remove the image from the list of images in the digest-based
// index which corresponds to the old digest for this item, unless
// it's also the hard-coded digest
if list, ok := r.bydigest[oldDigest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, oldDigest)
} else {
r.bydigest[oldDigest] = prunedList
}
for _, oldDigest := range image.Digests {
// remove the image from the list of images in the digest-based index
if list, ok := r.bydigest[oldDigest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, oldDigest)
} else {
r.bydigest[oldDigest] = prunedList
}
}
}
if err = image.recomputeDigests(); err != nil {
return errors.Wrapf(err, "error loading recomputing image digest information for %s", image.ID)
}
for _, newDigest := range image.Digests {
// add the image to the list of images in the digest-based index which
// corresponds to the new digest for this item, unless it's already there
list := r.bydigest[newDigest]


@@ -3157,6 +3157,15 @@ func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest {
return ret
}
func copyDigestSlice(slice []digest.Digest) []digest.Digest {
if len(slice) == 0 {
return nil
}
ret := make([]digest.Digest, len(slice))
copy(ret, slice)
return ret
}
// copyStringInterfaceMap still forces us to assume that the interface{} is
// a non-pointer scalar value
func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {

tests/manifests.bats

@@ -0,0 +1,67 @@
#!/usr/bin/env bats
load helpers
@test "manifests" {
# Create and populate three interesting layers.
populate
# Create an image using the top layer.
name=wonderful-image
run storage --debug=false create-image --name $name $upperlayer
[ "$status" -eq 0 ]
[ "$output" != "" ]
image=${lines[0]}
# Add a few big data items as manifests.
createrandom ${TESTDIR}/random1
createrandom ${TESTDIR}/random2
createrandom ${TESTDIR}/random3
digest1=$(sha256sum ${TESTDIR}/random1)
digest1=${digest1// *}
digest2=$(sha256sum ${TESTDIR}/random2)
digest2=${digest2// *}
digest3=$(sha256sum ${TESTDIR}/random3)
digest3=${digest3// *}
storage set-image-data -f ${TESTDIR}/random1 $image manifest
storage set-image-data -f ${TESTDIR}/random2 $image manifest-random2
storage set-image-data -f ${TESTDIR}/random3 $image manifest-random3
storage add-names --name localhost/fooimage:latest $image
# Get information about the image, and make sure the ID, name, and data names were preserved.
run storage image $image
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "ID: $image" ]]
[[ "$output" =~ "Name: $name" ]]
[[ "$output" =~ "Digest: sha256:$digest1" ]]
[[ "$output" =~ "Digest: sha256:$digest2" ]]
[[ "$output" =~ "Digest: sha256:$digest3" ]]
run storage images-by-digest sha256:$digest1
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "$image" ]]
[[ "$output" =~ "name: $name" ]]
[[ "$output" =~ "digest: sha256:$digest1" ]]
[[ "$output" =~ "digest: sha256:$digest2" ]]
[[ "$output" =~ "digest: sha256:$digest3" ]]
run storage images-by-digest sha256:$digest2
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "$image" ]]
[[ "$output" =~ "name: $name" ]]
[[ "$output" =~ "digest: sha256:$digest1" ]]
[[ "$output" =~ "digest: sha256:$digest2" ]]
[[ "$output" =~ "digest: sha256:$digest3" ]]
run storage images-by-digest sha256:$digest3
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ "$image" ]]
[[ "$output" =~ "name: $name" ]]
[[ "$output" =~ "digest: sha256:$digest1" ]]
[[ "$output" =~ "digest: sha256:$digest2" ]]
[[ "$output" =~ "digest: sha256:$digest3" ]]
}
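
The bats test above exercises the new behavior end to end through the command-line tool. For comparison, here is a sketch of what an in-package Go unit test for recomputeDigests' deduplication could look like; it is hypothetical, not part of this commit, and assumes the Image and recomputeDigests definitions from images.go above:

```go
package storage

import (
	"testing"

	digest "github.com/opencontainers/go-digest"
)

// TestRecomputeDigestsDedup is a sketch of an in-package test, not part of
// this commit: it checks that an explicitly-set digest and a manifest-named
// big data item with the same digest produce a single entry in Digests, and
// that non-manifest items are ignored.
func TestRecomputeDigestsDedup(t *testing.T) {
	shared := digest.Canonical.FromString("same payload")
	other := digest.Canonical.FromString("another payload")
	image := &Image{
		Digest: shared,
		BigDataDigests: map[string]digest.Digest{
			"manifest":       shared, // duplicates the explicit digest
			"manifest-other": other,  // a second, distinct manifest
			"config":         digest.Canonical.FromString("not a manifest"),
		},
	}
	if err := image.recomputeDigests(); err != nil {
		t.Fatal(err)
	}
	if len(image.Digests) != 2 {
		t.Fatalf("expected 2 unique digests, got %v", image.Digests)
	}
}
```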