Merge pull request #1483 from nalind/pullup-images

Complete "pulling up" of images in updateNames()
Miloslav Trmač 2023-04-01 10:57:07 +02:00 committed by GitHub
commit cf7f1eccdf
13 changed files with 530 additions and 183 deletions
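At its core, the change completes the "pull up" step in store.updateNames(): when a rename targets an image that only exists in a read-only (additional) image store, the image record, including its digests, names history, and big-data items, is first duplicated into the read-write store, and the name update is then applied to that writable copy. A condensed sketch of that flow, paraphrasing the new code in store.go (the pullUpImage helper name is illustrative, not part of the change):

```go
// pullUpImage duplicates an image record from a read-only image store into
// the read-write store so that its names can be modified there.
func pullUpImage(rw rwImageStore, ro roImageStore, id string) error {
	img, err := ro.Get(id)
	if err != nil {
		return err
	}
	options := ImageOptions{
		Metadata:     img.Metadata,
		CreationDate: img.Created,
		Digest:       img.Digest,
		Digests:      copyDigestSlice(img.Digests),
		NamesHistory: copyStringSlice(img.NamesHistory),
	}
	// Carry over the big-data items so that the writable copy is complete.
	for _, key := range img.BigDataNames {
		data, err := ro.BigData(id, key)
		if err != nil {
			return err
		}
		dataDigest, err := ro.BigDataDigest(id, key)
		if err != nil {
			return err
		}
		options.BigData = append(options.BigData, ImageBigDataOption{Key: key, Data: data, Digest: dataDigest})
	}
	// Create the duplicate record under the same ID; the caller then updates
	// its names through the read-write store's updateNames().
	_, err = rw.create(id, img.Names, img.TopLayer, options)
	return err
}
```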


@ -26,7 +26,7 @@ NATIVETAGS :=
AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh)
BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS)
GO ?= go
TESTFLAGS := $(shell go test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)
TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race)
# Go module support: set `-mod=vendor` to use the vendored sources
ifeq ($(shell $(GO) help mod >/dev/null 2>&1 && echo true), true)


@ -37,18 +37,30 @@ func addNames(flags *mflag.FlagSet, action string, m storage.Store, args []strin
if err != nil {
return 1, err
}
oldnames, err := m.Names(id)
if err := m.AddNames(id, paramNames); err != nil {
return 1, err
}
names, err := m.Names(id)
if err != nil {
return 1, err
}
newNames := []string{}
if oldnames != nil {
newNames = append(newNames, oldnames...)
if jsonOutput {
if _, err := outputJSON(names); err != nil {
return 1, err
}
}
if paramNames != nil {
newNames = append(newNames, paramNames...)
return 0, nil
}
func removeNames(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if len(args) < 1 {
return 1, nil
}
if err := m.SetNames(id, newNames); err != nil {
id, err := m.Lookup(args[0])
if err != nil {
return 1, err
}
if err := m.RemoveNames(id, paramNames); err != nil {
return 1, err
}
names, err := m.Names(id)
@ -106,7 +118,19 @@ func init() {
maxArgs: -1,
action: addNames,
addFlags: func(flags *mflag.FlagSet, cmd *command) {
flags.Var(opts.NewListOptsRef(&paramNames, nil), []string{"-name", "n"}, "New name")
flags.Var(opts.NewListOptsRef(&paramNames, nil), []string{"-name", "n"}, "Name to add")
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
},
})
commands = append(commands, command{
names: []string{"remove-names", "removenames"},
optionsHelp: "[options [...]] imageOrContainerNameOrID",
usage: "Remove layer, image, or container name or names",
minArgs: 1,
maxArgs: -1,
action: removeNames,
addFlags: func(flags *mflag.FlagSet, cmd *command) {
flags.Var(opts.NewListOptsRef(&paramNames, nil), []string{"-name", "n"}, "Name to remove")
flags.BoolVar(&jsonOutput, []string{"-json", "j"}, jsonOutput, "Prefer JSON output")
},
})
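With AddNames() and RemoveNames() available on the store, the CLI helpers no longer reimplement a read-modify-write cycle around SetNames(). A minimal sketch of the caller-side difference against the exported storage.Store interface (the renameExample wrapper is hypothetical):

```go
import "github.com/containers/storage"

// renameExample adds one name and removes another; m is an opened
// storage.Store and id an existing layer, image, or container ID or name.
func renameExample(m storage.Store, id string) error {
	// Previously the CLI assembled the full list by hand:
	//   old, _ := m.Names(id)
	//   err := m.SetNames(id, append(old, "new-name"))
	// Now it states the intended change directly and lets the store apply it
	// under its own locking:
	if err := m.AddNames(id, []string{"new-name"}); err != nil {
		return err
	}
	return m.RemoveNames(id, []string{"old-name"})
}
```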


@ -107,13 +107,13 @@ type rwContainerStore interface {
// stopReading releases locks obtained by startReading.
stopReading()
// Create creates a container that has a specified ID (or generates a
// create creates a container that has a specified ID (or generates a
// random one if an empty value is supplied) and optional names,
// based on the specified image, using the specified layer as its
// read-write layer.
// The maps in the container's options structure are recorded for the
// convenience of the caller, nothing more.
Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
create(id string, names []string, image, layer string, options *ContainerOptions) (*Container, error)
// updateNames modifies names associated with a container based on (op, names).
updateNames(id string, names []string, op updateNameOperation) error
@ -651,7 +651,10 @@ func (r *containerStore) SetFlag(id string, flag string, value interface{}) erro
}
// Requires startWriting.
func (r *containerStore) Create(id string, names []string, image, layer, metadata string, options *ContainerOptions) (container *Container, err error) {
func (r *containerStore) create(id string, names []string, image, layer string, options *ContainerOptions) (container *Container, err error) {
if options == nil {
options = &ContainerOptions{}
}
if id == "" {
id = stringid.GenerateRandomID()
_, idInUse := r.byid[id]
@ -663,12 +666,6 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
if _, idInUse := r.byid[id]; idInUse {
return nil, ErrDuplicateID
}
if options.MountOpts != nil {
options.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...)
}
if options.Volatile {
options.Flags[volatileFlag] = true
}
names = dedupeNames(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
@ -686,7 +683,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
Names: names,
ImageID: image,
LayerID: layer,
Metadata: metadata,
Metadata: options.Metadata,
BigDataNames: []string{},
BigDataSizes: make(map[string]int64),
BigDataDigests: make(map[string]digest.Digest),
@ -696,16 +693,42 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
GIDMap: copyIDMap(options.GIDMap),
volatileStore: options.Volatile,
}
if options.MountOpts != nil {
container.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...)
}
if options.Volatile {
container.Flags[volatileFlag] = true
}
r.containers = append(r.containers, container)
r.byid[id] = container
// This can only fail on duplicate IDs, which shouldn't happen — and in that case the index is already in the desired state anyway.
// Implementing recovery from an unlikely and unimportant failure here would be too risky.
// This can only fail on duplicate IDs, which shouldn't happen — and in
// that case the index is already in the desired state anyway.
// Implementing recovery from an unlikely and unimportant failure here
// would be too risky.
_ = r.idindex.Add(id)
r.byid[id] = container
r.bylayer[layer] = container
for _, name := range names {
r.byname[name] = container
}
defer func() {
if err != nil {
// now that the in-memory structures know about the new
// record, we can use regular Delete() to clean up if
// anything breaks from here on out
if e := r.Delete(id); e != nil {
logrus.Debugf("while cleaning up partially-created container %q we failed to create: %v", id, e)
}
}
}()
err = r.saveFor(container)
if err != nil {
return nil, err
}
for _, item := range options.BigData {
if err = r.SetBigData(id, item.Key, item.Data); err != nil {
return nil, err
}
}
container = copyContainer(container)
return container, err
}
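The same recovery pattern is used here and in the image and layer stores below: the new record is linked into the in-memory indexes first, and a deferred check on the named error return then reuses the regular Delete() path to roll back if any later step (saving metadata, storing big-data items) fails. In outline (a sketch of the pattern, not the literal code):

```go
func (r *containerStore) createOutline(id string, container *Container) (_ *Container, err error) {
	// Register the record in the in-memory indexes first, so that the
	// normal Delete() path knows how to find and remove it on failure.
	r.containers = append(r.containers, container)
	r.byid[id] = container

	defer func() {
		if err != nil {
			// Roll back everything done so far through the regular deletion path.
			if e := r.Delete(id); e != nil {
				logrus.Debugf("cleaning up partially-created record %q: %v", id, e)
			}
		}
	}()

	if err = r.saveFor(container); err != nil { // persist the record to disk
		return nil, err
	}
	return copyContainer(container), nil
}
```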

View File

@ -1,4 +1,4 @@
## containers-storage-add-names "August 2016"
## containers-storage-add-names 1 "August 2016"
## NAME
containers-storage add-names - Add names to a layer/image/container
@ -23,4 +23,5 @@ other layer, image, or container.
## SEE ALSO
containers-storage-get-names(1)
containers-storage-remove-names(1)
containers-storage-set-names(1)


@ -18,4 +18,5 @@ command can be used to read the list of names for any of them.
## SEE ALSO
containers-storage-add-names(1)
containers-storage-remove-names(1)
containers-storage-set-names(1)


@ -0,0 +1,25 @@
## containers-storage-remove-names 1 "January 2023"
## NAME
containers-storage remove-names - Remove names from a layer/image/container
## SYNOPSIS
**containers-storage** **remove-names** [*options* [...]] *layerOrImageOrContainerNameOrID*
## DESCRIPTION
In addition to IDs, *layers*, *images*, and *containers* can have
human-readable names assigned to them in *containers-storage*. The *remove-names*
command can be used to remove one or more names from them.
## OPTIONS
**-n | --name** *name*
Specifies a name to remove from the layer, image, or container.
## EXAMPLE
**containers-storage remove-names -n my-for-realsies-awesome-container f3be6c6134d0d980936b4c894f1613b69a62b79588fdeda744d0be3693bde8ec**
## SEE ALSO
containers-storage-add-names(1)
containers-storage-get-names(1)
containers-storage-set-names(1)


@ -26,3 +26,4 @@ will be removed from the layer, image, or container.
## SEE ALSO
containers-storage-add-names(1)
containers-storage-get-names(1)
containers-storage-remove-names(1)


@ -137,10 +137,10 @@ type rwImageStore interface {
// stopWriting releases locks obtained by startWriting.
stopWriting()
// Create creates an image that has a specified ID (or a random one) and
// create creates an image that has a specified ID (or a random one) and
// optional names, using the specified layer as its topmost (hopefully
// read-only) layer. That layer can be referenced by multiple images.
Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)
create(id string, names []string, layer string, options ImageOptions) (*Image, error)
// updateNames modifies names associated with an image based on (op, names).
// The values are expected to be valid normalized
@ -688,7 +688,7 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
}
// Requires startWriting.
func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) {
func (r *imageStore) create(id string, names []string, layer string, options ImageOptions) (image *Image, err error) {
if !r.lockfile.IsReadWrite() {
return nil, fmt.Errorf("not allowed to create new images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
@ -709,30 +709,32 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
return nil, fmt.Errorf("image name %q is already associated with image %q: %w", name, image.ID, ErrDuplicateName)
}
}
if created.IsZero() {
created = time.Now().UTC()
}
image = &Image{
ID: id,
Digest: searchableDigest,
Digests: nil,
Digest: options.Digest,
Digests: copyDigestSlice(options.Digests),
Names: names,
NamesHistory: copyStringSlice(options.NamesHistory),
TopLayer: layer,
Metadata: metadata,
Metadata: options.Metadata,
BigDataNames: []string{},
BigDataSizes: make(map[string]int64),
BigDataDigests: make(map[string]digest.Digest),
Created: created,
Flags: make(map[string]interface{}),
Created: options.CreationDate,
Flags: copyStringInterfaceMap(options.Flags),
}
if image.Created.IsZero() {
image.Created = time.Now().UTC()
}
err = image.recomputeDigests()
if err != nil {
return nil, fmt.Errorf("validating digests for new image: %w", err)
}
r.images = append(r.images, image)
// This can only fail on duplicate IDs, which shouldn't happen — and in that case the index is already in the desired state anyway.
// Implementing recovery from an unlikely and unimportant failure here would be too risky.
// This can only fail on duplicate IDs, which shouldn't happen — and in
// that case the index is already in the desired state anyway.
// Implementing recovery from an unlikely and unimportant failure here
// would be too risky.
_ = r.idindex.Add(id)
r.byid[id] = image
for _, name := range names {
@ -742,7 +744,28 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
list := r.bydigest[digest]
r.bydigest[digest] = append(list, image)
}
defer func() {
if err != nil {
// now that the in-memory structures know about the new
// record, we can use regular Delete() to clean up if
// anything breaks from here on out
if e := r.Delete(id); e != nil {
logrus.Debugf("while cleaning up partially-created image %q we failed to create: %v", id, e)
}
}
}()
err = r.Save()
if err != nil {
return nil, err
}
for _, item := range options.BigData {
if item.Digest == "" {
item.Digest = digest.Canonical.FromBytes(item.Data)
}
if err = r.setBigData(image, item.Key, item.Data, item.Digest); err != nil {
return nil, err
}
}
image = copyImage(image)
return image, err
}
@ -965,9 +988,6 @@ func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
// Requires startWriting.
func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
if key == "" {
return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName)
}
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to save data items associated with images at %q: %w", r.imagespath(), ErrStoreIsReadOnly)
}
@ -975,10 +995,7 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
if !ok {
return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
err := os.MkdirAll(r.datadir(image.ID), 0700)
if err != nil {
return err
}
var err error
var newDigest digest.Digest
if bigDataNameIsManifest(key) {
if digestManifest == nil {
@ -990,6 +1007,18 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func
} else {
newDigest = digest.Canonical.FromBytes(data)
}
return r.setBigData(image, key, data, newDigest)
}
// Requires startWriting.
func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest digest.Digest) error {
if key == "" {
return fmt.Errorf("can't set empty name for image big data item: %w", ErrInvalidBigDataName)
}
err := os.MkdirAll(r.datadir(image.ID), 0700)
if err != nil {
return err
}
err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
if err == nil {
save := false
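The digest computation stays in the exported SetBigData(), while the actual write moves into the new internal setBigData() helper so that create() can reuse it for items supplied via ImageOptions.BigData. From a caller's perspective the exported Store API is unchanged; manifest-like keys still take a digesting callback (a usage sketch; the manifest bytes and the storeManifest helper name are placeholders):

```go
import (
	"github.com/containers/storage"
	"github.com/opencontainers/go-digest"
)

// storeManifest attaches manifest bytes to an image as a big-data item.
func storeManifest(s storage.Store, imageID string, manifest []byte) error {
	// For keys the store treats as manifests, the caller supplies the
	// digesting function; canonical digesting is enough for this sketch.
	return s.SetImageBigData(imageID, "manifest", manifest,
		func(b []byte) (digest.Digest, error) {
			return digest.Canonical.FromBytes(b), nil
		})
}
```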


@ -20,8 +20,12 @@ func addTestImage(t *testing.T, store rwImageStore, id string, names []string) {
require.NoError(t, err)
defer store.stopWriting()
_, err = store.Create(
id, []string{}, "", "", time.Now(), digest.FromString(""),
options := ImageOptions{
CreationDate: time.Now(),
Digest: digest.FromString(""),
}
_, err = store.create(
id, []string{}, "", options,
)
require.Nil(t, err)

layers.go

@ -112,33 +112,33 @@ type Layer struct {
Created time.Time `json:"created,omitempty"`
// CompressedDigest is the digest of the blob that was last passed to
// ApplyDiff() or Put(), as it was presented to us.
// ApplyDiff() or create(), as it was presented to us.
CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"`
// CompressedSize is the length of the blob that was last passed to
// ApplyDiff() or Put(), as it was presented to us. If
// ApplyDiff() or create(), as it was presented to us. If
// CompressedDigest is not set, this should be treated as if it were an
// uninitialized value.
CompressedSize int64 `json:"compressed-size,omitempty"`
// UncompressedDigest is the digest of the blob that was last passed to
// ApplyDiff() or Put(), after we decompressed it. Often referred to
// ApplyDiff() or create(), after we decompressed it. Often referred to
// as a DiffID.
UncompressedDigest digest.Digest `json:"diff-digest,omitempty"`
// UncompressedSize is the length of the blob that was last passed to
// ApplyDiff() or Put(), after we decompressed it. If
// ApplyDiff() or create(), after we decompressed it. If
// UncompressedDigest is not set, this should be treated as if it were
// an uninitialized value.
UncompressedSize int64 `json:"diff-size,omitempty"`
// CompressionType is the type of compression which we detected on the blob
// that was last passed to ApplyDiff() or Put().
// that was last passed to ApplyDiff() or create().
CompressionType archive.Compression `json:"compression,omitempty"`
// UIDs and GIDs are lists of UIDs and GIDs used in the layer. This
// field is only populated (i.e., will only contain one or more
// entries) if the layer was created using ApplyDiff() or Put().
// entries) if the layer was created using ApplyDiff() or create().
UIDs []uint32 `json:"uidset,omitempty"`
GIDs []uint32 `json:"gidset,omitempty"`
@ -248,20 +248,15 @@ type rwLayerStore interface {
// stopWriting releases locks obtained by startWriting.
stopWriting()
// Create creates a new layer, optionally giving it a specified ID rather than
// create creates a new layer, optionally giving it a specified ID rather than
// a randomly-generated one, either inheriting data from another specified
// layer or the empty base layer. The new layer can optionally be given names
// and have an SELinux label specified for use when mounting it. Some
// underlying drivers can accept a "size" option. At this time, most
// underlying drivers do not themselves distinguish between writeable
// and read-only layers.
Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (*Layer, error)
// CreateWithFlags combines the functions of Create and SetFlag.
CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error)
// Put combines the functions of CreateWithFlags and ApplyDiff.
Put(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error)
// and read-only layers. Returns the new layer structure and the size of the
// diff which was applied to its parent to initialize its contents.
create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (*Layer, int64, error)
// updateNames modifies names associated with a layer based on (op, names).
updateNames(id string, names []string, op updateNameOperation) error
@ -1186,8 +1181,10 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
// TODO: check if necessary fields are filled
r.layers = append(r.layers, layer)
// This can only fail on duplicate IDs, which shouldn't happen — and in that case the index is already in the desired state anyway.
// Implementing recovery from an unlikely and unimportant failure here would be too risky.
// This can only fail on duplicate IDs, which shouldn't happen — and in
// that case the index is already in the desired state anyway.
// Implementing recovery from an unlikely and unimportant failure here
// would be too risky.
_ = r.idindex.Add(id)
r.byid[id] = layer
for _, name := range names { // names got from the additional layer store won't be used
@ -1200,8 +1197,8 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
}
if err := r.saveFor(layer); err != nil {
if err2 := r.driver.Remove(id); err2 != nil {
logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, err2)
if e := r.Delete(layer.ID); e != nil {
logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, e)
}
return nil, err
}
@ -1209,7 +1206,10 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
}
// Requires startWriting.
func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) {
func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (layer *Layer, size int64, err error) {
if moreOptions == nil {
moreOptions = &LayerOptions{}
}
if !r.lockfile.IsReadWrite() {
return nil, -1, fmt.Errorf("not allowed to create new layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
@ -1252,7 +1252,6 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
templateTSdata []byte
)
if moreOptions.TemplateLayer != "" {
var tserr error
templateLayer, ok := r.lookup(moreOptions.TemplateLayer)
if !ok {
return nil, -1, ErrLayerUnknown
@ -1263,9 +1262,9 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
templateCompressionType = templateLayer.CompressionType
templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
templateTSdata, tserr = os.ReadFile(r.tspath(templateLayer.ID))
if tserr != nil && !os.IsNotExist(tserr) {
return nil, -1, tserr
templateTSdata, err = os.ReadFile(r.tspath(templateLayer.ID))
if err != nil && !errors.Is(err, os.ErrNotExist) {
return nil, -1, err
}
} else {
templateIDMappings = &idtools.IDMappings{}
@ -1279,9 +1278,10 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
selinux.ReserveLabel(mountLabel)
}
// Before actually creating the layer, make a persistent record of it with incompleteFlag,
// so that future processes have a chance to delete it.
layer := &Layer{
// Before actually creating the layer, make a persistent record of it
// with the incomplete flag set, so that future processes have a chance
// to clean up after it.
layer = &Layer{
ID: id,
Parent: parent,
Names: names,
@ -1295,98 +1295,109 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
CompressionType: templateCompressionType,
UIDs: templateUIDs,
GIDs: templateGIDs,
Flags: make(map[string]interface{}),
Flags: copyStringInterfaceMap(moreOptions.Flags),
UIDMap: copyIDMap(moreOptions.UIDMap),
GIDMap: copyIDMap(moreOptions.GIDMap),
BigDataNames: []string{},
volatileStore: moreOptions.Volatile,
}
layer.Flags[incompleteFlag] = true
r.layers = append(r.layers, layer)
// This can only fail if the ID is already missing, which shouldn't happen — and in that case the index is already in the desired state anyway.
// This is on various paths to recover from failures, so this should be robust against partially missing data.
// This can only fail if the ID is already missing, which shouldn't
// happen — and in that case the index is already in the desired state
// anyway. This is on various paths to recover from failures, so this
// should be robust against partially missing data.
_ = r.idindex.Add(id)
r.byid[id] = layer
for _, name := range names {
r.byname[name] = layer
}
for flag, value := range flags {
layer.Flags[flag] = value
}
layer.Flags[incompleteFlag] = true
succeeded := false
cleanupFailureContext := ""
defer func() {
if !succeeded {
// On any error, try both removing the driver's data as well
// as the in-memory layer record.
if err2 := r.Delete(layer.ID); err2 != nil {
if cleanupFailureContext == "" {
cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site"
}
logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, layer.ID, err2)
if err != nil {
// now that the in-memory structures know about the new
// record, we can use regular Delete() to clean up if
// anything breaks from here on out
if cleanupFailureContext == "" {
cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site"
}
if e := r.Delete(id); e != nil {
logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, id, e)
}
}
}()
err := r.saveFor(layer)
if err != nil {
if err = r.saveFor(layer); err != nil {
cleanupFailureContext = "saving incomplete layer metadata"
return nil, -1, err
}
for _, item := range moreOptions.BigData {
if err = r.setBigData(layer, item.Key, item.Data); err != nil {
cleanupFailureContext = fmt.Sprintf("saving big data item %q", item.Key)
return nil, -1, err
}
}
idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap)
opts := drivers.CreateOpts{
MountLabel: mountLabel,
StorageOpt: options,
IDMappings: idMappings,
}
if moreOptions.TemplateLayer != "" {
if err := r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
cleanupFailureContext = "creating a layer from template"
if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
cleanupFailureContext = fmt.Sprintf("creating a layer from template layer %q", moreOptions.TemplateLayer)
return nil, -1, fmt.Errorf("creating copy of template layer %q with ID %q: %w", moreOptions.TemplateLayer, id, err)
}
oldMappings = templateIDMappings
} else {
if writeable {
if err := r.driver.CreateReadWrite(id, parent, &opts); err != nil {
if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil {
cleanupFailureContext = "creating a read-write layer"
return nil, -1, fmt.Errorf("creating read-write layer with ID %q: %w", id, err)
}
} else {
if err := r.driver.Create(id, parent, &opts); err != nil {
if err = r.driver.Create(id, parent, &opts); err != nil {
cleanupFailureContext = "creating a read-only layer"
return nil, -1, fmt.Errorf("creating layer with ID %q: %w", id, err)
return nil, -1, fmt.Errorf("creating read-only layer with ID %q: %w", id, err)
}
}
oldMappings = parentMappings
}
if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) {
if err := r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
cleanupFailureContext = "in UpdateLayerIDMap"
return nil, -1, err
}
}
if len(templateTSdata) > 0 {
if err := os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil {
if err = os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil {
cleanupFailureContext = "creating tar-split parent directory for a copy from template"
return nil, -1, err
}
if err := ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil {
if err = ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil {
cleanupFailureContext = "creating a tar-split copy from template"
return nil, -1, err
}
}
var size int64 = -1
size = -1
if diff != nil {
size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff)
if err != nil {
if size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff); err != nil {
cleanupFailureContext = "applying layer diff"
return nil, -1, err
}
} else {
// applyDiffWithOptions in the `diff != nil` case handles this bit for us
// applyDiffWithOptions() would have updated r.bycompressedsum
// and r.byuncompressedsum for us, but if we used a template
// layer, we didn't call it, so add the new layer as candidates
// for searches for layers by checksum
if layer.CompressedDigest != "" {
r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
}
@ -1394,29 +1405,17 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
}
}
delete(layer.Flags, incompleteFlag)
err = r.saveFor(layer)
if err != nil {
if err = r.saveFor(layer); err != nil {
cleanupFailureContext = "saving finished layer metadata"
return nil, -1, err
}
layer = copyLayer(layer)
succeeded = true
return layer, size, err
}
// Requires startWriting.
func (r *layerStore) CreateWithFlags(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}) (layer *Layer, err error) {
layer, _, err = r.Put(id, parent, names, mountLabel, options, moreOptions, writeable, flags, nil)
return layer, err
}
// Requires startWriting.
func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool) (layer *Layer, err error) {
return r.CreateWithFlags(id, parent, names, mountLabel, options, moreOptions, writeable, nil)
}
// Requires startReading or startWriting.
func (r *layerStore) Mounted(id string) (int, error) {
if !r.lockfile.IsReadWrite() {
@ -1677,9 +1676,6 @@ func (r *layerStore) BigData(id, key string) (io.ReadCloser, error) {
// Requires startWriting.
func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
if key == "" {
return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
}
if !r.lockfile.IsReadWrite() {
return fmt.Errorf("not allowed to save data items associated with layers at %q: %w", r.layerdir, ErrStoreIsReadOnly)
}
@ -1687,6 +1683,13 @@ func (r *layerStore) SetBigData(id, key string, data io.Reader) error {
if !ok {
return fmt.Errorf("locating layer with ID %q to write bigdata: %w", id, ErrLayerUnknown)
}
return r.setBigData(layer, key, data)
}
func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error {
if key == "" {
return fmt.Errorf("can't set empty name for layer big data item: %w", ErrInvalidBigDataName)
}
err := os.MkdirAll(r.datadir(layer.ID), 0700)
if err != nil {
return err
@ -1759,7 +1762,9 @@ func (r *layerStore) tspath(id string) string {
// layerHasIncompleteFlag returns true if layer.Flags contains an incompleteFlag set to true
// The caller must hold r.inProcessLock for reading.
func layerHasIncompleteFlag(layer *Layer) bool {
// layer.Flags[…] is defined to succeed and return ok == false if Flags == nil
if layer.Flags == nil {
return false
}
if flagValue, ok := layer.Flags[incompleteFlag]; ok {
if b, ok := flagValue.(bool); ok && b {
return true
@ -1788,20 +1793,21 @@ func (r *layerStore) deleteInternal(id string) error {
}
}
// We never unset incompleteFlag; below, we remove the entire object from r.layers.
id = layer.ID
if err := r.driver.Remove(id); err != nil {
if err := r.driver.Remove(id); err != nil && !errors.Is(err, os.ErrNotExist) {
return err
}
os.Remove(r.tspath(id))
os.RemoveAll(r.datadir(id))
delete(r.byid, id)
for _, name := range layer.Names {
delete(r.byname, name)
}
// This can only fail if the ID is already missing, which shouldn't happen — and in that case the index is already in the desired state anyway.
// The store's Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
// This can only fail if the ID is already missing, which shouldn't
// happen — and in that case the index is already in the desired state
// anyway. The store's Delete method is used on various paths to
// recover from failures, so this should be robust against partially
// missing data.
_ = r.idindex.Delete(id)
mountLabel := layer.MountLabel
if layer.MountPoint != "" {
@ -1835,7 +1841,6 @@ func (r *layerStore) deleteInternal(id string) error {
selinux.ReleaseLabel(mountLabel)
}
}
return nil
}

store.go

@ -506,10 +506,13 @@ type Store interface {
// GetDigestLock returns digest-specific Locker.
GetDigestLock(digest.Digest) (Locker, error)
// LayerFromAdditionalLayerStore searches layers from the additional layer store and
// returns the object for handling this. Note that this hasn't been stored to this store
// yet so this needs to be done through PutAs method.
// Releasing AdditionalLayer handler is caller's responsibility.
// LayerFromAdditionalLayerStore searches the additional layer store and returns an object
// which can create a layer with the specified digest associated with the specified image
// reference. Note that this hasn't been stored to this store yet: the actual creation of
// a usable layer is done by calling the returned object's PutAs() method. After creating
// a layer, the caller must then call the object's Release() method to free any temporary
// resources which were allocated for the object by this method or the object's PutAs()
// method.
// This API is experimental and can be changed without bumping the major version number.
LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error)
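The reworded comment spells out the handle's lifecycle: LookupAdditionalLayer() only returns a handle, PutAs() actually creates the layer in this store, and Release() must be called afterwards to free any temporary resources. A usage sketch (the PutAs() signature shown here is an assumption and may differ):

```go
import (
	"github.com/containers/storage"
	"github.com/opencontainers/go-digest"
)

// importAdditionalLayer copies a layer from the additional layer store into s.
func importAdditionalLayer(s storage.Store, d digest.Digest, imageRef, parent string) (*storage.Layer, error) {
	al, err := s.LookupAdditionalLayer(d, imageRef)
	if err != nil {
		return nil, err
	}
	// Release the handle whether or not PutAs() succeeds.
	defer al.Release()

	// Assumed signature: PutAs(id, parent string, names []string) (*Layer, error).
	return al.PutAs("", parent, nil)
}
```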
@ -562,6 +565,17 @@ type LayerOptions struct {
UncompressedDigest digest.Digest
// True if the layer info can be treated as volatile
Volatile bool
// BigData is a set of items which should be stored with the layer.
BigData []LayerBigDataOption
// Flags is a set of named flags and their values to store with the layer.
// Currently these can only be set when the layer record is created, but that
// could change in the future.
Flags map[string]interface{}
}
type LayerBigDataOption struct {
Key string
Data io.Reader
}
// ImageOptions is used for passing options to a Store's CreateImage() method.
@ -571,6 +585,26 @@ type ImageOptions struct {
CreationDate time.Time
// Digest is a hard-coded digest value that we can use to look up the image. It is optional.
Digest digest.Digest
// Digests is a list of digest values of the image's manifests, and
// possibly a manually-specified value, that we can use to locate the
// image. If Digest is set, its value is also in this list.
Digests []digest.Digest
// Metadata is caller-specified metadata associated with the layer.
Metadata string
// BigData is a set of items which should be stored with the image.
BigData []ImageBigDataOption
// NamesHistory is used for guessing what this image was named when a container was created based
// on it, but it no longer has any names.
NamesHistory []string
// Flags is a set of named flags and their values to store with the image. Currently these can only
// be set when the image record is created, but that could change in the future.
Flags map[string]interface{}
}
type ImageBigDataOption struct {
Key string
Data []byte
Digest digest.Digest
}
// ContainerOptions is used for passing options to a Store's CreateContainer() method.
@ -580,11 +614,23 @@ type ContainerOptions struct {
// container's layer will inherit settings from the image's top layer
// or, if it is not being created based on an image, the Store object.
types.IDMappingOptions
LabelOpts []string
LabelOpts []string
// Flags is a set of named flags and their values to store with the container.
// Currently these can only be set when the container record is created, but that
// could change in the future.
Flags map[string]interface{}
MountOpts []string
Volatile bool
StorageOpt map[string]string
// Metadata is caller-specified metadata associated with the container.
Metadata string
// BigData is a set of items which should be stored for the container.
BigData []ContainerBigDataOption
}
type ContainerBigDataOption struct {
Key string
Data []byte
}
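With BigData and Flags added to LayerOptions, ImageOptions, and ContainerOptions, callers can attach these items at creation time instead of setting them with separate calls after creation, which lets the stores clean everything up in one place if creation fails partway through. A sketch against the exported CreateImage() API (the image name, key, and flag here are illustrative):

```go
import (
	"time"

	"github.com/containers/storage"
	"github.com/opencontainers/go-digest"
)

// createImageWithManifest creates an image record with its manifest attached up front.
func createImageWithManifest(s storage.Store, layerID string, manifest []byte) (*storage.Image, error) {
	options := &storage.ImageOptions{
		CreationDate: time.Now().UTC(),
		BigData: []storage.ImageBigDataOption{{
			Key:    "manifest",
			Data:   manifest,
			Digest: digest.Canonical.FromBytes(manifest),
		}},
		// Arbitrary caller-defined flag stored with the image record.
		Flags: map[string]interface{}{"example-flag": true},
	}
	return s.CreateImage("", []string{"example.com/repo:tag"}, layerID, "", options)
}
```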
type store struct {
@ -1221,7 +1267,7 @@ func canUseShifting(store rwLayerStore, uidmap, gidmap []idtools.IDMap) bool {
return true
}
func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error) {
func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
var parentLayer *Layer
rlstore, rlstores, err := s.bothLayerStoreKinds()
if err != nil {
@ -1235,8 +1281,11 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
return nil, -1, err
}
defer s.containerStore.stopWriting()
if options == nil {
options = &LayerOptions{}
var options LayerOptions
if lOptions != nil {
options = *lOptions
options.BigData = copyLayerBigDataOptionSlice(lOptions.BigData)
options.Flags = copyStringInterfaceMap(lOptions.Flags)
}
if options.HostUIDMapping {
options.UIDMap = nil
@ -1303,7 +1352,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
GIDMap: copyIDMap(gidMap),
}
}
return rlstore.Put(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, nil, diff)
return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff)
}
func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) {
@ -1311,7 +1360,7 @@ func (s *store) CreateLayer(id, parent string, names []string, mountLabel string
return layer, err
}
func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) {
func (s *store) CreateImage(id string, names []string, layer, metadata string, iOptions *ImageOptions) (*Image, error) {
if layer != "" {
layerStores, err := s.allLayerStores()
if err != nil {
@ -1337,13 +1386,22 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o
var res *Image
err := s.writeToImageStore(func() error {
creationDate := time.Now().UTC()
if options != nil && !options.CreationDate.IsZero() {
creationDate = options.CreationDate
var options ImageOptions
if iOptions != nil {
options = *iOptions
options.Digests = copyDigestSlice(iOptions.Digests)
options.BigData = copyImageBigDataOptionSlice(iOptions.BigData)
options.NamesHistory = copyStringSlice(iOptions.NamesHistory)
options.Flags = copyStringInterfaceMap(iOptions.Flags)
}
if options.CreationDate.IsZero() {
options.CreationDate = time.Now().UTC()
}
options.Metadata = metadata
var err error
res, err = s.imageStore.Create(id, names, layer, metadata, creationDate, options.Digest)
res, err = s.imageStore.create(id, names, layer, options)
return err
})
return res, err
@ -1426,26 +1484,22 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst
// mappings, and register it as an alternate top layer in the image.
var layerOptions LayerOptions
if canUseShifting(rlstore, options.UIDMap, options.GIDMap) {
layerOptions = LayerOptions{
IDMappingOptions: types.IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
UIDMap: nil,
GIDMap: nil,
},
layerOptions.IDMappingOptions = types.IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
UIDMap: nil,
GIDMap: nil,
}
} else {
layerOptions = LayerOptions{
IDMappingOptions: types.IDMappingOptions{
HostUIDMapping: options.HostUIDMapping,
HostGIDMapping: options.HostGIDMapping,
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
},
layerOptions.IDMappingOptions = types.IDMappingOptions{
HostUIDMapping: options.HostUIDMapping,
HostGIDMapping: options.HostGIDMapping,
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
}
}
layerOptions.TemplateLayer = layer.ID
mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil)
if err != nil {
return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err)
}
@ -1459,9 +1513,17 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst
return mappedLayer, nil
}
func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) {
if options == nil {
options = &ContainerOptions{}
func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, cOptions *ContainerOptions) (*Container, error) {
var options ContainerOptions
if cOptions != nil {
options = *cOptions
options.IDMappingOptions.UIDMap = copyIDMap(cOptions.IDMappingOptions.UIDMap)
options.IDMappingOptions.GIDMap = copyIDMap(cOptions.IDMappingOptions.GIDMap)
options.LabelOpts = copyStringSlice(cOptions.LabelOpts)
options.Flags = copyStringInterfaceMap(cOptions.Flags)
options.MountOpts = copyStringSlice(cOptions.MountOpts)
options.StorageOpt = copyStringStringMap(cOptions.StorageOpt)
options.BigData = copyContainerBigDataOptionSlice(cOptions.BigData)
}
if options.HostUIDMapping {
options.UIDMap = nil
@ -1469,6 +1531,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
if options.HostGIDMapping {
options.GIDMap = nil
}
options.Metadata = metadata
rlstore, lstores, err := s.bothLayerStoreKinds() // lstores will be locked read-only if image != ""
if err != nil {
return nil, err
@ -1574,22 +1637,19 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
Volatile: options.Volatile || s.transientStore,
}
if canUseShifting(rlstore, uidMap, gidMap) {
layerOptions.IDMappingOptions =
types.IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
UIDMap: nil,
GIDMap: nil,
}
layerOptions.IDMappingOptions = types.IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
UIDMap: nil,
GIDMap: nil,
}
} else {
layerOptions.IDMappingOptions =
types.IDMappingOptions{
HostUIDMapping: idMappingsOptions.HostUIDMapping,
HostGIDMapping: idMappingsOptions.HostGIDMapping,
UIDMap: copyIDMap(uidMap),
GIDMap: copyIDMap(gidMap),
}
layerOptions.IDMappingOptions = types.IDMappingOptions{
HostUIDMapping: idMappingsOptions.HostUIDMapping,
HostGIDMapping: idMappingsOptions.HostGIDMapping,
UIDMap: copyIDMap(uidMap),
GIDMap: copyIDMap(gidMap),
}
}
if options.Flags == nil {
options.Flags = make(map[string]interface{})
@ -1610,7 +1670,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
options.Flags[mountLabelFlag] = mountLabel
}
clayer, err := rlstore.Create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true)
clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil)
if err != nil {
return nil, err
}
@ -1630,7 +1690,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
GIDMap: copyIDMap(options.GIDMap),
}
var err error
container, err = s.containerStore.Create(id, names, imageID, layer, metadata, options)
container, err = s.containerStore.create(id, names, imageID, layer, &options)
if err != nil || container == nil {
if err2 := rlstore.Delete(layer); err2 != nil {
if err == nil {
@ -2117,7 +2177,8 @@ func (s *store) updateNames(id string, names []string, op updateNameOperation) e
return s.imageStore.updateNames(id, deduped, op)
}
// Check is id refers to a RO Store
// Check if the id refers to a read-only image store -- we want to allow images in
// read-only stores to have their names changed.
for _, is := range s.roImageStores {
store := is
if err := store.startReading(); err != nil {
@ -2125,12 +2186,35 @@ func (s *store) updateNames(id string, names []string, op updateNameOperation) e
}
defer store.stopReading()
if i, err := store.Get(id); err == nil {
if len(deduped) > 1 {
// Do not want to create image name in R/W storage
deduped = deduped[1:]
// "pull up" the image so that we can change its names list
options := ImageOptions{
Metadata: i.Metadata,
CreationDate: i.Created,
Digest: i.Digest,
Digests: copyDigestSlice(i.Digests),
NamesHistory: copyStringSlice(i.NamesHistory),
}
_, err := s.imageStore.Create(id, deduped, i.TopLayer, i.Metadata, i.Created, i.Digest)
return err
for _, key := range i.BigDataNames {
data, err := store.BigData(id, key)
if err != nil {
return err
}
dataDigest, err := store.BigDataDigest(id, key)
if err != nil {
return err
}
options.BigData = append(options.BigData, ImageBigDataOption{
Key: key,
Data: data,
Digest: dataDigest,
})
}
_, err = s.imageStore.create(id, i.Names, i.TopLayer, options)
if err != nil {
return err
}
// now make the changes to the writeable image record's names list
return s.imageStore.updateNames(id, deduped, op)
}
}
@ -2962,6 +3046,16 @@ func (s *store) Image(id string) (*Image, error) {
if done, err := s.readAllImageStores(func(store roImageStore) (bool, error) {
image, err := store.Get(id)
if err == nil {
if store != s.imageStore {
// found it in a read-only store - readAllImageStores() still has the writeable store locked for reading
if _, localErr := s.imageStore.Get(image.ID); localErr == nil {
// if the lookup key was a name, and we found the image in a read-only
// store, but we have an entry with the same ID in the read-write store,
// then the name was removed when we duplicated the image's
// record into writable storage, so we should ignore this entry
return false, nil
}
}
res = image
return true, nil
}
@ -3247,6 +3341,14 @@ func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest {
return ret
}
func copyStringStringMap(m map[string]string) map[string]string {
ret := make(map[string]string, len(m))
for k, v := range m {
ret[k] = v
}
return ret
}
func copyDigestSlice(slice []digest.Digest) []digest.Digest {
if len(slice) == 0 {
return nil
@ -3266,6 +3368,31 @@ func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
return ret
}
func copyLayerBigDataOptionSlice(slice []LayerBigDataOption) []LayerBigDataOption {
ret := make([]LayerBigDataOption, len(slice))
copy(ret, slice)
return ret
}
func copyImageBigDataOptionSlice(slice []ImageBigDataOption) []ImageBigDataOption {
ret := make([]ImageBigDataOption, len(slice))
for i := range slice {
ret[i].Key = slice[i].Key
ret[i].Data = append([]byte{}, slice[i].Data...)
ret[i].Digest = slice[i].Digest
}
return ret
}
func copyContainerBigDataOptionSlice(slice []ContainerBigDataOption) []ContainerBigDataOption {
ret := make([]ContainerBigDataOption, len(slice))
for i := range slice {
ret[i].Key = slice[i].Key
ret[i].Data = append([]byte{}, slice[i].Data...)
}
return ret
}
// AutoUserNsMinSize is the minimum size for automatically created user namespaces
const AutoUserNsMinSize = 1024


@ -404,6 +404,113 @@ check-for-name() {
[ "$status" -eq 0 ]
}
@test "add-names: ro-images" {
case "$STORAGE_DRIVER" in
overlay*|vfs)
;;
*)
skip "not supported by driver $STORAGE_DRIVER"
;;
esac
mkdir ${TESTDIR}/{ro-root,ro-runroot}
# Create a layer.
run storage --debug=false --graph ${TESTDIR}/ro-root --run ${TESTDIR}/ro-runroot create-layer
[ "$status" -eq 0 ]
[ "$output" != "" ]
layer=$output
# Create an image with names that uses that layer.
run storage --debug=false --graph ${TESTDIR}/ro-root --run ${TESTDIR}/ro-runroot create-image -n fooimage -n barimage $layer
[ "$status" -eq 0 ]
[ "$output" != "" ]
image=${output%% *}
storage --debug=false --graph ${TESTDIR}/ro-root --run ${TESTDIR}/ro-runroot shutdown
# Check that we can find the image by ID and by its names.
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i $image
[ "$status" -eq 0 ]
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i fooimage
[ "$status" -eq 0 ]
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i barimage
[ "$status" -eq 0 ]
# Add a pair of names to the image.
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root add-names -n newimage -n otherimage $image
echo "add-names:" "$output"
[ "$status" -eq 0 ]
# Check that all of the names are resolvable.
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root images
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i $image
[ "$status" -eq 0 ]
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i fooimage
[ "$status" -eq 0 ]
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i barimage
[ "$status" -eq 0 ]
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i newimage
[ "$status" -eq 0 ]
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i otherimage
[ "$status" -eq 0 ]
}
@test "remove-names: ro-images" {
case "$STORAGE_DRIVER" in
overlay*|vfs)
;;
*)
skip "not supported by driver $STORAGE_DRIVER"
;;
esac
mkdir ${TESTDIR}/{ro-root,ro-runroot}
# Create a layer.
run storage --debug=false --graph ${TESTDIR}/ro-root --run ${TESTDIR}/ro-runroot create-layer
[ "$status" -eq 0 ]
[ "$output" != "" ]
layer=$output
# Create an image with names that uses that layer.
run storage --debug=false --graph ${TESTDIR}/ro-root --run ${TESTDIR}/ro-runroot create-image -n fooimage -n barimage -n newimage -n otherimage $layer
[ "$status" -eq 0 ]
[ "$output" != "" ]
image=${output%% *}
storage --debug=false --graph ${TESTDIR}/ro-root --run ${TESTDIR}/ro-runroot shutdown
# Check that we can find the image by ID and by its names.
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i $image
[ "$status" -eq 0 ]
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i fooimage
[ "$status" -eq 0 ]
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i barimage
[ "$status" -eq 0 ]
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i newimage
[ "$status" -eq 0 ]
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i otherimage
[ "$status" -eq 0 ]
# Remove one of the names from the image.
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root remove-names -n newimage $image
echo "remove-names:" "$output"
[ "$status" -eq 0 ]
# Check that all of the names are still resolvable, except for the one we removed.
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i $image
[ "$status" -eq 0 ]
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i fooimage
[ "$status" -eq 0 ]
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i barimage
[ "$status" -eq 0 ]
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i newimage
[ "$status" -ne 0 ]
run storage --storage-opt ${STORAGE_DRIVER}.imagestore=${TESTDIR}/ro-root exists -i otherimage
[ "$status" -eq 0 ]
}
@test "names at creation: containers" {
# Create a layer.
run storage --debug=false create-layer


@ -175,7 +175,7 @@ outer:
// We need to create a temporary layer so we can mount it and lookup the
// maximum IDs used.
clayer, err := rlstore.Create("", topLayer, nil, "", nil, layerOptions, false)
clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil)
if err != nil {
return 0, err
}