Avoid unnecessary manually-coded loops

Use the "slices", "maps" standard library packages, or other readily-available features. Should not change behavior.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>

This commit is contained in:
parent ee94e88cdd
commit 35b3e0f41b
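
For reference, a minimal standalone sketch of the kind of rewrite this commit applies, assuming Go 1.21+ (where "slices" and "maps" joined the standard library); the identifiers below are illustrative and not taken from the repository:

	package main

	import (
		"fmt"
		"maps"
		"slices"
	)

	func main() {
		names := []string{"layer1", "layer2"}

		// Hand-written membership loop of the kind removed throughout the commit.
		found := false
		for _, n := range names {
			if n == "layer2" {
				found = true
				break
			}
		}

		// Equivalent standard-library call.
		fmt.Println(found == slices.Contains(names, "layer2")) // true

		// Element-by-element map copies become maps.Clone.
		flags := map[string]any{"volatile": true}
		fmt.Println(maps.Clone(flags)) // map[volatile:true]
	}

Because slices.Contains, slices.Clone, and maps.Clone are generic, the same calls cover the string slices, digest maps, and interface{} maps that previously each had a dedicated copy helper.
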

check.go (9 changed lines)

@@ -770,12 +770,9 @@ func (s *store) Repair(report CheckReport, options *RepairOptions) []error {
 		return d
 	}
 	isUnaccounted := func(errs []error) bool {
-		for _, err := range errs {
-			if errors.Is(err, ErrLayerUnaccounted) {
-				return true
-			}
-		}
-		return false
+		return slices.ContainsFunc(errs, func(err error) bool {
+			return errors.Is(err, ErrLayerUnaccounted)
+		})
 	}
 	sort.Slice(layersToDelete, func(i, j int) bool {
 		// we've not heard of either of them, so remove them in the order the driver suggested
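
The hunk above uses slices.ContainsFunc, which takes a predicate rather than a value. A small standalone sketch (the error value here is a made-up stand-in, not ErrLayerUnaccounted itself):

	package main

	import (
		"errors"
		"fmt"
		"slices"
	)

	// errUnaccounted is an illustrative sentinel error.
	var errUnaccounted = errors.New("layer unaccounted")

	func main() {
		errs := []error{
			errors.New("something else"),
			fmt.Errorf("wrapped: %w", errUnaccounted),
		}

		// True if any element satisfies the predicate, mirroring the removed loop.
		hit := slices.ContainsFunc(errs, func(err error) bool {
			return errors.Is(err, errUnaccounted)
		})
		fmt.Println(hit) // true
	}
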
@@ -3,6 +3,7 @@ package storage
 import (
 	"errors"
 	"fmt"
+	"maps"
 	"os"
 	"path/filepath"
 	"slices"
@@ -163,17 +164,17 @@ type containerStore struct {
 func copyContainer(c *Container) *Container {
 	return &Container{
 		ID:             c.ID,
-		Names:          copyStringSlice(c.Names),
+		Names:          slices.Clone(c.Names),
 		ImageID:        c.ImageID,
 		LayerID:        c.LayerID,
 		Metadata:       c.Metadata,
-		BigDataNames:   copyStringSlice(c.BigDataNames),
-		BigDataSizes:   copyStringInt64Map(c.BigDataSizes),
-		BigDataDigests: copyStringDigestMap(c.BigDataDigests),
+		BigDataNames:   slices.Clone(c.BigDataNames),
+		BigDataSizes:   maps.Clone(c.BigDataSizes),
+		BigDataDigests: maps.Clone(c.BigDataDigests),
		Created:        c.Created,
 		UIDMap:         copyIDMap(c.UIDMap),
 		GIDMap:         copyIDMap(c.GIDMap),
-		Flags:          copyStringInterfaceMap(c.Flags),
+		Flags:          maps.Clone(c.Flags),
 		volatileStore:  c.volatileStore,
 	}
 }
@@ -937,14 +938,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
 	if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
 		save = true
 	}
-	addName := true
-	for _, name := range c.BigDataNames {
-		if name == key {
-			addName = false
-			break
-		}
-	}
-	if addName {
+	if !slices.Contains(c.BigDataNames, key) {
 		c.BigDataNames = append(c.BigDataNames, key)
 		save = true
 	}
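
The copy-constructor hunk above relies on the clone semantics of the new helpers: slices.Clone and maps.Clone make shallow copies and return nil when given nil. A sketch with illustrative names (not the actual store structs):

	package main

	import (
		"fmt"
		"maps"
		"slices"
	)

	// record is an illustrative stand-in for the copied store structs.
	type record struct {
		Names []string
		Flags map[string]interface{}
	}

	func main() {
		r := record{
			Names: []string{"a", "b"},
			Flags: map[string]interface{}{"ro": true},
		}

		// Shallow copies: fresh slice/map headers, same element values.
		cp := record{
			Names: slices.Clone(r.Names),
			Flags: maps.Clone(r.Flags),
		}
		cp.Names[0] = "changed"
		fmt.Println(r.Names[0], cp.Names[0]) // a changed

		// Both helpers map nil input to nil output.
		var nilNames []string
		var nilFlags map[string]interface{}
		fmt.Println(slices.Clone(nilNames) == nil, maps.Clone(nilFlags) == nil) // true true
	}
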
@@ -205,17 +205,13 @@ func DriverTestCreateFromTemplate(t testing.TB, drivername string, driverOptions
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err = checkChanges(noChanges, changes); err != nil {
-		t.Fatal(err)
-	}
+	require.ElementsMatch(t, noChanges, changes)
 
 	changes, err = driver.Changes("ROFromTemplate", nil, "Snap3", nil, "")
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err = checkChanges(noChanges, changes); err != nil {
-		t.Fatal(err)
-	}
+	require.ElementsMatch(t, noChanges, changes)
 
 	if err := checkFile(driver, "FromTemplate", "testfile.txt", content); err != nil {
 		t.Fatal(err)
@@ -236,25 +232,19 @@ func DriverTestCreateFromTemplate(t testing.TB, drivername string, driverOptions
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err = checkChanges(expectedChanges, changes); err != nil {
-		t.Fatal(err)
-	}
+	require.ElementsMatch(t, expectedChanges, changes)
 
 	changes, err = driver.Changes("FromTemplate", nil, "Base3", nil, "")
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err = checkChanges(expectedChanges, changes); err != nil {
-		t.Fatal(err)
-	}
+	require.ElementsMatch(t, expectedChanges, changes)
 
 	changes, err = driver.Changes("ROFromTemplate", nil, "Base3", nil, "")
 	if err != nil {
 		t.Fatal(err)
 	}
-	if err = checkChanges(expectedChanges, changes); err != nil {
-		t.Fatal(err)
-	}
+	require.ElementsMatch(t, expectedChanges, changes)
 
 	verifyBase(t, driver, "Base3", defaultPerms)
 }
@@ -417,10 +407,7 @@ func DriverTestChanges(t testing.TB, drivername string, driverOptions ...string)
 	if err != nil {
 		t.Fatal(err)
 	}
-
-	if err = checkChanges(expectedChanges, changes); err != nil {
-		t.Fatal(err)
-	}
+	require.ElementsMatch(t, expectedChanges, changes)
 }
 
 func writeRandomFile(path string, size uint64) error {
@@ -513,10 +500,7 @@ func DriverTestEcho(t testing.TB, drivername string, driverOptions ...string) {
 	if err != nil {
 		t.Fatal(err)
 	}
-
-	if err = checkChanges(expectedChanges, changes); err != nil {
-		t.Fatal(err)
-	}
+	require.ElementsMatch(t, expectedChanges, changes)
 
 	if err := driver.Create(second, base, nil); err != nil {
 		t.Fatal(err)
@@ -540,10 +524,7 @@ func DriverTestEcho(t testing.TB, drivername string, driverOptions ...string) {
 	if err != nil {
 		t.Fatal(err)
 	}
-
-	if err = checkChanges(expectedChanges, changes); err != nil {
-		t.Fatal(err)
-	}
+	require.ElementsMatch(t, expectedChanges, changes)
 
 	if err = driver.Create(third, second, nil); err != nil {
 		t.Fatal(err)
@@ -573,10 +554,7 @@ func DriverTestEcho(t testing.TB, drivername string, driverOptions ...string) {
 	if err != nil {
 		t.Fatal(err)
 	}
-
-	if err = checkChanges(expectedChanges, changes); err != nil {
-		t.Fatal(err)
-	}
+	require.ElementsMatch(t, expectedChanges, changes)
 
 	err = driver.Put(third)
 	if err != nil {
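
The test hunks above switch to testify's require.ElementsMatch, which passes when two lists contain the same elements regardless of order (duplicate counts must match) and reports the missing or extra elements on failure. A minimal sketch; the package and test names here are illustrative, not part of the commit:

	package graphtest

	import (
		"testing"

		"github.com/stretchr/testify/require"
	)

	// TestElementsMatchSketch demonstrates the order-insensitive comparison
	// that replaces the sort-then-compare checkChanges helper.
	func TestElementsMatchSketch(t *testing.T) {
		expected := []string{"/dir", "/dir/file", "/other"}
		actual := []string{"/other", "/dir", "/dir/file"}

		require.ElementsMatch(t, expected, actual)
	}

With the assertion doing the comparison and the diagnostics, the changeList sorting helper removed in the following hunks is no longer needed.
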
@@ -12,7 +12,6 @@ import (
 	"math/rand"
 	"os"
 	"path"
-	"sort"
 	"testing"
 
 	graphdriver "github.com/containers/storage/drivers"
@@ -270,33 +269,6 @@ func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64)
 	return nil
 }
 
-type changeList []archive.Change
-
-func (c changeList) Less(i, j int) bool {
-	if c[i].Path == c[j].Path {
-		return c[i].Kind < c[j].Kind
-	}
-	return c[i].Path < c[j].Path
-}
-func (c changeList) Len() int      { return len(c) }
-func (c changeList) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
-
-func checkChanges(expected, actual []archive.Change) error {
-	if len(expected) != len(actual) {
-		return fmt.Errorf("unexpected number of changes, expected %d, got %d", len(expected), len(actual))
-	}
-	sort.Sort(changeList(expected))
-	sort.Sort(changeList(actual))
-
-	for i := range expected {
-		if expected[i] != actual[i] {
-			return fmt.Errorf("unexpected change, expecting %v, got %v", expected[i], actual[i])
-		}
-	}
-
-	return nil
-}
-
 func addLayerFiles(drv graphdriver.Driver, layer, parent string, i int) error {
 	root, err := drv.Get(layer, graphdriver.MountOpts{})
 	if err != nil {
@@ -159,21 +159,7 @@ func init() {
 }
 
 func hasMetacopyOption(opts []string) bool {
-	for _, s := range opts {
-		if s == "metacopy=on" {
-			return true
-		}
-	}
-	return false
-}
-
-func hasVolatileOption(opts []string) bool {
-	for _, s := range opts {
-		if s == "volatile" {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(opts, "metacopy=on")
 }
 
 func getMountProgramFlagFile(path string) string {
@@ -1523,11 +1509,8 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		})
 	}
 
-	for _, o := range optsList {
-		if o == "ro" {
-			readWrite = false
-			break
-		}
+	if slices.Contains(optsList, "ro") {
+		readWrite = false
 	}
 
 	lowers, err := os.ReadFile(path.Join(dir, lowerFile))
@@ -1726,7 +1709,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		optsList = append(optsList, "userxattr")
 	}
 
-	if options.Volatile && !hasVolatileOption(optsList) {
+	if options.Volatile && !slices.Contains(optsList, "volatile") {
 		supported, err := d.getSupportsVolatile()
 		if err != nil {
 			return "", err

images.go (49 changed lines)

@@ -2,6 +2,7 @@ package storage
 
 import (
 	"fmt"
+	"maps"
 	"os"
 	"path/filepath"
 	"slices"
@@ -182,18 +183,18 @@ func copyImage(i *Image) *Image {
 	return &Image{
 		ID:              i.ID,
 		Digest:          i.Digest,
-		Digests:         copyDigestSlice(i.Digests),
-		Names:           copyStringSlice(i.Names),
-		NamesHistory:    copyStringSlice(i.NamesHistory),
+		Digests:         slices.Clone(i.Digests),
+		Names:           slices.Clone(i.Names),
+		NamesHistory:    slices.Clone(i.NamesHistory),
 		TopLayer:        i.TopLayer,
-		MappedTopLayers: copyStringSlice(i.MappedTopLayers),
+		MappedTopLayers: slices.Clone(i.MappedTopLayers),
 		Metadata:        i.Metadata,
-		BigDataNames:    copyStringSlice(i.BigDataNames),
-		BigDataSizes:    copyStringInt64Map(i.BigDataSizes),
-		BigDataDigests:  copyStringDigestMap(i.BigDataDigests),
+		BigDataNames:    slices.Clone(i.BigDataNames),
+		BigDataSizes:    maps.Clone(i.BigDataSizes),
+		BigDataDigests:  maps.Clone(i.BigDataDigests),
 		Created:         i.Created,
 		ReadOnly:        i.ReadOnly,
-		Flags:           copyStringInterfaceMap(i.Flags),
+		Flags:           maps.Clone(i.Flags),
 	}
 }
@@ -872,7 +873,9 @@ func (r *imageStore) Delete(id string) error {
 		delete(r.byname, name)
 	}
 	for _, digest := range image.Digests {
-		prunedList := imageSliceWithoutValue(r.bydigest[digest], image)
+		prunedList := slices.DeleteFunc(r.bydigest[digest], func(i *Image) bool {
+			return i == image
+		})
 		if len(prunedList) == 0 {
 			delete(r.bydigest, digest)
 		} else {
@@ -967,17 +970,6 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) {
 	return copyStringSlice(image.BigDataNames), nil
 }
 
-func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
-	modified := make([]*Image, 0, len(slice))
-	for _, v := range slice {
-		if v == value {
-			continue
-		}
-		modified = append(modified, v)
-	}
-	return modified
-}
-
 // Requires startWriting.
 func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
 	if !r.lockfile.IsReadWrite() {
@@ -1027,21 +1019,16 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest
 	if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
 		save = true
 	}
-	addName := true
-	for _, name := range image.BigDataNames {
-		if name == key {
-			addName = false
-			break
-		}
-	}
-	if addName {
+	if !slices.Contains(image.BigDataNames, key) {
 		image.BigDataNames = append(image.BigDataNames, key)
 		save = true
 	}
 	for _, oldDigest := range image.Digests {
 		// remove the image from the list of images in the digest-based index
 		if list, ok := r.bydigest[oldDigest]; ok {
-			prunedList := imageSliceWithoutValue(list, image)
+			prunedList := slices.DeleteFunc(list, func(i *Image) bool {
+				return i == image
+			})
 			if len(prunedList) == 0 {
 				delete(r.bydigest, oldDigest)
 			} else {
@@ -1056,9 +1043,7 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest
 	// add the image to the list of images in the digest-based index which
 	// corresponds to the new digest for this item, unless it's already there
 	list := r.bydigest[newDigest]
-	if len(list) == len(imageSliceWithoutValue(list, image)) {
-		// the list isn't shortened by trying to prune this image from it,
-		// so it's not in there yet
+	if !slices.Contains(list, image) {
 		r.bydigest[newDigest] = append(list, image)
 	}
 }
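
The images.go hunks replace the hand-written imageSliceWithoutValue helper with slices.DeleteFunc. A standalone sketch of its semantics (types and values are illustrative):

	package main

	import (
		"fmt"
		"slices"
	)

	type image struct{ id string }

	func main() {
		a, b, c := &image{"a"}, &image{"b"}, &image{"c"}
		list := []*image{a, b, c}

		// DeleteFunc drops every element for which the predicate returns true.
		// It filters in place and returns the shortened slice, so the returned
		// value is what callers should keep using.
		pruned := slices.DeleteFunc(list, func(i *image) bool { return i == b })

		fmt.Println(len(pruned))                // 2
		fmt.Println(pruned[0].id, pruned[1].id) // a c
	}
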

layers.go (45 changed lines)

@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"os"
 	"path"
 	"path/filepath"
@@ -436,7 +437,7 @@ func layerLocation(l *Layer) layerLocations {
 func copyLayer(l *Layer) *Layer {
 	return &Layer{
 		ID:         l.ID,
-		Names:      copyStringSlice(l.Names),
+		Names:      slices.Clone(l.Names),
 		Parent:     l.Parent,
 		Metadata:   l.Metadata,
 		MountLabel: l.MountLabel,
@@ -451,8 +452,8 @@ func copyLayer(l *Layer) *Layer {
 		CompressionType: l.CompressionType,
 		ReadOnly:        l.ReadOnly,
 		volatileStore:   l.volatileStore,
-		BigDataNames:    copyStringSlice(l.BigDataNames),
-		Flags:           copyStringInterfaceMap(l.Flags),
+		BigDataNames:    slices.Clone(l.BigDataNames),
+		Flags:           maps.Clone(l.Flags),
 		UIDMap:          copyIDMap(l.UIDMap),
 		GIDMap:          copyIDMap(l.GIDMap),
 		UIDs:            copyUint32Slice(l.UIDs),
@@ -1565,19 +1566,9 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
 	// - r.layers[].MountPoint (directly and via loadMounts / saveMounts)
 	// - r.bymount (via loadMounts / saveMounts)
 
-	// check whether options include ro option
-	hasReadOnlyOpt := func(opts []string) bool {
-		for _, item := range opts {
-			if item == "ro" {
-				return true
-			}
-		}
-		return false
-	}
-
 	// You are not allowed to mount layers from readonly stores if they
 	// are not mounted read/only.
-	if !r.lockfile.IsReadWrite() && !hasReadOnlyOpt(options.Options) {
+	if !r.lockfile.IsReadWrite() && !slices.Contains(options.Options, "ro") {
 		return "", fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
 	}
 	r.mountsLockfile.Lock()
@@ -1837,14 +1828,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error
 		return fmt.Errorf("closing bigdata file for the layer: %w", err)
 	}
 
-	addName := true
-	for _, name := range layer.BigDataNames {
-		if name == key {
-			addName = false
-			break
-		}
-	}
-	if addName {
+	if !slices.Contains(layer.BigDataNames, key) {
 		layer.BigDataNames = append(layer.BigDataNames, key)
 		return r.saveFor(layer)
 	}
@@ -1942,18 +1926,11 @@ func (r *layerStore) deleteInternal(id string) error {
 	r.layers = slices.DeleteFunc(r.layers, func(candidate *Layer) bool {
 		return candidate.ID == id
 	})
-	if mountLabel != "" {
-		var found bool
-		for _, candidate := range r.layers {
-			if candidate.MountLabel == mountLabel {
-				found = true
-				break
-			}
-		}
-		if !found {
+	if mountLabel != "" && !slices.ContainsFunc(r.layers, func(candidate *Layer) bool {
+		return candidate.MountLabel == mountLabel
+	}) {
 		selinux.ReleaseLabel(mountLabel)
 	}
-	}
 	return nil
 }
@@ -2528,9 +2505,7 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
 		if layer.Flags == nil {
 			layer.Flags = make(map[string]interface{})
 		}
-		for k, v := range options.Flags {
-			layer.Flags[k] = v
-		}
+		maps.Copy(layer.Flags, options.Flags)
 	}
 	if err = r.saveFor(layer); err != nil {
 		return err
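
Several hunks above and below collapse per-key map copy loops into maps.Copy. A minimal sketch:

	package main

	import (
		"fmt"
		"maps"
	)

	func main() {
		dst := map[string]interface{}{"existing": 1}
		src := map[string]interface{}{"volatile": true, "existing": 2}

		// Copies every key/value pair from src into dst, overwriting keys that
		// already exist, exactly like the removed
		// `for k, v := range src { dst[k] = v }` loops.
		maps.Copy(dst, src)

		fmt.Println(dst["existing"], dst["volatile"]) // 2 true
	}
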
@@ -5,6 +5,7 @@ import (
 	"bytes"
 	"fmt"
 	"io"
+	"maps"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -319,9 +320,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
 	// otherwise any previous delete/change is considered recursive
 	oldChildren := make(map[string]*FileInfo)
 	if oldInfo != nil && info.isDir() {
-		for k, v := range oldInfo.children {
-			oldChildren[k] = v
-		}
+		maps.Copy(oldChildren, oldInfo.children)
 	}
 
 	for name, newChild := range info.children {
@@ -6,6 +6,7 @@ import (
 	"io"
 	"testing"
 
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
@@ -36,19 +37,7 @@ func TestGenerateEmptyFile(t *testing.T) {
 		actualFiles = append(actualFiles, []string{hdr.Name, content})
 		i++
 	}
-	if len(actualFiles) != len(expectedFiles) {
-		t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles))
-	}
-	for i := 0; i < len(expectedFiles); i++ {
-		actual := actualFiles[i]
-		expected := expectedFiles[i]
-		if actual[0] != expected[0] {
-			t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
-		}
-		if actual[1] != expected[1] {
-			t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
-		}
-	}
+	assert.Equal(t, expectedFiles, actualFiles)
 }
 
 func TestGenerateWithContent(t *testing.T) {
@@ -78,17 +67,5 @@ func TestGenerateWithContent(t *testing.T) {
 		actualFiles = append(actualFiles, []string{hdr.Name, content})
 		i++
 	}
-	if len(actualFiles) != len(expectedFiles) {
-		t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles))
-	}
-	for i := 0; i < len(expectedFiles); i++ {
-		actual := actualFiles[i]
-		expected := expectedFiles[i]
-		if actual[0] != expected[0] {
-			t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
-		}
-		if actual[1] != expected[1] {
-			t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
-		}
-	}
+	assert.Equal(t, expectedFiles, actualFiles)
 }
@@ -5,6 +5,7 @@ package idtools
 
 import (
 	"fmt"
+	"maps"
 	"os"
 	"path/filepath"
 	"testing"
@@ -192,9 +193,7 @@ func readTree(base, root string) (map[string]node, error) {
 			if err != nil {
 				return nil, err
 			}
-			for path, nodeinfo := range subtree {
-				tree[path] = nodeinfo
-			}
+			maps.Copy(tree, subtree)
 		}
 	}
 	return tree, nil
@@ -86,6 +86,7 @@ import (
 	"io"
 	"os"
 	"runtime"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
@@ -356,14 +357,7 @@ func sortFlags(flags map[string]*Flag) []*Flag {
 			continue
 		}
 
-		found := false
-		for _, name := range list {
-			if name == fName {
-				found = true
-				break
-			}
-		}
-		if !found {
+		if !slices.Contains(list, fName) {
 			list = append(list, fName)
 		}
 	}
@@ -6,7 +6,10 @@ package mount
 import (
 	"os"
 	"path"
+	"slices"
 	"testing"
+
+	"github.com/stretchr/testify/require"
 )
 
 func TestMountOptionsParsing(t *testing.T) {
@@ -151,14 +154,9 @@ func TestGetMounts(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	root := false
-	for _, entry := range mounts {
-		if entry.Mountpoint == "/" {
-			root = true
-		}
-	}
-
-	if !root {
+	if !slices.ContainsFunc(mounts, func(entry *Info) bool {
+		return entry.Mountpoint == "/"
+	}) {
 		t.Fatal("/ should be mounted at least")
 	}
 }
@@ -170,14 +168,7 @@ func TestMergeTmpfsOptions(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	if len(expected) != len(merged) {
-		t.Fatalf("Expected %s got %s", expected, merged)
-	}
-	for index := range merged {
-		if merged[index] != expected[index] {
-			t.Fatalf("Expected %s for the %dth option, got %s", expected, index, merged)
-		}
-	}
+	require.Equal(t, expected, merged)
 
 	options = []string{"noatime", "ro", "size=10k", "atime", "rw", "rprivate", "size=1024k", "slave", "size"}
 	_, err = MergeTmpfsOptions(options)
@@ -2,6 +2,7 @@ package truncindex
 
 import (
 	"math/rand/v2"
+	"slices"
 	"testing"
 	"time"
 
@@ -114,13 +115,9 @@ func assertIndexIterate(t *testing.T) {
 	index := NewTruncIndex(ids)
 
 	index.Iterate(func(targetId string) {
-		for _, id := range ids {
-			if targetId == id {
-				return
-			}
-		}
-
-		t.Fatalf("An unknown ID '%s'", targetId)
+		if !slices.Contains(ids, targetId) {
+			t.Fatalf("An unknown ID '%s'", targetId)
+		}
 	})
 }

store.go (39 changed lines)

@@ -6,6 +6,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -940,9 +941,7 @@ func (s *store) GraphOptions() []string {
 
 func (s *store) PullOptions() map[string]string {
 	cp := make(map[string]string, len(s.pullOptions))
-	for k, v := range s.pullOptions {
-		cp[k] = v
-	}
+	maps.Copy(cp, s.pullOptions)
 	return cp
 }
@@ -1465,7 +1464,7 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare
 	if lOptions != nil {
 		options = *lOptions
 		options.BigData = copyLayerBigDataOptionSlice(lOptions.BigData)
-		options.Flags = copyStringInterfaceMap(lOptions.Flags)
+		options.Flags = maps.Clone(lOptions.Flags)
 	}
 	if options.HostUIDMapping {
 		options.UIDMap = nil
@@ -1606,7 +1605,7 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i
 			CreationDate: i.Created,
 			Digest:       i.Digest,
 			Digests:      copyDigestSlice(i.Digests),
-			NamesHistory: copyStringSlice(i.NamesHistory),
+			NamesHistory: slices.Clone(i.NamesHistory),
 		}
 		for _, key := range i.BigDataNames {
 			data, err := store.BigData(id, key)
@@ -1637,18 +1636,16 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i
 		if iOptions.Digest != "" {
 			options.Digest = iOptions.Digest
 		}
-		options.Digests = append(options.Digests, copyDigestSlice(iOptions.Digests)...)
+		options.Digests = append(options.Digests, iOptions.Digests...)
 		if iOptions.Metadata != "" {
 			options.Metadata = iOptions.Metadata
 		}
 		options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...)
-		options.NamesHistory = append(options.NamesHistory, copyStringSlice(iOptions.NamesHistory)...)
+		options.NamesHistory = append(options.NamesHistory, iOptions.NamesHistory...)
 		if options.Flags == nil {
 			options.Flags = make(map[string]interface{})
 		}
-		for k, v := range iOptions.Flags {
-			options.Flags[k] = v
-		}
+		maps.Copy(options.Flags, iOptions.Flags)
 	}
 
 	if options.CreationDate.IsZero() {
@@ -1783,7 +1780,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
 		options.IDMappingOptions.UIDMap = copyIDMap(cOptions.IDMappingOptions.UIDMap)
 		options.IDMappingOptions.GIDMap = copyIDMap(cOptions.IDMappingOptions.GIDMap)
 		options.LabelOpts = copyStringSlice(cOptions.LabelOpts)
-		options.Flags = copyStringInterfaceMap(cOptions.Flags)
+		options.Flags = maps.Clone(cOptions.Flags)
 		options.MountOpts = copyStringSlice(cOptions.MountOpts)
 		options.StorageOpt = copyStringStringMap(cOptions.StorageOpt)
 		options.BigData = copyContainerBigDataOptionSlice(cOptions.BigData)
@@ -3684,22 +3681,6 @@ func copyStringSlice(slice []string) []string {
 	return ret
 }
 
-func copyStringInt64Map(m map[string]int64) map[string]int64 {
-	ret := make(map[string]int64, len(m))
-	for k, v := range m {
-		ret[k] = v
-	}
-	return ret
-}
-
-func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest {
-	ret := make(map[string]digest.Digest, len(m))
-	for k, v := range m {
-		ret[k] = v
-	}
-	return ret
-}
-
 func copyStringStringMap(m map[string]string) map[string]string {
 	ret := make(map[string]string, len(m))
 	for k, v := range m {
@@ -3801,12 +3782,10 @@ func GetMountOptions(driver string, graphDriverOptions []string) ([]string, erro
 			return nil, err
 		}
 		key = strings.ToLower(key)
-		for _, m := range mountOpts {
-			if m == key {
-				return strings.Split(val, ","), nil
-			}
+		if slices.Contains(mountOpts, key) {
+			return strings.Split(val, ","), nil
 		}
 	}
 	return nil, nil
 }

utils.go (10 changed lines)

@@ -42,15 +42,7 @@ func applyNameOperation(oldNames []string, opParameters []string, op updateNameO
 		// remove given names from old names
 		result = make([]string, 0, len(oldNames))
 		for _, name := range oldNames {
-			// only keep names in final result which do not intersect with input names
-			// basically `result = oldNames - opParameters`
-			nameShouldBeRemoved := false
-			for _, opName := range opParameters {
-				if name == opName {
-					nameShouldBeRemoved = true
-				}
-			}
-			if !nameShouldBeRemoved {
+			if !slices.Contains(opParameters, name) {
 				result = append(result, name)
 			}
 		}