mirror of https://github.com/containers/podman.git
Merge pull request #23259 from giuseppe/vendor-storage-ab74785ce9e8
vendor: bump c/storage
This commit is contained in: commit 42fa78b225
go.mod (2 changed lines)
@@ -21,7 +21,7 @@ require (
 	github.com/containers/libhvee v0.7.1
 	github.com/containers/ocicrypt v1.2.0
 	github.com/containers/psgo v1.9.0
-	github.com/containers/storage v1.54.1-0.20240627145511-52b643e1ff51
+	github.com/containers/storage v1.54.1-0.20240712121534-ab74785ce9e8
 	github.com/containers/winquit v1.1.0
 	github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
 	github.com/coreos/stream-metadata-go v0.4.4
go.sum (4 changed lines)
@@ -97,8 +97,8 @@ github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sir
 github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U=
 github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
 github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
-github.com/containers/storage v1.54.1-0.20240627145511-52b643e1ff51 h1:0ipwtt1iNX4gSje0iQHHtnvqnU45uUyGO1LVGBkpoSE=
-github.com/containers/storage v1.54.1-0.20240627145511-52b643e1ff51/go.mod h1:y1CGloHDYq9uK3Og/zLkrJ8vpSuFwNaIWOyB8IX076w=
+github.com/containers/storage v1.54.1-0.20240712121534-ab74785ce9e8 h1:Y1kUvxQhjtHIvjVivFqnis9QOWCMPXeehMVb50si/DE=
+github.com/containers/storage v1.54.1-0.20240712121534-ab74785ce9e8/go.mod h1:EyuSB0B1ddqXN0pXGNKPrtxzma80jhRCeVl7/J/JAhE=
 github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
 github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
 github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=
@@ -1,11 +1,7 @@
 ---
 run:
   concurrency: 6
-  deadline: 5m
-  skip-dirs-use-default: true
+  timeout: 5m
 linters:
   enable:
     - gofumpt
-  disable:
-    - errcheck
-    - staticcheck
@@ -304,7 +304,14 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) {
 					archiveErr = err
 				}
 				// consume any trailer after the EOF marker
-				io.Copy(io.Discard, diffReader)
+				if _, err := io.Copy(io.Discard, diffReader); err != nil {
+					err = fmt.Errorf("layer %s: consume any trailer after the EOF marker: %w", layerID, err)
+					if isReadWrite {
+						report.Layers[layerID] = append(report.Layers[layerID], err)
+					} else {
+						report.ROLayers[layerID] = append(report.ROLayers[layerID], err)
+					}
+				}
 				wg.Done()
 			}(id, reader)
 			wg.Wait()
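The hunk above replaces a fire-and-forget io.Copy with a checked drain of the diff stream. A minimal sketch of that pattern in isolation; drainTrailer is an illustrative name, not a c/storage function:

package main

import (
	"fmt"
	"io"
	"strings"
)

// drainTrailer consumes whatever is left on r (for example padding after a tar
// EOF marker) and reports a read failure instead of silently dropping it.
func drainTrailer(r io.Reader) error {
	if _, err := io.Copy(io.Discard, r); err != nil {
		return fmt.Errorf("consuming trailer: %w", err)
	}
	return nil
}

func main() {
	if err := drainTrailer(strings.NewReader("trailing bytes")); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("trailer consumed cleanly")
}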
@@ -366,7 +373,7 @@ func (s *store) Check(options *CheckOptions) (CheckReport, error) {
 		if options.LayerMountable {
 			func() {
 				// Mount the layer.
-				mountPoint, err := s.graphDriver.Get(id, drivers.MountOpts{MountLabel: layer.MountLabel})
+				mountPoint, err := s.graphDriver.Get(id, drivers.MountOpts{MountLabel: layer.MountLabel, Options: []string{"ro"}})
 				if err != nil {
 					err := fmt.Errorf("%slayer %s: %w", readWriteDesc, id, err)
 					if isReadWrite {
@@ -955,6 +962,9 @@ func (c *checkDirectory) add(path string, typeflag byte, uid, gid int, size int6
 				mtime: mtime,
 			}
 		}
+	case tar.TypeXGlobalHeader:
+		// ignore, since even though it looks like a valid pathname, it doesn't end
+		// up on the filesystem
 	default:
 		// treat these as TypeReg items
 		delete(c.directory, components[0])
@@ -966,9 +976,6 @@ func (c *checkDirectory) add(path string, typeflag byte, uid, gid int, size int6
 			mode:  mode,
 			mtime: mtime,
 		}
-	case tar.TypeXGlobalHeader:
-		// ignore, since even though it looks like a valid pathname, it doesn't end
-		// up on the filesystem
 	}
 	return
 }
@@ -30,7 +30,6 @@ import (
 	"io"
 	"io/fs"
 	"os"
-	"os/exec"
 	"path"
 	"path/filepath"
 	"strings"
@@ -183,13 +182,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 }
 
 // Return a nil error if the kernel supports aufs
-// We cannot modprobe because inside dind modprobe fails
-// to run
 func supportsAufs() error {
-	// We can try to modprobe aufs first before looking at
-	// proc/filesystems for when aufs is supported
-	exec.Command("modprobe", "aufs").Run()
-
 	if unshare.IsRootless() {
 		return ErrAufsNested
 	}
@@ -347,7 +340,9 @@ func (a *Driver) createDirsFor(id, parent string) error {
 // Remove will unmount and remove the given id.
 func (a *Driver) Remove(id string) error {
 	a.locker.Lock(id)
-	defer a.locker.Unlock(id)
+	defer func() {
+		_ = a.locker.Unlock(id)
+	}()
 	a.pathCacheLock.Lock()
 	mountpoint, exists := a.pathCache[id]
 	a.pathCacheLock.Unlock()
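The locker hunks in this file switch from a plain deferred Unlock to a closure that assigns the result to the blank identifier, which makes the deliberately ignored error visible to errcheck without changing behaviour. A tiny illustration with a made-up unlock helper:

package main

import "fmt"

// unlock stands in for a cleanup call whose error we choose to ignore.
func unlock(id string) error {
	fmt.Println("unlocked", id)
	return nil
}

func doWork(id string) {
	defer func() {
		_ = unlock(id) // error deliberately discarded; nothing useful can be done here
	}()
	fmt.Println("working on", id)
}

func main() {
	doWork("layer-1")
}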
@@ -438,7 +433,10 @@ func atomicRemove(source string) error {
 // This will mount the dir at its given path
 func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
 	a.locker.Lock(id)
-	defer a.locker.Unlock(id)
+	defer func() {
+		_ = a.locker.Unlock(id)
+	}()
+
 	parents, err := a.getParentLayerPaths(id)
 	if err != nil && !os.IsNotExist(err) {
 		return "", err
@@ -475,7 +473,10 @@ func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
 // Put unmounts and updates list of active mounts.
 func (a *Driver) Put(id string) error {
 	a.locker.Lock(id)
-	defer a.locker.Unlock(id)
+	defer func() {
+		_ = a.locker.Unlock(id)
+	}()
+
 	a.pathCacheLock.Lock()
 	m, exists := a.pathCache[id]
 	if !exists {
@@ -498,7 +499,9 @@ func (a *Driver) Put(id string) error {
 // For AUFS, it queries the mountpoint for this ID.
 func (a *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
 	a.locker.Lock(id)
-	defer a.locker.Unlock(id)
+	defer func() {
+		_ = a.locker.Unlock(id)
+	}()
 	a.pathCacheLock.Lock()
 	m, exists := a.pathCache[id]
 	if !exists {
@@ -681,7 +684,9 @@ func (a *Driver) Cleanup() error {
 func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) {
 	defer func() {
 		if err != nil {
-			Unmount(target)
+			if err1 := Unmount(target); err1 != nil {
+				logrus.Warnf("Unmount %q: %v", target, err1)
+			}
 		}
 	}()
 
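A shape that recurs throughout this commit: a deferred cleanup call whose error used to be silently dropped is now logged. A standalone sketch of the idiom, assuming logrus for logging; unmount below is a stand-in, not the driver's real helper:

package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

// unmount stands in for a cleanup call that can fail.
func unmount(target string) error {
	return errors.New("device or resource busy")
}

func mountAndUse(target string) (err error) {
	// ... mount and use target ...
	defer func() {
		if err != nil { // only roll back when the caller is getting an error anyway
			if err1 := unmount(target); err1 != nil {
				logrus.Warnf("Unmount %q: %v", target, err1)
			}
		}
	}()
	return errors.New("simulated failure")
}

func main() {
	_ = mountAndUse("/var/lib/containers/storage/mnt/example")
}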
@@ -193,6 +193,7 @@ type DriverWithDifferOutput struct {
 	UIDs               []uint32
 	GIDs               []uint32
 	UncompressedDigest digest.Digest
+	CompressedDigest   digest.Digest
 	Metadata           string
 	BigData            map[string][]byte
 	TarSplit           []byte
@@ -263,7 +263,11 @@ func supportsIdmappedLowerLayers(home string) (bool, error) {
 	if err := idmap.CreateIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil {
 		return false, fmt.Errorf("create mapped mount: %w", err)
 	}
-	defer unix.Unmount(lowerMappedDir, unix.MNT_DETACH)
+	defer func() {
+		if err := unix.Unmount(lowerMappedDir, unix.MNT_DETACH); err != nil {
+			logrus.Warnf("Unmount %q: %v", lowerMappedDir, err)
+		}
+	}()
 
 	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerMappedDir, upperDir, workDir)
 	flags := uintptr(0)
@@ -688,12 +688,8 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
 }
 
 func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) {
-	// We can try to modprobe overlay first
-
 	selinuxLabelTest := selinux.PrivContainerMountLabel()
 
-	exec.Command("modprobe", "overlay").Run()
-
 	logLevel := logrus.ErrorLevel
 	if unshare.IsRootless() {
 		logLevel = logrus.DebugLevel
@@ -821,7 +817,9 @@ func (d *Driver) useNaiveDiff() bool {
 			logrus.Info(nativeDiffCacheText)
 			useNaiveDiffOnly = true
 		}
-		cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText)
+		if err := cachedFeatureRecord(d.runhome, feature, !useNaiveDiffOnly, nativeDiffCacheText); err != nil {
+			logrus.Warnf("Recording overlay native-diff support status: %v", err)
+		}
 	})
 	return useNaiveDiffOnly
 }
@@ -1553,7 +1551,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 	composefsMounts := []string{}
 	defer func() {
 		for _, m := range composefsMounts {
-			defer unix.Unmount(m, unix.MNT_DETACH)
+			defer func(m string) {
+				if err := unix.Unmount(m, unix.MNT_DETACH); err != nil {
+					logrus.Warnf("Unmount %q: %v", m, err)
+				}
+			}(m)
 		}
 	}()
 
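Because the unmount now happens inside a closure, the loop variable m is passed to the deferred function as an argument, so every deferred call keeps its own mount path. A short sketch of that capture pattern:

package main

import "fmt"

func cleanupAll(mounts []string) {
	for _, m := range mounts {
		// Passing m as a parameter pins the value for this iteration;
		// the deferred closures then run in LIFO order at return.
		defer func(m string) {
			fmt.Println("unmounting", m)
		}(m)
	}
}

func main() {
	cleanupAll([]string{"/mnt/a", "/mnt/b", "/mnt/c"})
}

On Go 1.22 and later the loop variable is already scoped per iteration, but passing it explicitly still documents the intent and behaves the same on older toolchains.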
@@ -1657,7 +1659,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 			skipIDMappingLayers[composefsMount] = composefsMount
 			// overlay takes a reference on the mount, so it is safe to unmount
 			// the mapped idmounts as soon as the final overlay file system is mounted.
-			defer unix.Unmount(composefsMount, unix.MNT_DETACH)
+			defer func() {
+				if err := unix.Unmount(composefsMount, unix.MNT_DETACH); err != nil {
+					logrus.Warnf("Unmount %q: %v", composefsMount, err)
+				}
+			}()
 		}
 		absLowers = append(absLowers, composefsMount)
 		continue
@@ -1764,7 +1770,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 
 		// overlay takes a reference on the mount, so it is safe to unmount
 		// the mapped idmounts as soon as the final overlay file system is mounted.
-		defer unix.Unmount(root, unix.MNT_DETACH)
+		defer func() {
+			if err := unix.Unmount(root, unix.MNT_DETACH); err != nil {
+				logrus.Warnf("Unmount %q: %v", root, err)
+			}
+		}()
 	}
 
 	// relative path to the layer through the id mapped mount
@@ -2085,7 +2095,9 @@ func (d *Driver) DiffGetter(id string) (_ graphdriver.FileGetCloser, Err error)
 		if Err != nil {
 			for _, f := range composefsMounts {
 				f.Close()
-				unix.Rmdir(f.Name())
+				if err := unix.Rmdir(f.Name()); err != nil && !os.IsNotExist(err) {
+					logrus.Warnf("Failed to remove %s: %v", f.Name(), err)
+				}
 			}
 		}
 	}()
@@ -193,7 +193,9 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
 	}
 	labelOpts := []string{"level:s0"}
 	if _, mountLabel, err := label.InitLabels(labelOpts); err == nil {
-		label.SetFileLabel(dir, mountLabel)
+		if err := label.SetFileLabel(dir, mountLabel); err != nil {
+			logrus.Debugf("Set %s label to %q file ended with error: %v", mountLabel, dir, err)
+		}
 	}
 	if parent != "" {
 		parentDir, err := d.Get(parent, graphdriver.MountOpts{})
@@ -240,7 +240,9 @@ func (d *Driver) cloneFilesystem(name, parentName string) error {
 	}
 
 	if err != nil {
-		snapshot.Destroy(zfs.DestroyDeferDeletion)
+		if err1 := snapshot.Destroy(zfs.DestroyDeferDeletion); err1 != nil {
+			logrus.Warnf("Destroy zfs.DestroyDeferDeletion: %v", err1)
+		}
 		return err
 	}
 	return snapshot.Destroy(zfs.DestroyDeferDeletion)
@@ -2529,7 +2529,9 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
 	layer.GIDs = diffOutput.GIDs
 	updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, diffOutput.UncompressedDigest, layer.ID)
 	layer.UncompressedDigest = diffOutput.UncompressedDigest
-	updateDigestMap(&r.bytocsum, diffOutput.TOCDigest, diffOutput.TOCDigest, layer.ID)
+	updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, diffOutput.CompressedDigest, layer.ID)
+	layer.CompressedDigest = diffOutput.CompressedDigest
+	updateDigestMap(&r.bytocsum, layer.TOCDigest, diffOutput.TOCDigest, layer.ID)
 	layer.TOCDigest = diffOutput.TOCDigest
 	layer.UncompressedSize = diffOutput.Size
 	layer.Metadata = diffOutput.Metadata
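updateDigestMap(&index, oldDigest, newDigest, layerID) re-keys a digest-to-layer-IDs index when a layer's digest changes; the hunk above extends that bookkeeping to the compressed digest. A plausible shape for such a helper, sketched as an illustration only (the real c/storage implementation may differ):

package main

import "fmt"

type digestIndex map[string][]string

// rekey removes id from the bucket of oldD (if any) and adds it to newD's bucket.
func rekey(m digestIndex, oldD, newD, id string) {
	if oldD != "" {
		kept := m[oldD][:0]
		for _, v := range m[oldD] {
			if v != id {
				kept = append(kept, v)
			}
		}
		if len(kept) == 0 {
			delete(m, oldD)
		} else {
			m[oldD] = kept
		}
	}
	if newD != "" {
		m[newD] = append(m[newD], id)
	}
}

func main() {
	idx := digestIndex{"sha256:old": {"layer-1"}}
	rekey(idx, "sha256:old", "sha256:new", "layer-1")
	fmt.Println(idx) // map[sha256:new:[layer-1]]
}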
@@ -10,9 +10,11 @@ func invokeUnpack(decompressedArchive io.Reader,
 	dest string,
 	options *archive.TarOptions, root string,
 ) error {
+	_ = root // Restricting the operation to this root is not implemented on macOS
 	return archive.Unpack(decompressedArchive, dest, options)
 }
 
 func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	_ = root // Restricting the operation to this root is not implemented on macOS
 	return archive.TarWithOptions(srcPath, options)
 }
@@ -107,12 +107,15 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
 	w.Close()
 
 	if err := cmd.Wait(); err != nil {
+		errorOut := fmt.Errorf("unpacking failed (error: %w; output: %s)", err, output)
 		// when `xz -d -c -q | storage-untar ...` failed on storage-untar side,
 		// we need to exhaust `xz`'s output, otherwise the `xz` side will be
 		// pending on write pipe forever
-		io.Copy(io.Discard, decompressedArchive)
+		if _, err := io.Copy(io.Discard, decompressedArchive); err != nil {
+			return fmt.Errorf("%w\nexhausting input failed (error: %w)", errorOut, err)
+		}
 
-		return fmt.Errorf("processing tar file(%s): %w", output, err)
+		return errorOut
 	}
 	return nil
 }
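The comment in this hunk describes a real pipeline hazard: if the consumer of xz's output dies without reading everything, xz blocks forever on a full pipe. A self-contained illustration of draining the remaining input so the producer can finish, with io.Pipe standing in for the xz pipe:

package main

import (
	"errors"
	"fmt"
	"io"
)

func consume(r io.Reader) error {
	buf := make([]byte, 4)
	if _, err := r.Read(buf); err != nil {
		return err
	}
	// Simulate the consumer failing partway through.
	failure := errors.New("unpacking failed")

	// Without this drain the writer side could block forever on a full pipe.
	if _, err := io.Copy(io.Discard, r); err != nil {
		return fmt.Errorf("%w\nexhausting input failed (error: %w)", failure, err)
	}
	return failure
}

func main() {
	pr, pw := io.Pipe()
	go func() {
		// Producer: writes more than the consumer ever reads, then closes.
		for i := 0; i < 1024; i++ {
			if _, err := pw.Write([]byte("xxxxxxxx")); err != nil {
				return
			}
		}
		pw.Close()
	}()
	fmt.Println("consumer returned:", consume(pr))
}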
@@ -40,11 +40,13 @@ func applyLayer() {
 	}
 
 	// We need to be able to set any perms
-	oldmask, err := system.Umask(0)
-	defer system.Umask(oldmask)
+	oldMask, err := system.Umask(0)
 	if err != nil {
 		fatal(err)
 	}
+	defer func() {
+		_, _ = system.Umask(oldMask) // Ignore err. This can only fail with ErrNotSupportedPlatform, in which case we would have failed above.
+	}()
 
 	if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
 		fatal(err)
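The reordering above matters because the old code deferred the restore before checking whether clearing the umask had failed. A sketch of the save, check, restore sequence; setUmask here wraps golang.org/x/sys/unix and only mimics the (old mask, error) shape of the storage helper:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// setUmask mirrors the (oldMask, err) shape of the storage helper; on Linux
// the raw syscall cannot fail, so err is always nil here.
func setUmask(mask int) (int, error) {
	return unix.Umask(mask), nil
}

func main() {
	// Open up the umask so any permission bits survive extraction.
	oldMask, err := setUmask(0)
	if err != nil {
		panic(err)
	}
	defer func() {
		_, _ = setUmask(oldMask) // restore; error deliberately ignored on the way out
	}()

	fmt.Printf("umask temporarily cleared (was %04o)\n", oldMask)
}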
@@ -80,7 +80,9 @@ var (
 func (c *layer) release() {
 	runtime.SetFinalizer(c, nil)
 	if c.mmapBuffer != nil {
-		unix.Munmap(c.mmapBuffer)
+		if err := unix.Munmap(c.mmapBuffer); err != nil {
+			logrus.Warnf("Error Munmap: layer %q: %v", c.id, err)
+		}
 	}
 }
 
@@ -192,7 +194,9 @@ func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) {
 	}
 	defer func() {
 		if errRet != nil && mmapBuffer != nil {
-			unix.Munmap(mmapBuffer)
+			if err := unix.Munmap(mmapBuffer); err != nil {
+				logrus.Warnf("Error Munmap: layer %q: %v", layerID, err)
+			}
 		}
 	}()
 	cacheFile, err := readCacheFileFromMemory(buffer)
@@ -10,6 +10,7 @@ import (
 	"encoding/base64"
 	"io"
 	"strings"
+	"time"
 
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/chunked/internal"
@@ -233,6 +234,14 @@ func newTarSplitData(level int) (*tarSplitData, error) {
 	}, nil
 }
 
+// timeIfNotZero returns a pointer to the time.Time if it is not zero, otherwise it returns nil.
+func timeIfNotZero(t *time.Time) *time.Time {
+	if t == nil || t.IsZero() {
+		return nil
+	}
+	return t
+}
+
 func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
 	// total written so far. Used to retrieve partial offsets in the file
 	dest := ioutils.NewWriteCounter(destFile)
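The new helper normalizes tar header timestamps: a zero time.Time becomes a nil pointer, presumably so entries without a real timestamp do not carry the Go zero value into the metadata. A quick standalone check of that behaviour:

package main

import (
	"fmt"
	"time"
)

func timeIfNotZero(t *time.Time) *time.Time {
	if t == nil || t.IsZero() {
		return nil
	}
	return t
}

func main() {
	var zero time.Time
	now := time.Now()

	fmt.Println(timeIfNotZero(&zero) == nil) // true: zero timestamps are dropped
	fmt.Println(timeIfNotZero(&now) == nil)  // false: real timestamps are kept
	fmt.Println(timeIfNotZero(nil) == nil)   // true: nil stays nil
}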
@@ -392,9 +401,9 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 			Size:       hdr.Size,
 			UID:        hdr.Uid,
 			GID:        hdr.Gid,
-			ModTime:    &hdr.ModTime,
-			AccessTime: &hdr.AccessTime,
-			ChangeTime: &hdr.ChangeTime,
+			ModTime:    timeIfNotZero(&hdr.ModTime),
+			AccessTime: timeIfNotZero(&hdr.AccessTime),
+			ChangeTime: timeIfNotZero(&hdr.ChangeTime),
 			Devmajor:   hdr.Devmajor,
 			Devminor:   hdr.Devminor,
 			Xattrs:     xattrs,
@@ -76,7 +76,9 @@ func doHardLink(dirfd, srcFd int, destFile string) error {
 
 	// if the destination exists, unlink it first and try again
 	if err != nil && os.IsExist(err) {
-		unix.Unlinkat(destDirFd, destBase, 0)
+		if err := unix.Unlinkat(destDirFd, destBase, 0); err != nil {
+			return err
+		}
 		return doLink()
 	}
 	return err
@@ -503,7 +505,7 @@ func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *arch
 	return setFileAttrs(dirfd, newFile, mode, metadata, options, false)
 }
 
-func safeSymlink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error {
+func safeSymlink(dirfd int, metadata *fileMetadata) error {
 	destDir, destBase := filepath.Split(metadata.Name)
 	destDirFd := dirfd
 	if destDir != "" && destDir != "." {
@@ -162,20 +162,20 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges
 		if err != nil {
 			return nil, fmt.Errorf("parsing zstd:chunked TOC digest %q: %w", zstdChunkedTOCDigestString, err)
 		}
-		return makeZstdChunkedDiffer(ctx, store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
+		return makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
 	}
 	if hasEstargzTOC {
 		estargzTOCDigest, err := digest.Parse(estargzTOCDigestString)
 		if err != nil {
 			return nil, fmt.Errorf("parsing estargz TOC digest %q: %w", estargzTOCDigestString, err)
 		}
-		return makeEstargzChunkedDiffer(ctx, store, blobSize, estargzTOCDigest, iss, pullOptions)
+		return makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions)
 	}
 
-	return makeConvertFromRawDiffer(ctx, store, blobDigest, blobSize, annotations, iss, pullOptions)
+	return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
 }
 
-func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
+func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
 	if !parseBooleanPullOption(pullOptions, "convert_images", false) {
 		return nil, errors.New("convert_images not configured")
 	}
@@ -197,7 +197,7 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDige
 	}, nil
 }
 
-func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
+func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
 	manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations)
 	if err != nil {
 		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
@@ -223,7 +223,7 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
 	}, nil
 }
 
-func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
+func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
 	manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, tocDigest)
 	if err != nil {
 		return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
@@ -557,7 +557,11 @@ func hashHole(h hash.Hash, size int64, copyBuffer []byte) error {
 func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileType, destFile *destinationFile, size int64) error {
 	switch compression {
 	case fileTypeZstdChunked:
-		defer c.zstdReader.Reset(nil)
+		defer func() {
+			if err := c.zstdReader.Reset(nil); err != nil {
+				logrus.Warnf("release of references to the previous zstd reader failed: %v", err)
+			}
+		}()
 		if _, err := io.CopyBuffer(destFile.to, io.LimitReader(c.zstdReader, size), c.copyBuffer); err != nil {
 			return err
 		}
@@ -703,7 +707,7 @@ func (c *chunkedDiffer) recordFsVerity(path string, roFile *os.File) error {
 	return nil
 }
 
-func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) {
+func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) {
 	var destFile *destinationFile
 
 	filesToClose := make(chan *destinationFile, 3)
@@ -917,7 +921,7 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
 	return newMissingParts
 }
 
-func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error {
+func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dirfd int, missingParts []missingPart, options *archive.TarOptions) error {
 	var chunksToRequest []ImageSourceChunk
 
 	calculateChunksToRequest := func() {
@@ -956,7 +960,7 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st
 		return err
 	}
 
-	if err := c.storeMissingFiles(streams, errs, dest, dirfd, missingParts, options); err != nil {
+	if err := c.storeMissingFiles(streams, errs, dirfd, missingParts, options); err != nil {
 		return err
 	}
 	return nil
@@ -1122,6 +1126,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	// stream to use for reading the zstd:chunked or Estargz file.
 	stream := c.stream
 
+	var compressedDigest digest.Digest
 	var uncompressedDigest digest.Digest
 	var convertedBlobSize int64
 
@@ -1138,7 +1143,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		}()
 
 		// calculate the checksum before accessing the file.
-		compressedDigest, err := c.copyAllBlobToFile(blobFile)
+		compressedDigest, err = c.copyAllBlobToFile(blobFile)
 		if err != nil {
 			return graphdriver.DriverWithDifferOutput{}, err
 		}
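With the shadowing fixed, ApplyDiff keeps the compressed digest computed while the whole blob is copied to a temporary file. One common way to compute a digest during such a copy, shown with the opencontainers go-digest package as an illustration rather than the unexported c/storage helper:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

// copyWithDigest writes src to dst and returns the sha256 digest of everything copied.
func copyWithDigest(dst io.Writer, src io.Reader) (digest.Digest, int64, error) {
	digester := digest.Canonical.Digester()
	n, err := io.Copy(io.MultiWriter(dst, digester.Hash()), src)
	if err != nil {
		return "", n, err
	}
	return digester.Digest(), n, nil
}

func main() {
	blob := strings.NewReader("pretend this is a compressed layer blob")
	d, n, err := copyWithDigest(io.Discard, blob)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("copied %d bytes, digest %s\n", n, d)
}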
@@ -1224,6 +1229,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		},
 		TOCDigest:          c.tocDigest,
 		UncompressedDigest: uncompressedDigest,
+		CompressedDigest:   compressedDigest,
 	}
 
 	// When the hard links deduplication is used, file attributes are ignored because setting them
@@ -1283,7 +1289,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	for _, e := range mergedEntries {
 		d := e.Name[0:2]
 		if _, found := createdDirs[d]; !found {
-			unix.Mkdirat(dirfd, d, 0o755)
+			if err := unix.Mkdirat(dirfd, d, 0o755); err != nil {
+				return output, &fs.PathError{Op: "mkdirat", Path: d, Err: err}
+			}
 			createdDirs[d] = struct{}{}
 		}
 	}
@@ -1429,7 +1437,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 			continue
 
 		case tar.TypeSymlink:
-			if err := safeSymlink(dirfd, mode, r, options); err != nil {
+			if err := safeSymlink(dirfd, r); err != nil {
 				return output, err
 			}
 			continue
@@ -1531,7 +1539,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 	}
 	// There are some missing files. Prepare a multirange request for the missing chunks.
 	if len(missingParts) > 0 {
-		if err := c.retrieveMissingFiles(stream, dest, dirfd, missingParts, options); err != nil {
+		if err := c.retrieveMissingFiles(stream, dirfd, missingParts, options); err != nil {
 			return output, err
 		}
 	}
@@ -10,6 +10,7 @@ import (
 	"syscall"
 
 	"github.com/containers/storage/pkg/idtools"
+	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
 
@@ -61,12 +62,20 @@ func CreateUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int,
 		_ = unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0)
 		// just wait for the SIGKILL
 		for {
-			syscall.Pause()
+			_ = syscall.Pause()
 		}
 	}
 	cleanupFunc := func() {
-		unix.Kill(int(pid), unix.SIGKILL)
-		_, _ = unix.Wait4(int(pid), nil, 0, nil)
+		err1 := unix.Kill(int(pid), unix.SIGKILL)
+		if err1 != nil && err1 != syscall.ESRCH {
+			logrus.Warnf("kill process pid: %d with SIGKILL ended with error: %v", int(pid), err1)
+		}
+		if err1 != nil {
+			return
+		}
+		if _, err := unix.Wait4(int(pid), nil, 0, nil); err != nil {
+			logrus.Warnf("wait4 pid: %d ended with error: %v", int(pid), err)
+		}
 	}
 	writeMappings := func(fname string, idmap []idtools.IDMap) error {
 		mappings := ""
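The rewritten cleanupFunc treats an already-gone child (ESRCH) as benign and only waits when the kill actually reached the process. A compact sketch of that reaping logic against an ordinary child process; the sleep command is just a placeholder:

package main

import (
	"os/exec"
	"syscall"

	"github.com/sirupsen/logrus"
	"golang.org/x/sys/unix"
)

// reap force-kills pid and collects its exit status, tolerating an already-dead child.
func reap(pid int) {
	err := unix.Kill(pid, unix.SIGKILL)
	if err != nil && err != syscall.ESRCH {
		logrus.Warnf("kill pid %d: %v", pid, err)
	}
	if err != nil {
		return // nothing to wait for: the process was already gone (or the kill failed)
	}
	if _, err := unix.Wait4(pid, nil, 0, nil); err != nil {
		logrus.Warnf("wait4 pid %d: %v", pid, err)
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		logrus.Fatal(err)
	}
	reap(cmd.Process.Pid)
}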
@@ -150,10 +150,13 @@ func (w *atomicFileWriter) complete(commit bool) (retErr error) {
 	}
 
 	defer func() {
-		w.closeTempFile()
+		err := w.closeTempFile()
 		if retErr != nil || w.writeErr != nil {
 			os.Remove(w.f.Name())
 		}
+		if retErr == nil {
+			retErr = err
+		}
 	}()
 
 	if commit {
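This is the usual Go idiom for not losing a Close error inside a defer: capture it and promote it into the named return value only when the function was otherwise succeeding. A self-contained sketch; writeFile is illustrative, not the ioutils API:

package main

import (
	"fmt"
	"os"
)

// writeFile reports a failed Close unless an earlier error is already being returned.
func writeFile(path string, data []byte) (retErr error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() {
		err := f.Close()
		if retErr == nil {
			retErr = err // surface the Close failure only if nothing else went wrong
		}
	}()

	_, err = f.Write(data)
	return err
}

func main() {
	if err := writeFile("/tmp/atomicwriter-demo.txt", []byte("hello\n")); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	fmt.Println("written")
}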
@@ -415,7 +415,9 @@ func (l *LockFile) lock(lType lockType) {
 		// Optimization: only use the (expensive) syscall when
 		// the counter is 0. In this case, we're either the first
 		// reader lock or a writer lock.
-		lockHandle(l.fd, lType, false)
+		if err := lockHandle(l.fd, lType, false); err != nil {
+			panic(err)
+		}
 	}
 	l.lockType = lType
 	l.locked = true
@@ -590,7 +590,12 @@ func MaybeReexecUsingUserNamespace(evenForRoot bool) {
 	cmd.Hook = func(int) error {
 		go func() {
 			for receivedSignal := range interrupted {
-				cmd.Cmd.Process.Signal(receivedSignal)
+				if err := cmd.Cmd.Process.Signal(receivedSignal); err != nil {
+					logrus.Warnf(
+						"Failed to send a signal '%d' to the Process (PID: %d): %v",
+						receivedSignal, cmd.Cmd.Process.Pid, err,
+					)
+				}
 			}
 		}()
 		return nil
@@ -1509,23 +1509,17 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare
 			gidMap = s.gidMap
 		}
 	}
-	layerOptions := LayerOptions{
-		OriginalDigest:     options.OriginalDigest,
-		OriginalSize:       options.OriginalSize,
-		UncompressedDigest: options.UncompressedDigest,
-		Flags:              options.Flags,
-	}
 	if s.canUseShifting(uidMap, gidMap) {
-		layerOptions.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil}
+		options.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil}
 	} else {
-		layerOptions.IDMappingOptions = types.IDMappingOptions{
+		options.IDMappingOptions = types.IDMappingOptions{
 			HostUIDMapping: options.HostUIDMapping,
 			HostGIDMapping: options.HostGIDMapping,
 			UIDMap:         copyIDMap(uidMap),
 			GIDMap:         copyIDMap(gidMap),
 		}
 	}
-	return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff, slo)
+	return rlstore.create(id, parentLayer, names, mountLabel, nil, &options, writeable, diff, slo)
 }
 
 func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
@@ -352,7 +352,7 @@ func getRootlessStorageOpts(systemOpts StoreOptions) (StoreOptions, error) {
 	}
 
 	if opts.GraphDriverName == "" {
-		if canUseRootlessOverlay(opts.GraphRoot, opts.RunRoot) {
+		if canUseRootlessOverlay() {
 			opts.GraphDriverName = overlayDriver
 		} else {
 			opts.GraphDriverName = "vfs"
@@ -16,6 +16,6 @@ var (
 )
 
 // canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
-func canUseRootlessOverlay(home, runhome string) bool {
+func canUseRootlessOverlay() bool {
 	return false
 }
@@ -11,6 +11,6 @@ const (
 var defaultOverrideConfigFile = "/etc/containers/storage.conf"
 
 // canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
-func canUseRootlessOverlay(home, runhome string) bool {
+func canUseRootlessOverlay() bool {
 	return false
 }
@@ -22,7 +22,7 @@ var (
 )
 
 // canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
-func canUseRootlessOverlay(home, runhome string) bool {
+func canUseRootlessOverlay() bool {
 	// we check first for fuse-overlayfs since it is cheaper.
 	if path, _ := exec.LookPath("fuse-overlayfs"); path != "" {
 		return true
@@ -14,6 +14,6 @@ var (
 )
 
 // canUseRootlessOverlay returns true if the overlay driver can be used for rootless containers
-func canUseRootlessOverlay(home, runhome string) bool {
+func canUseRootlessOverlay() bool {
 	return false
 }
@@ -66,7 +66,10 @@ func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio
 		return
 	}
 
-	ReloadConfigurationFile(configFile, storeOptions)
+	if err := ReloadConfigurationFile(configFile, storeOptions); err != nil {
+		logrus.Warningf("Failed to reload %q %v\n", configFile, err)
+		return
+	}
 
 	prevReloadConfig.storeOptions = storeOptions
 	prevReloadConfig.mod = mtime
@@ -355,7 +355,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.54.1-0.20240627145511-52b643e1ff51
+# github.com/containers/storage v1.54.1-0.20240712121534-ab74785ce9e8
 ## explicit; go 1.21
 github.com/containers/storage
 github.com/containers/storage/drivers