mirror of https://github.com/containers/podman.git
Merge pull request #25417 from mheon/fix_25368
Fix volume quota assignment
Commit efe8e165d8

go.mod (2 changes)
@@ -21,7 +21,7 @@ require (
 	github.com/containers/libhvee v0.10.0
 	github.com/containers/ocicrypt v1.2.1
 	github.com/containers/psgo v1.9.0
-	github.com/containers/storage v1.57.2-0.20250214174508-b6f6fb27264f
+	github.com/containers/storage v1.57.2-0.20250228100055-700b765b2111
 	github.com/containers/winquit v1.1.0
 	github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
 	github.com/crc-org/crc/v2 v2.45.0
go.sum (4 changes)

@@ -96,8 +96,8 @@ github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpV
 github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
 github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
 github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
-github.com/containers/storage v1.57.2-0.20250214174508-b6f6fb27264f h1:moo+peQbWNAZBI7+NTg+4RUOq1JuOllkfCs7ximmbg0=
-github.com/containers/storage v1.57.2-0.20250214174508-b6f6fb27264f/go.mod h1:egC90qMy0fTpGjkaHj667syy1Cbr3XPZEVX/qkUPrdM=
+github.com/containers/storage v1.57.2-0.20250228100055-700b765b2111 h1:NMmaECeWzq2cWAXfPnsl7oFc2jyb/YRcPbzYT8jpQUA=
+github.com/containers/storage v1.57.2-0.20250228100055-700b765b2111/go.mod h1:egC90qMy0fTpGjkaHj667syy1Cbr3XPZEVX/qkUPrdM=
 github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
 github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
 github.com/coreos/go-oidc/v3 v3.12.0 h1:sJk+8G2qq94rDI6ehZ71Bol3oUHy63qNYmkiSjrc/Jo=
@@ -168,16 +168,11 @@ func (r *Runtime) newVolume(ctx context.Context, noCreatePluginVolume bool, opti
 		if err := idtools.SafeChown(volPathRoot, volume.config.UID, volume.config.GID); err != nil {
 			return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", volPathRoot, volume.config.UID, volume.config.GID, err)
 		}
-		fullVolPath := filepath.Join(volPathRoot, "_data")
-		if err := os.MkdirAll(fullVolPath, 0755); err != nil {
-			return nil, fmt.Errorf("creating volume directory %q: %w", fullVolPath, err)
-		}
-		if err := idtools.SafeChown(fullVolPath, volume.config.UID, volume.config.GID); err != nil {
-			return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", fullVolPath, volume.config.UID, volume.config.GID, err)
-		}
-		if err := LabelVolumePath(fullVolPath, volume.config.MountLabel); err != nil {
-			return nil, err
-		}
+
+		// Setting quotas must happen *before* the _data inner directory
+		// is created, as the volume must be empty for the quota to be
+		// properly applied - if any subdirectories exist before the
+		// quota is applied, the quota will not be applied to them.
 		switch {
 		case volume.config.DisableQuota:
 			if volume.config.Size > 0 || volume.config.Inodes > 0 {

@@ -206,10 +201,20 @@ func (r *Runtime) newVolume(ctx context.Context, noCreatePluginVolume bool, opti
 			// subdirectory - so the quota ID assignment logic works
 			// properly.
 			if err := q.SetQuota(volPathRoot, quota); err != nil {
-				return nil, fmt.Errorf("failed to set size quota size=%d inodes=%d for volume directory %q: %w", volume.config.Size, volume.config.Inodes, fullVolPath, err)
+				return nil, fmt.Errorf("failed to set size quota size=%d inodes=%d for volume directory %q: %w", volume.config.Size, volume.config.Inodes, volPathRoot, err)
 			}
 		}
 
+		fullVolPath := filepath.Join(volPathRoot, "_data")
+		if err := os.MkdirAll(fullVolPath, 0755); err != nil {
+			return nil, fmt.Errorf("creating volume directory %q: %w", fullVolPath, err)
+		}
+		if err := idtools.SafeChown(fullVolPath, volume.config.UID, volume.config.GID); err != nil {
+			return nil, fmt.Errorf("chowning volume directory %q to %d:%d: %w", fullVolPath, volume.config.UID, volume.config.GID, err)
+		}
+		if err := LabelVolumePath(fullVolPath, volume.config.MountLabel); err != nil {
+			return nil, err
+		}
 		volume.config.MountPoint = fullVolPath
 	}
 
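The comment added above carries the substance of the fix: the project quota has to be assigned while the volume root is still empty, because a project ID set on a directory does not retroactively cover children that already exist. Below is a minimal ordering sketch; createVolumeDirs and applyQuota are hypothetical stand-ins for the inline logic and the q.SetQuota(volPathRoot, quota) call shown in the hunks above, not Podman's actual API:

package libpodsketch

import (
	"os"
	"path/filepath"

	"github.com/containers/storage/pkg/idtools"
)

// createVolumeDirs is a hypothetical helper illustrating the corrected
// ordering in newVolume: assign the quota first, create _data second.
func createVolumeDirs(volPathRoot string, uid, gid int, applyQuota func(string) error) error {
	// volPathRoot already exists and is still empty at this point.
	if err := idtools.SafeChown(volPathRoot, uid, gid); err != nil {
		return err
	}
	// 1. Assign the project quota while volPathRoot has no children.
	if err := applyQuota(volPathRoot); err != nil {
		return err
	}
	// 2. Only afterwards create the _data directory that containers mount,
	//    so it (and everything created under it) inherits the project ID.
	fullVolPath := filepath.Join(volPathRoot, "_data")
	if err := os.MkdirAll(fullVolPath, 0o755); err != nil {
		return err
	}
	return idtools.SafeChown(fullVolPath, uid, gid)
}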
@@ -151,6 +151,7 @@ Requires: openssl
 Requires: socat
 Requires: buildah
 Requires: gnupg
+Requires: xfsprogs
 
 %description tests
 %{summary}
@@ -0,0 +1,78 @@
+#!/usr/bin/env bats   -*- bats -*-
+#
+# podman volume XFS quota tests
+#
+# bats file_tags=distro-integration
+#
+
+load helpers
+
+function setup() {
+    basic_setup
+
+    run_podman '?' volume rm -a
+}
+
+function teardown() {
+    run_podman '?' rm -af -t 0
+    run_podman '?' volume rm -a
+
+    loop=$PODMAN_TMPDIR/disk.img
+    vol_path=$PODMAN_TMPDIR/volpath
+    if [ -f ${loop} ]; then
+        if [ -d ${vol_path} ]; then
+            if mountpoint ${vol_path}; then
+                umount "$vol_path"
+            fi
+            rm -rf "$vol_path"
+        fi
+
+        while read path dev; do
+            if [[ "$path" == "$loop" ]]; then
+                losetup -d $dev
+            fi
+        done < <(losetup -l --noheadings --output BACK-FILE,NAME)
+        rm -f $loop
+    fi
+
+    basic_teardown
+}
+
+@test "podman volumes with XFS quotas" {
+    skip_if_rootless "Quotas are only possible with root"
+    skip_if_remote "Requires --root flag, not possible w/ remote"
+
+    # Minimum XFS filesystem size is 300mb
+    loop=$PODMAN_TMPDIR/disk.img
+    fallocate -l 300m ${loop}
+    run -0 losetup -f --show $loop
+    loop_dev="$output"
+    mkfs.xfs $loop_dev
+
+    safe_opts=$(podman_isolation_opts ${PODMAN_TMPDIR})
+    vol_path=$PODMAN_TMPDIR/volpath
+    mkdir -p $vol_path
+    safe_opts="$safe_opts --volumepath=$vol_path"
+    mount -t xfs -o defaults,pquota $loop_dev $vol_path
+
+    vol_one="testvol1"
+    run_podman $safe_opts volume create --opt o=size=2m $vol_one
+
+    vol_two="testvol2"
+    run_podman $safe_opts volume create --opt o=size=4m $vol_two
+
+    ctrname="testctr"
+    run_podman $safe_opts run -d --name=$ctrname -i -v $vol_one:/one -v $vol_two:/two $IMAGE top
+
+    run_podman $safe_opts exec $ctrname dd if=/dev/zero of=/one/oneMB bs=1M count=1
+    run_podman 1 $safe_opts exec $ctrname dd if=/dev/zero of=/one/twoMB bs=1M count=1
+    assert "$output" =~ "No space left on device"
+    run_podman $safe_opts exec $ctrname dd if=/dev/zero of=/two/threeMB bs=1M count=3
+    run_podman 1 $safe_opts exec $ctrname dd if=/dev/zero of=/two/oneMB bs=1M count=1
+    assert "$output" =~ "No space left on device"
+
+    run_podman $safe_opts rm -f -t 0 $ctrname
+    run_podman $safe_opts volume rm -af
+}
+
+# vim: filetype=sh
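As a side note for manual debugging outside this test, the resulting project quotas can be inspected as root with xfs_quota -x -c 'report -p' run against the pquota-mounted filesystem ($vol_path above); the reported limits should match the sizes passed to volume create.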
@@ -92,6 +92,7 @@ Requirements
 - socat
 - buildah
 - gnupg
+- xfsprogs
 
 
 Further Details
@@ -190,7 +190,8 @@ func NewControl(basePath string) (*Control, error) {
 }
 
 // SetQuota - assign a unique project id to directory and set the quota limits
-// for that project id
+// for that project id.
+// targetPath must exist, must be a directory, and must be empty.
 func (q *Control) SetQuota(targetPath string, quota Quota) error {
 	var projectID uint32
 	value, ok := q.quotas.Load(targetPath)

@@ -200,10 +201,20 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error {
 	if !ok {
 		projectID = q.nextProjectID
 
+		// The directory we are setting an ID on must be empty, as
+		// the ID will not be propagated to pre-existing subdirectories.
+		dents, err := os.ReadDir(targetPath)
+		if err != nil {
+			return fmt.Errorf("reading directory %s: %w", targetPath, err)
+		}
+		if len(dents) > 0 {
+			return fmt.Errorf("can only set project ID on empty directories, %s is not empty", targetPath)
+		}
+
 		//
 		// assign project id to new container directory
 		//
-		err := setProjectID(targetPath, projectID)
+		err = setProjectID(targetPath, projectID)
 		if err != nil {
 			return err
 		}
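SetQuota's contract is therefore stricter: the target must be an existing, empty directory, otherwise the project ID would silently miss its pre-existing children. A small usage sketch against the vendored quota package follows; the paths and limits are placeholders, and the base path must live on a Linux XFS filesystem mounted with project quotas (the package needs cgo):

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/drivers/quota"
)

func main() {
	// Placeholder path; must be on XFS mounted with prjquota/pquota.
	base := "/var/lib/containers/storage/volumes"
	ctrl, err := quota.NewControl(base)
	if err != nil {
		panic(err) // e.g. the filesystem does not support project quotas
	}

	target := base + "/newvol"
	if err := os.Mkdir(target, 0o700); err != nil {
		panic(err)
	}

	// Must happen while target is still empty; SetQuota now rejects
	// non-empty directories instead of applying a partial quota.
	if err := ctrl.SetQuota(target, quota.Quota{Size: 10 * 1024 * 1024, Inodes: 1024}); err != nil {
		panic(err)
	}
	fmt.Println("project quota applied to", target)
}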
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 	"maps"
+	"math/bits"
 	"os"
 	"path"
 	"path/filepath"

@@ -46,11 +47,13 @@ const (
 
 type layerLocations uint8
 
-// The backing store is split in two json files, one (the volatile)
-// that is written without fsync() meaning it isn't as robust to
-// unclean shutdown
+// The backing store is split in three json files.
+// The volatile store is written without fsync() meaning it isn't as robust to unclean shutdown.
+// Optionally, an image store can be configured to store RO layers.
+// The stable store is used for the remaining layers that don't go into the other stores.
 const (
 	stableLayerLocation layerLocations = 1 << iota
+	imageStoreLayerLocation
 	volatileLayerLocation
 
 	numLayerLocationIndex = iota
@@ -60,6 +63,10 @@ func layerLocationFromIndex(index int) layerLocations {
 	return 1 << index
 }
 
+func indexFromLayerLocation(location layerLocations) int {
+	return bits.TrailingZeros(uint(location))
+}
+
 // A Layer is a record of a copy-on-write layer that's stored by the lower
 // level graph driver.
 type Layer struct {
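layerLocations is a bit mask while jsonPath is a plain array, so the two helpers convert between the two representations; the round trip works because every location is a single bit. A standalone sketch of the mapping, with constants mirroring the iota definitions above:

package main

import (
	"fmt"
	"math/bits"
)

type layerLocations uint8

const (
	stableLayerLocation layerLocations = 1 << iota // 0b001 -> index 0
	imageStoreLayerLocation                        // 0b010 -> index 1
	volatileLayerLocation                          // 0b100 -> index 2
)

func indexFromLayerLocation(location layerLocations) int {
	return bits.TrailingZeros(uint(location))
}

func main() {
	// Print which jsonPath slot each single-bit location maps to.
	for _, loc := range []layerLocations{stableLayerLocation, imageStoreLayerLocation, volatileLayerLocation} {
		fmt.Printf("location %03b -> jsonPath index %d\n", loc, indexFromLayerLocation(loc))
	}
}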
@@ -164,8 +171,8 @@ type Layer struct {
 	// ReadOnly is true if this layer resides in a read-only layer store.
 	ReadOnly bool `json:"-"`
 
-	// volatileStore is true if the container is from the volatile json file
-	volatileStore bool `json:"-"`
+	// location is the location of the store where the layer is present.
+	location layerLocations `json:"-"`
 
 	// BigDataNames is a list of names of data items that we keep for the
 	// convenience of the caller. They can be large, and are only in

@@ -430,14 +437,6 @@ type layerStore struct {
 	driver drivers.Driver
 }
 
-// The caller must hold r.inProcessLock for reading.
-func layerLocation(l *Layer) layerLocations {
-	if l.volatileStore {
-		return volatileLayerLocation
-	}
-	return stableLayerLocation
-}
-
 func copyLayer(l *Layer) *Layer {
 	return &Layer{
 		ID: l.ID,

@@ -455,7 +454,7 @@ func copyLayer(l *Layer) *Layer {
 		TOCDigest:        l.TOCDigest,
 		CompressionType:  l.CompressionType,
 		ReadOnly:         l.ReadOnly,
-		volatileStore:    l.volatileStore,
+		location:         l.location,
 		BigDataNames:     copySlicePreferringNil(l.BigDataNames),
 		Flags:            copyMapPreferringNil(l.Flags),
 		UIDMap:           copySlicePreferringNil(l.UIDMap),

@@ -658,7 +657,11 @@ func (r *layerStore) layersModified() (lockfile.LastWrite, bool, error) {
 	// modified manually, then we have to reload the storage in
 	// any case.
 	for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
-		info, err := os.Stat(r.jsonPath[locationIndex])
+		rpath := r.jsonPath[locationIndex]
+		if rpath == "" {
+			continue
+		}
+		info, err := os.Stat(rpath)
 		if err != nil && !os.IsNotExist(err) {
 			return lockfile.LastWrite{}, false, fmt.Errorf("stat layers file: %w", err)
 		}

@@ -794,6 +797,9 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
 	for locationIndex := 0; locationIndex < numLayerLocationIndex; locationIndex++ {
 		location := layerLocationFromIndex(locationIndex)
 		rpath := r.jsonPath[locationIndex]
+		if rpath == "" {
+			continue
+		}
 		info, err := os.Stat(rpath)
 		if err != nil {
 			if !os.IsNotExist(err) {

@@ -820,9 +826,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
 				continue // skip invalid duplicated layer
 			}
 			// Remember where the layer came from
-			if location == volatileLayerLocation {
-				layer.volatileStore = true
-			}
+			layer.location = location
 			layers = append(layers, layer)
 			ids[layer.ID] = layer
 		}

@@ -843,7 +847,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
 			if conflict, ok := names[name]; ok {
 				r.removeName(conflict, name)
 				errorToResolveBySaving = ErrDuplicateLayerNames
-				modifiedLocations |= layerLocation(conflict)
+				modifiedLocations |= conflict.location
 			}
 			names[name] = layers[n]
 		}

@@ -939,7 +943,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
 				incompleteDeletionErrors = errors.Join(incompleteDeletionErrors,
 					fmt.Errorf("deleting layer %#v: %w", layer.ID, err))
 			}
-			modifiedLocations |= layerLocation(layer)
+			modifiedLocations |= layer.location
 		}
 		if err := r.saveLayers(modifiedLocations); err != nil {
 			return false, err

@@ -1008,7 +1012,7 @@ func (r *layerStore) save(saveLocations layerLocations) error {
 // The caller must hold r.lockfile locked for writing.
 // The caller must hold r.inProcessLock for WRITING.
 func (r *layerStore) saveFor(modifiedLayer *Layer) error {
-	return r.save(layerLocation(modifiedLayer))
+	return r.save(modifiedLayer.location)
 }
 
 // The caller must hold r.lockfile locked for writing.

@@ -1033,12 +1037,15 @@ func (r *layerStore) saveLayers(saveLocations layerLocations) error {
 			continue
 		}
 		rpath := r.jsonPath[locationIndex]
+		if rpath == "" {
+			return fmt.Errorf("internal error: no path for location %v", location)
+		}
 		if err := os.MkdirAll(filepath.Dir(rpath), 0o700); err != nil {
 			return err
 		}
 		subsetLayers := make([]*Layer, 0, len(r.layers))
 		for _, layer := range r.layers {
-			if layerLocation(layer) == location {
+			if layer.location == location {
 				subsetLayers = append(subsetLayers, layer)
 			}
 		}

@@ -1138,12 +1145,17 @@ func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers.
 	if transient {
 		volatileDir = rundir
 	}
+	layersImageDir := ""
+	if imagedir != "" {
+		layersImageDir = filepath.Join(imagedir, "layers.json")
+	}
 	rlstore := layerStore{
 		lockfile:       newMultipleLockFile(lockFiles...),
 		mountsLockfile: mountsLockfile,
 		rundir:         rundir,
 		jsonPath: [numLayerLocationIndex]string{
 			filepath.Join(layerdir, "layers.json"),
+			layersImageDir,
 			filepath.Join(volatileDir, "volatile-layers.json"),
 		},
 		layerdir: layerdir,

@@ -1181,6 +1193,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
 		rundir: rundir,
 		jsonPath: [numLayerLocationIndex]string{
 			filepath.Join(layerdir, "layers.json"),
+			"",
 			filepath.Join(layerdir, "volatile-layers.json"),
 		},
 		layerdir: layerdir,

@@ -1329,6 +1342,17 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
 	return copyLayer(layer), nil
 }
 
+func (r *layerStore) pickStoreLocation(volatile, writeable bool) layerLocations {
+	switch {
+	case volatile:
+		return volatileLayerLocation
+	case !writeable && r.jsonPath[indexFromLayerLocation(imageStoreLayerLocation)] != "":
+		return imageStoreLayerLocation
+	default:
+		return stableLayerLocation
+	}
+}
+
 // Requires startWriting.
 func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (layer *Layer, size int64, err error) {
 	if moreOptions == nil {
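Read together with the jsonPath changes, pickStoreLocation encodes a simple priority: volatile layers always go to the run-directory store, non-writeable layers go to the separate image store when one is configured (its jsonPath entry is non-empty), and everything else stays in the stable layers.json.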
@@ -1421,7 +1445,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
 		UIDMap:       copySlicePreferringNil(moreOptions.UIDMap),
 		GIDMap:       copySlicePreferringNil(moreOptions.GIDMap),
 		BigDataNames: []string{},
-		volatileStore: moreOptions.Volatile,
+		location:     r.pickStoreLocation(moreOptions.Volatile, writeable),
 	}
 	layer.Flags[incompleteFlag] = true
 
@@ -16,6 +16,7 @@ import (
 	"strings"
 	"sync"
 	"syscall"
+	"time"
 
 	"github.com/containers/storage/pkg/fileutils"
 	"github.com/containers/storage/pkg/idtools"

@@ -67,6 +68,8 @@ type (
 		CopyPass bool
 		// ForceMask, if set, indicates the permission mask used for created files.
 		ForceMask *os.FileMode
+		// Timestamp, if set, will be set in each header as create/mod/access time
+		Timestamp *time.Time
 	}
 )

@@ -494,15 +497,19 @@ type tarWriter struct {
 	// from the traditional behavior/format to get features like subsecond
 	// precision in timestamps.
 	CopyPass bool
+
+	// Timestamp, if set, will be set in each header as create/mod/access time
+	Timestamp *time.Time
 }
 
-func newTarWriter(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarWriter {
+func newTarWriter(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair, timestamp *time.Time) *tarWriter {
 	return &tarWriter{
 		SeenFiles:  make(map[uint64]string),
 		TarWriter:  tar.NewWriter(writer),
 		Buffer:     pools.BufioWriter32KPool.Get(nil),
 		IDMappings: idMapping,
 		ChownOpts:  chownOpts,
+		Timestamp:  timestamp,
 	}
 }
 

@@ -600,6 +607,13 @@ func (ta *tarWriter) addFile(path, name string) error {
 		hdr.Gname = ""
 	}
 
+	// if override timestamp set, replace all times with this
+	if ta.Timestamp != nil {
+		hdr.ModTime = *ta.Timestamp
+		hdr.AccessTime = *ta.Timestamp
+		hdr.ChangeTime = *ta.Timestamp
+	}
+
 	maybeTruncateHeaderModTime(hdr)
 
 	if ta.WhiteoutConverter != nil {

@@ -866,6 +880,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 			idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
 			compressWriter,
 			options.ChownOpts,
+			options.Timestamp,
 		)
 		ta.WhiteoutConverter = GetWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
 		ta.CopyPass = options.CopyPass
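The new Timestamp option lets callers pin every header's mod/access/change time, which is mainly useful for producing reproducible archives. A minimal sketch of using it through the exported TarWithOptions entry point; the source path and output file are placeholders:

package main

import (
	"io"
	"os"
	"time"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// Pin all header timestamps so the resulting tar stream does not
	// depend on on-disk mtimes.
	ts := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)

	rc, err := archive.TarWithOptions("/some/layer/dir", &archive.TarOptions{
		Timestamp: &ts,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	// Write the archive out (or hash it to compare two builds).
	f, err := os.Create("/tmp/layer.tar")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if _, err := io.Copy(f, rc); err != nil {
		panic(err)
	}
}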
@@ -452,7 +452,7 @@ func ChangesSize(newDir string, changes []Change) int64 {
 func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
 	reader, writer := io.Pipe()
 	go func() {
-		ta := newTarWriter(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
+		ta := newTarWriter(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil, nil)
 
 		// this buffer is needed for the duration of this piped stream
 		defer pools.BufioWriter32KPool.Put(ta.Buffer)
@@ -364,7 +364,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.57.2-0.20250214174508-b6f6fb27264f
+# github.com/containers/storage v1.57.2-0.20250228100055-700b765b2111
 ## explicit; go 1.22.0
 github.com/containers/storage
 github.com/containers/storage/drivers