mirror of https://github.com/containers/podman.git

commit c81c77109b
parent 66139fc266

vendor: update containers/storage

Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>

go.mod | 5 +++--

--- a/go.mod
+++ b/go.mod
@@ -20,7 +20,7 @@ require (
 	github.com/containers/libhvee v0.7.1
 	github.com/containers/ocicrypt v1.2.0
 	github.com/containers/psgo v1.9.0
-	github.com/containers/storage v1.55.1-0.20240903205438-465c38f89483
+	github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0
 	github.com/containers/winquit v1.1.0
 	github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
 	github.com/coreos/stream-metadata-go v0.4.4
@@ -48,6 +48,7 @@ require (
 	github.com/mattn/go-shellwords v1.0.12
 	github.com/mattn/go-sqlite3 v1.14.23
 	github.com/mdlayher/vsock v1.2.1
+	github.com/moby/sys/capability v0.2.0
 	github.com/moby/sys/user v0.3.0
 	github.com/moby/term v0.5.0
 	github.com/nxadm/tail v1.4.11
@@ -153,7 +154,7 @@ require (
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jinzhu/copier v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
-	github.com/klauspost/compress v1.17.9 // indirect
+	github.com/klauspost/compress v1.17.10 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.8 // indirect
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/leodido/go-urn v1.2.4 // indirect

go.sum | 10 ++++++----

--- a/go.sum
+++ b/go.sum
@@ -99,8 +99,8 @@ github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sir
 github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U=
 github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
 github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
-github.com/containers/storage v1.55.1-0.20240903205438-465c38f89483 h1:hQOAlIad+xjukeGFHQbH/x5I2zuPNCXmjvSrxX5ERF4=
-github.com/containers/storage v1.55.1-0.20240903205438-465c38f89483/go.mod h1:fRTU33KP5BXpOIWDxDgU5LpHbrOzWxmVmtm/3PYLlgE=
+github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0 h1:0NNBYNpPFzQUKXVq+oQG6NFQcBwtbs2luxl/bVulbPs=
+github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0/go.mod h1:Gx8WE9kURdCyEuB9cq8Kq5sRDRbpZi34lnOQ3zAGK2s=
 github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
 github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
 github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=
@@ -310,8 +310,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
+github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
 github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -365,6 +365,8 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N
 github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
 github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
 github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
+github.com/moby/sys/capability v0.2.0 h1:OJtbqfthavtfh1kycvEhMvY7/M2BHscP2fiXgzKI3sk=
+github.com/moby/sys/capability v0.2.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
 github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
 github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
 github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=

@@ -20,9 +20,9 @@ import (
 	"github.com/containers/storage/pkg/idtools"
 	pmount "github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/unshare"
+	"github.com/moby/sys/capability"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/sirupsen/logrus"
-	"github.com/syndtr/gocapability/capability"
 	"golang.org/x/sys/unix"
 )

@@ -171,7 +171,7 @@ vendor_task:

 cross_task:
     alias: cross
     container:
-        image: golang:1.21
+        image: golang:1.22
     build_script: make cross

@@ -35,7 +35,7 @@ TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /de
 # N/B: This value is managed by Renovate, manual changes are
 # possible, as long as they don't disturb the formatting
 # (i.e. DO NOT ADD A 'v' prefix!)
-GOLANGCI_LINT_VERSION := 1.60.3
+GOLANGCI_LINT_VERSION := 1.61.0

 default all: local-binary docs local-validate local-cross ## validate all checks, build and cross-build binaries and docs

@@ -8,6 +8,7 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"slices"
 	"sort"
 	"strings"
 	"sync"
@@ -769,12 +770,9 @@ func (s *store) Repair(report CheckReport, options *RepairOptions) []error {
 		return d
 	}
 	isUnaccounted := func(errs []error) bool {
-		for _, err := range errs {
-			if errors.Is(err, ErrLayerUnaccounted) {
-				return true
-			}
-		}
-		return false
+		return slices.ContainsFunc(errs, func(err error) bool {
+			return errors.Is(err, ErrLayerUnaccounted)
+		})
 	}
 	sort.Slice(layersToDelete, func(i, j int) bool {
 		// we've not heard of either of them, so remove them in the order the driver suggested
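
Note: this hunk swaps a hand-rolled scan for slices.ContainsFunc from the Go 1.21 standard library. A minimal standalone sketch of the idiom, outside the patch (errUnaccounted here is a stand-in for the package's ErrLayerUnaccounted):

    package main

    import (
    	"errors"
    	"fmt"
    	"slices"
    )

    var errUnaccounted = errors.New("layer unaccounted")

    func main() {
    	errs := []error{errors.New("other"), fmt.Errorf("wrap: %w", errUnaccounted)}
    	// ContainsFunc stops at the first element satisfying the predicate;
    	// errors.Is still matches through the %w wrapping.
    	found := slices.ContainsFunc(errs, func(err error) bool {
    		return errors.Is(err, errUnaccounted)
    	})
    	fmt.Println(found) // true
    }
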
@@ -1005,12 +1003,12 @@ func (c *checkDirectory) remove(path string) {
 func (c *checkDirectory) header(hdr *tar.Header) {
 	name := path.Clean(hdr.Name)
 	dir, base := path.Split(name)
-	if strings.HasPrefix(base, archive.WhiteoutPrefix) {
+	if file, ok := strings.CutPrefix(base, archive.WhiteoutPrefix); ok {
 		if base == archive.WhiteoutOpaqueDir {
 			c.remove(path.Clean(dir))
 			c.add(path.Clean(dir), tar.TypeDir, hdr.Uid, hdr.Gid, hdr.Size, os.FileMode(hdr.Mode), hdr.ModTime.Unix())
 		} else {
-			c.remove(path.Join(dir, base[len(archive.WhiteoutPrefix):]))
+			c.remove(path.Join(dir, file))
 		}
 	} else {
 		if hdr.Typeflag == tar.TypeLink {
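
Note: strings.CutPrefix (Go 1.20) replaces the HasPrefix check plus manual `base[len(prefix):]` slicing with one call that also reports whether the prefix was present. A standalone sketch; ".wh." is assumed here as the value of archive.WhiteoutPrefix, matching the OCI layer spec:

    package main

    import (
    	"fmt"
    	"strings"
    )

    const whiteoutPrefix = ".wh." // assumed value of archive.WhiteoutPrefix

    func main() {
    	base := ".wh.removed-file"
    	// ok is true only when the prefix was present; file is the remainder.
    	if file, ok := strings.CutPrefix(base, whiteoutPrefix); ok {
    		fmt.Println("whiteout for:", file) // whiteout for: removed-file
    	}
    }
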
@@ -1044,7 +1042,7 @@ func (c *checkDirectory) header(hdr *tar.Header) {

 // headers updates a checkDirectory using information from the passed-in header slice
 func (c *checkDirectory) headers(hdrs []*tar.Header) {
-	hdrs = append([]*tar.Header{}, hdrs...)
+	hdrs = slices.Clone(hdrs)
 	// sort the headers from the diff to ensure that whiteouts appear
 	// before content when they both appear in the same directory, per
 	// https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts

@@ -3,8 +3,10 @@ package storage
 import (
 	"errors"
 	"fmt"
+	"maps"
 	"os"
 	"path/filepath"
+	"slices"
 	"sync"
 	"time"
@@ -162,17 +164,17 @@ type containerStore struct {
 func copyContainer(c *Container) *Container {
 	return &Container{
 		ID:             c.ID,
-		Names:          copyStringSlice(c.Names),
+		Names:          slices.Clone(c.Names),
 		ImageID:        c.ImageID,
 		LayerID:        c.LayerID,
 		Metadata:       c.Metadata,
-		BigDataNames:   copyStringSlice(c.BigDataNames),
-		BigDataSizes:   copyStringInt64Map(c.BigDataSizes),
-		BigDataDigests: copyStringDigestMap(c.BigDataDigests),
+		BigDataNames:   slices.Clone(c.BigDataNames),
+		BigDataSizes:   maps.Clone(c.BigDataSizes),
+		BigDataDigests: maps.Clone(c.BigDataDigests),
 		Created:        c.Created,
 		UIDMap:         copyIDMap(c.UIDMap),
 		GIDMap:         copyIDMap(c.GIDMap),
-		Flags:          copyStringInterfaceMap(c.Flags),
+		Flags:          maps.Clone(c.Flags),
 		volatileStore:  c.volatileStore,
 	}
 }
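
Note: slices.Clone and maps.Clone make the same shallow copies as the removed copyStringSlice/copyString*Map helpers. One behavioral nuance: both return nil for a nil input, whereas a make-then-loop helper returns an empty non-nil value. A standalone sketch:

    package main

    import (
    	"fmt"
    	"maps"
    	"slices"
    )

    func main() {
    	names := []string{"a", "b"}
    	sizes := map[string]int64{"manifest": 1024}

    	// Shallow copies: mutating the copy leaves the original untouched.
    	names2 := slices.Clone(names)
    	sizes2 := maps.Clone(sizes)
    	names2[0] = "z"
    	sizes2["manifest"] = 0

    	fmt.Println(names, sizes)   // [a b] map[manifest:1024]
    	fmt.Println(names2, sizes2) // [z b] map[manifest:0]
    }
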
@@ -696,7 +698,7 @@ func (r *containerStore) create(id string, names []string, image, layer string,
 		volatileStore:  options.Volatile,
 	}
 	if options.MountOpts != nil {
-		container.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...)
+		container.Flags[mountOptsFlag] = slices.Clone(options.MountOpts)
 	}
 	if options.Volatile {
 		container.Flags[volatileFlag] = true
@@ -788,13 +790,6 @@ func (r *containerStore) Delete(id string) error {
 		return ErrContainerUnknown
 	}
 	id = container.ID
-	toDeleteIndex := -1
-	for i, candidate := range r.containers {
-		if candidate.ID == id {
-			toDeleteIndex = i
-			break
-		}
-	}
 	delete(r.byid, id)
 	// This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway.
 	// The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
@@ -803,14 +798,9 @@ func (r *containerStore) Delete(id string) error {
 	for _, name := range container.Names {
 		delete(r.byname, name)
 	}
-	if toDeleteIndex != -1 {
-		// delete the container at toDeleteIndex
-		if toDeleteIndex == len(r.containers)-1 {
-			r.containers = r.containers[:len(r.containers)-1]
-		} else {
-			r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...)
-		}
-	}
+	r.containers = slices.DeleteFunc(r.containers, func(candidate *Container) bool {
+		return candidate.ID == id
+	})
 	if err := r.saveFor(container); err != nil {
 		return err
 	}
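
Note: slices.DeleteFunc collapses the find-index-then-splice pattern used throughout these Delete methods. It removes every element matching the predicate in place and returns the shortened slice. A standalone sketch with a hypothetical container type:

    package main

    import (
    	"fmt"
    	"slices"
    )

    type container struct{ ID string }

    func main() {
    	containers := []*container{{"a"}, {"b"}, {"c"}}
    	id := "b"
    	containers = slices.DeleteFunc(containers, func(c *container) bool {
    		return c.ID == id
    	})
    	for _, c := range containers {
    		fmt.Println(c.ID) // a, then c
    	}
    }
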
@@ -948,14 +938,7 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error {
 	if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
 		save = true
 	}
-	addName := true
-	for _, name := range c.BigDataNames {
-		if name == key {
-			addName = false
-			break
-		}
-	}
-	if addName {
+	if !slices.Contains(c.BigDataNames, key) {
 		c.BigDataNames = append(c.BigDataNames, key)
 		save = true
 	}

@@ -254,8 +254,8 @@ type Differ interface {
 type DriverWithDiffer interface {
 	Driver
 	// ApplyDiffWithDiffer applies the changes using the callback function.
-	// If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
-	ApplyDiffWithDiffer(id, parent string, options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
+	// The staging directory created by this function is guaranteed to be usable with ApplyDiffFromStagingDirectory.
+	ApplyDiffWithDiffer(options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
 	// ApplyDiffFromStagingDirectory applies the changes using the diffOutput target directory.
 	ApplyDiffFromStagingDirectory(id, parent string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error
 	// CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors

@@ -103,20 +103,20 @@ func mountOverlayFromMain() {
 	// paths, but we don't want to mess with other options.
 	var upperk, upperv, workk, workv, lowerk, lowerv, labelk, labelv, others string
 	for _, arg := range strings.Split(options.Label, ",") {
-		kv := strings.SplitN(arg, "=", 2)
-		switch kv[0] {
+		key, val, _ := strings.Cut(arg, "=")
+		switch key {
 		case "upperdir":
 			upperk = "upperdir="
-			upperv = kv[1]
+			upperv = val
 		case "workdir":
 			workk = "workdir="
-			workv = kv[1]
+			workv = val
 		case "lowerdir":
 			lowerk = "lowerdir="
-			lowerv = kv[1]
+			lowerv = val
 		case "label":
 			labelk = "label="
-			labelv = kv[1]
+			labelv = val
 		default:
 			if others == "" {
 				others = arg
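
Note: strings.Cut splits on the first separator and reports whether it was found, so the kv[1] index that would panic on a value-less option disappears. A standalone sketch of parsing comma-separated mount options:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	for _, arg := range strings.Split("upperdir=/up,workdir=/work,ro", ",") {
    		// val is "" and found is false when arg contains no "=".
    		key, val, found := strings.Cut(arg, "=")
    		fmt.Printf("key=%s val=%q found=%v\n", key, val, found)
    	}
    }
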
@@ -14,6 +14,7 @@ import (
 	"os/exec"
 	"path"
 	"path/filepath"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -158,30 +159,7 @@ func init() {
 }

 func hasMetacopyOption(opts []string) bool {
-	for _, s := range opts {
-		if s == "metacopy=on" {
-			return true
-		}
-	}
-	return false
-}
-
-func stripOption(opts []string, option string) []string {
-	for i, s := range opts {
-		if s == option {
-			return stripOption(append(opts[:i], opts[i+1:]...), option)
-		}
-	}
-	return opts
-}
-
-func hasVolatileOption(opts []string) bool {
-	for _, s := range opts {
-		if s == "volatile" {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(opts, "metacopy=on")
 }

 func getMountProgramFlagFile(path string) string {
@@ -1526,14 +1504,13 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 				logrus.Debugf("Ignoring global metacopy option, the mount program doesn't support it")
 			}
 		}
-		optsList = stripOption(optsList, "metacopy=on")
+		optsList = slices.DeleteFunc(optsList, func(opt string) bool {
+			return opt == "metacopy=on"
+		})
 	}

-	for _, o := range optsList {
-		if o == "ro" {
-			readWrite = false
-			break
-		}
+	if slices.Contains(optsList, "ro") {
+		readWrite = false
 	}

 	lowers, err := os.ReadFile(path.Join(dir, lowerFile))
@@ -1732,7 +1709,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		optsList = append(optsList, "userxattr")
 	}

-	if options.Volatile && !hasVolatileOption(optsList) {
+	if options.Volatile && !slices.Contains(optsList, "volatile") {
 		supported, err := d.getSupportsVolatile()
 		if err != nil {
 			return "", err
@@ -1896,7 +1873,9 @@ func (d *Driver) getMergedDir(id, dir string, inAdditionalStore bool) string {
 	// and since the rundir cannot be shared for different stores, it is safe to assume the
 	// current process has exclusive access to it.
 	//
-	// LOCKING BUG? the .DiffSize operation does not currently hold an exclusive lock on the primary store.
+	// TODO: LOCKING BUG: the .DiffSize operation does not currently hold an exclusive lock on the primary store.
+	// (_Some_ of the callers might be better ported to use a metadata-only size computation instead of DiffSize,
+	// but DiffSize probably needs to remain for computing sizes of container’s RW layers.)
 	if inAdditionalStore {
 		return path.Join(d.runhome, id, "merged")
 	}
@@ -2187,7 +2166,7 @@ func supportsDataOnlyLayersCached(home, runhome string) (bool, error) {
 }

 // ApplyDiffWithDiffer applies the changes in the new layer using the specified function
-func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) {
+func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) {
 	var idMappings *idtools.IDMappings
 	var forceMask *os.FileMode
@@ -2205,44 +2184,36 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
 	var applyDir string

-	if id == "" {
-		stagingDir := d.getStagingDir(id)
-		err := os.MkdirAll(stagingDir, 0o700)
-		if err != nil && !os.IsExist(err) {
-			return graphdriver.DriverWithDifferOutput{}, err
-		}
-		layerDir, err := os.MkdirTemp(stagingDir, "")
-		if err != nil {
-			return graphdriver.DriverWithDifferOutput{}, err
-		}
-		perms := defaultPerms
-		if forceMask != nil {
-			perms = *forceMask
-		}
-		applyDir = filepath.Join(layerDir, "dir")
-		if err := os.Mkdir(applyDir, perms); err != nil {
-			return graphdriver.DriverWithDifferOutput{}, err
-		}
-
-		lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile))
-		if err != nil {
-			return graphdriver.DriverWithDifferOutput{}, err
-		}
-		defer func() {
-			if errRet != nil {
-				delete(d.stagingDirsLocks, layerDir)
-				lock.Unlock()
-			}
-		}()
-		d.stagingDirsLocks[layerDir] = lock
-		lock.Lock()
-	} else {
-		var err error
-		applyDir, err = d.getDiffPath(id)
-		if err != nil {
-			return graphdriver.DriverWithDifferOutput{}, err
-		}
+	stagingDir := d.getStagingDir("")
+	err := os.MkdirAll(stagingDir, 0o700)
+	if err != nil && !os.IsExist(err) {
+		return graphdriver.DriverWithDifferOutput{}, err
+	}
+	layerDir, err := os.MkdirTemp(stagingDir, "")
+	if err != nil {
+		return graphdriver.DriverWithDifferOutput{}, err
+	}
+	perms := defaultPerms
+	if forceMask != nil {
+		perms = *forceMask
+	}
+	applyDir = filepath.Join(layerDir, "dir")
+	if err := os.Mkdir(applyDir, perms); err != nil {
+		return graphdriver.DriverWithDifferOutput{}, err
+	}
+
+	lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile))
+	if err != nil {
+		return graphdriver.DriverWithDifferOutput{}, err
+	}
+	defer func() {
+		if errRet != nil {
+			delete(d.stagingDirsLocks, layerDir)
+			lock.Unlock()
+		}
+	}()
+	d.stagingDirsLocks[layerDir] = lock
+	lock.Lock()

 	logrus.Debugf("Applying differ in %s", applyDir)

@@ -764,8 +764,8 @@ func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64,
 	buf := bufio.NewWriter(nil)
 	for err == nil {
 		base := path.Base(hdr.Name)
-		if strings.HasPrefix(base, archive.WhiteoutPrefix) {
-			name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):])
+		if rm, ok := strings.CutPrefix(base, archive.WhiteoutPrefix); ok {
+			name := path.Join(path.Dir(hdr.Name), rm)
 			err = w.Remove(filepath.FromSlash(name))
 			if err != nil {
 				return 0, err

@@ -2,8 +2,10 @@ package storage

 import (
 	"fmt"
+	"maps"
 	"os"
 	"path/filepath"
+	"slices"
 	"strings"
 	"sync"
 	"time"
@@ -181,18 +183,18 @@ func copyImage(i *Image) *Image {
 	return &Image{
 		ID:              i.ID,
 		Digest:          i.Digest,
-		Digests:         copyDigestSlice(i.Digests),
-		Names:           copyStringSlice(i.Names),
-		NamesHistory:    copyStringSlice(i.NamesHistory),
+		Digests:         slices.Clone(i.Digests),
+		Names:           slices.Clone(i.Names),
+		NamesHistory:    slices.Clone(i.NamesHistory),
 		TopLayer:        i.TopLayer,
-		MappedTopLayers: copyStringSlice(i.MappedTopLayers),
+		MappedTopLayers: slices.Clone(i.MappedTopLayers),
 		Metadata:        i.Metadata,
-		BigDataNames:    copyStringSlice(i.BigDataNames),
-		BigDataSizes:    copyStringInt64Map(i.BigDataSizes),
-		BigDataDigests:  copyStringDigestMap(i.BigDataDigests),
+		BigDataNames:    slices.Clone(i.BigDataNames),
+		BigDataSizes:    maps.Clone(i.BigDataSizes),
+		BigDataDigests:  maps.Clone(i.BigDataDigests),
 		Created:         i.Created,
 		ReadOnly:        i.ReadOnly,
-		Flags:           copyStringInterfaceMap(i.Flags),
+		Flags:           maps.Clone(i.Flags),
 	}
 }

@@ -863,12 +865,6 @@ func (r *imageStore) Delete(id string) error {
 		return fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
 	}
 	id = image.ID
-	toDeleteIndex := -1
-	for i, candidate := range r.images {
-		if candidate.ID == id {
-			toDeleteIndex = i
-		}
-	}
 	delete(r.byid, id)
 	// This can only fail if the ID is already missing, which shouldn’t happen — and in that case the index is already in the desired state anyway.
 	// The store’s Delete method is used on various paths to recover from failures, so this should be robust against partially missing data.
@@ -877,21 +873,18 @@ func (r *imageStore) Delete(id string) error {
 		delete(r.byname, name)
 	}
 	for _, digest := range image.Digests {
-		prunedList := imageSliceWithoutValue(r.bydigest[digest], image)
+		prunedList := slices.DeleteFunc(r.bydigest[digest], func(i *Image) bool {
+			return i == image
+		})
 		if len(prunedList) == 0 {
 			delete(r.bydigest, digest)
 		} else {
 			r.bydigest[digest] = prunedList
 		}
 	}
-	if toDeleteIndex != -1 {
-		// delete the image at toDeleteIndex
-		if toDeleteIndex == len(r.images)-1 {
-			r.images = r.images[:len(r.images)-1]
-		} else {
-			r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
-		}
-	}
+	r.images = slices.DeleteFunc(r.images, func(candidate *Image) bool {
+		return candidate.ID == id
+	})
 	if err := r.Save(); err != nil {
 		return err
 	}
@@ -977,17 +970,6 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) {
 	return copyStringSlice(image.BigDataNames), nil
 }

-func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
-	modified := make([]*Image, 0, len(slice))
-	for _, v := range slice {
-		if v == value {
-			continue
-		}
-		modified = append(modified, v)
-	}
-	return modified
-}
-
 // Requires startWriting.
 func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
 	if !r.lockfile.IsReadWrite() {
@@ -1037,21 +1019,16 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest
 	if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
 		save = true
 	}
-	addName := true
-	for _, name := range image.BigDataNames {
-		if name == key {
-			addName = false
-			break
-		}
-	}
-	if addName {
+	if !slices.Contains(image.BigDataNames, key) {
 		image.BigDataNames = append(image.BigDataNames, key)
 		save = true
 	}
 	for _, oldDigest := range image.Digests {
 		// remove the image from the list of images in the digest-based index
 		if list, ok := r.bydigest[oldDigest]; ok {
-			prunedList := imageSliceWithoutValue(list, image)
+			prunedList := slices.DeleteFunc(list, func(i *Image) bool {
+				return i == image
+			})
 			if len(prunedList) == 0 {
 				delete(r.bydigest, oldDigest)
 			} else {
@@ -1066,9 +1043,7 @@ func (r *imageStore) setBigData(image *Image, key string, data []byte, newDigest
 	// add the image to the list of images in the digest-based index which
 	// corresponds to the new digest for this item, unless it's already there
 	list := r.bydigest[newDigest]
-	if len(list) == len(imageSliceWithoutValue(list, image)) {
-		// the list isn't shortened by trying to prune this image from it,
-		// so it's not in there yet
+	if !slices.Contains(list, image) {
 		r.bydigest[newDigest] = append(list, image)
 	}

@@ -5,10 +5,12 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"os"
 	"path"
 	"path/filepath"
 	"reflect"
+	"slices"
 	"sort"
 	"strings"
 	"sync"
@@ -312,9 +314,8 @@ type rwLayerStore interface {
 	// applies its changes to a specified layer.
 	ApplyDiff(to string, diff io.Reader) (int64, error)

-	// ApplyDiffWithDiffer applies the changes through the differ callback function.
-	// If to is the empty string, then a staging directory is created by the driver.
-	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+	// applyDiffWithDifferNoLock applies the changes through the differ callback function.
+	applyDiffWithDifferNoLock(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)

 	// CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors
 	CleanupStagingDirectory(stagingDirectory string) error
@@ -435,7 +436,7 @@ func layerLocation(l *Layer) layerLocations {
 func copyLayer(l *Layer) *Layer {
 	return &Layer{
 		ID:         l.ID,
-		Names:      copyStringSlice(l.Names),
+		Names:      slices.Clone(l.Names),
 		Parent:     l.Parent,
 		Metadata:   l.Metadata,
 		MountLabel: l.MountLabel,
@@ -450,8 +451,8 @@ func copyLayer(l *Layer) *Layer {
 		CompressionType: l.CompressionType,
 		ReadOnly:        l.ReadOnly,
 		volatileStore:   l.volatileStore,
-		BigDataNames:    copyStringSlice(l.BigDataNames),
-		Flags:           copyStringInterfaceMap(l.Flags),
+		BigDataNames:    slices.Clone(l.BigDataNames),
+		Flags:           maps.Clone(l.Flags),
 		UIDMap:          copyIDMap(l.UIDMap),
 		GIDMap:          copyIDMap(l.GIDMap),
 		UIDs:            copyUint32Slice(l.UIDs),
@@ -1372,7 +1373,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
 		templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize
 		templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
 		templateCompressionType = templateLayer.CompressionType
-		templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
+		templateUIDs, templateGIDs = slices.Clone(templateLayer.UIDs), slices.Clone(templateLayer.GIDs)
 		templateTSdata, err = os.ReadFile(r.tspath(templateLayer.ID))
 		if err != nil && !errors.Is(err, os.ErrNotExist) {
 			return nil, -1, err
@@ -1564,19 +1565,9 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
 	// - r.layers[].MountPoint (directly and via loadMounts / saveMounts)
 	// - r.bymount (via loadMounts / saveMounts)

-	// check whether options include ro option
-	hasReadOnlyOpt := func(opts []string) bool {
-		for _, item := range opts {
-			if item == "ro" {
-				return true
-			}
-		}
-		return false
-	}
-
 	// You are not allowed to mount layers from readonly stores if they
 	// are not mounted read/only.
-	if !r.lockfile.IsReadWrite() && !hasReadOnlyOpt(options.Options) {
+	if !r.lockfile.IsReadWrite() && !slices.Contains(options.Options, "ro") {
 		return "", fmt.Errorf("not allowed to update mount locations for layers at %q: %w", r.mountspath(), ErrStoreIsReadOnly)
 	}
 	r.mountsLockfile.Lock()
@@ -1836,14 +1827,7 @@ func (r *layerStore) setBigData(layer *Layer, key string, data io.Reader) error
 		return fmt.Errorf("closing bigdata file for the layer: %w", err)
 	}

-	addName := true
-	for _, name := range layer.BigDataNames {
-		if name == key {
-			addName = false
-			break
-		}
-	}
-	if addName {
+	if !slices.Contains(layer.BigDataNames, key) {
 		layer.BigDataNames = append(layer.BigDataNames, key)
 		return r.saveFor(layer)
 	}
@@ -1938,32 +1922,13 @@ func (r *layerStore) deleteInternal(id string) error {
 		delete(r.bymount, layer.MountPoint)
 	}
 	r.deleteInDigestMap(id)
-	toDeleteIndex := -1
-	for i, candidate := range r.layers {
-		if candidate.ID == id {
-			toDeleteIndex = i
-			break
-		}
-	}
-	if toDeleteIndex != -1 {
-		// delete the layer at toDeleteIndex
-		if toDeleteIndex == len(r.layers)-1 {
-			r.layers = r.layers[:len(r.layers)-1]
-		} else {
-			r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
-		}
-	}
-	if mountLabel != "" {
-		var found bool
-		for _, candidate := range r.layers {
-			if candidate.MountLabel == mountLabel {
-				found = true
-				break
-			}
-		}
-		if !found {
-			selinux.ReleaseLabel(mountLabel)
-		}
+	r.layers = slices.DeleteFunc(r.layers, func(candidate *Layer) bool {
+		return candidate.ID == id
+	})
+	if mountLabel != "" && !slices.ContainsFunc(r.layers, func(candidate *Layer) bool {
+		return candidate.MountLabel == mountLabel
+	}) {
+		selinux.ReleaseLabel(mountLabel)
 	}
 	return nil
 }
@@ -1971,21 +1936,15 @@ func (r *layerStore) deleteInternal(id string) error {
 // Requires startWriting.
 func (r *layerStore) deleteInDigestMap(id string) {
 	for digest, layers := range r.bycompressedsum {
-		for i, layerID := range layers {
-			if layerID == id {
-				layers = append(layers[:i], layers[i+1:]...)
-				r.bycompressedsum[digest] = layers
-				break
-			}
+		if i := slices.Index(layers, id); i != -1 {
+			layers = slices.Delete(layers, i, i+1)
+			r.bycompressedsum[digest] = layers
 		}
 	}
 	for digest, layers := range r.byuncompressedsum {
-		for i, layerID := range layers {
-			if layerID == id {
-				layers = append(layers[:i], layers[i+1:]...)
-				r.byuncompressedsum[digest] = layers
-				break
-			}
+		if i := slices.Index(layers, id); i != -1 {
+			layers = slices.Delete(layers, i, i+1)
+			r.byuncompressedsum[digest] = layers
 		}
 	}
 }
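
Note: slices.Index plus slices.Delete is the stdlib spelling of "find the first occurrence and splice it out"; Delete removes the half-open range [i, i+1). A standalone sketch mirroring the digest-map cleanup above:

    package main

    import (
    	"fmt"
    	"slices"
    )

    func main() {
    	byDigest := map[string][]string{"sha256:abc": {"l1", "l2", "l3"}}
    	id := "l2"
    	for digest, layers := range byDigest {
    		if i := slices.Index(layers, id); i != -1 {
    			byDigest[digest] = slices.Delete(layers, i, i+1)
    		}
    	}
    	fmt.Println(byDigest) // map[sha256:abc:[l1 l3]]
    }
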
@@ -2545,9 +2504,7 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
 		if layer.Flags == nil {
 			layer.Flags = make(map[string]interface{})
 		}
-		for k, v := range options.Flags {
-			layer.Flags[k] = v
-		}
+		maps.Copy(layer.Flags, options.Flags)
 	}
 	if err = r.saveFor(layer); err != nil {
 		return err
@@ -2585,37 +2542,14 @@ func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *driver
 	return err
 }

-// Requires startWriting.
-func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
+// It must be called without any c/storage locks held to allow differ to make c/storage calls.
+func (r *layerStore) applyDiffWithDifferNoLock(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
 	ddriver, ok := r.driver.(drivers.DriverWithDiffer)
 	if !ok {
 		return nil, ErrNotSupported
 	}
-
-	if to == "" {
-		output, err := ddriver.ApplyDiffWithDiffer("", "", options, differ)
-		return &output, err
-	}
-
-	layer, ok := r.lookup(to)
-	if !ok {
-		return nil, ErrLayerUnknown
-	}
-	if options == nil {
-		options = &drivers.ApplyDiffWithDifferOpts{
-			ApplyDiffOpts: drivers.ApplyDiffOpts{
-				Mappings:   r.layerMappings(layer),
-				MountLabel: layer.MountLabel,
-			},
-		}
-	}
-	output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ)
-	if err != nil {
-		return nil, err
-	}
-	layer.UIDs = output.UIDs
-	layer.GIDs = output.GIDs
-	err = r.saveFor(layer)
+	output, err := ddriver.ApplyDiffWithDiffer(options, differ)
 	return &output, err
 }

@@ -124,8 +124,7 @@ func (overlayWhiteoutConverter) ConvertReadWithHandler(hdr *tar.Header, path str
 	}

 	// if a file was deleted and we are using overlay, we need to create a character device
-	if strings.HasPrefix(base, WhiteoutPrefix) {
-		originalBase := base[len(WhiteoutPrefix):]
+	if originalBase, ok := strings.CutPrefix(base, WhiteoutPrefix); ok {
 		originalPath := filepath.Join(dir, originalBase)

 		if err := handler.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {

@@ -5,6 +5,7 @@ import (
 	"bytes"
 	"fmt"
 	"io"
+	"maps"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -97,8 +98,7 @@ func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
 	f := filepath.Base(path)

 	// If there is a whiteout, then the file was removed
-	if strings.HasPrefix(f, WhiteoutPrefix) {
-		originalFile := f[len(WhiteoutPrefix):]
+	if originalFile, ok := strings.CutPrefix(f, WhiteoutPrefix); ok {
 		return filepath.Join(filepath.Dir(path), originalFile), nil
 	}

@@ -319,9 +319,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
 	// otherwise any previous delete/change is considered recursive
 	oldChildren := make(map[string]*FileInfo)
 	if oldInfo != nil && info.isDir() {
-		for k, v := range oldInfo.children {
-			oldChildren[k] = v
-		}
+		maps.Copy(oldChildren, oldInfo.children)
 	}

 	for name, newChild := range info.children {

@@ -31,7 +31,7 @@ func collectFileInfoForChanges(oldDir, newDir string, oldIDMap, newIDMap *idtool
 	}()

 	// block until both routines have returned
-	for i := 0; i < 2; i++ {
+	for range 2 {
 		if err := <-errs; err != nil {
 			return nil, nil, err
 		}
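
Note: `for range 2` is Go 1.22's range-over-int form; it runs the body exactly N times with no loop variable, which is all the replaced counter loop needed. This presumably also explains why the CI image in this same update moves to golang:1.22. A standalone sketch:

    package main

    import "fmt"

    func main() {
    	errs := make(chan error, 2)
    	errs <- nil
    	errs <- nil
    	// Block until both producers have reported, without tracking an index.
    	for range 2 {
    		if err := <-errs; err != nil {
    			fmt.Println("error:", err)
    		}
    	}
    	fmt.Println("both done")
    }
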
@@ -80,9 +80,9 @@ func parseFileFlags(fflags string) (uint32, uint32, error) {
 	var set, clear uint32 = 0, 0
 	for _, fflag := range strings.Split(fflags, ",") {
 		isClear := false
-		if strings.HasPrefix(fflag, "no") {
+		if clean, ok := strings.CutPrefix(fflag, "no"); ok {
 			isClear = true
-			fflag = strings.TrimPrefix(fflag, "no")
+			fflag = clean
 		}
 		if value, ok := flagNameToValue[fflag]; ok {
 			if isClear {

@@ -8,7 +8,7 @@ import (
 	"path/filepath"

 	"github.com/containers/storage/pkg/mount"
-	"github.com/syndtr/gocapability/capability"
+	"github.com/moby/sys/capability"
 	"golang.org/x/sys/unix"
 )

@@ -65,11 +65,10 @@ type layer struct {
 }

 type layersCache struct {
-	layers  []*layer
-	refs    int
-	store   storage.Store
-	mutex   sync.RWMutex
-	created time.Time
+	layers []*layer
+	refs   int
+	store  storage.Store
+	mutex  sync.RWMutex
 }

 var (
@@ -83,6 +82,7 @@ func (c *layer) release() {
 		if err := unix.Munmap(c.mmapBuffer); err != nil {
 			logrus.Warnf("Error Munmap: layer %q: %v", c.id, err)
 		}
+		c.mmapBuffer = nil
 	}
 }
@@ -107,14 +107,13 @@ func (c *layersCache) release() {
 func getLayersCacheRef(store storage.Store) *layersCache {
 	cacheMutex.Lock()
 	defer cacheMutex.Unlock()
-	if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 {
+	if cache != nil && cache.store == store {
 		cache.refs++
 		return cache
 	}
-	cache := &layersCache{
-		store:   store,
-		refs:    1,
-		created: time.Now(),
+	cache = &layersCache{
+		store: store,
+		refs:  1,
 	}
 	return cache
 }
@@ -291,7 +290,7 @@ func (c *layersCache) load() error {
 		if r.ReadOnly {
 			// If the layer is coming from a read-only store, do not attempt
 			// to write to it.
-			// Therefore,we won’t find any matches in read-only-store layers,
+			// Therefore, we won’t find any matches in read-only-store layers,
 			// unless the read-only store layer comes prepopulated with cacheKey data.
 			continue
 		}
@@ -781,14 +780,14 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64,
 		return "", "", -1, nil
 	}

-	c.mutex.RLock()
-	defer c.mutex.RUnlock()
-
 	binaryDigest, err := makeBinaryDigest(digest)
 	if err != nil {
 		return "", "", 0, err
 	}

+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
 	for _, layer := range c.layers {
 		if !layer.cacheFile.bloomFilter.maybeContains(binaryDigest) {
 			continue

@@ -1331,7 +1331,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
 		wg.Wait()
 	}()

-	for i := 0; i < copyGoRoutines; i++ {
+	for range copyGoRoutines {
 		wg.Add(1)
 		jobs := copyFileJobs

@@ -4,8 +4,8 @@
 package directory

 import (
+	"errors"
+	"io/fs"
 	"os"
 	"path/filepath"
 	"syscall"
 )
@@ -27,7 +27,7 @@ func Usage(dir string) (usage *DiskUsage, err error) {
 		if err != nil {
 			// if dir does not exist, Usage() returns the error.
 			// if dir/x disappeared while walking, Usage() ignores dir/x.
-			if os.IsNotExist(err) && d != dir {
+			if errors.Is(err, fs.ErrNotExist) && d != dir {
 				return nil
 			}
 			return err
@@ -35,6 +35,9 @@ func Usage(dir string) (usage *DiskUsage, err error) {

 		fileInfo, err := entry.Info()
 		if err != nil {
+			if errors.Is(err, fs.ErrNotExist) {
+				return nil
+			}
 			return err
 		}
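
Note: errors.Is(err, fs.ErrNotExist) is the documented modern replacement for os.IsNotExist; it walks the whole error-wrap chain rather than inspecting one concrete error type. A standalone sketch:

    package main

    import (
    	"errors"
    	"fmt"
    	"io/fs"
    	"os"
    )

    func main() {
    	_, err := os.Stat("/does/not/exist")
    	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true
    	// A wrapped error still matches, which os.IsNotExist would miss:
    	wrapped := fmt.Errorf("walking: %w", err)
    	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true
    	fmt.Println(os.IsNotExist(wrapped))             // false
    }
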
@@ -4,8 +4,8 @@
 package directory

 import (
+	"errors"
+	"io/fs"
 	"os"
 	"path/filepath"
 )
@@ -25,7 +25,7 @@ func Usage(dir string) (usage *DiskUsage, err error) {
 		if err != nil {
 			// if dir does not exist, Size() returns the error.
 			// if dir/x disappeared while walking, Size() ignores dir/x.
-			if os.IsNotExist(err) && path != dir {
+			if errors.Is(err, fs.ErrNotExist) && path != dir {
 				return nil
 			}
 			return err
@@ -40,6 +40,9 @@ func Usage(dir string) (usage *DiskUsage, err error) {

 		fileInfo, err := d.Info()
 		if err != nil {
+			if errors.Is(err, fs.ErrNotExist) {
+				return nil
+			}
 			return err
 		}
 		usage.Size += fileInfo.Size()

@@ -97,14 +97,14 @@ func MergeTmpfsOptions(options []string) ([]string, error) {
 			}
 			continue
 		}
-		opt := strings.SplitN(option, "=", 2)
-		if len(opt) != 2 || !validFlags[opt[0]] {
+		opt, _, ok := strings.Cut(option, "=")
+		if !ok || !validFlags[opt] {
 			return nil, fmt.Errorf("invalid tmpfs option %q", opt)
 		}
-		if !dataCollisions[opt[0]] {
+		if !dataCollisions[opt] {
 			// We prepend the option and add to collision map
 			newOptions = append([]string{option}, newOptions...)
-			dataCollisions[opt[0]] = true
+			dataCollisions[opt] = true
 		}
 	}

@@ -140,8 +140,8 @@ func ParseOptions(options string) (int, string) {
 func ParseTmpfsOptions(options string) (int, string, error) {
 	flags, data := ParseOptions(options)
 	for _, o := range strings.Split(data, ",") {
-		opt := strings.SplitN(o, "=", 2)
-		if !validFlags[opt[0]] {
+		opt, _, _ := strings.Cut(o, "=")
+		if !validFlags[opt] {
 			return 0, "", fmt.Errorf("invalid tmpfs option %q", opt)
 		}
 	}

@@ -40,13 +40,9 @@ func mount(device, target, mType string, flag uintptr, data string) error {
 			isNullFS = true
 			continue
 		}
-		opt := strings.SplitN(x, "=", 2)
-		options = append(options, opt[0])
-		if len(opt) == 2 {
-			options = append(options, opt[1])
-		} else {
-			options = append(options, "")
-		}
+		name, val, _ := strings.Cut(x, "=")
+		options = append(options, name)
+		options = append(options, val)
 	}
 }

@@ -11,7 +11,7 @@ import (

 func unmount(target string, flags int) error {
 	var err error
-	for i := 0; i < 50; i++ {
+	for range 50 {
 		err = unix.Unmount(target, flags)
 		switch err {
 		case unix.EBUSY:

@@ -11,11 +11,11 @@ import (

 // ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value)
 func ParseKeyValueOpt(opt string) (string, string, error) {
-	parts := strings.SplitN(opt, "=", 2)
-	if len(parts) != 2 {
+	k, v, ok := strings.Cut(opt, "=")
+	if !ok {
 		return "", "", fmt.Errorf("unable to parse key/value option: %s", opt)
 	}
-	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
+	return strings.TrimSpace(k), strings.TrimSpace(v), nil
 }

 // ParseUintList parses and validates the specified string as the value
@@ -42,19 +42,19 @@ func ParseUintList(val string) (map[int]bool, error) {
 	errInvalidFormat := fmt.Errorf("invalid format: %s", val)

 	for _, r := range split {
-		if !strings.Contains(r, "-") {
+		minS, maxS, ok := strings.Cut(r, "-")
+		if !ok {
 			v, err := strconv.Atoi(r)
 			if err != nil {
 				return nil, errInvalidFormat
 			}
 			availableInts[v] = true
 		} else {
-			split := strings.SplitN(r, "-", 2)
-			min, err := strconv.Atoi(split[0])
+			min, err := strconv.Atoi(minS)
 			if err != nil {
 				return nil, errInvalidFormat
 			}
-			max, err := strconv.Atoi(split[1])
+			max, err := strconv.Atoi(maxS)
 			if err != nil {
 				return nil, errInvalidFormat
 			}

@@ -3,7 +3,7 @@ package stringutils

 import (
 	"bytes"
-	"math/rand"
+	"math/rand/v2"
 	"strings"
 )
@@ -13,7 +13,7 @@ func GenerateRandomAlphaOnlyString(n int) string {
 	letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
 	b := make([]byte, n)
 	for i := range b {
-		b[i] = letters[rand.Intn(len(letters))]
+		b[i] = letters[rand.IntN(len(letters))]
 	}
 	return string(b)
 }
@@ -25,7 +25,7 @@ func GenerateRandomASCIIString(n int) string {
 		"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
 	res := make([]byte, n)
 	for i := 0; i < n; i++ {
-		res[i] = chars[rand.Intn(len(chars))]
+		res[i] = chars[rand.IntN(len(chars))]
 	}
 	return string(res)
 }
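
Note: math/rand/v2 renames Intn to IntN and auto-seeds its global generator from OS entropy, so there is no deterministic default sequence to forget to seed. IntN still panics when n <= 0. A standalone sketch:

    package main

    import (
    	"fmt"
    	"math/rand/v2"
    )

    func main() {
    	letters := []byte("abcdefghijklmnopqrstuvwxyz")
    	b := make([]byte, 8)
    	for i := range b {
    		b[i] = letters[rand.IntN(len(letters))]
    	}
    	fmt.Println(string(b)) // e.g. "qhzmwbka"
    }
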
@@ -21,9 +21,9 @@ import (

 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/reexec"
+	"github.com/moby/sys/capability"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/sirupsen/logrus"
-	"github.com/syndtr/gocapability/capability"
 )

 // Cmd wraps an exec.Cmd created by the reexec package in unshare(), and

@@ -6,9 +6,11 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"os"
 	"path/filepath"
 	"reflect"
+	"slices"
 	"strings"
 	"sync"
 	"syscall"
@@ -339,11 +341,17 @@ type Store interface {
 	// }
 	ApplyDiff(to string, diff io.Reader) (int64, error)

-	// ApplyDiffer applies a diff to a layer.
+	// ApplyDiffWithDiffer applies a diff to a layer.
 	// It is the caller responsibility to clean the staging directory if it is not
-	// successfully applied with ApplyDiffFromStagingDirectory.
+	// successfully applied with ApplyStagedLayer.
+	// Deprecated: Use PrepareStagedLayer instead. ApplyDiffWithDiffer is going to be removed in a future release
 	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)

+	// PrepareStagedLayer applies a diff to a layer.
+	// It is the caller responsibility to clean the staging directory if it is not
+	// successfully applied with ApplyStagedLayer.
+	PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
+
 	// ApplyStagedLayer combines the functions of creating a layer and using the staging
 	// directory to populate it.
 	// It marks the layer for automatic removal if applying the diff fails for any reason.
@@ -939,9 +947,7 @@ func (s *store) GraphOptions() []string {

 func (s *store) PullOptions() map[string]string {
 	cp := make(map[string]string, len(s.pullOptions))
-	for k, v := range s.pullOptions {
-		cp[k] = v
-	}
+	maps.Copy(cp, s.pullOptions)
 	return cp
 }
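
Note: maps.Copy inserts every key/value of the source into the destination, overwriting existing keys — exactly what the removed range loop did. A standalone sketch of the copy-before-returning pattern in PullOptions (the option key here is illustrative):

    package main

    import (
    	"fmt"
    	"maps"
    )

    func main() {
    	pullOptions := map[string]string{"enable_partial_images": "true"}
    	// Copy into a fresh map so callers cannot mutate internal state.
    	cp := make(map[string]string, len(pullOptions))
    	maps.Copy(cp, pullOptions)
    	cp["enable_partial_images"] = "false"
    	fmt.Println(pullOptions) // map[enable_partial_images:true]
    }
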
@@ -1464,7 +1470,7 @@ func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, pare
 	if lOptions != nil {
 		options = *lOptions
 		options.BigData = copyLayerBigDataOptionSlice(lOptions.BigData)
-		options.Flags = copyStringInterfaceMap(lOptions.Flags)
+		options.Flags = maps.Clone(lOptions.Flags)
 	}
 	if options.HostUIDMapping {
 		options.UIDMap = nil
@@ -1605,7 +1611,7 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i
 				CreationDate: i.Created,
 				Digest:       i.Digest,
 				Digests:      copyDigestSlice(i.Digests),
-				NamesHistory: copyStringSlice(i.NamesHistory),
+				NamesHistory: slices.Clone(i.NamesHistory),
 			}
 			for _, key := range i.BigDataNames {
 				data, err := store.BigData(id, key)
@@ -1622,7 +1628,7 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i
 					Digest: dataDigest,
 				})
 			}
-			namesToAddAfterCreating = dedupeStrings(append(append([]string{}, i.Names...), names...))
+			namesToAddAfterCreating = dedupeStrings(slices.Concat(i.Names, names))
 			break
 		}
 	}
@@ -1636,18 +1642,16 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, i
 		if iOptions.Digest != "" {
 			options.Digest = iOptions.Digest
 		}
-		options.Digests = append(options.Digests, copyDigestSlice(iOptions.Digests)...)
+		options.Digests = append(options.Digests, iOptions.Digests...)
 		if iOptions.Metadata != "" {
 			options.Metadata = iOptions.Metadata
 		}
 		options.BigData = append(options.BigData, copyImageBigDataOptionSlice(iOptions.BigData)...)
-		options.NamesHistory = append(options.NamesHistory, copyStringSlice(iOptions.NamesHistory)...)
+		options.NamesHistory = append(options.NamesHistory, iOptions.NamesHistory...)
 		if options.Flags == nil {
 			options.Flags = make(map[string]interface{})
 		}
-		for k, v := range iOptions.Flags {
-			options.Flags[k] = v
-		}
+		maps.Copy(options.Flags, iOptions.Flags)
 	}

 	if options.CreationDate.IsZero() {
@@ -1782,7 +1786,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
 		options.IDMappingOptions.UIDMap = copyIDMap(cOptions.IDMappingOptions.UIDMap)
 		options.IDMappingOptions.GIDMap = copyIDMap(cOptions.IDMappingOptions.GIDMap)
 		options.LabelOpts = copyStringSlice(cOptions.LabelOpts)
-		options.Flags = copyStringInterfaceMap(cOptions.Flags)
+		options.Flags = maps.Clone(cOptions.Flags)
 		options.MountOpts = copyStringSlice(cOptions.MountOpts)
 		options.StorageOpt = copyStringStringMap(cOptions.StorageOpt)
 		options.BigData = copyContainerBigDataOptionSlice(cOptions.BigData)
@@ -3105,13 +3109,19 @@ func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) e
 	return err
 }

+func (s *store) PrepareStagedLayer(options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
+	rlstore, err := s.getLayerStore()
+	if err != nil {
+		return nil, err
+	}
+	return rlstore.applyDiffWithDifferNoLock(options, differ)
+}
+
 func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
-	return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) {
-		if to != "" && !rlstore.Exists(to) {
-			return nil, ErrLayerUnknown
-		}
-		return rlstore.ApplyDiffWithDiffer(to, options, differ)
-	})
+	if to != "" {
+		return nil, fmt.Errorf("ApplyDiffWithDiffer does not support non-empty 'layer' parameter")
+	}
+	return s.PrepareStagedLayer(options, differ)
 }

 func (s *store) DifferTarget(id string) (string, error) {
@@ -3683,22 +3693,6 @@ func copyStringSlice(slice []string) []string {
 	return ret
 }

-func copyStringInt64Map(m map[string]int64) map[string]int64 {
-	ret := make(map[string]int64, len(m))
-	for k, v := range m {
-		ret[k] = v
-	}
-	return ret
-}
-
-func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest {
-	ret := make(map[string]digest.Digest, len(m))
-	for k, v := range m {
-		ret[k] = v
-	}
-	return ret
-}
-
 func copyStringStringMap(m map[string]string) map[string]string {
 	ret := make(map[string]string, len(m))
 	for k, v := range m {
@@ -3736,7 +3730,7 @@ func copyImageBigDataOptionSlice(slice []ImageBigDataOption) []ImageBigDataOptio
 	ret := make([]ImageBigDataOption, len(slice))
 	for i := range slice {
 		ret[i].Key = slice[i].Key
-		ret[i].Data = append([]byte{}, slice[i].Data...)
+		ret[i].Data = slices.Clone(slice[i].Data)
 		ret[i].Digest = slice[i].Digest
 	}
 	return ret
@@ -3746,7 +3740,7 @@ func copyContainerBigDataOptionSlice(slice []ContainerBigDataOption) []Container
 	ret := make([]ContainerBigDataOption, len(slice))
 	for i := range slice {
 		ret[i].Key = slice[i].Key
-		ret[i].Data = append([]byte{}, slice[i].Data...)
+		ret[i].Data = slices.Clone(slice[i].Data)
 	}
 	return ret
 }
@@ -3800,10 +3794,8 @@ func GetMountOptions(driver string, graphDriverOptions []string) ([]string, erro
 			return nil, err
 		}
 		key = strings.ToLower(key)
-		for _, m := range mountOpts {
-			if m == key {
-				return strings.Split(val, ","), nil
-			}
+		if slices.Contains(mountOpts, key) {
+			return strings.Split(val, ","), nil
 		}
 	}
 	return nil, nil
@@ -3811,11 +3803,8 @@ func GetMountOptions(driver string, graphDriverOptions []string) ([]string, erro

 // Free removes the store from the list of stores
 func (s *store) Free() {
-	for i := 0; i < len(stores); i++ {
-		if stores[i] == s {
-			stores = append(stores[:i], stores[i+1:]...)
-			return
-		}
+	if i := slices.Index(stores, s); i != -1 {
+		stores = slices.Delete(stores, i, i+1)
 	}
 }

@@ -344,8 +344,8 @@ func getRootlessStorageOpts(systemOpts StoreOptions) (StoreOptions, error) {
 	dirEntries, err := os.ReadDir(opts.GraphRoot)
 	if err == nil {
 		for _, entry := range dirEntries {
-			if strings.HasSuffix(entry.Name(), "-images") {
-				opts.GraphDriverName = strings.TrimSuffix(entry.Name(), "-images")
+			if name, ok := strings.CutSuffix(entry.Name(), "-images"); ok {
+				opts.GraphDriverName = name
 				break
 			}
 		}
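
Note: strings.CutSuffix mirrors CutPrefix at the other end of the string: it tests for and strips the suffix in one step. A standalone sketch of the graph-driver discovery above:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	entries := []string{"overlay-images", "tmp", "vfs-images"}
    	for _, e := range entries {
    		if name, ok := strings.CutSuffix(e, "-images"); ok {
    			fmt.Println("candidate graph driver:", name)
    			break // first match wins, as in the hunk above
    		}
    	}
    }
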
@@ -89,7 +89,7 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
 		passwdFile = filepath.Join(containerMount, "etc/passwd")
 	}
 	if groupFile == "" {
-		groupFile = filepath.Join(groupFile, "etc/group")
+		groupFile = filepath.Join(containerMount, "etc/group")
 	}

 	size := 0
@@ -99,14 +99,14 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
 		for _, u := range users {
 			// Skip the "nobody" user otherwise we end up with 65536
 			// ids with most images
-			if u.Name == "nobody" {
+			if u.Name == "nobody" || u.Name == "nogroup" {
 				continue
 			}
 			if u.Uid > size && u.Uid != nobodyUser {
-				size = u.Uid
+				size = u.Uid + 1
 			}
 			if u.Gid > size && u.Gid != nobodyUser {
-				size = u.Gid
+				size = u.Gid + 1
 			}
 		}
 	}
@@ -114,11 +114,11 @@ func parseMountedFiles(containerMount, passwdFile, groupFile string) uint32 {
 	groups, err := libcontainerUser.ParseGroupFile(groupFile)
 	if err == nil {
 		for _, g := range groups {
-			if g.Name == "nobody" {
+			if g.Name == "nobody" || g.Name == "nogroup" {
 				continue
 			}
 			if g.Gid > size && g.Gid != nobodyUser {
-				size = g.Gid
+				size = g.Gid + 1
 			}
 		}
 	}

@@ -2,6 +2,7 @@ package storage

 import (
 	"fmt"
+	"slices"

 	"github.com/containers/storage/types"
 )
@@ -41,22 +42,12 @@ func applyNameOperation(oldNames []string, opParameters []string, op updateNameO
 		// remove given names from old names
 		result = make([]string, 0, len(oldNames))
 		for _, name := range oldNames {
 			// only keep names in final result which do not intersect with input names
 			// basically `result = oldNames - opParameters`
-			nameShouldBeRemoved := false
-			for _, opName := range opParameters {
-				if name == opName {
-					nameShouldBeRemoved = true
-				}
-			}
-			if !nameShouldBeRemoved {
+			if !slices.Contains(opParameters, name) {
 				result = append(result, name)
 			}
 		}
 	case addNames:
-		result = make([]string, 0, len(opParameters)+len(oldNames))
-		result = append(result, opParameters...)
-		result = append(result, oldNames...)
+		result = slices.Concat(opParameters, oldNames)
 	default:
 		return result, errInvalidUpdateNameOperation
 	}
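
Note: slices.Concat (Go 1.22) allocates one result slice sized for all inputs and appends them in order — the same result as the make-plus-double-append sequence it replaces. A standalone sketch:

    package main

    import (
    	"fmt"
    	"slices"
    )

    func main() {
    	opParameters := []string{"new1", "new2"}
    	oldNames := []string{"old1"}
    	result := slices.Concat(opParameters, oldNames)
    	fmt.Println(result) // [new1 new2 old1]
    }
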
@ -16,6 +16,20 @@ This package provides various compression algorithms.
|
|||
|
||||
# changelog
|
||||
|
||||
* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)
|
||||
* s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949
|
||||
* flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963
|
||||
* Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971
|
||||
* zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951
|
||||
|
||||
* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8)
|
||||
* zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885
|
||||
* zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938
|
||||
|
||||
* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7)
|
||||
* s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927
|
||||
* s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930
|
||||
|
||||
* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
|
||||
* zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
|
||||
* s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925
|
||||
|
@ -81,7 +95,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
|
|||
* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
|
||||
* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
|
||||
* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
|
||||
* gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
|
||||
* gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
|
||||
|
||||
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
|
||||
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
@ -136,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
|
|||
* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
|
||||
* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
|
||||
* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
|
||||
* zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657
|
||||
* zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
|
||||
* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
|
||||
* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
|
||||
* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
@ -339,7 +353,7 @@ While the release has been extensively tested, it is recommended to testing when
|
|||
* s2: Fix binaries.
|
||||
|
||||
* Feb 25, 2021 (v1.11.8)
|
||||
* s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended.
|
||||
* s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
|
||||
* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
|
||||
* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
|
||||
* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
@ -518,7 +532,7 @@ While the release has been extensively tested, it is recommended to testing when
|
|||
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
|
||||
* Feb 19, 2016: Handle small payloads faster in level 1-3.
|
||||
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
|
||||
* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5.
|
||||
* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
|
||||
* Feb 14, 2016: Snappy: Merge upstream changes.
|
||||
* Feb 14, 2016: Snappy: Fix aggressive skipping.
|
||||
* Feb 14, 2016: Snappy: Update benchmark.
@ -861,7 +861,7 @@ func (d *compressor) reset(w io.Writer) {
|
|||
}
|
||||
switch d.compressionLevel.chain {
|
||||
case 0:
|
||||
// level was NoCompression or ConstantCompresssion.
|
||||
// level was NoCompression or ConstantCompression.
|
||||
d.windowEnd = 0
|
||||
default:
|
||||
s := d.state
@ -298,6 +298,14 @@ const (
|
|||
huffmanGenericReader
|
||||
)
|
||||
|
||||
// flushMode tells the decompressor when to return data
|
||||
type flushMode uint8
|
||||
|
||||
const (
|
||||
syncFlush flushMode = iota // return data after sync flush block
|
||||
partialFlush // return data after each block
|
||||
)
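// Note (inferred from this diff): the zero value of flushMode is syncFlush,
// so a zero-initialized decompressor keeps the existing behavior of only
// returning data at sync-flush boundaries; partialFlush must be opted into.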
|
||||
|
||||
// Decompress state.
|
||||
type decompressor struct {
|
||||
// Input source.
@ -332,6 +340,8 @@ type decompressor struct {
|
|||
|
||||
nb uint
|
||||
final bool
|
||||
|
||||
flushMode flushMode
|
||||
}
|
||||
|
||||
func (f *decompressor) nextBlock() {
@ -618,7 +628,10 @@ func (f *decompressor) dataBlock() {
|
|||
}
|
||||
|
||||
if n == 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
if f.flushMode == syncFlush {
|
||||
f.toRead = f.dict.readFlush()
|
||||
}
|
||||
|
||||
f.finishBlock()
|
||||
return
|
||||
}
@ -657,8 +670,12 @@ func (f *decompressor) finishBlock() {
|
|||
if f.dict.availRead() > 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
}
|
||||
|
||||
f.err = io.EOF
|
||||
} else if f.flushMode == partialFlush && f.dict.availRead() > 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
}
|
||||
|
||||
f.step = nextBlock
|
||||
}
@ -789,6 +806,41 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
type ReaderOpt func(*decompressor)
|
||||
|
||||
// WithPartialBlock tells the decompressor to return after each block,
// so it can read data written with partial flush.
|
||||
func WithPartialBlock() ReaderOpt {
|
||||
return func(f *decompressor) {
|
||||
f.flushMode = partialFlush
|
||||
}
|
||||
}
|
||||
|
||||
// WithDict initializes the reader with a preset dictionary
|
||||
func WithDict(dict []byte) ReaderOpt {
|
||||
return func(f *decompressor) {
|
||||
f.dict.init(maxMatchOffset, dict)
|
||||
}
|
||||
}
|
||||
|
||||
// NewReaderOpts returns a new reader with the provided options
|
||||
func NewReaderOpts(r io.Reader, opts ...ReaderOpt) io.ReadCloser {
|
||||
fixedHuffmanDecoderInit()
|
||||
|
||||
var f decompressor
|
||||
f.r = makeReader(r)
|
||||
f.bits = new([maxNumLit + maxNumDist]int)
|
||||
f.codebits = new([numCodes]int)
|
||||
f.step = nextBlock
|
||||
f.dict.init(maxMatchOffset, nil)
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(&f)
|
||||
}
|
||||
|
||||
return &f
|
||||
}
|
||||
|
||||
// NewReader returns a new ReadCloser that can be used
|
||||
// to read the uncompressed version of r.
|
||||
// If r does not also implement io.ByteReader,
@ -798,15 +850,7 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error {
|
|||
//
|
||||
// The ReadCloser returned by NewReader also implements Resetter.
|
||||
func NewReader(r io.Reader) io.ReadCloser {
|
||||
fixedHuffmanDecoderInit()
|
||||
|
||||
var f decompressor
|
||||
f.r = makeReader(r)
|
||||
f.bits = new([maxNumLit + maxNumDist]int)
|
||||
f.codebits = new([numCodes]int)
|
||||
f.step = nextBlock
|
||||
f.dict.init(maxMatchOffset, nil)
|
||||
return &f
|
||||
return NewReaderOpts(r)
|
||||
}
|
||||
|
||||
// NewReaderDict is like NewReader but initializes the reader
@ -817,13 +861,5 @@ func NewReader(r io.Reader) io.ReadCloser {
|
|||
//
|
||||
// The ReadCloser returned by NewReader also implements Resetter.
|
||||
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
|
||||
fixedHuffmanDecoderInit()
|
||||
|
||||
var f decompressor
|
||||
f.r = makeReader(r)
|
||||
f.bits = new([maxNumLit + maxNumDist]int)
|
||||
f.codebits = new([numCodes]int)
|
||||
f.step = nextBlock
|
||||
f.dict.init(maxMatchOffset, dict)
|
||||
return &f
|
||||
return NewReaderOpts(r, WithDict(dict))
|
||||
}
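// A hedged usage sketch of the options API above (conn, presetDict and buf
// are hypothetical; the option names come from this diff):
//
//	r := flate.NewReaderOpts(conn,
//		flate.WithPartialBlock(),   // surface data after every block
//		flate.WithDict(presetDict), // preset window, as NewReaderDict does
//	)
//	defer r.Close()
//	_, err := io.Copy(&buf, r)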
@ -15,7 +15,7 @@ const (
|
|||
// It is possible, but by no way guaranteed that corrupt data will
|
||||
// return an error.
|
||||
// It is up to the caller to verify integrity of the returned data.
|
||||
// Use a predefined Scrach to set maximum acceptable output size.
|
||||
// Use a predefined Scratch to set maximum acceptable output size.
|
||||
func Decompress(b []byte, s *Scratch) ([]byte, error) {
|
||||
s, err := s.prepare(b)
|
||||
if err != nil {
@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
|
|||
errs++
|
||||
}
|
||||
if errs > 0 {
|
||||
fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
|
||||
fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
|
||||
continue
|
||||
}
|
||||
// Ensure that all combinations are covered.
@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
|
|||
errs++
|
||||
}
|
||||
if errs > 20 {
|
||||
fmt.Fprintf(w, "%d errros, stopping\n", errs)
|
||||
fmt.Fprintf(w, "%d errors, stopping\n", errs)
|
||||
break
|
||||
}
|
||||
}
@ -598,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
|
|||
printf("RLE set to 0x%x, code: %v", symb, v)
|
||||
}
|
||||
case compModeFSE:
|
||||
println("Reading table for", tableIndex(i))
|
||||
if debugDecoder {
|
||||
println("Reading table for", tableIndex(i))
|
||||
}
|
||||
if seq.fse == nil || seq.fse.preDefined {
|
||||
seq.fse = fseDecoderPool.Get().(*fseDecoder)
|
||||
}
@ -179,9 +179,9 @@ encodeLoop:
|
|||
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
|
||||
// Consider history as well.
|
||||
var seq seq
|
||||
lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
|
||||
length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
|
||||
|
||||
seq.matchLen = uint32(lenght - zstdMinMatch)
|
||||
seq.matchLen = uint32(length - zstdMinMatch)
|
||||
|
||||
// We might be able to match backwards.
|
||||
// Extend as long as we can.
@ -210,12 +210,12 @@ encodeLoop:
|
|||
|
||||
// Index match start+1 (long) -> s - 1
|
||||
index0 := s + repOff
|
||||
s += lenght + repOff
|
||||
s += length + repOff
|
||||
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
if debugEncoder {
|
||||
println("repeat ended", s, lenght)
|
||||
println("repeat ended", s, length)
|
||||
|
||||
}
|
||||
break encodeLoop
@ -241,9 +241,9 @@ encodeLoop:
|
|||
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
|
||||
// Consider history as well.
|
||||
var seq seq
|
||||
lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
|
||||
length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
|
||||
|
||||
seq.matchLen = uint32(lenght - zstdMinMatch)
|
||||
seq.matchLen = uint32(length - zstdMinMatch)
|
||||
|
||||
// We might be able to match backwards.
|
||||
// Extend as long as we can.
@ -270,11 +270,11 @@ encodeLoop:
|
|||
}
|
||||
blk.sequences = append(blk.sequences, seq)
|
||||
|
||||
s += lenght + repOff2
|
||||
s += length + repOff2
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
if debugEncoder {
|
||||
println("repeat ended", s, lenght)
|
||||
println("repeat ended", s, length)
|
||||
|
||||
}
|
||||
break encodeLoop
@ -708,9 +708,9 @@ encodeLoop:
|
|||
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
|
||||
// Consider history as well.
|
||||
var seq seq
|
||||
lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
|
||||
length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
|
||||
|
||||
seq.matchLen = uint32(lenght - zstdMinMatch)
|
||||
seq.matchLen = uint32(length - zstdMinMatch)
|
||||
|
||||
// We might be able to match backwards.
|
||||
// Extend as long as we can.
@ -738,12 +738,12 @@ encodeLoop:
|
|||
blk.sequences = append(blk.sequences, seq)
|
||||
|
||||
// Index match start+1 (long) -> s - 1
|
||||
s += lenght + repOff
|
||||
s += length + repOff
|
||||
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
if debugEncoder {
|
||||
println("repeat ended", s, lenght)
|
||||
println("repeat ended", s, length)
|
||||
|
||||
}
|
||||
break encodeLoop
@ -772,9 +772,9 @@ encodeLoop:
|
|||
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
|
||||
// Consider history as well.
|
||||
var seq seq
|
||||
lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
|
||||
length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
|
||||
|
||||
seq.matchLen = uint32(lenght - zstdMinMatch)
|
||||
seq.matchLen = uint32(length - zstdMinMatch)
|
||||
|
||||
// We might be able to match backwards.
|
||||
// Extend as long as we can.
@ -801,11 +801,11 @@ encodeLoop:
|
|||
}
|
||||
blk.sequences = append(blk.sequences, seq)
|
||||
|
||||
s += lenght + repOff2
|
||||
s += length + repOff2
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
if debugEncoder {
|
||||
println("repeat ended", s, lenght)
|
||||
println("repeat ended", s, length)
|
||||
|
||||
}
|
||||
break encodeLoop
@ -138,9 +138,9 @@ encodeLoop:
|
|||
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
|
||||
// Consider history as well.
|
||||
var seq seq
|
||||
lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
|
||||
length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
|
||||
|
||||
seq.matchLen = uint32(lenght - zstdMinMatch)
|
||||
seq.matchLen = uint32(length - zstdMinMatch)
|
||||
|
||||
// We might be able to match backwards.
|
||||
// Extend as long as we can.
@ -166,11 +166,11 @@ encodeLoop:
|
|||
println("repeat sequence", seq, "next s:", s)
|
||||
}
|
||||
blk.sequences = append(blk.sequences, seq)
|
||||
s += lenght + repOff
|
||||
s += length + repOff
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
if debugEncoder {
|
||||
println("repeat ended", s, lenght)
|
||||
println("repeat ended", s, length)
|
||||
|
||||
}
|
||||
break encodeLoop
@ -798,9 +798,9 @@ encodeLoop:
|
|||
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
|
||||
// Consider history as well.
|
||||
var seq seq
|
||||
lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
|
||||
length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
|
||||
|
||||
seq.matchLen = uint32(lenght - zstdMinMatch)
|
||||
seq.matchLen = uint32(length - zstdMinMatch)
|
||||
|
||||
// We might be able to match backwards.
|
||||
// Extend as long as we can.
@ -826,11 +826,11 @@ encodeLoop:
|
|||
println("repeat sequence", seq, "next s:", s)
|
||||
}
|
||||
blk.sequences = append(blk.sequences, seq)
|
||||
s += lenght + repOff
|
||||
s += length + repOff
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
if debugEncoder {
|
||||
println("repeat ended", s, lenght)
|
||||
println("repeat ended", s, length)
|
||||
|
||||
}
|
||||
break encodeLoop
@ -202,7 +202,7 @@ func (e *Encoder) nextBlock(final bool) error {
|
|||
return nil
|
||||
}
|
||||
if final && len(s.filling) > 0 {
|
||||
s.current = e.EncodeAll(s.filling, s.current[:0])
|
||||
s.current = e.encodeAll(s.encoder, s.filling, s.current[:0])
|
||||
var n2 int
|
||||
n2, s.err = s.w.Write(s.current)
|
||||
if s.err != nil {
@ -469,6 +469,15 @@ func (e *Encoder) Close() error {
|
|||
// Data compressed with EncodeAll can be decoded with the Decoder,
|
||||
// using either a stream or DecodeAll.
|
||||
func (e *Encoder) EncodeAll(src, dst []byte) []byte {
|
||||
e.init.Do(e.initialize)
|
||||
enc := <-e.encoders
|
||||
defer func() {
|
||||
e.encoders <- enc
|
||||
}()
|
||||
return e.encodeAll(enc, src, dst)
|
||||
}
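// The EncodeAll/encodeAll split above follows a common channel-as-pool
// shape; a generic sketch of that pattern (illustrative only, not part of
// this package):
func withPooled[T any](pool chan T, f func(T)) {
	v := <-pool                  // borrow one pooled worker
	defer func() { pool <- v }() // always hand it back, even on panic
	f(v)
}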
|
||||
|
||||
func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte {
|
||||
if len(src) == 0 {
|
||||
if e.o.fullZero {
|
||||
// Add frame header.
@ -491,13 +500,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
|
|||
}
|
||||
return dst
|
||||
}
|
||||
e.init.Do(e.initialize)
|
||||
enc := <-e.encoders
|
||||
defer func() {
|
||||
// Release encoder reference to last block.
|
||||
// If a non-single block is needed the encoder will reset again.
|
||||
e.encoders <- enc
|
||||
}()
|
||||
|
||||
// Use single segments when above minimum window and below window size.
|
||||
single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
|
||||
if e.o.single != nil {
|
||||
|
|
|
@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||
}
|
||||
return err
|
||||
}
|
||||
printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
|
||||
if debugDecoder {
|
||||
printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
|
||||
}
|
||||
windowLog := 10 + (wd >> 3)
|
||||
windowBase := uint64(1) << windowLog
|
||||
windowAdd := (windowBase / 8) * uint64(wd&0x7)
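// Worked example of the window-descriptor arithmetic above: wd = 0x23 gives
// exponent wd>>3 = 4 and mantissa wd&7 = 3, so windowLog = 14,
// windowBase = 1<<14 = 16384, windowAdd = (16384/8)*3 = 6144, and the
// resulting window size is 22528 bytes.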
|
||||
@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
|
|||
return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
|
||||
|
||||
default:
|
||||
return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
|
||||
return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
|
||||
}
|
||||
|
||||
s.seqSize += ctx.litRemain
|
||||
|
@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
|
|||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
|
||||
return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
|
||||
}
|
||||
|
||||
if ctx.litRemain < 0 {
@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
|
|||
MOVQ 40(SP), AX
|
||||
ADDQ AX, 48(SP)
|
||||
|
||||
// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
|
||||
// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
|
||||
ADDQ R10, 32(SP)
|
||||
|
||||
// outBase += outPosition
|
||||
|
@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
|
|||
MOVQ 40(SP), CX
|
||||
ADDQ CX, 48(SP)
|
||||
|
||||
// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
|
||||
// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
|
||||
ADDQ R9, 32(SP)
|
||||
|
||||
// outBase += outPosition
|
||||
|
@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
|
|||
MOVQ 40(SP), AX
|
||||
ADDQ AX, 48(SP)
|
||||
|
||||
// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
|
||||
// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
|
||||
ADDQ R10, 32(SP)
|
||||
|
||||
// outBase += outPosition
|
||||
|
@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
|
|||
MOVQ 40(SP), CX
|
||||
ADDQ CX, 48(SP)
|
||||
|
||||
// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
|
||||
// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
|
||||
ADDQ R9, 32(SP)
|
||||
|
||||
// outBase += outPosition
|
||||
|
|
|
@ -0,0 +1,3 @@
|
|||
[codespell]
|
||||
skip = ./.git
|
||||
ignore-words-list = nd
|
|
@ -0,0 +1,6 @@
|
|||
linters:
|
||||
enable:
|
||||
- unconvert
|
||||
- unparam
|
||||
- gofumpt
|
||||
- errorlint
|
|
@ -0,0 +1,72 @@
|
|||
# Changelog
|
||||
This file documents all notable changes made to this project since the initial fork
|
||||
from https://github.com/syndtr/gocapability/commit/42c35b4376354fd5.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## 0.2.0 - 2024-09-16
|
||||
|
||||
This is the first release after the move to a new home in
|
||||
github.com/moby/sys/capability.
|
||||
|
||||
### Fixed
|
||||
* Fixed URLs in documentation to reflect the new home.
|
||||
|
||||
## [0.1.1] - 2024-08-01
|
||||
|
||||
This is a maintenance release, fixing a few minor issues.
|
||||
|
||||
### Fixed
|
||||
* Fixed future kernel compatibility, for real this time. [#11]
|
||||
* Fixed [LastCap] to be a function. [#12]
|
||||
|
||||
## [0.1.0] - 2024-07-31
|
||||
|
||||
This is an initial release since the fork.
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* The `CAP_LAST_CAP` variable is removed; users need to modify the code to
|
||||
use [LastCap] to get the value. [#6]
|
||||
* The code now requires Go >= 1.21.
|
||||
|
||||
### Added
|
||||
* `go.mod` and `go.sum` files. [#2]
|
||||
* New [LastCap] function. [#6]
|
||||
* Basic CI using GHA infra. [#8], [#9]
|
||||
* README and CHANGELOG. [#10]
|
||||
|
||||
### Fixed
|
||||
* Fixed ambient capabilities error handling in [Apply]. [#3]
|
||||
* Fixed future kernel compatibility. [#1]
|
||||
* Fixed various linter warnings. [#4], [#7]
|
||||
|
||||
### Changed
|
||||
* Go build tags changed from old-style (`+build`) to new Go 1.17+ style (`go:build`). [#2]
|
||||
|
||||
### Removed
|
||||
* Removed support for capabilities v1 and v2. [#1]
|
||||
* Removed init function so programs that use this package start faster. [#6]
|
||||
* Removed `CAP_LAST_CAP` (use [LastCap] instead). [#6]
|
||||
|
||||
<!-- Doc links. -->
|
||||
[Apply]: https://pkg.go.dev/github.com/moby/sys/capability#Capabilities.Apply
|
||||
[LastCap]: https://pkg.go.dev/github.com/moby/sys/capability#LastCap
|
||||
|
||||
<!-- Minor releases. -->
|
||||
[0.1.1]: https://github.com/kolyshkin/capability/compare/v0.1.0...v0.1.1
|
||||
[0.1.0]: https://github.com/kolyshkin/capability/compare/42c35b4376354fd5...v0.1.0
|
||||
|
||||
<!-- PRs in 0.1.x releases. -->
|
||||
[#1]: https://github.com/kolyshkin/capability/pull/1
|
||||
[#2]: https://github.com/kolyshkin/capability/pull/2
|
||||
[#3]: https://github.com/kolyshkin/capability/pull/3
|
||||
[#4]: https://github.com/kolyshkin/capability/pull/4
|
||||
[#6]: https://github.com/kolyshkin/capability/pull/6
|
||||
[#7]: https://github.com/kolyshkin/capability/pull/7
|
||||
[#8]: https://github.com/kolyshkin/capability/pull/8
|
||||
[#9]: https://github.com/kolyshkin/capability/pull/9
|
||||
[#10]: https://github.com/kolyshkin/capability/pull/10
|
||||
[#11]: https://github.com/kolyshkin/capability/pull/11
|
||||
[#12]: https://github.com/kolyshkin/capability/pull/12
|
|
@ -0,0 +1,25 @@
|
|||
Copyright 2023 The Capability Authors.
|
||||
Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,11 @@
|
|||
This is a fork of the (apparently no longer maintained)
|
||||
https://github.com/syndtr/gocapability package. It provides basic primitives to
|
||||
work with [Linux capabilities][capabilities(7)].
|
||||
|
||||
[](https://pkg.go.dev/github.com/moby/sys/capability)
|
||||
|
||||
## Alternatives
|
||||
|
||||
* https://pkg.go.dev/kernel.org/pub/linux/libs/security/libcap/cap
|
||||
|
||||
[capabilities(7)]: https://man7.org/linux/man-pages/man7/capabilities.7.html
|
|
@ -0,0 +1,134 @@
|
|||
// Copyright 2023 The Capability Authors.
|
||||
// Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package capability provides utilities for manipulating POSIX capabilities.
|
||||
package capability
|
||||
|
||||
type Capabilities interface {
|
||||
// Get checks whether a capability is present in the given
|
||||
// capabilities set. The 'which' value should be one of EFFECTIVE,
|
||||
// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
|
||||
Get(which CapType, what Cap) bool
|
||||
|
||||
// Empty checks whether all capability bits of the given capabilities
|
||||
// set are zero. The 'which' value should be one of EFFECTIVE,
|
||||
// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
|
||||
Empty(which CapType) bool
|
||||
|
||||
// Full checks whether all capability bits of the given capabilities
|
||||
// set are one. The 'which' value should be one of EFFECTIVE,
|
||||
// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
|
||||
Full(which CapType) bool
|
||||
|
||||
// Set sets capabilities of the given capabilities sets. The
|
||||
// 'which' value should be one or combination (OR'ed) of EFFECTIVE,
|
||||
// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
|
||||
Set(which CapType, caps ...Cap)
|
||||
|
||||
// Unset unsets capabilities of the given capabilities sets. The
|
||||
// 'which' value should be one or combination (OR'ed) of EFFECTIVE,
|
||||
// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
|
||||
Unset(which CapType, caps ...Cap)
|
||||
|
||||
// Fill sets all bits of the given capabilities kind to one. The
|
||||
// 'kind' value should be one or combination (OR'ed) of CAPS,
|
||||
// BOUNDS or AMBS.
|
||||
Fill(kind CapType)
|
||||
|
||||
// Clear sets all bits of the given capabilities kind to zero. The
|
||||
// 'kind' value should be one or combination (OR'ed) of CAPS,
|
||||
// BOUNDS or AMBS.
|
||||
Clear(kind CapType)
|
||||
|
||||
// StringCap returns the current capabilities state of the given capabilities
// set as a string. The 'which' value should be one of EFFECTIVE,
// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
|
||||
StringCap(which CapType) string
|
||||
|
||||
// String returns the current capabilities state as a string.
|
||||
String() string
|
||||
|
||||
// Load loads the actual capabilities value. This will overwrite all
|
||||
// outstanding changes.
|
||||
Load() error
|
||||
|
||||
// Apply applies the capabilities settings, so all changes will take
|
||||
// effect.
|
||||
Apply(kind CapType) error
|
||||
}
|
||||
|
||||
// NewPid initializes a new Capabilities object for the given pid when
|
||||
// it is nonzero, or for the current process if pid is 0.
|
||||
//
|
||||
// Deprecated: Replace with NewPid2. For example, replace:
|
||||
//
|
||||
// c, err := NewPid(0)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// with:
|
||||
//
|
||||
// c, err := NewPid2(0)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// err = c.Load()
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
func NewPid(pid int) (Capabilities, error) {
|
||||
c, err := newPid(pid)
|
||||
if err != nil {
|
||||
return c, err
|
||||
}
|
||||
err = c.Load()
|
||||
return c, err
|
||||
}
|
||||
|
||||
// NewPid2 initializes a new Capabilities object for the given pid when
|
||||
// it is nonzero, or for the current process if pid is 0. This
|
||||
// does not load the process's current capabilities; to do that you
|
||||
// must call Load explicitly.
|
||||
func NewPid2(pid int) (Capabilities, error) {
|
||||
return newPid(pid)
|
||||
}
|
||||
|
||||
// NewFile initializes a new Capabilities object for the given file path.
|
||||
//
|
||||
// Deprecated: Replace with NewFile2. For example, replace:
|
||||
//
|
||||
// c, err := NewFile(path)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// with:
|
||||
//
|
||||
// c, err := NewFile2(path)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// err = c.Load()
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
func NewFile(path string) (Capabilities, error) {
|
||||
c, err := newFile(path)
|
||||
if err != nil {
|
||||
return c, err
|
||||
}
|
||||
err = c.Load()
|
||||
return c, err
|
||||
}
|
||||
|
||||
// NewFile2 creates a new initialized Capabilities object for the given
|
||||
// file path. This does not load the process's current capabilities;
|
||||
// to do that you must call Load explicitly.
|
||||
func NewFile2(path string) (Capabilities, error) {
|
||||
return newFile(path)
|
||||
}
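// A hedged end-to-end sketch of the non-deprecated API (error handling
// abbreviated; CAP_NET_ADMIN chosen arbitrarily):
//
//	c, err := capability.NewPid2(0) // 0 = current process
//	if err == nil {
//		err = c.Load()
//	}
//	if err == nil && c.Get(capability.EFFECTIVE, capability.CAP_NET_ADMIN) {
//		// privileged path
//	}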
|
|
@ -0,0 +1,546 @@
|
|||
// Copyright 2023 The Capability Authors.
|
||||
// Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package capability
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
linuxCapVer1 = 0x19980330 // No longer supported.
|
||||
linuxCapVer2 = 0x20071026 // No longer supported.
|
||||
linuxCapVer3 = 0x20080522
|
||||
)
|
||||
|
||||
// LastCap returns the highest valid capability of the running kernel.
|
||||
func LastCap() (Cap, error) {
|
||||
return lastCap()
|
||||
}
|
||||
|
||||
var lastCap = sync.OnceValues(func() (Cap, error) {
|
||||
f, err := os.Open("/proc/sys/kernel/cap_last_cap")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
buf := make([]byte, 11)
|
||||
l, err := f.Read(buf)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
buf = buf[:l]
|
||||
|
||||
last, err := strconv.Atoi(strings.TrimSpace(string(buf)))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return Cap(last), nil
|
||||
})
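// sync.OnceValues (Go 1.21) memoizes both return values, so the /proc read
// above runs at most once per process; every later LastCap call gets the
// cached (Cap, error) pair, including a cached failure.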
|
||||
|
||||
func capUpperMask() uint32 {
|
||||
last, err := lastCap()
|
||||
if err != nil || last < 32 {
|
||||
return 0
|
||||
}
|
||||
return (uint32(1) << (uint(last) - 31)) - 1
|
||||
}
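// Worked example: with last = 40 (CAP_CHECKPOINT_RESTORE) the mask is
// (1 << (40-31)) - 1 = 0x1ff, i.e. bits 0..8 of the second 32-bit word,
// which correspond to capabilities 32..40.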
|
||||
|
||||
func mkStringCap(c Capabilities, which CapType) (ret string) {
|
||||
last, err := lastCap()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
for i, first := Cap(0), true; i <= last; i++ {
|
||||
if !c.Get(which, i) {
|
||||
continue
|
||||
}
|
||||
if first {
|
||||
first = false
|
||||
} else {
|
||||
ret += ", "
|
||||
}
|
||||
ret += i.String()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func mkString(c Capabilities, max CapType) (ret string) {
|
||||
ret = "{"
|
||||
for i := CapType(1); i <= max; i <<= 1 {
|
||||
ret += " " + i.String() + "=\""
|
||||
if c.Empty(i) {
|
||||
ret += "empty"
|
||||
} else if c.Full(i) {
|
||||
ret += "full"
|
||||
} else {
|
||||
ret += c.StringCap(i)
|
||||
}
|
||||
ret += "\""
|
||||
}
|
||||
ret += " }"
|
||||
return
|
||||
}
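// Illustrative shape of the resulting string (not from a real run):
//
//	{ effective="full" permitted="full" inheritable="empty" bounding="chown, kill" }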
|
||||
|
||||
var capVersion = sync.OnceValues(func() (uint32, error) {
|
||||
var hdr capHeader
|
||||
err := capget(&hdr, nil)
|
||||
return hdr.version, err
|
||||
})
|
||||
|
||||
func newPid(pid int) (c Capabilities, retErr error) {
|
||||
ver, err := capVersion()
|
||||
if err != nil {
|
||||
retErr = fmt.Errorf("unable to get capability version from the kernel: %w", err)
|
||||
return
|
||||
}
|
||||
switch ver {
|
||||
case linuxCapVer1, linuxCapVer2:
|
||||
retErr = errors.New("old/unsupported capability version (kernel older than 2.6.26?)")
|
||||
default:
|
||||
// Either linuxCapVer3, or an unknown/future version (such as v4).
|
||||
// In the latter case, we fall back to v3 as the latest version known
|
||||
// to this package, as kernel should be backward-compatible to v3.
|
||||
p := new(capsV3)
|
||||
p.hdr.version = linuxCapVer3
|
||||
p.hdr.pid = int32(pid)
|
||||
c = p
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type capsV3 struct {
|
||||
hdr capHeader
|
||||
data [2]capData
|
||||
bounds [2]uint32
|
||||
ambient [2]uint32
|
||||
}
|
||||
|
||||
func (c *capsV3) Get(which CapType, what Cap) bool {
|
||||
var i uint
|
||||
if what > 31 {
|
||||
i = uint(what) >> 5
|
||||
what %= 32
|
||||
}
|
||||
|
||||
switch which {
|
||||
case EFFECTIVE:
|
||||
return (1<<uint(what))&c.data[i].effective != 0
|
||||
case PERMITTED:
|
||||
return (1<<uint(what))&c.data[i].permitted != 0
|
||||
case INHERITABLE:
|
||||
return (1<<uint(what))&c.data[i].inheritable != 0
|
||||
case BOUNDING:
|
||||
return (1<<uint(what))&c.bounds[i] != 0
|
||||
case AMBIENT:
|
||||
return (1<<uint(what))&c.ambient[i] != 0
|
||||
}
|
||||
|
||||
return false
|
||||
}
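// Worked example of the word/bit split above: CAP_MAC_ADMIN is 33, so
// i = 33>>5 = 1 (second 32-bit word) and what = 33%32 = 1, making the
// EFFECTIVE test data[1].effective & (1 << 1).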
|
||||
|
||||
func (c *capsV3) getData(which CapType, dest []uint32) {
|
||||
switch which {
|
||||
case EFFECTIVE:
|
||||
dest[0] = c.data[0].effective
|
||||
dest[1] = c.data[1].effective
|
||||
case PERMITTED:
|
||||
dest[0] = c.data[0].permitted
|
||||
dest[1] = c.data[1].permitted
|
||||
case INHERITABLE:
|
||||
dest[0] = c.data[0].inheritable
|
||||
dest[1] = c.data[1].inheritable
|
||||
case BOUNDING:
|
||||
dest[0] = c.bounds[0]
|
||||
dest[1] = c.bounds[1]
|
||||
case AMBIENT:
|
||||
dest[0] = c.ambient[0]
|
||||
dest[1] = c.ambient[1]
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsV3) Empty(which CapType) bool {
|
||||
var data [2]uint32
|
||||
c.getData(which, data[:])
|
||||
return data[0] == 0 && data[1] == 0
|
||||
}
|
||||
|
||||
func (c *capsV3) Full(which CapType) bool {
|
||||
var data [2]uint32
|
||||
c.getData(which, data[:])
|
||||
if (data[0] & 0xffffffff) != 0xffffffff {
|
||||
return false
|
||||
}
|
||||
mask := capUpperMask()
|
||||
return (data[1] & mask) == mask
|
||||
}
|
||||
|
||||
func (c *capsV3) Set(which CapType, caps ...Cap) {
|
||||
for _, what := range caps {
|
||||
var i uint
|
||||
if what > 31 {
|
||||
i = uint(what) >> 5
|
||||
what %= 32
|
||||
}
|
||||
|
||||
if which&EFFECTIVE != 0 {
|
||||
c.data[i].effective |= 1 << uint(what)
|
||||
}
|
||||
if which&PERMITTED != 0 {
|
||||
c.data[i].permitted |= 1 << uint(what)
|
||||
}
|
||||
if which&INHERITABLE != 0 {
|
||||
c.data[i].inheritable |= 1 << uint(what)
|
||||
}
|
||||
if which&BOUNDING != 0 {
|
||||
c.bounds[i] |= 1 << uint(what)
|
||||
}
|
||||
if which&AMBIENT != 0 {
|
||||
c.ambient[i] |= 1 << uint(what)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsV3) Unset(which CapType, caps ...Cap) {
|
||||
for _, what := range caps {
|
||||
var i uint
|
||||
if what > 31 {
|
||||
i = uint(what) >> 5
|
||||
what %= 32
|
||||
}
|
||||
|
||||
if which&EFFECTIVE != 0 {
|
||||
c.data[i].effective &= ^(1 << uint(what))
|
||||
}
|
||||
if which&PERMITTED != 0 {
|
||||
c.data[i].permitted &= ^(1 << uint(what))
|
||||
}
|
||||
if which&INHERITABLE != 0 {
|
||||
c.data[i].inheritable &= ^(1 << uint(what))
|
||||
}
|
||||
if which&BOUNDING != 0 {
|
||||
c.bounds[i] &= ^(1 << uint(what))
|
||||
}
|
||||
if which&AMBIENT != 0 {
|
||||
c.ambient[i] &= ^(1 << uint(what))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsV3) Fill(kind CapType) {
|
||||
if kind&CAPS == CAPS {
|
||||
c.data[0].effective = 0xffffffff
|
||||
c.data[0].permitted = 0xffffffff
|
||||
c.data[0].inheritable = 0
|
||||
c.data[1].effective = 0xffffffff
|
||||
c.data[1].permitted = 0xffffffff
|
||||
c.data[1].inheritable = 0
|
||||
}
|
||||
|
||||
if kind&BOUNDS == BOUNDS {
|
||||
c.bounds[0] = 0xffffffff
|
||||
c.bounds[1] = 0xffffffff
|
||||
}
|
||||
if kind&AMBS == AMBS {
|
||||
c.ambient[0] = 0xffffffff
|
||||
c.ambient[1] = 0xffffffff
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsV3) Clear(kind CapType) {
|
||||
if kind&CAPS == CAPS {
|
||||
c.data[0].effective = 0
|
||||
c.data[0].permitted = 0
|
||||
c.data[0].inheritable = 0
|
||||
c.data[1].effective = 0
|
||||
c.data[1].permitted = 0
|
||||
c.data[1].inheritable = 0
|
||||
}
|
||||
|
||||
if kind&BOUNDS == BOUNDS {
|
||||
c.bounds[0] = 0
|
||||
c.bounds[1] = 0
|
||||
}
|
||||
if kind&AMBS == AMBS {
|
||||
c.ambient[0] = 0
|
||||
c.ambient[1] = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsV3) StringCap(which CapType) (ret string) {
|
||||
return mkStringCap(c, which)
|
||||
}
|
||||
|
||||
func (c *capsV3) String() (ret string) {
|
||||
return mkString(c, BOUNDING)
|
||||
}
|
||||
|
||||
func (c *capsV3) Load() (err error) {
|
||||
err = capget(&c.hdr, &c.data[0])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
path := "/proc/self/status"
|
||||
if c.hdr.pid != 0 {
|
||||
path = fmt.Sprintf("/proc/%d/status", c.hdr.pid)
|
||||
}
|
||||
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
b := bufio.NewReader(f)
|
||||
for {
|
||||
line, e := b.ReadString('\n')
|
||||
if e != nil {
|
||||
if e != io.EOF {
|
||||
err = e
|
||||
}
|
||||
break
|
||||
}
|
||||
if strings.HasPrefix(line, "CapB") {
|
||||
_, err = fmt.Sscanf(line[4:], "nd: %08x%08x", &c.bounds[1], &c.bounds[0])
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(line, "CapA") {
|
||||
_, err = fmt.Sscanf(line[4:], "mb: %08x%08x", &c.ambient[1], &c.ambient[0])
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
f.Close()
|
||||
|
||||
return
|
||||
}
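// For reference, the /proc/<pid>/status lines matched above look like this
// (example values):
//
//	CapBnd:	000001ffffffffff
//	CapAmb:	0000000000000000
//
// and the "%08x%08x" scan splits the 16 hex digits into the high and low
// 32-bit words, hence bounds[1] before bounds[0].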
|
||||
|
||||
func (c *capsV3) Apply(kind CapType) (err error) {
|
||||
last, err := LastCap()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if kind&BOUNDS == BOUNDS {
|
||||
var data [2]capData
|
||||
err = capget(&c.hdr, &data[0])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if (1<<uint(CAP_SETPCAP))&data[0].effective != 0 {
|
||||
for i := Cap(0); i <= last; i++ {
|
||||
if c.Get(BOUNDING, i) {
|
||||
continue
|
||||
}
|
||||
err = prctl(syscall.PR_CAPBSET_DROP, uintptr(i), 0, 0, 0)
|
||||
if err != nil {
|
||||
// Ignore EINVAL since the capability may not be supported in this system.
|
||||
if err == syscall.EINVAL { //nolint:errorlint // Errors from syscall are bare.
|
||||
err = nil
|
||||
continue
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if kind&CAPS == CAPS {
|
||||
err = capset(&c.hdr, &c.data[0])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if kind&AMBS == AMBS {
|
||||
for i := Cap(0); i <= last; i++ {
|
||||
action := pr_CAP_AMBIENT_LOWER
|
||||
if c.Get(AMBIENT, i) {
|
||||
action = pr_CAP_AMBIENT_RAISE
|
||||
}
|
||||
err = prctl(pr_CAP_AMBIENT, action, uintptr(i), 0, 0)
|
||||
if err != nil {
|
||||
// Ignore EINVAL as not supported on kernels before 4.3
|
||||
if err == syscall.EINVAL { //nolint:errorlint // Errors from syscall are bare.
|
||||
err = nil
|
||||
continue
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func newFile(path string) (c Capabilities, err error) {
|
||||
c = &capsFile{path: path}
|
||||
return
|
||||
}
|
||||
|
||||
type capsFile struct {
|
||||
path string
|
||||
data vfscapData
|
||||
}
|
||||
|
||||
func (c *capsFile) Get(which CapType, what Cap) bool {
|
||||
var i uint
|
||||
if what > 31 {
|
||||
if c.data.version == 1 {
|
||||
return false
|
||||
}
|
||||
i = uint(what) >> 5
|
||||
what %= 32
|
||||
}
|
||||
|
||||
switch which {
|
||||
case EFFECTIVE:
|
||||
return (1<<uint(what))&c.data.effective[i] != 0
|
||||
case PERMITTED:
|
||||
return (1<<uint(what))&c.data.data[i].permitted != 0
|
||||
case INHERITABLE:
|
||||
return (1<<uint(what))&c.data.data[i].inheritable != 0
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *capsFile) getData(which CapType, dest []uint32) {
|
||||
switch which {
|
||||
case EFFECTIVE:
|
||||
dest[0] = c.data.effective[0]
|
||||
dest[1] = c.data.effective[1]
|
||||
case PERMITTED:
|
||||
dest[0] = c.data.data[0].permitted
|
||||
dest[1] = c.data.data[1].permitted
|
||||
case INHERITABLE:
|
||||
dest[0] = c.data.data[0].inheritable
|
||||
dest[1] = c.data.data[1].inheritable
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsFile) Empty(which CapType) bool {
|
||||
var data [2]uint32
|
||||
c.getData(which, data[:])
|
||||
return data[0] == 0 && data[1] == 0
|
||||
}
|
||||
|
||||
func (c *capsFile) Full(which CapType) bool {
|
||||
var data [2]uint32
|
||||
c.getData(which, data[:])
|
||||
if c.data.version == 0 {
|
||||
return (data[0] & 0x7fffffff) == 0x7fffffff
|
||||
}
|
||||
if (data[0] & 0xffffffff) != 0xffffffff {
|
||||
return false
|
||||
}
|
||||
mask := capUpperMask()
|
||||
return (data[1] & mask) == mask
|
||||
}
|
||||
|
||||
func (c *capsFile) Set(which CapType, caps ...Cap) {
|
||||
for _, what := range caps {
|
||||
var i uint
|
||||
if what > 31 {
|
||||
if c.data.version == 1 {
|
||||
continue
|
||||
}
|
||||
i = uint(what) >> 5
|
||||
what %= 32
|
||||
}
|
||||
|
||||
if which&EFFECTIVE != 0 {
|
||||
c.data.effective[i] |= 1 << uint(what)
|
||||
}
|
||||
if which&PERMITTED != 0 {
|
||||
c.data.data[i].permitted |= 1 << uint(what)
|
||||
}
|
||||
if which&INHERITABLE != 0 {
|
||||
c.data.data[i].inheritable |= 1 << uint(what)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsFile) Unset(which CapType, caps ...Cap) {
|
||||
for _, what := range caps {
|
||||
var i uint
|
||||
if what > 31 {
|
||||
if c.data.version == 1 {
|
||||
continue
|
||||
}
|
||||
i = uint(what) >> 5
|
||||
what %= 32
|
||||
}
|
||||
|
||||
if which&EFFECTIVE != 0 {
|
||||
c.data.effective[i] &= ^(1 << uint(what))
|
||||
}
|
||||
if which&PERMITTED != 0 {
|
||||
c.data.data[i].permitted &= ^(1 << uint(what))
|
||||
}
|
||||
if which&INHERITABLE != 0 {
|
||||
c.data.data[i].inheritable &= ^(1 << uint(what))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsFile) Fill(kind CapType) {
|
||||
if kind&CAPS == CAPS {
|
||||
c.data.effective[0] = 0xffffffff
|
||||
c.data.data[0].permitted = 0xffffffff
|
||||
c.data.data[0].inheritable = 0
|
||||
if c.data.version == 2 {
|
||||
c.data.effective[1] = 0xffffffff
|
||||
c.data.data[1].permitted = 0xffffffff
|
||||
c.data.data[1].inheritable = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsFile) Clear(kind CapType) {
|
||||
if kind&CAPS == CAPS {
|
||||
c.data.effective[0] = 0
|
||||
c.data.data[0].permitted = 0
|
||||
c.data.data[0].inheritable = 0
|
||||
if c.data.version == 2 {
|
||||
c.data.effective[1] = 0
|
||||
c.data.data[1].permitted = 0
|
||||
c.data.data[1].inheritable = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsFile) StringCap(which CapType) (ret string) {
|
||||
return mkStringCap(c, which)
|
||||
}
|
||||
|
||||
func (c *capsFile) String() (ret string) {
|
||||
return mkString(c, INHERITABLE)
|
||||
}
|
||||
|
||||
func (c *capsFile) Load() (err error) {
|
||||
return getVfsCap(c.path, &c.data)
|
||||
}
|
||||
|
||||
func (c *capsFile) Apply(kind CapType) (err error) {
|
||||
if kind&CAPS == CAPS {
|
||||
return setVfsCap(c.path, &c.data)
|
||||
}
|
||||
return
|
||||
}
|
|
@ -0,0 +1,20 @@
|
|||
// Copyright 2023 The Capability Authors.
|
||||
// Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !linux
|
||||
|
||||
package capability
|
||||
|
||||
import "errors"
|
||||
|
||||
func newPid(pid int) (Capabilities, error) {
|
||||
return nil, errors.New("not supported")
|
||||
}
|
||||
|
||||
func newFile(path string) (Capabilities, error) {
|
||||
return nil, errors.New("not supported")
|
||||
}
|
|
@ -0,0 +1,303 @@
|
|||
// Copyright 2024 The Capability Authors.
|
||||
// Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package capability
|
||||
|
||||
type CapType uint
|
||||
|
||||
func (c CapType) String() string {
|
||||
switch c {
|
||||
case EFFECTIVE:
|
||||
return "effective"
|
||||
case PERMITTED:
|
||||
return "permitted"
|
||||
case INHERITABLE:
|
||||
return "inheritable"
|
||||
case BOUNDING:
|
||||
return "bounding"
|
||||
case CAPS:
|
||||
return "caps"
|
||||
case AMBIENT:
|
||||
return "ambient"
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
const (
|
||||
EFFECTIVE CapType = 1 << iota
|
||||
PERMITTED
|
||||
INHERITABLE
|
||||
BOUNDING
|
||||
AMBIENT
|
||||
|
||||
CAPS = EFFECTIVE | PERMITTED | INHERITABLE
|
||||
BOUNDS = BOUNDING
|
||||
AMBS = AMBIENT
|
||||
)
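// Because CapType values are single bits, they can be OR-ed wherever the
// docs say "one or combination", e.g. (illustrative call from outside the
// package):
//
//	c.Set(capability.EFFECTIVE|capability.PERMITTED, capability.CAP_KILL)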
|
||||
|
||||
//go:generate go run enumgen/gen.go
|
||||
type Cap int
|
||||
|
||||
// POSIX-draft defined capabilities and Linux extensions.
|
||||
//
|
||||
// Defined in https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h
|
||||
const (
|
||||
// In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this
|
||||
// overrides the restriction of changing file ownership and group
|
||||
// ownership.
|
||||
CAP_CHOWN = Cap(0)
|
||||
|
||||
// Override all DAC access, including ACL execute access if
|
||||
// [_POSIX_ACL] is defined. Excluding DAC access covered by
|
||||
// CAP_LINUX_IMMUTABLE.
|
||||
CAP_DAC_OVERRIDE = Cap(1)
|
||||
|
||||
// Overrides all DAC restrictions regarding read and search on files
|
||||
// and directories, including ACL restrictions if [_POSIX_ACL] is
|
||||
// defined. Excluding DAC access covered by CAP_LINUX_IMMUTABLE.
|
||||
CAP_DAC_READ_SEARCH = Cap(2)
|
||||
|
||||
// Overrides all restrictions about allowed operations on files, where
|
||||
// file owner ID must be equal to the user ID, except where CAP_FSETID
|
||||
// is applicable. It doesn't override MAC and DAC restrictions.
|
||||
CAP_FOWNER = Cap(3)
|
||||
|
||||
// Overrides the following restrictions that the effective user ID
|
||||
// shall match the file owner ID when setting the S_ISUID and S_ISGID
|
||||
// bits on that file; that the effective group ID (or one of the
|
||||
// supplementary group IDs) shall match the file owner ID when setting
|
||||
// the S_ISGID bit on that file; that the S_ISUID and S_ISGID bits are
|
||||
// cleared on successful return from chown(2) (not implemented).
|
||||
CAP_FSETID = Cap(4)
|
||||
|
||||
// Overrides the restriction that the real or effective user ID of a
|
||||
// process sending a signal must match the real or effective user ID
|
||||
// of the process receiving the signal.
|
||||
CAP_KILL = Cap(5)
|
||||
|
||||
// Allows setgid(2) manipulation
|
||||
// Allows setgroups(2)
|
||||
// Allows forged gids on socket credentials passing.
|
||||
CAP_SETGID = Cap(6)
|
||||
|
||||
// Allows set*uid(2) manipulation (including fsuid).
|
||||
// Allows forged pids on socket credentials passing.
|
||||
CAP_SETUID = Cap(7)
|
||||
|
||||
// Linux-specific capabilities
|
||||
|
||||
// Without VFS support for capabilities:
|
||||
// Transfer any capability in your permitted set to any pid,
|
||||
// remove any capability in your permitted set from any pid
|
||||
// With VFS support for capabilities (neither of above, but)
|
||||
// Add any capability from current's capability bounding set
|
||||
// to the current process' inheritable set
|
||||
// Allow taking bits out of capability bounding set
|
||||
// Allow modification of the securebits for a process
|
||||
CAP_SETPCAP = Cap(8)
|
||||
|
||||
// Allow modification of S_IMMUTABLE and S_APPEND file attributes
|
||||
CAP_LINUX_IMMUTABLE = Cap(9)
|
||||
|
||||
// Allows binding to TCP/UDP sockets below 1024
|
||||
// Allows binding to ATM VCIs below 32
|
||||
CAP_NET_BIND_SERVICE = Cap(10)
|
||||
|
||||
// Allow broadcasting, listen to multicast
|
||||
CAP_NET_BROADCAST = Cap(11)
|
||||
|
||||
// Allow interface configuration
|
||||
// Allow administration of IP firewall, masquerading and accounting
|
||||
// Allow setting debug option on sockets
|
||||
// Allow modification of routing tables
|
||||
// Allow setting arbitrary process / process group ownership on
|
||||
// sockets
|
||||
// Allow binding to any address for transparent proxying (also via NET_RAW)
|
||||
// Allow setting TOS (type of service)
|
||||
// Allow setting promiscuous mode
|
||||
// Allow clearing driver statistics
|
||||
// Allow multicasting
|
||||
// Allow read/write of device-specific registers
|
||||
// Allow activation of ATM control sockets
|
||||
CAP_NET_ADMIN = Cap(12)
|
||||
|
||||
// Allow use of RAW sockets
|
||||
// Allow use of PACKET sockets
|
||||
// Allow binding to any address for transparent proxying (also via NET_ADMIN)
|
||||
CAP_NET_RAW = Cap(13)
|
||||
|
||||
// Allow locking of shared memory segments
|
||||
// Allow mlock and mlockall (which doesn't really have anything to do
|
||||
// with IPC)
|
||||
CAP_IPC_LOCK = Cap(14)
|
||||
|
||||
// Override IPC ownership checks
|
||||
CAP_IPC_OWNER = Cap(15)
|
||||
|
||||
// Insert and remove kernel modules - modify kernel without limit
|
||||
CAP_SYS_MODULE = Cap(16)
|
||||
|
||||
// Allow ioperm/iopl access
|
||||
// Allow sending USB messages to any device via /proc/bus/usb
|
||||
CAP_SYS_RAWIO = Cap(17)
|
||||
|
||||
// Allow use of chroot()
|
||||
CAP_SYS_CHROOT = Cap(18)
|
||||
|
||||
// Allow ptrace() of any process
|
||||
CAP_SYS_PTRACE = Cap(19)
|
||||
|
||||
// Allow configuration of process accounting
|
||||
CAP_SYS_PACCT = Cap(20)
|
||||
|
||||
// Allow configuration of the secure attention key
|
||||
// Allow administration of the random device
|
||||
// Allow examination and configuration of disk quotas
|
||||
// Allow setting the domainname
|
||||
// Allow setting the hostname
|
||||
// Allow calling bdflush()
|
||||
// Allow mount() and umount(), setting up new smb connection
|
||||
// Allow some autofs root ioctls
|
||||
// Allow nfsservctl
|
||||
// Allow VM86_REQUEST_IRQ
|
||||
// Allow to read/write pci config on alpha
|
||||
// Allow irix_prctl on mips (setstacksize)
|
||||
// Allow flushing all cache on m68k (sys_cacheflush)
|
||||
// Allow removing semaphores
|
||||
// Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores
|
||||
// and shared memory
|
||||
// Allow locking/unlocking of shared memory segment
|
||||
// Allow turning swap on/off
|
||||
// Allow forged pids on socket credentials passing
|
||||
// Allow setting readahead and flushing buffers on block devices
|
||||
// Allow setting geometry in floppy driver
|
||||
// Allow turning DMA on/off in xd driver
|
||||
// Allow administration of md devices (mostly the above, but some
|
||||
// extra ioctls)
|
||||
// Allow tuning the ide driver
|
||||
// Allow access to the nvram device
|
||||
// Allow administration of apm_bios, serial and bttv (TV) device
|
||||
// Allow manufacturer commands in isdn CAPI support driver
|
||||
// Allow reading non-standardized portions of pci configuration space
|
||||
// Allow DDI debug ioctl on sbpcd driver
|
||||
// Allow setting up serial ports
|
||||
// Allow sending raw qic-117 commands
|
||||
// Allow enabling/disabling tagged queuing on SCSI controllers and sending
|
||||
// arbitrary SCSI commands
|
||||
// Allow setting encryption key on loopback filesystem
|
||||
// Allow setting zone reclaim policy
|
||||
// Allow everything under CAP_BPF and CAP_PERFMON for backward compatibility
|
||||
CAP_SYS_ADMIN = Cap(21)
|
||||
|
||||
// Allow use of reboot()
|
||||
CAP_SYS_BOOT = Cap(22)
|
||||
|
||||
// Allow raising priority and setting priority on other (different
|
||||
// UID) processes
|
||||
// Allow use of FIFO and round-robin (realtime) scheduling on own
|
||||
// processes and setting the scheduling algorithm used by another
|
||||
// process.
|
||||
// Allow setting cpu affinity on other processes
|
||||
CAP_SYS_NICE = Cap(23)
|
||||
|
||||
// Override resource limits. Set resource limits.
|
||||
// Override quota limits.
|
||||
// Override reserved space on ext2 filesystem
|
||||
// Modify data journaling mode on ext3 filesystem (uses journaling
|
||||
// resources)
|
||||
// NOTE: ext2 honors fsuid when checking for resource overrides, so
|
||||
// you can override using fsuid too
|
||||
// Override size restrictions on IPC message queues
|
||||
// Allow more than 64hz interrupts from the real-time clock
|
||||
// Override max number of consoles on console allocation
|
||||
// Override max number of keymaps
|
||||
// Control memory reclaim behavior
|
||||
CAP_SYS_RESOURCE = Cap(24)
|
||||
|
||||
// Allow manipulation of system clock
|
||||
// Allow irix_stime on mips
|
||||
// Allow setting the real-time clock
|
||||
CAP_SYS_TIME = Cap(25)
|
||||
|
||||
// Allow configuration of tty devices
|
||||
// Allow vhangup() of tty
|
||||
CAP_SYS_TTY_CONFIG = Cap(26)
|
||||
|
||||
// Allow the privileged aspects of mknod()
|
||||
CAP_MKNOD = Cap(27)
|
||||
|
||||
// Allow taking of leases on files
|
||||
CAP_LEASE = Cap(28)
|
||||
|
||||
CAP_AUDIT_WRITE = Cap(29)
|
||||
CAP_AUDIT_CONTROL = Cap(30)
|
||||
CAP_SETFCAP = Cap(31)
|
||||
|
||||
// Override MAC access.
|
||||
// The base kernel enforces no MAC policy.
|
||||
// An LSM may enforce a MAC policy, and if it does and it chooses
|
||||
// to implement capability based overrides of that policy, this is
|
||||
// the capability it should use to do so.
|
||||
CAP_MAC_OVERRIDE = Cap(32)
|
||||
|
||||
// Allow MAC configuration or state changes.
|
||||
// The base kernel requires no MAC configuration.
|
||||
// An LSM may enforce a MAC policy, and if it does and it chooses
|
||||
// to implement capability based checks on modifications to that
|
||||
// policy or the data required to maintain it, this is the
|
||||
// capability it should use to do so.
|
||||
CAP_MAC_ADMIN = Cap(33)
|
||||
|
||||
// Allow configuring the kernel's syslog (printk behaviour)
|
||||
CAP_SYSLOG = Cap(34)
|
||||
|
||||
// Allow triggering something that will wake the system
|
||||
CAP_WAKE_ALARM = Cap(35)
|
||||
|
||||
// Allow preventing system suspends
|
||||
CAP_BLOCK_SUSPEND = Cap(36)
|
||||
|
||||
// Allow reading the audit log via multicast netlink socket
|
||||
CAP_AUDIT_READ = Cap(37)
|
||||
|
||||
// Allow system performance and observability privileged operations
|
||||
// using perf_events, i915_perf and other kernel subsystems
|
||||
CAP_PERFMON = Cap(38)
|
||||
|
||||
// CAP_BPF allows the following BPF operations:
|
||||
// - Creating all types of BPF maps
|
||||
// - Advanced verifier features
|
||||
// - Indirect variable access
|
||||
// - Bounded loops
|
||||
// - BPF to BPF function calls
|
||||
// - Scalar precision tracking
|
||||
// - Larger complexity limits
|
||||
// - Dead code elimination
|
||||
// - And potentially other features
|
||||
// - Loading BPF Type Format (BTF) data
|
||||
// - Retrieve xlated and JITed code of BPF programs
|
||||
// - Use bpf_spin_lock() helper
|
||||
//
|
||||
// CAP_PERFMON relaxes the verifier checks further:
|
||||
// - BPF progs can use pointer-to-integer conversions
|
||||
// - speculation attack hardening measures are bypassed
|
||||
// - bpf_probe_read to read arbitrary kernel memory is allowed
|
||||
// - bpf_trace_printk to print kernel memory is allowed
|
||||
//
|
||||
// CAP_SYS_ADMIN is required to use bpf_probe_write_user.
|
||||
//
|
||||
// CAP_SYS_ADMIN is required to iterate system wide loaded
|
||||
// programs, maps, links, BTFs and convert their IDs to file descriptors.
|
||||
//
|
||||
// CAP_PERFMON and CAP_BPF are required to load tracing programs.
|
||||
// CAP_NET_ADMIN and CAP_BPF are required to load networking programs.
|
||||
CAP_BPF = Cap(39)
|
||||
|
||||
// Allow checkpoint/restore related operations.
|
||||
// Introduced in kernel 5.9
|
||||
CAP_CHECKPOINT_RESTORE = Cap(40)
|
||||
)
|
|
@ -0,0 +1,138 @@
|
|||
// generated file; DO NOT EDIT - use go generate in directory with source

package capability

func (c Cap) String() string {
	switch c {
	case CAP_CHOWN:
		return "chown"
	case CAP_DAC_OVERRIDE:
		return "dac_override"
	case CAP_DAC_READ_SEARCH:
		return "dac_read_search"
	case CAP_FOWNER:
		return "fowner"
	case CAP_FSETID:
		return "fsetid"
	case CAP_KILL:
		return "kill"
	case CAP_SETGID:
		return "setgid"
	case CAP_SETUID:
		return "setuid"
	case CAP_SETPCAP:
		return "setpcap"
	case CAP_LINUX_IMMUTABLE:
		return "linux_immutable"
	case CAP_NET_BIND_SERVICE:
		return "net_bind_service"
	case CAP_NET_BROADCAST:
		return "net_broadcast"
	case CAP_NET_ADMIN:
		return "net_admin"
	case CAP_NET_RAW:
		return "net_raw"
	case CAP_IPC_LOCK:
		return "ipc_lock"
	case CAP_IPC_OWNER:
		return "ipc_owner"
	case CAP_SYS_MODULE:
		return "sys_module"
	case CAP_SYS_RAWIO:
		return "sys_rawio"
	case CAP_SYS_CHROOT:
		return "sys_chroot"
	case CAP_SYS_PTRACE:
		return "sys_ptrace"
	case CAP_SYS_PACCT:
		return "sys_pacct"
	case CAP_SYS_ADMIN:
		return "sys_admin"
	case CAP_SYS_BOOT:
		return "sys_boot"
	case CAP_SYS_NICE:
		return "sys_nice"
	case CAP_SYS_RESOURCE:
		return "sys_resource"
	case CAP_SYS_TIME:
		return "sys_time"
	case CAP_SYS_TTY_CONFIG:
		return "sys_tty_config"
	case CAP_MKNOD:
		return "mknod"
	case CAP_LEASE:
		return "lease"
	case CAP_AUDIT_WRITE:
		return "audit_write"
	case CAP_AUDIT_CONTROL:
		return "audit_control"
	case CAP_SETFCAP:
		return "setfcap"
	case CAP_MAC_OVERRIDE:
		return "mac_override"
	case CAP_MAC_ADMIN:
		return "mac_admin"
	case CAP_SYSLOG:
		return "syslog"
	case CAP_WAKE_ALARM:
		return "wake_alarm"
	case CAP_BLOCK_SUSPEND:
		return "block_suspend"
	case CAP_AUDIT_READ:
		return "audit_read"
	case CAP_PERFMON:
		return "perfmon"
	case CAP_BPF:
		return "bpf"
	case CAP_CHECKPOINT_RESTORE:
		return "checkpoint_restore"
	}
	return "unknown"
}

// List returns the list of all supported capabilities
func List() []Cap {
	return []Cap{
		CAP_CHOWN,
		CAP_DAC_OVERRIDE,
		CAP_DAC_READ_SEARCH,
		CAP_FOWNER,
		CAP_FSETID,
		CAP_KILL,
		CAP_SETGID,
		CAP_SETUID,
		CAP_SETPCAP,
		CAP_LINUX_IMMUTABLE,
		CAP_NET_BIND_SERVICE,
		CAP_NET_BROADCAST,
		CAP_NET_ADMIN,
		CAP_NET_RAW,
		CAP_IPC_LOCK,
		CAP_IPC_OWNER,
		CAP_SYS_MODULE,
		CAP_SYS_RAWIO,
		CAP_SYS_CHROOT,
		CAP_SYS_PTRACE,
		CAP_SYS_PACCT,
		CAP_SYS_ADMIN,
		CAP_SYS_BOOT,
		CAP_SYS_NICE,
		CAP_SYS_RESOURCE,
		CAP_SYS_TIME,
		CAP_SYS_TTY_CONFIG,
		CAP_MKNOD,
		CAP_LEASE,
		CAP_AUDIT_WRITE,
		CAP_AUDIT_CONTROL,
		CAP_SETFCAP,
		CAP_MAC_OVERRIDE,
		CAP_MAC_ADMIN,
		CAP_SYSLOG,
		CAP_WAKE_ALARM,
		CAP_BLOCK_SUSPEND,
		CAP_AUDIT_READ,
		CAP_PERFMON,
		CAP_BPF,
		CAP_CHECKPOINT_RESTORE,
	}
}
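A short usage sketch (illustrative, not part of the vendored file) pairing List with the generated String method; names are the lowercase capability names without the CAP_ prefix, and unmapped values print "unknown":

	package main

	import (
		"fmt"

		"github.com/moby/sys/capability"
	)

	func main() {
		// Enumerate every capability the package knows about, with its name.
		for _, c := range capability.List() {
			fmt.Printf("%d\t%s\n", int(c), c.String())
		}
		fmt.Println(capability.Cap(99).String()) // values outside the table print "unknown"
	}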
@@ -0,0 +1,153 @@
// Copyright 2024 The Capability Authors.
// Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package capability

import (
	"syscall"
	"unsafe"
)

type capHeader struct {
	version uint32
	pid     int32
}

type capData struct {
	effective   uint32
	permitted   uint32
	inheritable uint32
}

func capget(hdr *capHeader, data *capData) (err error) {
	_, _, e1 := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
	if e1 != 0 {
		err = e1
	}
	return
}

func capset(hdr *capHeader, data *capData) (err error) {
	_, _, e1 := syscall.Syscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
	if e1 != 0 {
		err = e1
	}
	return
}
// not yet in syscall
const (
	pr_CAP_AMBIENT           = 47
	pr_CAP_AMBIENT_IS_SET    = uintptr(1)
	pr_CAP_AMBIENT_RAISE     = uintptr(2)
	pr_CAP_AMBIENT_LOWER     = uintptr(3)
	pr_CAP_AMBIENT_CLEAR_ALL = uintptr(4)
)

func prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) {
	_, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0)
	if e1 != 0 {
		err = e1
	}
	return
}
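These constants mirror PR_CAP_AMBIENT and its sub-operations. A package-internal sketch (illustrative only; raiseAmbient is a hypothetical helper) of raising one ambient capability; the kernel requires the capability to already be in the permitted and inheritable sets for this to succeed:

	// hypothetical helper inside package capability
	func raiseAmbient(c Cap) error {
		// PR_CAP_AMBIENT with PR_CAP_AMBIENT_RAISE takes the capability
		// number as arg3; arg4 and arg5 must be zero.
		return prctl(pr_CAP_AMBIENT, pr_CAP_AMBIENT_RAISE, uintptr(c), 0, 0)
	}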
const (
	vfsXattrName = "security.capability"

	vfsCapVerMask = 0xff000000
	vfsCapVer1    = 0x01000000
	vfsCapVer2    = 0x02000000

	vfsCapFlagMask      = ^vfsCapVerMask
	vfsCapFlageffective = 0x000001

	vfscapDataSizeV1 = 4 * (1 + 2*1)
	vfscapDataSizeV2 = 4 * (1 + 2*2)
)

type vfscapData struct {
	magic uint32
	data  [2]struct {
		permitted   uint32
		inheritable uint32
	}
	effective [2]uint32
	version   int8
}

var _vfsXattrName *byte

func init() {
	_vfsXattrName, _ = syscall.BytePtrFromString(vfsXattrName)
}

func getVfsCap(path string, dest *vfscapData) (err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0)
	if e1 != 0 {
		if e1 == syscall.ENODATA {
			dest.version = 2
			return
		}
		err = e1
	}
	switch dest.magic & vfsCapVerMask {
	case vfsCapVer1:
		dest.version = 1
		if r0 != vfscapDataSizeV1 {
			return syscall.EINVAL
		}
		dest.data[1].permitted = 0
		dest.data[1].inheritable = 0
	case vfsCapVer2:
		dest.version = 2
		if r0 != vfscapDataSizeV2 {
			return syscall.EINVAL
		}
	default:
		return syscall.EINVAL
	}
	if dest.magic&vfsCapFlageffective != 0 {
		dest.effective[0] = dest.data[0].permitted | dest.data[0].inheritable
		dest.effective[1] = dest.data[1].permitted | dest.data[1].inheritable
	} else {
		dest.effective[0] = 0
		dest.effective[1] = 0
	}
	return
}
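A short sketch of reading a file's security.capability xattr through this helper (package-internal and illustrative; fileCaps is a hypothetical name):

	// hypothetical, package-internal usage sketch
	func fileCaps(path string) (vfscapData, error) {
		var d vfscapData
		// On success d.version is 1 or 2, d.data holds the permitted and
		// inheritable bits, and d.effective is derived from the magic's
		// effective flag; ENODATA (no xattr) yields an empty v2 result.
		err := getVfsCap(path, &d)
		return d, err
	}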
func setVfsCap(path string, data *vfscapData) (err error) {
	var _p0 *byte
	_p0, err = syscall.BytePtrFromString(path)
	if err != nil {
		return
	}
	var size uintptr
	if data.version == 1 {
		data.magic = vfsCapVer1
		size = vfscapDataSizeV1
	} else if data.version == 2 {
		data.magic = vfsCapVer2
		if data.effective[0] != 0 || data.effective[1] != 0 {
			data.magic |= vfsCapFlageffective
		}
		size = vfscapDataSizeV2
	} else {
		return syscall.EINVAL
	}
	_, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0)
	if e1 != 0 {
		err = e1
	}
	return
}
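The write side: a hedged sketch granting a file CAP_NET_BIND_SERVICE (bit 10) in its permitted and effective sets. Writing the xattr requires CAP_SETFCAP; grantNetBind is a hypothetical helper, illustrative only:

	// hypothetical, package-internal usage sketch
	func grantNetBind(path string) error {
		var d vfscapData
		d.version = 2
		d.data[0].permitted = 1 << uint(CAP_NET_BIND_SERVICE) // bit 10 in the low word
		d.effective[0] = d.data[0].permitted                  // non-zero effective makes setVfsCap set the effective flag
		return setVfsCap(path, &d)
	}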
@@ -355,8 +355,8 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
# github.com/containers/storage v1.55.1-0.20240903205438-465c38f89483
## explicit; go 1.21
# github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0
## explicit; go 1.22.0
github.com/containers/storage
github.com/containers/storage/drivers
github.com/containers/storage/drivers/aufs
@@ -718,8 +718,8 @@ github.com/josharian/intern
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
# github.com/klauspost/compress v1.17.9
## explicit; go 1.20
# github.com/klauspost/compress v1.17.10
## explicit; go 1.21
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
@@ -806,6 +806,9 @@ github.com/moby/docker-image-spec/specs-go/v1
# github.com/moby/patternmatcher v0.6.0
## explicit; go 1.19
github.com/moby/patternmatcher
# github.com/moby/sys/capability v0.2.0
## explicit; go 1.21
github.com/moby/sys/capability
# github.com/moby/sys/mountinfo v0.7.2
## explicit; go 1.17
github.com/moby/sys/mountinfo