vendor: update containers/common

Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
Giuseppe Scrivano 2024-10-11 23:04:19 +02:00
parent d512e44147
commit 8896ace2a4
No known key found for this signature in database
GPG Key ID: 67E38F7A8BA21772
196 changed files with 3047 additions and 1612 deletions

10
go.mod
View File

@ -13,14 +13,14 @@ require (
github.com/checkpoint-restore/go-criu/v7 v7.2.0
github.com/containernetworking/plugins v1.5.1
github.com/containers/buildah v1.37.1-0.20241002152719-c68e17b4ffed
github.com/containers/common v0.60.1-0.20241001171026-c3edf18f3339
github.com/containers/common v0.60.1-0.20241011155906-25644f144d66
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/gvisor-tap-vsock v0.7.5
github.com/containers/image/v5 v5.32.3-0.20240923171149-9e1153a28c46
github.com/containers/libhvee v0.7.1
github.com/containers/ocicrypt v1.2.0
github.com/containers/psgo v1.9.0
github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0
github.com/containers/storage v1.55.1-0.20241008185503-a397602515fd
github.com/containers/winquit v1.1.0
github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
github.com/coreos/stream-metadata-go v0.4.4
@ -87,7 +87,7 @@ require (
require (
dario.cat/mergo v1.0.1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Microsoft/hcsshim v0.12.6 // indirect
github.com/Microsoft/hcsshim v0.12.7 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect
@ -205,7 +205,7 @@ require (
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 // indirect
github.com/ugorji/go/codec v1.2.12 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/vbatts/tar-split v0.11.5 // indirect
github.com/vbatts/tar-split v0.11.6 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
@ -221,7 +221,7 @@ require (
golang.org/x/time v0.6.0 // indirect
golang.org/x/tools v0.25.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c // indirect
google.golang.org/grpc v1.65.0 // indirect
google.golang.org/grpc v1.66.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
tags.cncf.io/container-device-interface/specs-go v0.8.0 // indirect

20
go.sum
View File

@ -12,8 +12,8 @@ github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.12.6 h1:qEnZjoHXv+4/s0LmKZWE0/AiZmMWEIkFfWBSf1a0wlU=
github.com/Microsoft/hcsshim v0.12.6/go.mod h1:ZABCLVcvLMjIkzr9rUGcQ1QA0p0P3Ps+d3N1g2DsFfk=
github.com/Microsoft/hcsshim v0.12.7 h1:MP6R1spmjxTE4EU4J3YsrTxn8CjvN9qwjTKJXldFaRg=
github.com/Microsoft/hcsshim v0.12.7/go.mod h1:HPbAuJ9BvQYYZbB4yEQcyGIsTP5L4yHKeO9XO149AEM=
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
@ -79,8 +79,8 @@ github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+
github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM=
github.com/containers/buildah v1.37.1-0.20241002152719-c68e17b4ffed h1:qiE4J6RukT5+a2wV+Xeimu0c4Xx6DZrdt8JiP3c9CY8=
github.com/containers/buildah v1.37.1-0.20241002152719-c68e17b4ffed/go.mod h1:ytj7qYHUdP/p+2lAXVaFSHDyYFJZ3y1ikpFERypXbCI=
github.com/containers/common v0.60.1-0.20241001171026-c3edf18f3339 h1:VjK9wBKZTbmZqZ0qW2QlbW81xOu8YxXecek5MUSLGKc=
github.com/containers/common v0.60.1-0.20241001171026-c3edf18f3339/go.mod h1:vuBEtzP83Fa7mgk0BJdHF2BDfFRfNayeYyVHRJw8hSM=
github.com/containers/common v0.60.1-0.20241011155906-25644f144d66 h1:3Op65/b+uB4ech61GRBHNggW5aGDoChPUDG2++tkHB8=
github.com/containers/common v0.60.1-0.20241011155906-25644f144d66/go.mod h1:GRT29AbW4CdqEWP/jSxHyUvV5fprOzsCdhsFhqJiU4s=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/gvisor-tap-vsock v0.7.5 h1:bTy4u3DOmmUPwurL6me2rsgfypAFDhyeJleUcQmBR/E=
@ -97,8 +97,8 @@ github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sir
github.com/containers/ocicrypt v1.2.0/go.mod h1:ZNviigQajtdlxIZGibvblVuIFBKIuUI2M0QM12SD31U=
github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0 h1:0NNBYNpPFzQUKXVq+oQG6NFQcBwtbs2luxl/bVulbPs=
github.com/containers/storage v1.55.1-0.20240924180116-5924c6f0adf0/go.mod h1:Gx8WE9kURdCyEuB9cq8Kq5sRDRbpZi34lnOQ3zAGK2s=
github.com/containers/storage v1.55.1-0.20241008185503-a397602515fd h1:Yh3v4wrVxMpccXjA451OsF4CdKuQEEGCNDHtK84y+10=
github.com/containers/storage v1.55.1-0.20241008185503-a397602515fd/go.mod h1:H3XVD+Fwqe26DEP+Ev3s9VmdtXlAd9rV/WFC+dgALSI=
github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
@ -508,8 +508,8 @@ github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65E
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs=
github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI=
github.com/vbauerster/mpb/v8 v8.8.3 h1:dTOByGoqwaTJYPubhVz3lO5O6MK553XVgUo33LdnNsQ=
github.com/vbauerster/mpb/v8 v8.8.3/go.mod h1:JfCCrtcMsJwP6ZwMn9e5LMnNyp3TVNpUWWkN+nd4EWk=
github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk=
@ -698,8 +698,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c=
google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

View File

@ -27,16 +27,19 @@
#
#apparmor_profile = "container-default"
# The hosts entries from the base hosts file are added to the containers hosts
# file. This must be either an absolute path or as special values "image" which
# uses the hosts file from the container image or "none" which means
# no base hosts file is used. The default is "" which will use /etc/hosts.
# Base file to create the `/etc/hosts` file inside the container. This must either
# be an absolute path to a file on the host system, or one of the following
# special flags:
# "" Use the host's `/etc/hosts` file (the default)
# `none` Do not use a base file (i.e. start with an empty file)
# `image` Use the container image's `/etc/hosts` file as base file
#
#base_hosts_file = ""
# List of cgroup_conf entries specifying a list of cgroup files to write to and
# their values. For example `memory.high=1073741824` sets the
# memory.high limit to 1GB.
#
# cgroup_conf = []
# Default way to create a cgroup namespace for the container
@ -126,13 +129,25 @@ default_sysctls = [
#
#env_host = false
# Set the ip for the host.containers.internal entry in the containers /etc/hosts
# file. This can be set to "none" to disable adding this entry. By default it
# will automatically choose the host ip.
# Set the IP address the container should expect to connect to the host. The IP
# address is used by Podman to automatically add the `host.containers.internal`
# and `host.docker.internal` hostnames to the container's `/etc/hosts` file. It
# is also used for the *host-gateway* flag of Podman's `--add-host` CLI option.
# If no IP address is configured (the default), Podman will try to determine it
# automatically, but might fail to do so depending on the container's network
# setup. Adding these internal hostnames to `/etc/hosts` is silently skipped then.
# Set this config to `none` to never add the internal hostnames to `/etc/hosts`.
#
# NOTE: When using podman machine this entry will never be added to the containers
# hosts file instead the gvproxy dns resolver will resolve this hostname. Therefore
# it is not possible to disable the entry in this case.
# Note: If Podman is running in a virtual machine using `podman machine` (this
# includes Mac and Windows hosts), Podman will silently skip adding the internal
# hostnames to `/etc/hosts`, unless an IP address was configured manually. The
# internal hostnames are resolved by the gvproxy DNS resolver instead. This config
# has no effect on gvproxy. However, since `/etc/hosts` bypasses the DNS resolver,
# a manually configured IP address still takes precedence.
#
# Note: This config doesn't affect the actual network setup, it just tells Podman
# the IP address it should expect. Configuring an IP address here doesn't ensure
# that the container can actually reach the host using this IP address.
#
#host_containers_internal_ip = ""
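A quick way to verify the behaviour described above from inside a container is to resolve the hostname directly; if the entry was added to `/etc/hosts` (or gvproxy resolves it under podman machine), the lookup succeeds. A minimal sketch using only the Go standard library:

package main

import (
	"fmt"
	"net"
	"os"
)

// Run inside a container to check whether host.containers.internal is
// reachable by name, either via the /etc/hosts entry Podman adds or via
// the gvproxy DNS resolver when running under podman machine.
func main() {
	addrs, err := net.LookupHost("host.containers.internal")
	if err != nil {
		// Expected when host_containers_internal_ip = "none", or when Podman
		// could not determine a host IP for this network setup.
		fmt.Fprintln(os.Stderr, "host.containers.internal not resolvable:", err)
		os.Exit(1)
	}
	fmt.Println("host.containers.internal resolves to:", addrs)
}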
@ -221,8 +236,10 @@ default_sysctls = [
#
#netns = "private"
# Create /etc/hosts for the container. By default, container engine manage
# /etc/hosts, automatically adding the container's own IP address.
# Do not modify the `/etc/hosts` file in the container. Podman assumes control
# over the container's `/etc/hosts` file by default; refer to the `--add-host`
# CLI option for details. To disable this, either set this config to `true`, or
# use the functionally identical `--no-hosts` CLI option.
#
#no_hosts = false
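Programs that vendor containers/common can read the effective values of these options through its config package. A minimal sketch, assuming the field names BaseHostsFile, HostContainersInternalIP and NoHosts match this version of the vendored package (check pkg/config for the exact spellings):

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	// Default() merges the built-in defaults with any containers.conf
	// files found on the system.
	cfg, err := config.Default()
	if err != nil {
		panic(err)
	}
	fmt.Println("base_hosts_file:", cfg.Containers.BaseHostsFile)
	fmt.Println("host_containers_internal_ip:", cfg.Containers.HostContainersInternalIP)
	fmt.Println("no_hosts:", cfg.Containers.NoHosts)
}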

View File

@ -120,12 +120,12 @@ var (
additionalHelperBinariesDir string
defaultUnixComposeProviders = []string{
"docker-compose",
"$HOME/.docker/cli-plugins/docker-compose",
"/usr/local/lib/docker/cli-plugins/docker-compose",
"/usr/local/libexec/docker/cli-plugins/docker-compose",
"/usr/lib/docker/cli-plugins/docker-compose",
"/usr/libexec/docker/cli-plugins/docker-compose",
"docker-compose",
"podman-compose",
}
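The slice above is a search path of candidate compose providers: bare names are expected to be found in $PATH, while the remaining entries are fixed plugin locations (with $HOME expanded). The real resolution logic lives in containers/common; the sketch below only illustrates that lookup order with standard-library calls, and findComposeProvider is an illustrative helper, not the actual implementation:

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)

// findComposeProvider returns the first usable entry of a compose-provider
// search path: bare names are resolved via $PATH, everything else (after
// expanding a leading $HOME) is checked on disk.
func findComposeProvider(candidates []string) (string, bool) {
	for _, c := range candidates {
		expanded := strings.Replace(c, "$HOME", os.Getenv("HOME"), 1)
		if filepath.IsAbs(expanded) {
			if _, err := os.Stat(expanded); err == nil {
				return expanded, true
			}
			continue
		}
		if p, err := exec.LookPath(expanded); err == nil {
			return p, true
		}
	}
	return "", false
}

func main() {
	providers := []string{
		"docker-compose",
		"$HOME/.docker/cli-plugins/docker-compose",
		"/usr/local/lib/docker/cli-plugins/docker-compose",
		"podman-compose",
	}
	if p, ok := findComposeProvider(providers); ok {
		fmt.Println("compose provider:", p)
	} else {
		fmt.Println("no compose provider found")
	}
}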
@ -231,7 +231,6 @@ func defaultConfig() (*Config, error) {
DNSServers: attributedstring.Slice{},
DefaultCapabilities: attributedstring.NewSlice(DefaultCapabilities),
DefaultSysctls: attributedstring.Slice{},
DefaultUlimits: attributedstring.NewSlice(getDefaultProcessLimits()),
Devices: attributedstring.Slice{},
EnableKeyring: true,
EnableLabeling: selinuxEnabled(),

View File

@ -1,47 +1,13 @@
package config
import (
"fmt"
"os"
"strconv"
"strings"
"golang.org/x/sys/unix"
)
const (
oldMaxSize = uint64(1048576)
)
func getDefaultCgroupsMode() string {
return "enabled"
}
// getDefaultProcessLimits returns the nproc for the current process in ulimits format
// Note that nfile sometimes cannot be set to unlimited, and the limit is hardcoded
// to (oldMaxSize) 1048576 (2^20), see: http://stackoverflow.com/a/1213069/1811501
// In rootless containers this will fail, and the process will just use its current limits
func getDefaultProcessLimits() []string {
rlim := unix.Rlimit{Cur: oldMaxSize, Max: oldMaxSize}
oldrlim := rlim
// Attempt to set file limit and process limit to pid_max in OS
dat, err := os.ReadFile("/proc/sys/kernel/pid_max")
if err == nil {
val := strings.TrimSuffix(string(dat), "\n")
maxLimit, err := strconv.ParseUint(val, 10, 64)
if err == nil {
rlim = unix.Rlimit{Cur: maxLimit, Max: maxLimit}
}
}
defaultLimits := []string{}
if err := unix.Setrlimit(unix.RLIMIT_NPROC, &rlim); err == nil {
defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", rlim.Cur, rlim.Max))
} else if err := unix.Setrlimit(unix.RLIMIT_NPROC, &oldrlim); err == nil {
defaultLimits = append(defaultLimits, fmt.Sprintf("nproc=%d:%d", oldrlim.Cur, oldrlim.Max))
}
return defaultLimits
}
// getDefaultTmpDir for linux
func getDefaultTmpDir() string {
// first check the TMPDIR env var
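The removed helper emitted default ulimits as `name=soft:hard` strings such as `nproc=1048576:1048576` (or the value of /proc/sys/kernel/pid_max when readable). For reference, a hedged sketch of how such a spec maps onto a Setrlimit call via golang.org/x/sys/unix; this is illustrative, not the code Podman uses to apply ulimits:

package main

import (
	"fmt"
	"strconv"
	"strings"

	"golang.org/x/sys/unix"
)

// applyNprocUlimit parses a spec like "nproc=1048576:1048576" (the format the
// removed getDefaultProcessLimits emitted) and applies it to the current process.
func applyNprocUlimit(spec string) error {
	name, limits, ok := strings.Cut(spec, "=")
	if !ok || name != "nproc" {
		return fmt.Errorf("unsupported ulimit spec %q", spec)
	}
	soft, hard, ok := strings.Cut(limits, ":")
	if !ok {
		return fmt.Errorf("malformed ulimit spec %q", spec)
	}
	cur, err := strconv.ParseUint(soft, 10, 64)
	if err != nil {
		return err
	}
	max, err := strconv.ParseUint(hard, 10, 64)
	if err != nil {
		return err
	}
	rlim := unix.Rlimit{Cur: cur, Max: max}
	return unix.Setrlimit(unix.RLIMIT_NPROC, &rlim)
}

func main() {
	if err := applyNprocUlimit("nproc=1048576:1048576"); err != nil {
		// Expected to fail for rootless users whose hard limit is lower
		// than the requested value; they keep their current limits.
		fmt.Println("setrlimit failed:", err)
	}
}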

View File

@ -167,13 +167,19 @@ vendor_task:
build_script: make vendor
test_script: hack/tree_status.sh
cross_task:
alias: cross
container:
image: golang:1.22
build_script: make cross
gofix_task:
alias: gofix
container:
image: golang:1.22
build_script: go fix ./...
test_script: git diff --exit-code
# Status aggregator for all tests. This task simply ensures a defined
# set of tasks all passed, and allows confirming that based on the status
@ -190,6 +196,7 @@ success_task:
- meta
- vendor
- cross
- gofix
container:
image: golang:1.21
clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed

View File

@ -1,32 +1,14 @@
approvers:
- Luap99
- TomSweeneyRedHat
- cevich
- edsantiago
- flouthoc
- giuseppe
- haircommander
- kolyshkin
- mrunalp
- mtrmac
- nalind
- rhatdan
- saschagrunert
- umohnani8
- vrothberg
reviewers:
- Luap99
- Honny1
- TomSweeneyRedHat
- cevich
- edsantiago
- flouthoc
- giuseppe
- haircommander
- kolyshkin
- mrunalp
- mtrmac
- nalind
- rhatdan
- saschagrunert
- umohnani8
- vrothberg

View File

@ -164,11 +164,11 @@ type containerStore struct {
func copyContainer(c *Container) *Container {
return &Container{
ID: c.ID,
Names: slices.Clone(c.Names),
Names: copyStringSlice(c.Names),
ImageID: c.ImageID,
LayerID: c.LayerID,
Metadata: c.Metadata,
BigDataNames: slices.Clone(c.BigDataNames),
BigDataNames: copyStringSlice(c.BigDataNames),
BigDataSizes: maps.Clone(c.BigDataSizes),
BigDataDigests: maps.Clone(c.BigDataDigests),
Created: c.Created,
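The copyStringSlice helper that replaces slices.Clone here (and in the image and layer stores below) is not shown in this diff. A plausible shape, under the assumption that it returns nil for empty input and always allocates a fresh backing array; treat those semantics as an assumption rather than something this commit states:

package store

// copyStringSlice is an assumed shape of the helper used above; it is not
// copied from containers/storage.
func copyStringSlice(slice []string) []string {
	if len(slice) == 0 {
		// Return nil for empty input (slices.Clone would instead return a
		// non-nil empty slice for a non-nil empty input).
		return nil
	}
	ret := make([]string, len(slice))
	copy(ret, slice)
	return ret
}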

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
/*

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package aufs

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package aufs

View File

@ -1,5 +1,4 @@
//go:build linux && cgo
// +build linux,cgo
package btrfs

View File

@ -1,4 +1,3 @@
//go:build !linux || !cgo
// +build !linux !cgo
package btrfs

View File

@ -1,5 +1,4 @@
//go:build linux && !btrfs_noversion && cgo
// +build linux,!btrfs_noversion,cgo
package btrfs

View File

@ -1,5 +1,4 @@
//go:build linux && btrfs_noversion && cgo
// +build linux,btrfs_noversion,cgo
package btrfs

View File

@ -1,5 +1,4 @@
//go:build darwin
// +build darwin
package graphdriver

View File

@ -1,5 +1,4 @@
//go:build !windows && !darwin
// +build !windows,!darwin
package graphdriver

View File

@ -1,5 +1,4 @@
//go:build windows
// +build windows
package graphdriver

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package graphdriver

View File

@ -1,5 +1,4 @@
//go:build cgo
// +build cgo
package copy

View File

@ -1,5 +1,4 @@
//go:build !linux || !cgo
// +build !linux !cgo
package copy //nolint: predeclared

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package graphdriver

View File

@ -1,5 +1,4 @@
//go:build solaris && cgo
// +build solaris,cgo
package graphdriver

View File

@ -1,5 +1,4 @@
//go:build !linux && !windows && !freebsd && !solaris && !darwin
// +build !linux,!windows,!freebsd,!solaris,!darwin
package graphdriver

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package overlay

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package overlay

View File

@ -1,5 +1,4 @@
//go:build linux && cgo
// +build linux,cgo
package overlay

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package overlay

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package overlay

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package overlay
@ -127,6 +126,7 @@ type Driver struct {
naiveDiff graphdriver.DiffDriver
supportsDType bool
supportsVolatile *bool
supportsDataOnly *bool
usingMetacopy bool
usingComposefs bool
@ -272,6 +272,18 @@ func (d *Driver) getSupportsVolatile() (bool, error) {
return supportsVolatile, nil
}
func (d *Driver) getSupportsDataOnly() (bool, error) {
if d.supportsDataOnly != nil {
return *d.supportsDataOnly, nil
}
supportsDataOnly, err := supportsDataOnlyLayersCached(d.home, d.runhome)
if err != nil {
return false, err
}
d.supportsDataOnly = &supportsDataOnly
return supportsDataOnly, nil
}
// isNetworkFileSystem checks if the specified file system is supported by native overlay
// as backing store when running in a user namespace.
func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
@ -360,13 +372,6 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
if unshare.IsRootless() {
return nil, fmt.Errorf("composefs is not supported in user namespaces")
}
supportsDataOnly, err := supportsDataOnlyLayersCached(home, runhome)
if err != nil {
return nil, err
}
if !supportsDataOnly {
return nil, fmt.Errorf("composefs is not supported on this kernel: %w", graphdriver.ErrIncompatibleFS)
}
if _, err := getComposeFsHelper(); err != nil {
return nil, fmt.Errorf("composefs helper program not found: %w", err)
}
@ -869,11 +874,11 @@ func (d *Driver) pruneStagingDirectories() bool {
anyPresent := false
homeStagingDir := filepath.Join(d.home, stagingDir)
dirs, err := os.ReadDir(homeStagingDir)
stagingDirBase := filepath.Join(d.homeDirForImageStore(), stagingDir)
dirs, err := os.ReadDir(stagingDirBase)
if err == nil {
for _, dir := range dirs {
stagingDirToRemove := filepath.Join(homeStagingDir, dir.Name())
stagingDirToRemove := filepath.Join(stagingDirBase, dir.Name())
lock, err := lockfile.GetLockFile(filepath.Join(stagingDirToRemove, stagingLockFile))
if err != nil {
anyPresent = true
@ -1205,17 +1210,22 @@ func (d *Driver) getAllImageStores() []string {
return additionalImageStores
}
func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) {
var homedir string
if useImageStore && d.imageStore != "" {
homedir = path.Join(d.imageStore, d.name)
} else {
homedir = d.home
// homeDirForImageStore returns the home directory to use when an image store is configured
func (d *Driver) homeDirForImageStore() string {
if d.imageStore != "" {
return path.Join(d.imageStore, d.name)
}
// If there is not an image store configured, use the same
// store
return d.home
}
func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) {
homedir := d.home
if useImageStore {
homedir = d.homeDirForImageStore()
}
newpath := path.Join(homedir, id)
if err := fileutils.Exists(newpath); err != nil {
for _, p := range d.getAllImageStores() {
l := path.Join(p, d.name, id)
@ -1433,6 +1443,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if err := fileutils.Exists(dir); err != nil {
return "", err
}
if _, err := redirectDiffIfAdditionalLayer(path.Join(dir, "diff"), true); err != nil {
return "", err
}
// user namespace requires this to move a directory from lower to upper.
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
@ -1770,8 +1783,16 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
lowerDirs := strings.Join(absLowers, ":")
if len(composeFsLayers) > 0 {
composeFsLayersLowerDirs := strings.Join(composeFsLayers, "::")
lowerDirs = lowerDirs + "::" + composeFsLayersLowerDirs
sep := "::"
supportsDataOnly, err := d.getSupportsDataOnly()
if err != nil {
return "", err
}
if !supportsDataOnly {
sep = ":"
}
composeFsLayersLowerDirs := strings.Join(composeFsLayers, sep)
lowerDirs = lowerDirs + sep + composeFsLayersLowerDirs
}
// absLowers is not valid anymore now as we have added composeFsLayers to it, so prevent
// its usage.
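The change above switches the separator used to splice composefs layers into the overlay `lowerdir` mount option: `::` marks them as data-only lower layers on kernels that support the feature, while a plain `:` keeps them as ordinary lowers as a fallback. A small sketch of the resulting string, mirroring the logic in the hunk above (the layer paths are made up):

package main

import (
	"fmt"
	"strings"
)

// buildLowerDirs mirrors the hunk above: regular lowers are joined with ":",
// and composefs layers are appended with "::" only when the kernel supports
// data-only lower layers.
func buildLowerDirs(absLowers, composeFsLayers []string, supportsDataOnly bool) string {
	lowerDirs := strings.Join(absLowers, ":")
	if len(composeFsLayers) > 0 {
		sep := "::"
		if !supportsDataOnly {
			sep = ":"
		}
		lowerDirs = lowerDirs + sep + strings.Join(composeFsLayers, sep)
	}
	return lowerDirs
}

func main() {
	lowers := []string{"/var/lib/containers/storage/overlay/l/AAA", "/var/lib/containers/storage/overlay/l/BBB"}
	dataOnly := []string{"/var/lib/containers/storage/overlay/l/CCC"}
	fmt.Println(buildLowerDirs(lowers, dataOnly, true))  // ...AAA:...BBB::...CCC
	fmt.Println(buildLowerDirs(lowers, dataOnly, false)) // ...AAA:...BBB:...CCC
}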
@ -2087,9 +2108,14 @@ func (g *overlayFileGetter) Close() error {
return errs.ErrorOrNil()
}
func (d *Driver) getStagingDir(id string) string {
_, homedir, _ := d.dir2(id, d.imageStore != "")
return filepath.Join(homedir, stagingDir)
// newStagingDir creates a new staging directory and returns the path to it.
func (d *Driver) newStagingDir() (string, error) {
stagingDirBase := filepath.Join(d.homeDirForImageStore(), stagingDir)
err := os.MkdirAll(stagingDirBase, 0o700)
if err != nil && !os.IsExist(err) {
return "", err
}
return os.MkdirTemp(stagingDirBase, "")
}
// DiffGetter returns a FileGetCloser that can read files from the directory that
@ -2149,14 +2175,14 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
func supportsDataOnlyLayersCached(home, runhome string) (bool, error) {
feature := "dataonly-layers"
overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
overlayCacheResult, _, err := cachedFeatureCheck(runhome, feature)
if err == nil {
if overlayCacheResult {
logrus.Debugf("Cached value indicated that data-only layers for overlay are supported")
return true, nil
}
logrus.Debugf("Cached value indicated that data-only layers for overlay are not supported")
return false, errors.New(overlayCacheText)
return false, nil
}
supportsDataOnly, err := supportsDataOnlyLayers(home)
if err2 := cachedFeatureRecord(runhome, feature, supportsDataOnly, ""); err2 != nil {
@ -2182,14 +2208,7 @@ func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpt
idMappings = &idtools.IDMappings{}
}
var applyDir string
stagingDir := d.getStagingDir("")
err := os.MkdirAll(stagingDir, 0o700)
if err != nil && !os.IsExist(err) {
return graphdriver.DriverWithDifferOutput{}, err
}
layerDir, err := os.MkdirTemp(stagingDir, "")
layerDir, err := d.newStagingDir()
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
@ -2197,7 +2216,7 @@ func (d *Driver) ApplyDiffWithDiffer(options *graphdriver.ApplyDiffWithDifferOpt
if forceMask != nil {
perms = *forceMask
}
applyDir = filepath.Join(layerDir, "dir")
applyDir := filepath.Join(layerDir, "dir")
if err := os.Mkdir(applyDir, perms); err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
@ -2250,10 +2269,6 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *gr
}
}()
if filepath.Dir(parentStagingDir) != d.getStagingDir(id) {
return fmt.Errorf("%q is not a staging directory", stagingDirectory)
}
diffPath, err := d.getDiffPath(id)
if err != nil {
return err
@ -2338,7 +2353,7 @@ func (d *Driver) getComposefsData(id string) string {
func (d *Driver) getDiffPath(id string) (string, error) {
dir := d.dir(id)
return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"))
return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"), false)
}
func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
@ -2347,7 +2362,7 @@ func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
return nil, err
}
for i, l := range layers {
layers[i], err = redirectDiffIfAdditionalLayer(l)
layers[i], err = redirectDiffIfAdditionalLayer(l, false)
if err != nil {
return nil, err
}
@ -2690,12 +2705,17 @@ func notifyReleaseAdditionalLayer(al string) {
// redirectDiffIfAdditionalLayer checks if the passed diff path is Additional Layer and
// returns the redirected path. If the passed diff is not the one in Additional Layer
// Store, it returns the original path without changes.
func redirectDiffIfAdditionalLayer(diffPath string) (string, error) {
func redirectDiffIfAdditionalLayer(diffPath string, checkExistence bool) (string, error) {
if ld, err := os.Readlink(diffPath); err == nil {
// diff is the link to Additional Layer Store
if !path.IsAbs(ld) {
return "", fmt.Errorf("linkpath must be absolute (got: %q)", ld)
}
if checkExistence {
if err := fileutils.Exists(ld); err != nil {
return "", fmt.Errorf("failed to access to the linked additional layer: %w", err)
}
}
diffPath = ld
} else if err.(*os.PathError).Err != syscall.EINVAL {
return "", err

View File

@ -1,5 +1,4 @@
//go:build linux && cgo && !exclude_disk_quota
// +build linux,cgo,!exclude_disk_quota
package overlay

View File

@ -1,6 +1,4 @@
//go:build linux && (!cgo || exclude_disk_quota)
// +build linux
// +build !cgo exclude_disk_quota
package overlay

View File

@ -1,5 +1,4 @@
//go:build linux && !cgo
// +build linux,!cgo
package overlay

View File

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package overlay

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package overlay

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package overlayutils

View File

@ -1,5 +1,4 @@
//go:build linux && !exclude_disk_quota && cgo
// +build linux,!exclude_disk_quota,cgo
//
// projectquota.go - implements XFS project quota controls

View File

@ -1,5 +1,4 @@
//go:build !linux || exclude_disk_quota || !cgo
// +build !linux exclude_disk_quota !cgo
package quota

View File

@ -1,5 +1,4 @@
//go:build !exclude_graphdriver_aufs && linux
// +build !exclude_graphdriver_aufs,linux
package register

View File

@ -1,5 +1,4 @@
//go:build !exclude_graphdriver_btrfs && linux
// +build !exclude_graphdriver_btrfs,linux
package register

View File

@ -1,5 +1,4 @@
//go:build !exclude_graphdriver_overlay && linux && cgo
// +build !exclude_graphdriver_overlay,linux,cgo
package register

View File

@ -1,5 +1,4 @@
//go:build (!exclude_graphdriver_zfs && linux) || (!exclude_graphdriver_zfs && freebsd) || solaris
// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd solaris
package register

View File

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package vfs // import "github.com/containers/storage/drivers/vfs"

View File

@ -1,5 +1,4 @@
//go:build linux || freebsd
// +build linux freebsd
package zfs
@ -393,12 +392,18 @@ func (d *Driver) Remove(id string) error {
name := d.zfsPath(id)
dataset := zfs.Dataset{Name: name}
err := dataset.Destroy(zfs.DestroyRecursive)
if err == nil {
d.Lock()
delete(d.filesystemsCache, name)
d.Unlock()
if err != nil {
// We must be tolerant in case the image has already been removed,
// for example, accidentally by hand.
if _, err1 := zfs.GetDataset(name); err1 == nil {
return err
}
logrus.WithField("storage-driver", "zfs").Debugf("Layer %s has already been removed; ignore it and continue to delete the cache", id)
}
return err
d.Lock()
delete(d.filesystemsCache, name)
d.Unlock()
return nil
}
// Get returns the mountpoint for the given id after creating the target directories if necessary.
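With this change, the Destroy error is only propagated when the dataset is confirmed to still exist; a layer that was already removed out-of-band is treated as gone, and the in-memory filesystemsCache entry is dropped either way. The same tolerate-already-gone pattern in isolation, where destroyDataset and datasetExists are hypothetical stand-ins for the go-zfs calls used above:

package zfsdemo

// destroyDataset and datasetExists are hypothetical stand-ins for the
// go-zfs calls used in the driver; wire them to the real library in
// actual code.
var (
	destroyDataset func(name string) error
	datasetExists  func(name string) bool
)

// removeTolerant mirrors the new Remove: the destroy error is only propagated
// if the dataset really still exists; an already-removed dataset is treated
// as success, and cached state is dropped either way.
func removeTolerant(name string, cache map[string]struct{}) error {
	if err := destroyDataset(name); err != nil {
		if datasetExists(name) {
			return err
		}
	}
	delete(cache, name)
	return nil
}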

View File

@ -1,4 +1,3 @@
//go:build !linux && !freebsd
// +build !linux,!freebsd
package zfs

View File

@ -183,13 +183,13 @@ func copyImage(i *Image) *Image {
return &Image{
ID: i.ID,
Digest: i.Digest,
Digests: slices.Clone(i.Digests),
Names: slices.Clone(i.Names),
NamesHistory: slices.Clone(i.NamesHistory),
Digests: copyDigestSlice(i.Digests),
Names: copyStringSlice(i.Names),
NamesHistory: copyStringSlice(i.NamesHistory),
TopLayer: i.TopLayer,
MappedTopLayers: slices.Clone(i.MappedTopLayers),
MappedTopLayers: copyStringSlice(i.MappedTopLayers),
Metadata: i.Metadata,
BigDataNames: slices.Clone(i.BigDataNames),
BigDataNames: copyStringSlice(i.BigDataNames),
BigDataSizes: maps.Clone(i.BigDataSizes),
BigDataDigests: maps.Clone(i.BigDataDigests),
Created: i.Created,

View File

@ -436,7 +436,7 @@ func layerLocation(l *Layer) layerLocations {
func copyLayer(l *Layer) *Layer {
return &Layer{
ID: l.ID,
Names: slices.Clone(l.Names),
Names: copyStringSlice(l.Names),
Parent: l.Parent,
Metadata: l.Metadata,
MountLabel: l.MountLabel,
@ -451,7 +451,7 @@ func copyLayer(l *Layer) *Layer {
CompressionType: l.CompressionType,
ReadOnly: l.ReadOnly,
volatileStore: l.volatileStore,
BigDataNames: slices.Clone(l.BigDataNames),
BigDataNames: copyStringSlice(l.BigDataNames),
Flags: maps.Clone(l.Flags),
UIDMap: copyIDMap(l.UIDMap),
GIDMap: copyIDMap(l.GIDMap),

View File

@ -1,5 +1,4 @@
//go:build go1.10
// +build go1.10
package archive

View File

@ -1,5 +1,4 @@
//go:build !go1.10
// +build !go1.10
package archive

View File

@ -1,5 +1,4 @@
//go:build netbsd || freebsd || darwin
// +build netbsd freebsd darwin
package archive

View File

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package archive

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package archive

View File

@ -1,5 +1,4 @@
//go:build windows
// +build windows
package archive

View File

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package archive

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package archive

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package archive

View File

@ -1,5 +1,4 @@
//go:build freebsd
// +build freebsd
package archive

View File

@ -1,5 +1,4 @@
//go:build !freebsd
// +build !freebsd
package archive

View File

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package archive

View File

@ -1,5 +1,4 @@
//go:build !windows && !darwin
// +build !windows,!darwin
package chrootarchive

View File

@ -1,5 +1,4 @@
//go:build !windows && !linux && !darwin
// +build !windows,!linux,!darwin
package chrootarchive

View File

@ -1,5 +1,4 @@
//go:build !windows && !darwin
// +build !windows,!darwin
package chrootarchive

View File

@ -1,5 +1,4 @@
//go:build !windows && !darwin
// +build !windows,!darwin
package chrootarchive

View File

@ -1,5 +1,4 @@
//go:build !windows && !darwin
// +build !windows,!darwin
package chrootarchive

View File

@ -297,7 +297,7 @@ func (c *layersCache) load() error {
// the cache file is either not present or broken. Try to generate it from the TOC.
l, err = c.createCacheFileFromTOC(r.ID)
if err != nil {
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
logrus.Warningf("Error creating cache file for layer %q: %v", r.ID, err)
}
if l != nil {

View File

@ -2,6 +2,7 @@ package chunked
import (
archivetar "archive/tar"
"bytes"
"errors"
"fmt"
"io"
@ -14,6 +15,8 @@ import (
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
"github.com/vbatts/tar-split/archive/tar"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
expMaps "golang.org/x/exp/maps"
)
@ -256,7 +259,8 @@ func ensureTOCMatchesTarSplit(toc *internal.TOC, tarSplit []byte) error {
}
}
if err := iterateTarSplit(tarSplit, func(hdr *tar.Header) error {
unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
if err := asm.IterateHeaders(unpacker, func(hdr *tar.Header) error {
e, ok := pendingFiles[hdr.Name]
if !ok {
return fmt.Errorf("tar-split contains an entry for %q missing in TOC", hdr.Name)

View File

@ -23,3 +23,22 @@ type ErrBadRequest struct { //nolint: errname
func (e ErrBadRequest) Error() string {
return "bad request"
}
// ErrFallbackToOrdinaryLayerDownload is a custom error type that
// suggests to the caller that a fallback mechanism can be used
// instead of a hard failure.
type ErrFallbackToOrdinaryLayerDownload struct {
Err error
}
func (c ErrFallbackToOrdinaryLayerDownload) Error() string {
return c.Err.Error()
}
func (c ErrFallbackToOrdinaryLayerDownload) Unwrap() error {
return c.Err
}
func newErrFallbackToOrdinaryLayerDownload(err error) error {
return ErrFallbackToOrdinaryLayerDownload{Err: err}
}
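The intended consumers are the callers that drive partial pulls: they test the returned error with errors.As and, on a match, retry the layer with an ordinary download instead of failing the pull. A hedged sketch of that call-site pattern; pullPartial and pullFull are illustrative placeholders, not real functions:

package main

import (
	"errors"
	"fmt"

	"github.com/containers/storage/pkg/chunked"
)

// pullPartial and pullFull are placeholders for whatever performs the
// partial (chunked.GetDiffer based) and ordinary layer pulls.
func pullPartial(layer string) error {
	return chunked.ErrFallbackToOrdinaryLayerDownload{Err: errors.New("partial images are disabled")}
}

func pullFull(layer string) error { return nil }

func pullLayer(layer string) error {
	err := pullPartial(layer)
	var fallback chunked.ErrFallbackToOrdinaryLayerDownload
	if errors.As(err, &fallback) {
		// Partial pull is not possible for this blob; not a hard failure.
		fmt.Println("falling back to full download:", fallback.Err)
		return pullFull(layer)
	}
	return err
}

func main() {
	if err := pullLayer("sha256:example"); err != nil {
		fmt.Println("pull failed:", err)
	}
}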

View File

@ -143,11 +143,13 @@ func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *o
}
// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
// If it returns an error that matches ErrFallbackToOrdinaryLayerDownload, the caller can
// retry the operation with a different method.
func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
pullOptions := store.PullOptions()
if !parseBooleanPullOption(pullOptions, "enable_partial_images", true) {
return nil, errors.New("enable_partial_images not configured")
return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("partial images are disabled"))
}
zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
@ -157,29 +159,54 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges
return nil, errors.New("both zstd:chunked and eStargz TOC found")
}
if hasZstdChunkedTOC {
zstdChunkedTOCDigest, err := digest.Parse(zstdChunkedTOCDigestString)
if err != nil {
return nil, fmt.Errorf("parsing zstd:chunked TOC digest %q: %w", zstdChunkedTOCDigestString, err)
}
return makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
}
if hasEstargzTOC {
estargzTOCDigest, err := digest.Parse(estargzTOCDigestString)
if err != nil {
return nil, fmt.Errorf("parsing estargz TOC digest %q: %w", estargzTOCDigestString, err)
}
return makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions)
convertImages := parseBooleanPullOption(pullOptions, "convert_images", false)
if !hasZstdChunkedTOC && !hasEstargzTOC && !convertImages {
return nil, newErrFallbackToOrdinaryLayerDownload(errors.New("no TOC found and convert_images is not configured"))
}
return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
var err error
var differ graphdriver.Differ
// At this point one of hasZstdChunkedTOC, hasEstargzTOC or convertImages is true.
if hasZstdChunkedTOC {
zstdChunkedTOCDigest, err2 := digest.Parse(zstdChunkedTOCDigestString)
if err2 != nil {
return nil, err2
}
differ, err = makeZstdChunkedDiffer(store, blobSize, zstdChunkedTOCDigest, annotations, iss, pullOptions)
if err == nil {
logrus.Debugf("Created zstd:chunked differ for blob %q", blobDigest)
return differ, err
}
} else if hasEstargzTOC {
estargzTOCDigest, err2 := digest.Parse(estargzTOCDigestString)
if err2 != nil {
return nil, err2
}
differ, err = makeEstargzChunkedDiffer(store, blobSize, estargzTOCDigest, iss, pullOptions)
if err == nil {
logrus.Debugf("Created eStargz differ for blob %q", blobDigest)
return differ, err
}
}
// If convert_images is enabled, always attempt to convert it instead of returning an error or falling back to a different method.
if convertImages {
logrus.Debugf("Created differ to convert blob %q", blobDigest)
return makeConvertFromRawDiffer(store, blobDigest, blobSize, iss, pullOptions)
}
logrus.Debugf("Could not create differ for blob %q: %v", blobDigest, err)
// If the error is a bad request to the server, then signal to the caller that it can try a different method. This can be done
// only when convert_images is disabled.
var badRequestErr ErrBadRequest
if errors.As(err, &badRequestErr) {
err = newErrFallbackToOrdinaryLayerDownload(err)
}
return nil, err
}
func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blobSize int64, iss ImageSourceSeekable, pullOptions map[string]string) (*chunkedDiffer, error) {
if !parseBooleanPullOption(pullOptions, "convert_images", false) {
return nil, errors.New("convert_images not configured")
}
layersCache, err := getLayersCache(store)
if err != nil {
return nil, err
@ -947,11 +974,9 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dirfd i
}
if _, ok := err.(ErrBadRequest); ok {
// If the server cannot handle even a single chunk in one request, just give up.
if len(chunksToRequest) < 64 {
if len(chunksToRequest) == 1 {
return err
}
// Merge more chunks to request
missingParts = mergeMissingChunks(missingParts, len(chunksToRequest)/2)
calculateChunksToRequest()
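The retry policy above changes from "give up once fewer than 64 chunks are requested" to "keep halving the number of ranges and only give up when a single chunk still fails". The shape of that loop, with fetch and merge as illustrative placeholders for the real multi-range request and mergeMissingChunks logic:

package chunkretry

import "errors"

// span and errBadRequest are illustrative placeholders; the real code works
// on chunk descriptors and the package's ErrBadRequest type.
type span struct{ offset, length int64 }

type errBadRequest struct{}

func (errBadRequest) Error() string { return "bad request" }

// fetchWithMerging mirrors the retry loop above: when the server rejects a
// multi-range request, the ranges are merged into half as many and the
// request is retried, giving up only once a single range still fails.
func fetchWithMerging(ranges []span, fetch func([]span) error, merge func([]span, int) []span) error {
	for {
		err := fetch(ranges)
		if err == nil {
			return nil
		}
		var bad errBadRequest
		if !errors.As(err, &bad) || len(ranges) == 1 {
			return err
		}
		ranges = merge(ranges, len(ranges)/2)
	}
}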

View File

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package chunked

View File

@ -1,68 +0,0 @@
package chunked
import (
"bytes"
"fmt"
"io"
"github.com/vbatts/tar-split/archive/tar"
"github.com/vbatts/tar-split/tar/storage"
)
// iterateTarSplit calls handler for each tar header in tarSplit
func iterateTarSplit(tarSplit []byte, handler func(hdr *tar.Header) error) error {
// This, strictly speaking, hard-codes undocumented assumptions about how github.com/vbatts/tar-split/tar/asm.NewInputTarStream
// forms the tar-split contents. Pragmatically, NewInputTarStream should always produce storage.FileType entries at least
// for every non-empty file, which constrains it basically to the output we expect.
//
// Specifically, we assume:
// - There is a separate SegmentType entry for every tar header, but only one SegmentType entry for the full header incl. any extensions
// - (There is a FileType entry for every tar header, we ignore it)
// - Trailing padding of a file, if any, is included in the next SegmentType entry
// - At the end, there may be SegmentType entries just for the terminating zero blocks.
unpacker := storage.NewJSONUnpacker(bytes.NewReader(tarSplit))
for {
tsEntry, err := unpacker.Next()
if err != nil {
if err == io.EOF {
return nil
}
return fmt.Errorf("reading tar-split entries: %w", err)
}
switch tsEntry.Type {
case storage.SegmentType:
payload := tsEntry.Payload
// This is horrible, but we don't know how much padding to skip. (It can be computed from the previous hdr.Size for non-sparse
// files, but for sparse files that is set to the logical size.)
//
// First, assume that all padding is zero bytes.
// A tar header starts with a file name, which might in principle be empty, but
// at least https://github.com/opencontainers/image-spec/blob/main/layer.md#populate-initial-filesystem suggests that
// the tar name should never be empty (it should be ".", or maybe "./").
//
// This will cause us to skip all zero bytes in the trailing blocks, but that's fine.
i := 0
for i < len(payload) && payload[i] == 0 {
i++
}
payload = payload[i:]
tr := tar.NewReader(bytes.NewReader(payload))
hdr, err := tr.Next()
if err != nil {
if err == io.EOF { // Probably the last entry, but let's let the unpacker drive that.
break
}
return fmt.Errorf("decoding a tar header from a tar-split entry: %w", err)
}
if err := handler(hdr); err != nil {
return err
}
case storage.FileType:
// Nothing
default:
return fmt.Errorf("unexpected tar-split entry type %q", tsEntry.Type)
}
}
}

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package directory

View File

@ -1,5 +1,4 @@
//go:build windows
// +build windows
package directory

View File

@ -1,5 +1,4 @@
//go:build !windows && !freebsd
// +build !windows,!freebsd
package fileutils

View File

@ -1,5 +1,4 @@
//go:build linux || freebsd
// +build linux freebsd
package fileutils

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package fsutils

View File

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package fsverity

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package homedir

View File

@ -1,5 +1,4 @@
//go:build linux
// +build linux
package idmap

View File

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package idmap

View File

@ -1,5 +1,4 @@
//go:build linux && cgo && libsubid
// +build linux,cgo,libsubid
package idtools

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package idtools

View File

@ -1,5 +1,4 @@
//go:build !linux || !libsubid || !cgo
// +build !linux !libsubid !cgo
package idtools

View File

@ -1,5 +1,4 @@
//go:build windows
// +build windows
package idtools

View File

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package idtools

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package idtools

View File

@ -1,5 +1,4 @@
//go:build !linux
// +build !linux
package ioutils

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package ioutils

View File

@ -1,5 +1,4 @@
//go:build windows
// +build windows
package ioutils

View File

@ -128,9 +128,8 @@ func GetROLockfile(path string) (Locker, error) {
func (l *LockFile) Lock() {
if l.ro {
panic("can't take write lock on read-only lock file")
} else {
l.lock(writeLock)
}
l.lock(writeLock)
}
// RLock locks the lockfile as a reader.
@ -142,9 +141,8 @@ func (l *LockFile) RLock() {
func (l *LockFile) TryLock() error {
if l.ro {
panic("can't take write lock on read-only lock file")
} else {
return l.tryLock(writeLock)
}
return l.tryLock(writeLock)
}
// TryRLock attempts to lock the lockfile as a reader.

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package lockfile

View File

@ -1,5 +1,4 @@
//go:build windows
// +build windows
package lockfile

View File

@ -1,5 +1,4 @@
//go:build linux && cgo
// +build linux,cgo
package loopback

View File

@ -1,5 +1,4 @@
//go:build linux && cgo
// +build linux,cgo
package loopback

View File

@ -1,5 +1,4 @@
//go:build linux && cgo
// +build linux,cgo
package loopback

View File

@ -1,5 +1,4 @@
//go:build linux && cgo
// +build linux,cgo
package loopback

View File

@ -1,5 +1,4 @@
//go:build !linux && !freebsd
// +build !linux,!freebsd
package mount

View File

@ -1,5 +1,4 @@
//go:build freebsd && cgo
// +build freebsd,cgo
package mount

View File

@ -1,6 +1,4 @@
//go:build !linux && !(freebsd && cgo)
// +build !linux
// +build !freebsd !cgo
package mount

View File

@ -1,5 +1,4 @@
//go:build !windows
// +build !windows
package mount

View File

@ -1,5 +1,4 @@
//go:build windows
// +build windows
package mount

Some files were not shown because too many files have changed in this diff.